Columns: hash (string, 40 chars) · date (timestamp, 2018-12-11 14:31:19 to 2025-03-22 02:45:31) · author (string, 280 distinct values) · commit_message (string, 14–176 chars) · is_merge (bool, single class) · git_diff (string, 198 chars to 25.8 MB, nullable) · type (string, 83 distinct values) · masked_commit_message (string, 8–170 chars)

| hash | date | author | commit_message | is_merge | git_diff | type | masked_commit_message |
|---|---|---|---|---|---|---|---|
5f380872b407fe90f5b1c09be0d5363a64d1712a
|
2023-07-10 11:46:38
|
DoomAndLove
|
helm: fix formatting in CHANGELOG.md (#9891)
| false
|
diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md
index 011de21dd26c2..2004fb1afb084 100644
--- a/production/helm/loki/CHANGELOG.md
+++ b/production/helm/loki/CHANGELOG.md
@@ -25,7 +25,7 @@ Entries should include a reference to the pull request that introduced the chang
- [BUGFIX] Remove persistentVolumeClaimRetentionPolicy from single-binary StatefulSet when persistence is disabled
-## 5.8.6
+## 5.8.6
- [ENHANCEMENT] Add serviceMonitor.metricRelabelings to support metric relabelings
|
helm
|
fix formatting in CHANGELOG.md (#9891)
|
3b0fa184c542969c6c355fd65e33341f62172de3
|
2024-04-16 01:45:22
|
J Stickler
|
docs: hide the sizing calculator until updated (#12598)
| false
|
diff --git a/docs/sources/setup/_index.md b/docs/sources/setup/_index.md
index e1e1caef768ff..2464feb75f350 100644
--- a/docs/sources/setup/_index.md
+++ b/docs/sources/setup/_index.md
@@ -7,7 +7,6 @@ weight: 300
# Setup Loki
-- Estimate the initial [size]({{< relref "./size" >}}) for your Loki cluster.
- [Install]({{< relref "./install" >}}) Loki.
- [Migrate]({{< relref "./migrate" >}}) from one Loki implementation to another.
- [Upgrade]({{< relref "./upgrade" >}}) from one Loki version to a newer version.
diff --git a/docs/sources/setup/install/_index.md b/docs/sources/setup/install/_index.md
index 11521f1158ed5..2b56cba78cb69 100644
--- a/docs/sources/setup/install/_index.md
+++ b/docs/sources/setup/install/_index.md
@@ -17,10 +17,6 @@ There are several methods of installing Loki and Promtail:
- [Install and run locally]({{< relref "./local" >}})
- [Install from source]({{< relref "./install-from-source" >}})
-The [Sizing Tool]({{< relref "../size" >}}) can be used to determine the proper cluster sizing
-given an expected ingestion rate and query performance. It targets the Helm
-installation on Kubernetes.
-
## General process
In order to run Loki, you must:
diff --git a/docs/sources/setup/size/_index.md b/docs/sources/setup/size/_index.md
index e2215c7e80f72..74dcb8e504964 100644
--- a/docs/sources/setup/size/_index.md
+++ b/docs/sources/setup/size/_index.md
@@ -6,7 +6,7 @@ aliases:
- ../installation/sizing/
- ../installation/helm/generate
weight: 100
-keywords: []
+draft: true
---
<link rel="stylesheet" href="../../query/analyzer/style.css">
|
docs
|
hide the sizing calculator until updated (#12598)
|
3df08bd39f45bbf8314f31a59f35b955b9a31a5e
|
2025-01-24 13:33:59
|
Christian Haudum
|
chore(ci): Fix unused-parameter linter error (#15929)
| false
|
diff --git a/cmd/logql-analyzer/main.go b/cmd/logql-analyzer/main.go
index 6b4ceb8a53ca6..d8ac11c10e475 100644
--- a/cmd/logql-analyzer/main.go
+++ b/cmd/logql-analyzer/main.go
@@ -4,7 +4,6 @@ import (
"flag"
"net/http"
- "github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/gorilla/mux"
"github.com/grafana/dskit/server"
@@ -19,7 +18,7 @@ func main() {
util_log.InitLogger(&server.Config{
LogLevel: cfg.LogLevel,
}, prometheus.DefaultRegisterer, false)
- s, err := createServer(cfg, util_log.Logger)
+ s, err := createServer(cfg)
if err != nil {
level.Error(util_log.Logger).Log("msg", "error while creating the server", "err", err)
}
@@ -38,7 +37,7 @@ func getConfig() server.Config {
return cfg
}
-func createServer(cfg server.Config, logger log.Logger) (*server.Server, error) {
+func createServer(cfg server.Config) (*server.Server, error) {
s, err := server.New(cfg)
if err != nil {
return nil, err
|
chore
|
Fix unused-parameter linter error (#15929)
|
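The fix above clears the `unused-parameter` lint by deleting the unused `logger` argument and updating the call site. A minimal sketch of the two idiomatic remedies — the function bodies and names here are illustrative, not taken from the Loki codebase:

```go
package main

import "fmt"

// Option 1: drop the unused parameter and update every caller,
// as the commit above does for createServer.
func createServer(cfg string) (string, error) {
	return "server(" + cfg + ")", nil
}

// Option 2: when the signature must stay fixed (for example, to
// satisfy an interface), rename the parameter to the blank
// identifier — the approach the next commit takes for runSeries.
func sumSeries(_ string, series []int) int {
	total := 0
	for _, v := range series {
		total += v
	}
	return total
}

func main() {
	s, _ := createServer("dev.yaml")
	fmt.Println(s, sumSeries("unused-schema", []int{1, 2, 3}))
}
```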
39fa9b47e9e729bc27839455f45f3030d7e66063
|
2024-09-17 14:36:24
|
Christian Haudum
|
chore: Fix linter errors (#14145)
| false
|
diff --git a/pkg/storage/bloom/v1/bloom_tester_test.go b/pkg/storage/bloom/v1/bloom_tester_test.go
index 9f70386954753..fa4e0f6e82870 100644
--- a/pkg/storage/bloom/v1/bloom_tester_test.go
+++ b/pkg/storage/bloom/v1/bloom_tester_test.go
@@ -5,8 +5,9 @@ import (
"github.com/stretchr/testify/require"
- "github.com/grafana/loki/pkg/push"
"github.com/grafana/loki/v3/pkg/logql/syntax"
+
+ "github.com/grafana/loki/pkg/push"
)
type fakeLineBloom []string
diff --git a/pkg/storage/bloom/v1/fuse.go b/pkg/storage/bloom/v1/fuse.go
index 97e16d196a862..37a0de06c4890 100644
--- a/pkg/storage/bloom/v1/fuse.go
+++ b/pkg/storage/bloom/v1/fuse.go
@@ -253,7 +253,7 @@ func (fq *FusedQuerier) Run() error {
return nil
}
-func (fq *FusedQuerier) runSeries(schema Schema, series *SeriesWithMeta, reqs []Request) {
+func (fq *FusedQuerier) runSeries(_ Schema, series *SeriesWithMeta, reqs []Request) {
// For a given chunk|series to be removed, it must fail to match all blooms.
// Because iterating/loading blooms can be expensive, we iterate blooms one at a time, collecting
// the removals (failures) for each (bloom, chunk) pair.
diff --git a/pkg/storage/bloom/v1/test_util.go b/pkg/storage/bloom/v1/test_util.go
index da5b04a868fb0..3bca46865c75b 100644
--- a/pkg/storage/bloom/v1/test_util.go
+++ b/pkg/storage/bloom/v1/test_util.go
@@ -9,9 +9,10 @@ import (
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
- "github.com/grafana/loki/pkg/push"
"github.com/grafana/loki/v3/pkg/chunkenc"
iter "github.com/grafana/loki/v3/pkg/iter/v2"
+
+ "github.com/grafana/loki/pkg/push"
)
// TODO(owen-d): this should probably be in its own testing-util package
diff --git a/pkg/storage/bloom/v1/tokenizer.go b/pkg/storage/bloom/v1/tokenizer.go
index bf7e12983b6bd..5cbf199448f68 100644
--- a/pkg/storage/bloom/v1/tokenizer.go
+++ b/pkg/storage/bloom/v1/tokenizer.go
@@ -4,8 +4,9 @@ import (
"fmt"
"unicode/utf8"
- "github.com/grafana/loki/pkg/push"
iter "github.com/grafana/loki/v3/pkg/iter/v2"
+
+ "github.com/grafana/loki/pkg/push"
)
const (
diff --git a/pkg/storage/bloom/v1/tokenizer_test.go b/pkg/storage/bloom/v1/tokenizer_test.go
index e95e4649bd3ec..f21aceca06402 100644
--- a/pkg/storage/bloom/v1/tokenizer_test.go
+++ b/pkg/storage/bloom/v1/tokenizer_test.go
@@ -6,8 +6,9 @@ import (
"github.com/stretchr/testify/require"
- "github.com/grafana/loki/pkg/push"
v2 "github.com/grafana/loki/v3/pkg/iter/v2"
+
+ "github.com/grafana/loki/pkg/push"
)
const BigFile = "../../../logql/sketch/testdata/war_peace.txt"
|
chore
|
Fix linter errors (#14145)
|
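Besides renaming unused parameters to `_`, the commit above repeatedly moves `github.com/grafana/loki/pkg/push` out of the versioned `v3` import group and into its own trailing group. Import-grouping linters (gci, or goimports with a local prefix) expect blank-line-separated groups in a stable order; a hedged sketch of the resulting layout — the test body is invented for illustration:

```go
package example

import (
	// Group 1: standard library.
	"fmt"
	"testing"

	// Group 2: external, versioned dependencies.
	"github.com/stretchr/testify/require"

	// Group 3: the unversioned push module, separated out the
	// same way the diff above reorders it in each file.
	"github.com/grafana/loki/pkg/push"
)

func TestEntryLine(t *testing.T) {
	e := push.Entry{Line: "msg=hello"}
	require.Equal(t, "msg=hello", fmt.Sprint(e.Line))
}
```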
00d58e6053b41e9c2df4bc170b5d3ce1cca2377a
|
2024-12-10 15:12:02
|
renovate[bot]
|
fix(deps): update module github.com/prometheus/common to v0.61.0 (#15336)
| false
|
diff --git a/go.mod b/go.mod
index 747c569ddfa4f..8abe7e4bfbfe8 100644
--- a/go.mod
+++ b/go.mod
@@ -84,7 +84,7 @@ require (
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.20.5
github.com/prometheus/client_model v0.6.1
- github.com/prometheus/common v0.60.1
+ github.com/prometheus/common v0.61.0
github.com/prometheus/prometheus v0.53.2-0.20240726125539-d4f098ae80fb
github.com/redis/go-redis/v9 v9.7.0
github.com/segmentio/fasthash v1.0.3
@@ -98,8 +98,8 @@ require (
go.etcd.io/bbolt v1.3.11
go.uber.org/atomic v1.11.0
go.uber.org/goleak v1.3.0
- golang.org/x/crypto v0.29.0
- golang.org/x/net v0.31.0
+ golang.org/x/crypto v0.30.0
+ golang.org/x/net v0.32.0
golang.org/x/sync v0.10.0
golang.org/x/sys v0.28.0
golang.org/x/time v0.8.0
@@ -361,7 +361,7 @@ require (
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.21.0 // indirect
golang.org/x/mod v0.19.0 // indirect
- golang.org/x/term v0.26.0 // indirect
+ golang.org/x/term v0.27.0 // indirect
golang.org/x/tools v0.23.0 // indirect
google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20241113202542-65e8d215514f // indirect
diff --git a/go.sum b/go.sum
index 463ff97f6ac07..e65383cb18d65 100644
--- a/go.sum
+++ b/go.sum
@@ -2427,8 +2427,8 @@ github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
-github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
-github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
+github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
+github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/exporter-toolkit v0.11.0 h1:yNTsuZ0aNCNFQ3aFTD2uhPOvr4iD7fdBvKPAEGkNf+g=
@@ -2871,8 +2871,8 @@ golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
-golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ=
-golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg=
+golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY=
+golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -3043,8 +3043,8 @@ golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
-golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo=
-golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM=
+golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI=
+golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -3271,8 +3271,8 @@ golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
-golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU=
-golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E=
+golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go
index e6bdd4c035dab..57ec252adffaa 100644
--- a/vendor/github.com/prometheus/common/config/http_config.go
+++ b/vendor/github.com/prometheus/common/config/http_config.go
@@ -357,33 +357,33 @@ func nonZeroCount[T comparable](values ...T) int {
func (c *HTTPClientConfig) Validate() error {
// Backwards compatibility with the bearer_token field.
if len(c.BearerToken) > 0 && len(c.BearerTokenFile) > 0 {
- return fmt.Errorf("at most one of bearer_token & bearer_token_file must be configured")
+ return errors.New("at most one of bearer_token & bearer_token_file must be configured")
}
if (c.BasicAuth != nil || c.OAuth2 != nil) && (len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0) {
- return fmt.Errorf("at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured")
+ return errors.New("at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured")
}
if c.BasicAuth != nil && nonZeroCount(string(c.BasicAuth.Username) != "", c.BasicAuth.UsernameFile != "", c.BasicAuth.UsernameRef != "") > 1 {
- return fmt.Errorf("at most one of basic_auth username, username_file & username_ref must be configured")
+ return errors.New("at most one of basic_auth username, username_file & username_ref must be configured")
}
if c.BasicAuth != nil && nonZeroCount(string(c.BasicAuth.Password) != "", c.BasicAuth.PasswordFile != "", c.BasicAuth.PasswordRef != "") > 1 {
- return fmt.Errorf("at most one of basic_auth password, password_file & password_ref must be configured")
+ return errors.New("at most one of basic_auth password, password_file & password_ref must be configured")
}
if c.Authorization != nil {
if len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0 {
- return fmt.Errorf("authorization is not compatible with bearer_token & bearer_token_file")
+ return errors.New("authorization is not compatible with bearer_token & bearer_token_file")
}
if nonZeroCount(string(c.Authorization.Credentials) != "", c.Authorization.CredentialsFile != "", c.Authorization.CredentialsRef != "") > 1 {
- return fmt.Errorf("at most one of authorization credentials & credentials_file must be configured")
+ return errors.New("at most one of authorization credentials & credentials_file must be configured")
}
c.Authorization.Type = strings.TrimSpace(c.Authorization.Type)
if len(c.Authorization.Type) == 0 {
c.Authorization.Type = "Bearer"
}
if strings.ToLower(c.Authorization.Type) == "basic" {
- return fmt.Errorf(`authorization type cannot be set to "basic", use "basic_auth" instead`)
+ return errors.New(`authorization type cannot be set to "basic", use "basic_auth" instead`)
}
if c.BasicAuth != nil || c.OAuth2 != nil {
- return fmt.Errorf("at most one of basic_auth, oauth2 & authorization must be configured")
+ return errors.New("at most one of basic_auth, oauth2 & authorization must be configured")
}
} else {
if len(c.BearerToken) > 0 {
@@ -399,16 +399,16 @@ func (c *HTTPClientConfig) Validate() error {
}
if c.OAuth2 != nil {
if c.BasicAuth != nil {
- return fmt.Errorf("at most one of basic_auth, oauth2 & authorization must be configured")
+ return errors.New("at most one of basic_auth, oauth2 & authorization must be configured")
}
if len(c.OAuth2.ClientID) == 0 {
- return fmt.Errorf("oauth2 client_id must be configured")
+ return errors.New("oauth2 client_id must be configured")
}
if len(c.OAuth2.TokenURL) == 0 {
- return fmt.Errorf("oauth2 token_url must be configured")
+ return errors.New("oauth2 token_url must be configured")
}
if nonZeroCount(len(c.OAuth2.ClientSecret) > 0, len(c.OAuth2.ClientSecretFile) > 0, len(c.OAuth2.ClientSecretRef) > 0) > 1 {
- return fmt.Errorf("at most one of oauth2 client_secret, client_secret_file & client_secret_ref must be configured")
+ return errors.New("at most one of oauth2 client_secret, client_secret_file & client_secret_ref must be configured")
}
}
if err := c.ProxyConfig.Validate(); err != nil {
@@ -735,7 +735,7 @@ func (s *FileSecret) Fetch(ctx context.Context) (string, error) {
}
func (s *FileSecret) Description() string {
- return fmt.Sprintf("file %s", s.file)
+ return "file " + s.file
}
func (s *FileSecret) Immutable() bool {
@@ -753,7 +753,7 @@ func (s *refSecret) Fetch(ctx context.Context) (string, error) {
}
func (s *refSecret) Description() string {
- return fmt.Sprintf("ref %s", s.ref)
+ return "ref " + s.ref
}
func (s *refSecret) Immutable() bool {
@@ -1045,7 +1045,7 @@ func NewTLSConfigWithContext(ctx context.Context, cfg *TLSConfig, optFuncs ...TL
if cfg.MaxVersion != 0 && cfg.MinVersion != 0 {
if cfg.MaxVersion < cfg.MinVersion {
- return nil, fmt.Errorf("tls_config.max_version must be greater than or equal to tls_config.min_version if both are specified")
+ return nil, errors.New("tls_config.max_version must be greater than or equal to tls_config.min_version if both are specified")
}
}
@@ -1144,19 +1144,19 @@ func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
// used.
func (c *TLSConfig) Validate() error {
if nonZeroCount(len(c.CA) > 0, len(c.CAFile) > 0, len(c.CARef) > 0) > 1 {
- return fmt.Errorf("at most one of ca, ca_file & ca_ref must be configured")
+ return errors.New("at most one of ca, ca_file & ca_ref must be configured")
}
if nonZeroCount(len(c.Cert) > 0, len(c.CertFile) > 0, len(c.CertRef) > 0) > 1 {
- return fmt.Errorf("at most one of cert, cert_file & cert_ref must be configured")
+ return errors.New("at most one of cert, cert_file & cert_ref must be configured")
}
if nonZeroCount(len(c.Key) > 0, len(c.KeyFile) > 0, len(c.KeyRef) > 0) > 1 {
- return fmt.Errorf("at most one of key and key_file must be configured")
+ return errors.New("at most one of key and key_file must be configured")
}
if c.usingClientCert() && !c.usingClientKey() {
- return fmt.Errorf("exactly one of key or key_file must be configured when a client certificate is configured")
+ return errors.New("exactly one of key or key_file must be configured when a client certificate is configured")
} else if c.usingClientKey() && !c.usingClientCert() {
- return fmt.Errorf("exactly one of cert or cert_file must be configured when a client key is configured")
+ return errors.New("exactly one of cert or cert_file must be configured when a client key is configured")
}
return nil
@@ -1460,16 +1460,16 @@ type ProxyConfig struct {
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *ProxyConfig) Validate() error {
if len(c.ProxyConnectHeader) > 0 && (!c.ProxyFromEnvironment && (c.ProxyURL.URL == nil || c.ProxyURL.String() == "")) {
- return fmt.Errorf("if proxy_connect_header is configured, proxy_url or proxy_from_environment must also be configured")
+ return errors.New("if proxy_connect_header is configured, proxy_url or proxy_from_environment must also be configured")
}
if c.ProxyFromEnvironment && c.ProxyURL.URL != nil && c.ProxyURL.String() != "" {
- return fmt.Errorf("if proxy_from_environment is configured, proxy_url must not be configured")
+ return errors.New("if proxy_from_environment is configured, proxy_url must not be configured")
}
if c.ProxyFromEnvironment && c.NoProxy != "" {
- return fmt.Errorf("if proxy_from_environment is configured, no_proxy must not be configured")
+ return errors.New("if proxy_from_environment is configured, no_proxy must not be configured")
}
if c.ProxyURL.URL == nil && c.NoProxy != "" {
- return fmt.Errorf("if no_proxy is configured, proxy_url must also be configured")
+ return errors.New("if no_proxy is configured, proxy_url must also be configured")
}
return nil
}
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
index cf0c150c2e184..d7f3d76f55d97 100644
--- a/vendor/github.com/prometheus/common/expfmt/encode.go
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -68,7 +68,7 @@ func Negotiate(h http.Header) Format {
if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
switch Format(escapeParam) {
case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
- escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam))
+ escapingScheme = Format("; escaping=" + escapeParam)
default:
// If the escaping parameter is unknown, ignore it.
}
@@ -101,7 +101,7 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format {
if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
switch Format(escapeParam) {
case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
- escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam))
+ escapingScheme = Format("; escaping=" + escapeParam)
default:
// If the escaping parameter is unknown, ignore it.
}
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
index d942af8edd46d..b26886560d74b 100644
--- a/vendor/github.com/prometheus/common/expfmt/expfmt.go
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -15,7 +15,7 @@
package expfmt
import (
- "fmt"
+ "errors"
"strings"
"github.com/prometheus/common/model"
@@ -109,7 +109,7 @@ func NewOpenMetricsFormat(version string) (Format, error) {
if version == OpenMetricsVersion_1_0_0 {
return FmtOpenMetrics_1_0_0, nil
}
- return FmtUnknown, fmt.Errorf("unknown open metrics version string")
+ return FmtUnknown, errors.New("unknown open metrics version string")
}
// WithEscapingScheme returns a copy of Format with the specified escaping
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
index 11c8ff4b9dbc5..f1c495dd6060d 100644
--- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
@@ -152,8 +152,8 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") {
compliantName = name[:len(name)-6]
}
- if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) {
- compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit)
+ if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, "_"+*in.Unit) {
+ compliantName = compliantName + "_" + *in.Unit
}
// Comments, first HELP, then TYPE.
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
index f085a923f6cdc..b4607fe4d274f 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -895,7 +895,7 @@ func histogramMetricName(name string) string {
func parseFloat(s string) (float64, error) {
if strings.ContainsAny(s, "pP_") {
- return 0, fmt.Errorf("unsupported character in float")
+ return 0, errors.New("unsupported character in float")
}
return strconv.ParseFloat(s, 64)
}
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
index 80d1fe944ea1c..bd3a39e3e1471 100644
--- a/vendor/github.com/prometheus/common/model/alert.go
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -14,6 +14,7 @@
package model
import (
+ "errors"
"fmt"
"time"
)
@@ -89,16 +90,16 @@ func (a *Alert) StatusAt(ts time.Time) AlertStatus {
// Validate checks whether the alert data is inconsistent.
func (a *Alert) Validate() error {
if a.StartsAt.IsZero() {
- return fmt.Errorf("start time missing")
+ return errors.New("start time missing")
}
if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
- return fmt.Errorf("start time must be before end time")
+ return errors.New("start time must be before end time")
}
if err := a.Labels.Validate(); err != nil {
return fmt.Errorf("invalid label set: %w", err)
}
if len(a.Labels) == 0 {
- return fmt.Errorf("at least one label pair required")
+ return errors.New("at least one label pair required")
}
if err := a.Annotations.Validate(); err != nil {
return fmt.Errorf("invalid annotations: %w", err)
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
index f50966bc494e4..0daca836afa77 100644
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -14,9 +14,11 @@
package model
import (
+ "errors"
"fmt"
"regexp"
"sort"
+ "strconv"
"strings"
"unicode/utf8"
@@ -269,10 +271,6 @@ func metricNeedsEscaping(m *dto.Metric) bool {
return false
}
-const (
- lowerhex = "0123456789abcdef"
-)
-
// EscapeName escapes the incoming name according to the provided escaping
// scheme. Depending on the rules of escaping, this may cause no change in the
// string that is returned. (Especially NoEscaping, which by definition is a
@@ -307,7 +305,7 @@ func EscapeName(name string, scheme EscapingScheme) string {
} else if isValidLegacyRune(b, i) {
escaped.WriteRune(b)
} else {
- escaped.WriteRune('_')
+ escaped.WriteString("__")
}
}
return escaped.String()
@@ -317,21 +315,15 @@ func EscapeName(name string, scheme EscapingScheme) string {
}
escaped.WriteString("U__")
for i, b := range name {
- if isValidLegacyRune(b, i) {
+ if b == '_' {
+ escaped.WriteString("__")
+ } else if isValidLegacyRune(b, i) {
escaped.WriteRune(b)
} else if !utf8.ValidRune(b) {
escaped.WriteString("_FFFD_")
- } else if b < 0x100 {
- escaped.WriteRune('_')
- for s := 4; s >= 0; s -= 4 {
- escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
- }
- escaped.WriteRune('_')
- } else if b < 0x10000 {
+ } else {
escaped.WriteRune('_')
- for s := 12; s >= 0; s -= 4 {
- escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
- }
+ escaped.WriteString(strconv.FormatInt(int64(b), 16))
escaped.WriteRune('_')
}
}
@@ -389,8 +381,9 @@ func UnescapeName(name string, scheme EscapingScheme) string {
// We think we are in a UTF-8 code, process it.
var utf8Val uint
for j := 0; i < len(escapedName); j++ {
- // This is too many characters for a utf8 value.
- if j > 4 {
+ // This is too many characters for a utf8 value based on the MaxRune
+ // value of '\U0010FFFF'.
+ if j >= 6 {
return name
}
// Found a closing underscore, convert to a rune, check validity, and append.
@@ -443,7 +436,7 @@ func (e EscapingScheme) String() string {
func ToEscapingScheme(s string) (EscapingScheme, error) {
if s == "" {
- return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme")
+ return NoEscaping, errors.New("got empty string instead of escaping scheme")
}
switch s {
case AllowUTF8:
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
index 910b0b71fcce4..8f91a9702e0ca 100644
--- a/vendor/github.com/prometheus/common/model/silence.go
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -15,6 +15,7 @@ package model
import (
"encoding/json"
+ "errors"
"fmt"
"regexp"
"time"
@@ -34,7 +35,7 @@ func (m *Matcher) UnmarshalJSON(b []byte) error {
}
if len(m.Name) == 0 {
- return fmt.Errorf("label name in matcher must not be empty")
+ return errors.New("label name in matcher must not be empty")
}
if m.IsRegex {
if _, err := regexp.Compile(m.Value); err != nil {
@@ -77,7 +78,7 @@ type Silence struct {
// Validate returns true iff all fields of the silence have valid values.
func (s *Silence) Validate() error {
if len(s.Matchers) == 0 {
- return fmt.Errorf("at least one matcher required")
+ return errors.New("at least one matcher required")
}
for _, m := range s.Matchers {
if err := m.Validate(); err != nil {
@@ -85,22 +86,22 @@ func (s *Silence) Validate() error {
}
}
if s.StartsAt.IsZero() {
- return fmt.Errorf("start time missing")
+ return errors.New("start time missing")
}
if s.EndsAt.IsZero() {
- return fmt.Errorf("end time missing")
+ return errors.New("end time missing")
}
if s.EndsAt.Before(s.StartsAt) {
- return fmt.Errorf("start time must be before end time")
+ return errors.New("start time must be before end time")
}
if s.CreatedBy == "" {
- return fmt.Errorf("creator information missing")
+ return errors.New("creator information missing")
}
if s.Comment == "" {
- return fmt.Errorf("comment missing")
+ return errors.New("comment missing")
}
if s.CreatedAt.IsZero() {
- return fmt.Errorf("creation timestamp missing")
+ return errors.New("creation timestamp missing")
}
return nil
}
diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go
index ae35cc2ab4b99..6bfc757d18b0f 100644
--- a/vendor/github.com/prometheus/common/model/value_float.go
+++ b/vendor/github.com/prometheus/common/model/value_float.go
@@ -15,6 +15,7 @@ package model
import (
"encoding/json"
+ "errors"
"fmt"
"math"
"strconv"
@@ -39,7 +40,7 @@ func (v SampleValue) MarshalJSON() ([]byte, error) {
// UnmarshalJSON implements json.Unmarshaler.
func (v *SampleValue) UnmarshalJSON(b []byte) error {
if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
- return fmt.Errorf("sample value must be a quoted string")
+ return errors.New("sample value must be a quoted string")
}
f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
if err != nil {
diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go
index 54bb038cfff36..895e6a3e8393e 100644
--- a/vendor/github.com/prometheus/common/model/value_histogram.go
+++ b/vendor/github.com/prometheus/common/model/value_histogram.go
@@ -15,6 +15,7 @@ package model
import (
"encoding/json"
+ "errors"
"fmt"
"strconv"
"strings"
@@ -32,7 +33,7 @@ func (v FloatString) MarshalJSON() ([]byte, error) {
func (v *FloatString) UnmarshalJSON(b []byte) error {
if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
- return fmt.Errorf("float value must be a quoted string")
+ return errors.New("float value must be a quoted string")
}
f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
if err != nil {
@@ -141,7 +142,7 @@ type SampleHistogramPair struct {
func (s SampleHistogramPair) MarshalJSON() ([]byte, error) {
if s.Histogram == nil {
- return nil, fmt.Errorf("histogram is nil")
+ return nil, errors.New("histogram is nil")
}
t, err := json.Marshal(s.Timestamp)
if err != nil {
@@ -164,7 +165,7 @@ func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error {
return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
}
if s.Histogram == nil {
- return fmt.Errorf("histogram is null")
+ return errors.New("histogram is null")
}
return nil
}
diff --git a/vendor/github.com/prometheus/common/version/info.go b/vendor/github.com/prometheus/common/version/info.go
index 197d95e5c8b02..61ed1ba314b75 100644
--- a/vendor/github.com/prometheus/common/version/info.go
+++ b/vendor/github.com/prometheus/common/version/info.go
@@ -90,6 +90,14 @@ func GetTags() string {
return computedTags
}
+func PrometheusUserAgent() string {
+ return ComponentUserAgent("Prometheus")
+}
+
+func ComponentUserAgent(component string) string {
+ return component + "/" + Version
+}
+
func init() {
computedRevision, computedTags = computeRevision()
}
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
index 105c3b279c0f4..81faec7e75d60 100644
--- a/vendor/golang.org/x/net/http2/frame.go
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -1490,7 +1490,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error {
pf := mh.PseudoFields()
for i, hf := range pf {
switch hf.Name {
- case ":method", ":path", ":scheme", ":authority":
+ case ":method", ":path", ":scheme", ":authority", ":protocol":
isRequest = true
case ":status":
isResponse = true
@@ -1498,7 +1498,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error {
return pseudoHeaderError(hf.Name)
}
// Check for duplicates.
- // This would be a bad algorithm, but N is 4.
+ // This would be a bad algorithm, but N is 5.
// And this doesn't allocate.
for _, hf2 := range pf[:i] {
if hf.Name == hf2.Name {
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index 7688c356b7cba..c7601c909ffb9 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -34,10 +34,11 @@ import (
)
var (
- VerboseLogs bool
- logFrameWrites bool
- logFrameReads bool
- inTests bool
+ VerboseLogs bool
+ logFrameWrites bool
+ logFrameReads bool
+ inTests bool
+ disableExtendedConnectProtocol bool
)
func init() {
@@ -50,6 +51,9 @@ func init() {
logFrameWrites = true
logFrameReads = true
}
+ if strings.Contains(e, "http2xconnect=0") {
+ disableExtendedConnectProtocol = true
+ }
}
const (
@@ -141,6 +145,10 @@ func (s Setting) Valid() error {
if s.Val < 16384 || s.Val > 1<<24-1 {
return ConnectionError(ErrCodeProtocol)
}
+ case SettingEnableConnectProtocol:
+ if s.Val != 1 && s.Val != 0 {
+ return ConnectionError(ErrCodeProtocol)
+ }
}
return nil
}
@@ -150,21 +158,23 @@ func (s Setting) Valid() error {
type SettingID uint16
const (
- SettingHeaderTableSize SettingID = 0x1
- SettingEnablePush SettingID = 0x2
- SettingMaxConcurrentStreams SettingID = 0x3
- SettingInitialWindowSize SettingID = 0x4
- SettingMaxFrameSize SettingID = 0x5
- SettingMaxHeaderListSize SettingID = 0x6
+ SettingHeaderTableSize SettingID = 0x1
+ SettingEnablePush SettingID = 0x2
+ SettingMaxConcurrentStreams SettingID = 0x3
+ SettingInitialWindowSize SettingID = 0x4
+ SettingMaxFrameSize SettingID = 0x5
+ SettingMaxHeaderListSize SettingID = 0x6
+ SettingEnableConnectProtocol SettingID = 0x8
)
var settingName = map[SettingID]string{
- SettingHeaderTableSize: "HEADER_TABLE_SIZE",
- SettingEnablePush: "ENABLE_PUSH",
- SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
- SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
- SettingMaxFrameSize: "MAX_FRAME_SIZE",
- SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
+ SettingHeaderTableSize: "HEADER_TABLE_SIZE",
+ SettingEnablePush: "ENABLE_PUSH",
+ SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
+ SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
+ SettingMaxFrameSize: "MAX_FRAME_SIZE",
+ SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
+ SettingEnableConnectProtocol: "ENABLE_CONNECT_PROTOCOL",
}
func (s SettingID) String() string {
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 832414b450c2f..b55547aec6403 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -932,14 +932,18 @@ func (sc *serverConn) serve(conf http2Config) {
sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
}
+ settings := writeSettings{
+ {SettingMaxFrameSize, conf.MaxReadFrameSize},
+ {SettingMaxConcurrentStreams, sc.advMaxStreams},
+ {SettingMaxHeaderListSize, sc.maxHeaderListSize()},
+ {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize},
+ {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)},
+ }
+ if !disableExtendedConnectProtocol {
+ settings = append(settings, Setting{SettingEnableConnectProtocol, 1})
+ }
sc.writeFrame(FrameWriteRequest{
- write: writeSettings{
- {SettingMaxFrameSize, conf.MaxReadFrameSize},
- {SettingMaxConcurrentStreams, sc.advMaxStreams},
- {SettingMaxHeaderListSize, sc.maxHeaderListSize()},
- {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize},
- {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)},
- },
+ write: settings,
})
sc.unackedSettings++
@@ -1801,6 +1805,9 @@ func (sc *serverConn) processSetting(s Setting) error {
sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31
case SettingMaxHeaderListSize:
sc.peerMaxHeaderListSize = s.Val
+ case SettingEnableConnectProtocol:
+ // Receipt of this parameter by a server does not
+ // have any impact
default:
// Unknown setting: "An endpoint that receives a SETTINGS
// frame with any unknown or unsupported identifier MUST
@@ -2231,11 +2238,17 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
scheme: f.PseudoValue("scheme"),
authority: f.PseudoValue("authority"),
path: f.PseudoValue("path"),
+ protocol: f.PseudoValue("protocol"),
+ }
+
+ // extended connect is disabled, so we should not see :protocol
+ if disableExtendedConnectProtocol && rp.protocol != "" {
+ return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
}
isConnect := rp.method == "CONNECT"
if isConnect {
- if rp.path != "" || rp.scheme != "" || rp.authority == "" {
+ if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") {
return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
}
} else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
@@ -2259,6 +2272,9 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
if rp.authority == "" {
rp.authority = rp.header.Get("Host")
}
+ if rp.protocol != "" {
+ rp.header.Set(":protocol", rp.protocol)
+ }
rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
if err != nil {
@@ -2285,6 +2301,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
type requestParam struct {
method string
scheme, authority, path string
+ protocol string
header http.Header
}
@@ -2326,7 +2343,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r
var url_ *url.URL
var requestURI string
- if rp.method == "CONNECT" {
+ if rp.method == "CONNECT" && rp.protocol == "" {
url_ = &url.URL{Host: rp.authority}
requestURI = rp.authority // mimic HTTP/1 server behavior
} else {
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index f5968f44071f8..090d0e1bdb5de 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -368,25 +368,26 @@ type ClientConn struct {
idleTimeout time.Duration // or 0 for never
idleTimer timer
- mu sync.Mutex // guards following
- cond *sync.Cond // hold mu; broadcast on flow/closed changes
- flow outflow // our conn-level flow control quota (cs.outflow is per stream)
- inflow inflow // peer's conn-level flow control
- doNotReuse bool // whether conn is marked to not be reused for any future requests
- closing bool
- closed bool
- seenSettings bool // true if we've seen a settings frame, false otherwise
- wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
- goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
- goAwayDebug string // goAway frame's debug data, retained as a string
- streams map[uint32]*clientStream // client-initiated
- streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip
- nextStreamID uint32
- pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams
- pings map[[8]byte]chan struct{} // in flight ping data to notification channel
- br *bufio.Reader
- lastActive time.Time
- lastIdle time.Time // time last idle
+ mu sync.Mutex // guards following
+ cond *sync.Cond // hold mu; broadcast on flow/closed changes
+ flow outflow // our conn-level flow control quota (cs.outflow is per stream)
+ inflow inflow // peer's conn-level flow control
+ doNotReuse bool // whether conn is marked to not be reused for any future requests
+ closing bool
+ closed bool
+ seenSettings bool // true if we've seen a settings frame, false otherwise
+ seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails
+ wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
+ goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
+ goAwayDebug string // goAway frame's debug data, retained as a string
+ streams map[uint32]*clientStream // client-initiated
+ streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip
+ nextStreamID uint32
+ pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams
+ pings map[[8]byte]chan struct{} // in flight ping data to notification channel
+ br *bufio.Reader
+ lastActive time.Time
+ lastIdle time.Time // time last idle
// Settings from peer: (also guarded by wmu)
maxFrameSize uint32
maxConcurrentStreams uint32
@@ -396,6 +397,17 @@ type ClientConn struct {
initialStreamRecvWindowSize int32
readIdleTimeout time.Duration
pingTimeout time.Duration
+ extendedConnectAllowed bool
+
+ // rstStreamPingsBlocked works around an unfortunate gRPC behavior.
+ // gRPC strictly limits the number of PING frames that it will receive.
+ // The default is two pings per two hours, but the limit resets every time
+ // the gRPC endpoint sends a HEADERS or DATA frame. See golang/go#70575.
+ //
+ // rstStreamPingsBlocked is set after receiving a response to a PING frame
+ // bundled with an RST_STREAM (see pendingResets below), and cleared after
+ // receiving a HEADERS or DATA frame.
+ rstStreamPingsBlocked bool
// pendingResets is the number of RST_STREAM frames we have sent to the peer,
// without confirming that the peer has received them. When we send a RST_STREAM,
@@ -819,6 +831,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
streams: make(map[uint32]*clientStream),
singleUse: singleUse,
+ seenSettingsChan: make(chan struct{}),
wantSettingsAck: true,
readIdleTimeout: conf.SendPingTimeout,
pingTimeout: conf.PingTimeout,
@@ -1466,6 +1479,8 @@ func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)
cs.cleanupWriteRequest(err)
}
+var errExtendedConnectNotSupported = errors.New("net/http: extended connect not supported by peer")
+
// writeRequest sends a request.
//
// It returns nil after the request is written, the response read,
@@ -1481,12 +1496,31 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre
return err
}
+ // wait for setting frames to be received, a server can change this value later,
+ // but we just wait for the first settings frame
+ var isExtendedConnect bool
+ if req.Method == "CONNECT" && req.Header.Get(":protocol") != "" {
+ isExtendedConnect = true
+ }
+
// Acquire the new-request lock by writing to reqHeaderMu.
// This lock guards the critical section covering allocating a new stream ID
// (requires mu) and creating the stream (requires wmu).
if cc.reqHeaderMu == nil {
panic("RoundTrip on uninitialized ClientConn") // for tests
}
+ if isExtendedConnect {
+ select {
+ case <-cs.reqCancel:
+ return errRequestCanceled
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-cc.seenSettingsChan:
+ if !cc.extendedConnectAllowed {
+ return errExtendedConnectNotSupported
+ }
+ }
+ }
select {
case cc.reqHeaderMu <- struct{}{}:
case <-cs.reqCancel:
@@ -1714,10 +1748,14 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
ping := false
if !closeOnIdle {
cc.mu.Lock()
- if cc.pendingResets == 0 {
- ping = true
+ // rstStreamPingsBlocked works around a gRPC behavior:
+ // see comment on the field for details.
+ if !cc.rstStreamPingsBlocked {
+ if cc.pendingResets == 0 {
+ ping = true
+ }
+ cc.pendingResets++
}
- cc.pendingResets++
cc.mu.Unlock()
}
cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err)
@@ -2030,7 +2068,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
func validateHeaders(hdrs http.Header) string {
for k, vv := range hdrs {
- if !httpguts.ValidHeaderFieldName(k) {
+ if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
return fmt.Sprintf("name %q", k)
}
for _, v := range vv {
@@ -2046,6 +2084,10 @@ func validateHeaders(hdrs http.Header) string {
var errNilRequestURL = errors.New("http2: Request.URI is nil")
+func isNormalConnect(req *http.Request) bool {
+ return req.Method == "CONNECT" && req.Header.Get(":protocol") == ""
+}
+
// requires cc.wmu be held.
func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
cc.hbuf.Reset()
@@ -2066,7 +2108,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
}
var path string
- if req.Method != "CONNECT" {
+ if !isNormalConnect(req) {
path = req.URL.RequestURI()
if !validPseudoPath(path) {
orig := path
@@ -2103,7 +2145,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
m = http.MethodGet
}
f(":method", m)
- if req.Method != "CONNECT" {
+ if !isNormalConnect(req) {
f(":path", path)
f(":scheme", req.URL.Scheme)
}
@@ -2461,7 +2503,7 @@ func (rl *clientConnReadLoop) run() error {
cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
}
if se, ok := err.(StreamError); ok {
- if cs := rl.streamByID(se.StreamID); cs != nil {
+ if cs := rl.streamByID(se.StreamID, notHeaderOrDataFrame); cs != nil {
if se.Cause == nil {
se.Cause = cc.fr.errDetail
}
@@ -2507,13 +2549,16 @@ func (rl *clientConnReadLoop) run() error {
if VerboseLogs {
cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err)
}
+ if !cc.seenSettings {
+ close(cc.seenSettingsChan)
+ }
return err
}
}
}
func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
- cs := rl.streamByID(f.StreamID)
+ cs := rl.streamByID(f.StreamID, headerOrDataFrame)
if cs == nil {
// We'd get here if we canceled a request while the
// server had its response still in flight. So if this
@@ -2842,7 +2887,7 @@ func (b transportResponseBody) Close() error {
func (rl *clientConnReadLoop) processData(f *DataFrame) error {
cc := rl.cc
- cs := rl.streamByID(f.StreamID)
+ cs := rl.streamByID(f.StreamID, headerOrDataFrame)
data := f.Data()
if cs == nil {
cc.mu.Lock()
@@ -2977,9 +3022,22 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
cs.abortStream(err)
}
-func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream {
+// Constants passed to streamByID for documentation purposes.
+const (
+ headerOrDataFrame = true
+ notHeaderOrDataFrame = false
+)
+
+// streamByID returns the stream with the given id, or nil if no stream has that id.
// If headerOrData is true, it clears rstStreamPingsBlocked.
+func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientStream {
rl.cc.mu.Lock()
defer rl.cc.mu.Unlock()
+ if headerOrData {
+ // Work around an unfortunate gRPC behavior.
+ // See comment on ClientConn.rstStreamPingsBlocked for details.
+ rl.cc.rstStreamPingsBlocked = false
+ }
cs := rl.cc.streams[id]
if cs != nil && !cs.readAborted {
return cs
@@ -3073,6 +3131,21 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
case SettingHeaderTableSize:
cc.henc.SetMaxDynamicTableSize(s.Val)
cc.peerMaxHeaderTableSize = s.Val
+ case SettingEnableConnectProtocol:
+ if err := s.Valid(); err != nil {
+ return err
+ }
+ // If the peer wants to send us SETTINGS_ENABLE_CONNECT_PROTOCOL,
+ // we require that it do so in the first SETTINGS frame.
+ //
+ // When we attempt to use extended CONNECT, we wait for the first
+ // SETTINGS frame to see if the server supports it. If we let the
+ // server enable the feature with a later SETTINGS frame, then
+ // users will see inconsistent results depending on whether we've
+ // seen that frame or not.
+ if !cc.seenSettings {
+ cc.extendedConnectAllowed = s.Val == 1
+ }
default:
cc.vlogf("Unhandled Setting: %v", s)
}
@@ -3090,6 +3163,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
// connection can establish to our default.
cc.maxConcurrentStreams = defaultMaxConcurrentStreams
}
+ close(cc.seenSettingsChan)
cc.seenSettings = true
}
@@ -3098,7 +3172,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
cc := rl.cc
- cs := rl.streamByID(f.StreamID)
+ cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame)
if f.StreamID != 0 && cs == nil {
return nil
}
@@ -3127,7 +3201,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
}
func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
- cs := rl.streamByID(f.StreamID)
+ cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame)
if cs == nil {
// TODO: return error if server tries to RST_STREAM an idle stream
return nil
@@ -3205,6 +3279,7 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
if cc.pendingResets > 0 {
// See clientStream.cleanupWriteRequest.
cc.pendingResets = 0
+ cc.rstStreamPingsBlocked = true
cc.cond.Broadcast()
}
return nil
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 0f37a836dff45..c2e100287a4e5 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1386,7 +1386,7 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint/validations
# github.com/prometheus/client_model v0.6.1
## explicit; go 1.19
github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.60.1
+# github.com/prometheus/common v0.61.0
## explicit; go 1.21
github.com/prometheus/common/config
github.com/prometheus/common/expfmt
@@ -1812,7 +1812,7 @@ go.uber.org/zap/zapgrpc
# go4.org/netipx v0.0.0-20230125063823-8449b0a6169f
## explicit; go 1.18
go4.org/netipx
-# golang.org/x/crypto v0.29.0
+# golang.org/x/crypto v0.30.0
## explicit; go 1.20
golang.org/x/crypto/argon2
golang.org/x/crypto/bcrypt
@@ -1838,7 +1838,7 @@ golang.org/x/exp/slices
# golang.org/x/mod v0.19.0
## explicit; go 1.18
golang.org/x/mod/semver
-# golang.org/x/net v0.31.0
+# golang.org/x/net v0.32.0
## explicit; go 1.18
golang.org/x/net/bpf
golang.org/x/net/context
@@ -1886,7 +1886,7 @@ golang.org/x/sys/unix
golang.org/x/sys/windows
golang.org/x/sys/windows/registry
golang.org/x/sys/windows/svc/eventlog
-# golang.org/x/term v0.26.0
+# golang.org/x/term v0.27.0
## explicit; go 1.18
golang.org/x/term
# golang.org/x/text v0.21.0
|
fix
|
update module github.com/prometheus/common to v0.61.0 (#15336)
|
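Most of the vendored `prometheus/common` churn above swaps `fmt.Errorf` for `errors.New` wherever the message contains no format verbs — a standard linter-driven cleanup that skips a pointless format-string parse. A minimal sketch of the rule of thumb, using a hypothetical validator rather than the real `HTTPClientConfig`:

```go
package main

import (
	"errors"
	"fmt"
)

type clientConfig struct {
	bearerToken     string
	bearerTokenFile string
}

func (c clientConfig) validate() error {
	// Constant message: errors.New, as the diff above now does.
	if c.bearerToken != "" && c.bearerTokenFile != "" {
		return errors.New("at most one of bearer_token & bearer_token_file must be configured")
	}
	// Interpolated message: fmt.Errorf is still the right call.
	if n := len(c.bearerToken); n > 512 {
		return fmt.Errorf("bearer_token too long: %d bytes", n)
	}
	return nil
}

func main() {
	fmt.Println(clientConfig{bearerToken: "a", bearerTokenFile: "/tmp/t"}.validate())
}
```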
a2a603a00bc43dffb40cab5e9804933ddb55f730
|
2025-02-01 01:37:54
|
axi92
|
docs(promtail): Add group permission note for journal (#16027)
| false
|
diff --git a/docs/sources/send-data/promtail/configuration.md b/docs/sources/send-data/promtail/configuration.md
index 0cd4d4eccc431..2d768f22f3667 100644
--- a/docs/sources/send-data/promtail/configuration.md
+++ b/docs/sources/send-data/promtail/configuration.md
@@ -836,7 +836,9 @@ replace:
The `journal` block configures reading from the systemd journal from
Promtail. Requires a build of Promtail that has journal support _enabled_. If
-using the AMD64 Docker image, this is enabled by default.
+using the AMD64 Docker image, this is enabled by default. On some systems the
+`promtail` user also needs permission to read journal logs.
+For Ubuntu (24.04), add `promtail` to the `systemd-journal` group with `sudo usermod -a -G systemd-journal promtail`.
```yaml
# When true, log messages from the journal are passed through the
|
docs
|
Add group permission note for journal (#16027)
|
d91a64f910086a7bf5c295cebbd9713a09901e69
|
2022-06-01 19:27:51
|
Periklis Tsirakidis
|
operator: Add support for custom S3 CA (#6198)
| false
|
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index 1a72b2e1e056c..dbfebcbca8612 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,5 +1,6 @@
## Main
+- [6198](https://github.com/grafana/loki/pull/6198) **periklis**: Add support for custom S3 CA
- [6199](https://github.com/grafana/loki/pull/6199) **Red-GV**: Update GCP secret volume path
- [6125](https://github.com/grafana/loki/pull/6125) **sasagarw**: Add method to get authenticated from GCP
- [5986](https://github.com/grafana/loki/pull/5986) **periklis**: Add support for Loki Rules reconciliation
diff --git a/operator/api/v1beta1/lokistack_types.go b/operator/api/v1beta1/lokistack_types.go
index 2563fb2c6169e..77246a4704a02 100644
--- a/operator/api/v1beta1/lokistack_types.go
+++ b/operator/api/v1beta1/lokistack_types.go
@@ -350,15 +350,33 @@ type ObjectStorageSecretSpec struct {
Name string `json:"name"`
}
+// ObjectStorageTLSSpec is the TLS configuration for reaching the object storage endpoint.
+type ObjectStorageTLSSpec struct {
+ // CA is the name of a ConfigMap containing a CA certificate.
+ // It needs to be in the same namespace as the LokiStack custom resource.
+ //
+ // +optional
+ // +kubebuilder:validation:optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:io.kubernetes:ConfigMap",displayName="CA ConfigMap Name"
+ CA string `json:"caName,omitempty"`
+}
+
// ObjectStorageSpec defines the requirements to access the object
// storage bucket to persist logs by the ingester component.
type ObjectStorageSpec struct {
// Secret for object storage authentication.
- // Name of a secret in the same namespace as the cluster logging operator.
+ // Name of a secret in the same namespace as the LokiStack custom resource.
//
// +required
// +kubebuilder:validation:Required
Secret ObjectStorageSecretSpec `json:"secret"`
+
+ // TLS configuration for reaching the object storage endpoint.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="TLS Config"
+ TLS *ObjectStorageTLSSpec `json:"tls,omitempty"`
}
// QueryLimitSpec defines the limits applies at the query path.
@@ -612,6 +630,11 @@ const (
ReasonMissingObjectStorageSecret LokiStackConditionReason = "MissingObjectStorageSecret"
// ReasonInvalidObjectStorageSecret when the format of the secret is invalid.
ReasonInvalidObjectStorageSecret LokiStackConditionReason = "InvalidObjectStorageSecret"
+ // ReasonMissingObjectStorageCAConfigMap when the required configmap to verify object storage
+ // certificates is missing.
+ ReasonMissingObjectStorageCAConfigMap LokiStackConditionReason = "MissingObjectStorageCAConfigMap"
+ // ReasonInvalidObjectStorageCAConfigMap when the format of the CA configmap is invalid.
+ ReasonInvalidObjectStorageCAConfigMap LokiStackConditionReason = "InvalidObjectStorageCAConfigMap"
// ReasonInvalidReplicationConfiguration when the configured replication factor is not valid
// with the selected cluster size.
ReasonInvalidReplicationConfiguration LokiStackConditionReason = "InvalidReplicationConfiguration"
diff --git a/operator/api/v1beta1/zz_generated.deepcopy.go b/operator/api/v1beta1/zz_generated.deepcopy.go
index 316cf5800a14b..438158ac21081 100644
--- a/operator/api/v1beta1/zz_generated.deepcopy.go
+++ b/operator/api/v1beta1/zz_generated.deepcopy.go
@@ -520,7 +520,7 @@ func (in *LokiStackList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LokiStackSpec) DeepCopyInto(out *LokiStackSpec) {
*out = *in
- out.Storage = in.Storage
+ in.Storage.DeepCopyInto(&out.Storage)
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = new(RulesSpec)
@@ -685,6 +685,11 @@ func (in *ObjectStorageSecretSpec) DeepCopy() *ObjectStorageSecretSpec {
func (in *ObjectStorageSpec) DeepCopyInto(out *ObjectStorageSpec) {
*out = *in
out.Secret = in.Secret
+ if in.TLS != nil {
+ in, out := &in.TLS, &out.TLS
+ *out = new(ObjectStorageTLSSpec)
+ **out = **in
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageSpec.
@@ -697,6 +702,21 @@ func (in *ObjectStorageSpec) DeepCopy() *ObjectStorageSpec {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectStorageTLSSpec) DeepCopyInto(out *ObjectStorageTLSSpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageTLSSpec.
+func (in *ObjectStorageTLSSpec) DeepCopy() *ObjectStorageTLSSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ObjectStorageTLSSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in PodStatusMap) DeepCopyInto(out *PodStatusMap) {
{
diff --git a/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml
index 7dc3032df8ebb..fe768f9bc439d 100644
--- a/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml
@@ -394,6 +394,15 @@ spec:
- urn:alm:descriptor:com.tectonic.ui:select:gcs
- urn:alm:descriptor:com.tectonic.ui:select:s3
- urn:alm:descriptor:com.tectonic.ui:select:swift
+ - description: TLS configuration for reaching the object storage endpoint.
+ displayName: TLS Config
+ path: storage.tls
+ - description: CA is the name of a ConfigMap containing a CA certificate. It
+ needs to be in the same namespace as the LokiStack custom resource.
+ displayName: CA ConfigMap Name
+ path: storage.tls.caName
+ x-descriptors:
+ - urn:alm:descriptor:io.kubernetes:ConfigMap
- description: Storage class name defines the storage class for ingester/querier
PVCs.
displayName: Storage Class Name
diff --git a/operator/bundle/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/manifests/loki.grafana.com_lokistacks.yaml
index 839617badfa29..c4583198af118 100644
--- a/operator/bundle/manifests/loki.grafana.com_lokistacks.yaml
+++ b/operator/bundle/manifests/loki.grafana.com_lokistacks.yaml
@@ -316,7 +316,7 @@ spec:
properties:
secret:
description: Secret for object storage authentication. Name of
- a secret in the same namespace as the cluster logging operator.
+ a secret in the same namespace as the LokiStack custom resource.
properties:
name:
description: Name of a secret in the namespace configured
@@ -334,6 +334,16 @@ spec:
- name
- type
type: object
+ tls:
+ description: TLS configuration for reaching the object storage
+ endpoint.
+ properties:
+ caName:
+ description: CA is the name of a ConfigMap containing a CA
+ certificate. It needs to be in the same namespace as the
+ LokiStack custom resource.
+ type: string
+ type: object
required:
- secret
type: object
diff --git a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml
index 629c40770c1e6..18e8dd13fd879 100644
--- a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml
+++ b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml
@@ -311,7 +311,7 @@ spec:
properties:
secret:
description: Secret for object storage authentication. Name of
- a secret in the same namespace as the cluster logging operator.
+ a secret in the same namespace as the LokiStack custom resource.
properties:
name:
description: Name of a secret in the namespace configured
@@ -329,6 +329,16 @@ spec:
- name
- type
type: object
+ tls:
+ description: TLS configuration for reaching the object storage
+ endpoint.
+ properties:
+ caName:
+ description: CA is the name of a ConfigMap containing a CA
+ certificate. It needs to be in the same namespace as the
+ LokiStack custom resource.
+ type: string
+ type: object
required:
- secret
type: object
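For illustration, a minimal sketch of a LokiStack resource that opts into the new field, written against the v1beta1 Go types from this change; the object names (my-stack, some-ns, my-s3-secret, my-ca-bundle) are placeholders, not part of the commit.

package example

import (
	lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newStackWithCustomCA returns a LokiStack whose S3 client verifies the
// endpoint certificate against the CA stored in the named ConfigMap. The
// ConfigMap must live in the same namespace as the LokiStack resource.
func newStackWithCustomCA() *lokiv1beta1.LokiStack {
	return &lokiv1beta1.LokiStack{
		ObjectMeta: metav1.ObjectMeta{Name: "my-stack", Namespace: "some-ns"},
		Spec: lokiv1beta1.LokiStackSpec{
			Size: lokiv1beta1.SizeOneXExtraSmall,
			Storage: lokiv1beta1.ObjectStorageSpec{
				Secret: lokiv1beta1.ObjectStorageSecretSpec{
					Name: "my-s3-secret",
					Type: lokiv1beta1.ObjectStorageSecretS3,
				},
				// TLS is optional; when set, the referenced ConfigMap must
				// contain a non-empty `service-ca.crt` key to pass validation.
				TLS: &lokiv1beta1.ObjectStorageTLSSpec{CA: "my-ca-bundle"},
			},
		},
	}
}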
diff --git a/operator/config/manifests/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/bases/loki-operator.clusterserviceversion.yaml
index 036625d996fe5..841f68f505323 100644
--- a/operator/config/manifests/bases/loki-operator.clusterserviceversion.yaml
+++ b/operator/config/manifests/bases/loki-operator.clusterserviceversion.yaml
@@ -308,6 +308,15 @@ spec:
- urn:alm:descriptor:com.tectonic.ui:select:gcs
- urn:alm:descriptor:com.tectonic.ui:select:s3
- urn:alm:descriptor:com.tectonic.ui:select:swift
+ - description: TLS configuration for reaching the object storage endpoint.
+ displayName: TLS Config
+ path: storage.tls
+ - description: CA is the name of a ConfigMap containing a CA certificate. It
+ needs to be in the same namespace as the LokiStack custom resource.
+ displayName: CA ConfigMap Name
+ path: storage.tls.caName
+ x-descriptors:
+ - urn:alm:descriptor:io.kubernetes:ConfigMap
- description: Storage class name defines the storage class for ingester/querier
PVCs.
displayName: Storage Class Name
diff --git a/operator/internal/handlers/internal/gateway/tenant_secrets.go b/operator/internal/handlers/internal/gateway/tenant_secrets.go
index 4cc677b4fc824..d2c79e6824ea9 100644
--- a/operator/internal/handlers/internal/gateway/tenant_secrets.go
+++ b/operator/internal/handlers/internal/gateway/tenant_secrets.go
@@ -8,7 +8,6 @@ import (
lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1"
"github.com/grafana/loki/operator/internal/external/k8s"
- "github.com/grafana/loki/operator/internal/handlers/internal/secrets"
"github.com/grafana/loki/operator/internal/manifests"
"github.com/grafana/loki/operator/internal/status"
@@ -48,7 +47,7 @@ func GetTenantSecrets(
}
var ts *manifests.TenantSecrets
- ts, err := secrets.ExtractGatewaySecret(&gatewaySecret, tenant.TenantName)
+ ts, err := extractSecret(&gatewaySecret, tenant.TenantName)
if err != nil {
return nil, &status.DegradedError{
Message: "Invalid gateway tenant secret contents",
@@ -61,3 +60,27 @@ func GetTenantSecrets(
return tenantSecrets, nil
}
+
+// extractSecret reads a k8s secret into a manifest tenant secret struct if valid.
+func extractSecret(s *corev1.Secret, tenantName string) (*manifests.TenantSecrets, error) {
+ // Extract and validate mandatory fields
+ clientID := s.Data["clientID"]
+ if len(clientID) == 0 {
+ return nil, kverrors.New("missing clientID field", "field", "clientID")
+ }
+ clientSecret := s.Data["clientSecret"]
+ if len(clientSecret) == 0 {
+ return nil, kverrors.New("missing clientSecret field", "field", "clientSecret")
+ }
+ issuerCAPath := s.Data["issuerCAPath"]
+ if len(issuerCAPath) == 0 {
+ return nil, kverrors.New("missing issuerCAPath field", "field", "issuerCAPath")
+ }
+
+ return &manifests.TenantSecrets{
+ TenantName: tenantName,
+ ClientID: string(clientID),
+ ClientSecret: string(clientSecret),
+ IssuerCAPath: string(issuerCAPath),
+ }, nil
+}
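For orientation, a sketch of the secret shape the now-unexported extractSecret validates; all names and values below are illustrative. Note the check is stricter than the old ok-lookup: len(v) == 0 also rejects keys that are present but empty.

package gateway

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleTenantSecret builds a secret that passes extractSecret: all three
// keys must be present and non-empty, otherwise an error is returned.
func exampleTenantSecret() *corev1.Secret {
	return &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "tenant-a-oidc", Namespace: "some-ns"},
		Data: map[string][]byte{
			"clientID":     []byte("loki-tenant-a"),
			"clientSecret": []byte("s3cr3t"),
			"issuerCAPath": []byte("/var/run/ca/issuer-ca.crt"),
		},
	}
}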
diff --git a/operator/internal/handlers/internal/gateway/tenant_secrets_test.go b/operator/internal/handlers/internal/gateway/tenant_secrets_test.go
index c6bcba8122cea..86b58e6f0610b 100644
--- a/operator/internal/handlers/internal/gateway/tenant_secrets_test.go
+++ b/operator/internal/handlers/internal/gateway/tenant_secrets_test.go
@@ -142,3 +142,66 @@ func TestGetTenantSecrets_DynamicMode(t *testing.T) {
}
require.ElementsMatch(t, ts, expected)
}
+
+func TestExtractSecret(t *testing.T) {
+ type test struct {
+ name string
+ tenantName string
+ secret *corev1.Secret
+ wantErr bool
+ }
+ table := []test{
+ {
+ name: "missing clientID",
+ tenantName: "tenant-a",
+ secret: &corev1.Secret{},
+ wantErr: true,
+ },
+ {
+ name: "missing clientSecret",
+ tenantName: "tenant-a",
+ secret: &corev1.Secret{
+ Data: map[string][]byte{
+ "clientID": []byte("test"),
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "missing issuerCAPath",
+ tenantName: "tenant-a",
+ secret: &corev1.Secret{
+ Data: map[string][]byte{
+ "clientID": []byte("test"),
+ "clientSecret": []byte("test"),
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "all set",
+ tenantName: "tenant-a",
+ secret: &corev1.Secret{
+ Data: map[string][]byte{
+ "clientID": []byte("test"),
+ "clientSecret": []byte("test"),
+ "issuerCAPath": []byte("/tmp/test"),
+ },
+ },
+ },
+ }
+ for _, tst := range table {
+ tst := tst
+ t.Run(tst.name, func(t *testing.T) {
+ t.Parallel()
+
+ _, err := extractSecret(tst.secret, tst.tenantName)
+ if !tst.wantErr {
+ require.NoError(t, err)
+ }
+ if tst.wantErr {
+ require.NotNil(t, err)
+ }
+ })
+ }
+}
diff --git a/operator/internal/handlers/internal/storage/ca_configmap.go b/operator/internal/handlers/internal/storage/ca_configmap.go
new file mode 100644
index 0000000000000..f415879d28fd6
--- /dev/null
+++ b/operator/internal/handlers/internal/storage/ca_configmap.go
@@ -0,0 +1,9 @@
+package storage
+
+import corev1 "k8s.io/api/core/v1"
+
+// IsValidCAConfigMap checks if the given CA configMap has a
+// non-empty entry for key `service-ca.crt`.
+func IsValidCAConfigMap(cm *corev1.ConfigMap) bool {
+ return cm.Data["service-ca.crt"] != ""
+}
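A usage sketch (ConfigMap contents illustrative); the check is purely structural: it confirms a non-empty `service-ca.crt` entry and does not parse the PEM data.

package storage_test

import (
	corev1 "k8s.io/api/core/v1"

	"github.com/grafana/loki/operator/internal/handlers/internal/storage"
)

// exampleCACheck returns true: the key is present and non-empty.
func exampleCACheck() bool {
	cm := &corev1.ConfigMap{
		Data: map[string]string{
			"service-ca.crt": "-----BEGIN CERTIFICATE-----\n...",
		},
	}
	return storage.IsValidCAConfigMap(cm)
}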
diff --git a/operator/internal/handlers/internal/storage/ca_configmap_test.go b/operator/internal/handlers/internal/storage/ca_configmap_test.go
new file mode 100644
index 0000000000000..fdf6522bee6ff
--- /dev/null
+++ b/operator/internal/handlers/internal/storage/ca_configmap_test.go
@@ -0,0 +1,49 @@
+package storage_test
+
+import (
+ "testing"
+
+ "github.com/grafana/loki/operator/internal/handlers/internal/storage"
+ "github.com/stretchr/testify/require"
+ corev1 "k8s.io/api/core/v1"
+)
+
+func TestIsValidConfigMap(t *testing.T) {
+ type test struct {
+ name string
+ cm *corev1.ConfigMap
+ valid bool
+ }
+ table := []test{
+ {
+ name: "valid CA configmap",
+ cm: &corev1.ConfigMap{
+ Data: map[string]string{
+ "service-ca.crt": "has-some-data",
+ },
+ },
+ valid: true,
+ },
+ {
+ name: "missing `service-ca.crt` key",
+ cm: &corev1.ConfigMap{},
+ },
+ {
+ name: "missing CA content",
+ cm: &corev1.ConfigMap{
+ Data: map[string]string{
+ "service-ca.crt": "",
+ },
+ },
+ },
+ }
+ for _, tst := range table {
+ tst := tst
+ t.Run(tst.name, func(t *testing.T) {
+ t.Parallel()
+
+ ok := storage.IsValidCAConfigMap(tst.cm)
+ require.Equal(t, tst.valid, ok)
+ })
+ }
+}
diff --git a/operator/internal/handlers/internal/secrets/secrets.go b/operator/internal/handlers/internal/storage/secrets.go
similarity index 67%
rename from operator/internal/handlers/internal/secrets/secrets.go
rename to operator/internal/handlers/internal/storage/secrets.go
index ce1e3eed2325d..5f20fc831dde8 100644
--- a/operator/internal/handlers/internal/secrets/secrets.go
+++ b/operator/internal/handlers/internal/storage/secrets.go
@@ -1,42 +1,20 @@
-package secrets
+package storage
import (
"github.com/ViaQ/logerr/v2/kverrors"
lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1"
- "github.com/grafana/loki/operator/internal/manifests"
"github.com/grafana/loki/operator/internal/manifests/storage"
corev1 "k8s.io/api/core/v1"
)
-// ExtractGatewaySecret reads a k8s secret into a manifest tenant secret struct if valid.
-func ExtractGatewaySecret(s *corev1.Secret, tenantName string) (*manifests.TenantSecrets, error) {
- // Extract and validate mandatory fields
- clientID, ok := s.Data["clientID"]
- if !ok {
- return nil, kverrors.New("missing clientID field", "field", "clientID")
- }
- clientSecret, ok := s.Data["clientSecret"]
- if !ok {
- return nil, kverrors.New("missing clientSecret field", "field", "clientSecret")
- }
- issuerCAPath, ok := s.Data["issuerCAPath"]
- if !ok {
- return nil, kverrors.New("missing issuerCAPath field", "field", "issuerCAPath")
- }
-
- return &manifests.TenantSecrets{
- TenantName: tenantName,
- ClientID: string(clientID),
- ClientSecret: string(clientSecret),
- IssuerCAPath: string(issuerCAPath),
- }, nil
-}
-
-// ExtractStorageSecret reads a k8s secret into a manifest object storage struct if valid.
-func ExtractStorageSecret(s *corev1.Secret, secretType lokiv1beta1.ObjectStorageSecretType) (*storage.Options, error) {
+// ExtractSecret reads a k8s secret into a manifest object storage struct if valid.
+func ExtractSecret(s *corev1.Secret, secretType lokiv1beta1.ObjectStorageSecretType) (*storage.Options, error) {
var err error
- storageOpts := storage.Options{SharedStore: secretType}
+ storageOpts := storage.Options{
+ SecretName: s.Name,
+ SharedStore: secretType,
+ }
switch secretType {
case lokiv1beta1.ObjectStorageSecretAzure:
@@ -59,20 +37,20 @@ func ExtractStorageSecret(s *corev1.Secret, secretType lokiv1beta1.ObjectStorage
func extractAzureConfigSecret(s *corev1.Secret) (*storage.AzureStorageConfig, error) {
// Extract and validate mandatory fields
- env, ok := s.Data["environment"]
- if !ok {
+ env := s.Data["environment"]
+ if len(env) == 0 {
return nil, kverrors.New("missing secret field", "field", "environment")
}
- container, ok := s.Data["container"]
- if !ok {
+ container := s.Data["container"]
+ if len(container) == 0 {
return nil, kverrors.New("missing secret field", "field", "container")
}
- name, ok := s.Data["account_name"]
- if !ok {
+ name := s.Data["account_name"]
+ if len(name) == 0 {
return nil, kverrors.New("missing secret field", "field", "account_name")
}
- key, ok := s.Data["account_key"]
- if !ok {
+ key := s.Data["account_key"]
+ if len(key) == 0 {
return nil, kverrors.New("missing secret field", "field", "account_key")
}
@@ -86,14 +64,14 @@ func extractAzureConfigSecret(s *corev1.Secret) (*storage.AzureStorageConfig, er
func extractGCSConfigSecret(s *corev1.Secret) (*storage.GCSStorageConfig, error) {
// Extract and validate mandatory fields
- bucket, ok := s.Data["bucketname"]
- if !ok {
+ bucket := s.Data["bucketname"]
+ if len(bucket) == 0 {
return nil, kverrors.New("missing secret field", "field", "bucketname")
}
// Check if google authentication credentials are provided
- _, ok = s.Data["key.json"]
- if !ok {
+ keyJSON := s.Data["key.json"]
+ if len(keyJSON) == 0 {
return nil, kverrors.New("missing google authentication credentials", "field", "key.json")
}
@@ -104,21 +82,21 @@ func extractGCSConfigSecret(s *corev1.Secret) (*storage.GCSStorageConfig, error)
func extractS3ConfigSecret(s *corev1.Secret) (*storage.S3StorageConfig, error) {
// Extract and validate mandatory fields
- endpoint, ok := s.Data["endpoint"]
- if !ok {
+ endpoint := s.Data["endpoint"]
+ if len(endpoint) == 0 {
return nil, kverrors.New("missing secret field", "field", "endpoint")
}
- buckets, ok := s.Data["bucketnames"]
- if !ok {
+ buckets := s.Data["bucketnames"]
+ if len(buckets) == 0 {
return nil, kverrors.New("missing secret field", "field", "bucketnames")
}
// TODO: buckets are a comma-separated list
- id, ok := s.Data["access_key_id"]
- if !ok {
+ id := s.Data["access_key_id"]
+ if len(id) == 0 {
return nil, kverrors.New("missing secret field", "field", "access_key_id")
}
- secret, ok := s.Data["access_key_secret"]
- if !ok {
+ secret := s.Data["access_key_secret"]
+ if len(secret) == 0 {
return nil, kverrors.New("missing secret field", "field", "access_key_secret")
}
@@ -136,40 +114,40 @@ func extractS3ConfigSecret(s *corev1.Secret) (*storage.S3StorageConfig, error) {
func extractSwiftConfigSecret(s *corev1.Secret) (*storage.SwiftStorageConfig, error) {
// Extract and validate mandatory fields
- url, ok := s.Data["auth_url"]
- if !ok {
+ url := s.Data["auth_url"]
+ if len(url) == 0 {
return nil, kverrors.New("missing secret field", "field", "auth_url")
}
- username, ok := s.Data["username"]
- if !ok {
+ username := s.Data["username"]
+ if len(username) == 0 {
return nil, kverrors.New("missing secret field", "field", "username")
}
- userDomainName, ok := s.Data["user_domain_name"]
- if !ok {
+ userDomainName := s.Data["user_domain_name"]
+ if len(userDomainName) == 0 {
return nil, kverrors.New("missing secret field", "field", "user_domain_name")
}
- userDomainID, ok := s.Data["user_domain_id"]
- if !ok {
+ userDomainID := s.Data["user_domain_id"]
+ if len(userDomainID) == 0 {
return nil, kverrors.New("missing secret field", "field", "user_domain_id")
}
- userID, ok := s.Data["user_id"]
- if !ok {
+ userID := s.Data["user_id"]
+ if len(userID) == 0 {
return nil, kverrors.New("missing secret field", "field", "user_id")
}
- password, ok := s.Data["password"]
- if !ok {
+ password := s.Data["password"]
+ if len(password) == 0 {
return nil, kverrors.New("missing secret field", "field", "password")
}
- domainID, ok := s.Data["domain_id"]
- if !ok {
+ domainID := s.Data["domain_id"]
+ if len(domainID) == 0 {
return nil, kverrors.New("missing secret field", "field", "domain_id")
}
- domainName, ok := s.Data["domain_name"]
- if !ok {
+ domainName := s.Data["domain_name"]
+ if len(domainName) == 0 {
return nil, kverrors.New("missing secret field", "field", "domain_name")
}
- containerName, ok := s.Data["container_name"]
- if !ok {
+ containerName := s.Data["container_name"]
+ if len(containerName) == 0 {
return nil, kverrors.New("missing secret field", "field", "container_name")
}
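A sketch of calling the renamed entry point for an S3 secret; values are illustrative, and only the mandatory keys validated in the hunks above are shown.

package storage_test

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1"
	"github.com/grafana/loki/operator/internal/handlers/internal/storage"
)

func exampleExtract() error {
	s := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "my-s3-secret"},
		Data: map[string][]byte{
			"endpoint":          []byte("https://s3.example.com"),
			"bucketnames":       []byte("loki-chunks"),
			"access_key_id":     []byte("example-id"),
			"access_key_secret": []byte("example-secret"),
		},
	}
	opts, err := storage.ExtractSecret(s, lokiv1beta1.ObjectStorageSecretS3)
	if err != nil {
		return err
	}
	// The secret name now travels inside Options so the manifest code can
	// mount it later without consulting the LokiStack spec again.
	_ = opts.SecretName // "my-s3-secret"
	return nil
}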
diff --git a/operator/internal/handlers/internal/secrets/secrets_test.go b/operator/internal/handlers/internal/storage/secrets_test.go
similarity index 80%
rename from operator/internal/handlers/internal/secrets/secrets_test.go
rename to operator/internal/handlers/internal/storage/secrets_test.go
index de4c195905e91..279c548ac5749 100644
--- a/operator/internal/handlers/internal/secrets/secrets_test.go
+++ b/operator/internal/handlers/internal/storage/secrets_test.go
@@ -1,10 +1,10 @@
-package secrets_test
+package storage_test
import (
"testing"
lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1"
- "github.com/grafana/loki/operator/internal/handlers/internal/secrets"
+ "github.com/grafana/loki/operator/internal/handlers/internal/storage"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
)
@@ -68,7 +68,7 @@ func TestAzureExtract(t *testing.T) {
t.Run(tst.name, func(t *testing.T) {
t.Parallel()
- _, err := secrets.ExtractStorageSecret(tst.secret, lokiv1beta1.ObjectStorageSecretAzure)
+ _, err := storage.ExtractSecret(tst.secret, lokiv1beta1.ObjectStorageSecretAzure)
if !tst.wantErr {
require.NoError(t, err)
}
@@ -115,7 +115,7 @@ func TestGCSExtract(t *testing.T) {
t.Run(tst.name, func(t *testing.T) {
t.Parallel()
- _, err := secrets.ExtractStorageSecret(tst.secret, lokiv1beta1.ObjectStorageSecretGCS)
+ _, err := storage.ExtractSecret(tst.secret, lokiv1beta1.ObjectStorageSecretGCS)
if !tst.wantErr {
require.NoError(t, err)
}
@@ -185,7 +185,7 @@ func TestS3Extract(t *testing.T) {
t.Run(tst.name, func(t *testing.T) {
t.Parallel()
- _, err := secrets.ExtractStorageSecret(tst.secret, lokiv1beta1.ObjectStorageSecretS3)
+ _, err := storage.ExtractSecret(tst.secret, lokiv1beta1.ObjectStorageSecretS3)
if !tst.wantErr {
require.NoError(t, err)
}
@@ -330,70 +330,7 @@ func TestSwiftExtract(t *testing.T) {
t.Run(tst.name, func(t *testing.T) {
t.Parallel()
- _, err := secrets.ExtractStorageSecret(tst.secret, lokiv1beta1.ObjectStorageSecretSwift)
- if !tst.wantErr {
- require.NoError(t, err)
- }
- if tst.wantErr {
- require.NotNil(t, err)
- }
- })
- }
-}
-
-func TestExtractGatewaySecret(t *testing.T) {
- type test struct {
- name string
- tenantName string
- secret *corev1.Secret
- wantErr bool
- }
- table := []test{
- {
- name: "missing clientID",
- tenantName: "tenant-a",
- secret: &corev1.Secret{},
- wantErr: true,
- },
- {
- name: "missing clientSecret",
- tenantName: "tenant-a",
- secret: &corev1.Secret{
- Data: map[string][]byte{
- "clientID": []byte("test"),
- },
- },
- wantErr: true,
- },
- {
- name: "missing issuerCAPath",
- tenantName: "tenant-a",
- secret: &corev1.Secret{
- Data: map[string][]byte{
- "clientID": []byte("test"),
- "clientSecret": []byte("test"),
- },
- },
- wantErr: true,
- },
- {
- name: "all set",
- tenantName: "tenant-a",
- secret: &corev1.Secret{
- Data: map[string][]byte{
- "clientID": []byte("test"),
- "clientSecret": []byte("test"),
- "issuerCAPath": []byte("/tmp/test"),
- },
- },
- },
- }
- for _, tst := range table {
- tst := tst
- t.Run(tst.name, func(t *testing.T) {
- t.Parallel()
-
- _, err := secrets.ExtractGatewaySecret(tst.secret, tst.tenantName)
+ _, err := storage.ExtractSecret(tst.secret, lokiv1beta1.ObjectStorageSecretSwift)
if !tst.wantErr {
require.NoError(t, err)
}
diff --git a/operator/internal/handlers/lokistack_create_or_update.go b/operator/internal/handlers/lokistack_create_or_update.go
index d760620f19db5..b08ec605e7d80 100644
--- a/operator/internal/handlers/lokistack_create_or_update.go
+++ b/operator/internal/handlers/lokistack_create_or_update.go
@@ -9,8 +9,9 @@ import (
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/handlers/internal/gateway"
"github.com/grafana/loki/operator/internal/handlers/internal/rules"
- "github.com/grafana/loki/operator/internal/handlers/internal/secrets"
+ "github.com/grafana/loki/operator/internal/handlers/internal/storage"
"github.com/grafana/loki/operator/internal/manifests"
+ storageoptions "github.com/grafana/loki/operator/internal/manifests/storage"
"github.com/grafana/loki/operator/internal/metrics"
"github.com/grafana/loki/operator/internal/status"
@@ -68,7 +69,7 @@ func CreateOrUpdateLokiStack(
return kverrors.Wrap(err, "failed to lookup lokistack storage secret", "name", key)
}
- storage, err := secrets.ExtractStorageSecret(&storageSecret, stack.Spec.Storage.Secret.Type)
+ objstorage, err := storage.ExtractSecret(&storageSecret, stack.Spec.Storage.Secret.Type)
if err != nil {
return &status.DegradedError{
Message: fmt.Sprintf("Invalid object storage secret contents: %s", err),
@@ -77,6 +78,31 @@ func CreateOrUpdateLokiStack(
}
}
+ if stack.Spec.Storage.TLS != nil {
+ var cm corev1.ConfigMap
+ key := client.ObjectKey{Name: stack.Spec.Storage.TLS.CA, Namespace: stack.Namespace}
+ if err = k.Get(ctx, key, &cm); err != nil {
+ if apierrors.IsNotFound(err) {
+ return &status.DegradedError{
+ Message: "Missing object storage CA config map",
+ Reason: lokiv1beta1.ReasonMissingObjectStorageCAConfigMap,
+ Requeue: false,
+ }
+ }
+ return kverrors.Wrap(err, "failed to lookup lokistack object storage CA config map", "name", key)
+ }
+
+ if !storage.IsValidCAConfigMap(&cm) {
+ return &status.DegradedError{
+ Message: "Invalid object storage CA configmap contents: missing key `service-ca.crt` or no contents",
+ Reason: lokiv1beta1.ReasonInvalidObjectStorageCAConfigMap,
+ Requeue: false,
+ }
+ }
+
+ objstorage.TLS = &storageoptions.TLSConfig{CA: cm.Name}
+ }
+
var (
baseDomain string
tenantSecrets []*manifests.TenantSecrets
@@ -138,7 +164,7 @@ func CreateOrUpdateLokiStack(
GatewayBaseDomain: baseDomain,
Stack: stack.Spec,
Flags: flags,
- ObjectStorage: *storage,
+ ObjectStorage: *objstorage,
AlertingRules: alertingRules,
RecordingRules: recordingRules,
Tenants: manifests.Tenants{
diff --git a/operator/internal/handlers/lokistack_create_or_update_test.go b/operator/internal/handlers/lokistack_create_or_update_test.go
index 5dde215e7e4bb..dd1b3db35bf09 100644
--- a/operator/internal/handlers/lokistack_create_or_update_test.go
+++ b/operator/internal/handlers/lokistack_create_or_update_test.go
@@ -75,6 +75,14 @@ var (
},
Data: map[string][]byte{},
}
+
+ invalidCAConfigMap = corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "some-stack-ca-configmap",
+ Namespace: "some-ns",
+ },
+ Data: map[string]string{},
+ }
)
func TestMain(m *testing.M) {
@@ -724,6 +732,137 @@ func TestCreateOrUpdateLokiStack_WhenInvalidSecret_SetDegraded(t *testing.T) {
require.Equal(t, degradedErr, err)
}
+func TestCreateOrUpdateLokiStack_WhenMissingCAConfigMap_SetDegraded(t *testing.T) {
+ sw := &k8sfakes.FakeStatusWriter{}
+ k := &k8sfakes.FakeClient{}
+ r := ctrl.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "my-stack",
+ Namespace: "some-ns",
+ },
+ }
+
+ degradedErr := &status.DegradedError{
+ Message: "Missing object storage CA config map",
+ Reason: lokiv1beta1.ReasonMissingObjectStorageCAConfigMap,
+ Requeue: false,
+ }
+
+ stack := &lokiv1beta1.LokiStack{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "LokiStack",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-stack",
+ Namespace: "some-ns",
+ UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
+ },
+ Spec: lokiv1beta1.LokiStackSpec{
+ Size: lokiv1beta1.SizeOneXExtraSmall,
+ Storage: lokiv1beta1.ObjectStorageSpec{
+ Secret: lokiv1beta1.ObjectStorageSecretSpec{
+ Name: defaultSecret.Name,
+ Type: lokiv1beta1.ObjectStorageSecretS3,
+ },
+ TLS: &lokiv1beta1.ObjectStorageTLSSpec{
+ CA: "not-existing",
+ },
+ },
+ },
+ }
+
+ // GetStub looks up the CR first, so we need to return our fake stack
+ // return NotFound for everything else to trigger create.
+ k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error {
+ if r.Name == name.Name && r.Namespace == name.Namespace {
+ k.SetClientObject(object, stack)
+ return nil
+ }
+
+ if name.Name == defaultSecret.Name {
+ k.SetClientObject(object, &defaultSecret)
+ return nil
+ }
+
+ return apierrors.NewNotFound(schema.GroupResource{}, "something is not found")
+ }
+
+ k.StatusStub = func() client.StatusWriter { return sw }
+
+ err := handlers.CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, flags)
+
+ // make sure error is returned
+ require.Error(t, err)
+ require.Equal(t, degradedErr, err)
+}
+
+func TestCreateOrUpdateLokiStack_WhenInvalidCAConfigMap_SetDegraded(t *testing.T) {
+ sw := &k8sfakes.FakeStatusWriter{}
+ k := &k8sfakes.FakeClient{}
+ r := ctrl.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "my-stack",
+ Namespace: "some-ns",
+ },
+ }
+
+ degradedErr := &status.DegradedError{
+ Message: "Invalid object storage CA configmap contents: missing key `service-ca.crt` or no contents",
+ Reason: lokiv1beta1.ReasonInvalidObjectStorageCAConfigMap,
+ Requeue: false,
+ }
+
+ stack := &lokiv1beta1.LokiStack{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "LokiStack",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-stack",
+ Namespace: "some-ns",
+ UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
+ },
+ Spec: lokiv1beta1.LokiStackSpec{
+ Size: lokiv1beta1.SizeOneXExtraSmall,
+ Storage: lokiv1beta1.ObjectStorageSpec{
+ Secret: lokiv1beta1.ObjectStorageSecretSpec{
+ Name: defaultSecret.Name,
+ Type: lokiv1beta1.ObjectStorageSecretS3,
+ },
+ TLS: &lokiv1beta1.ObjectStorageTLSSpec{
+ CA: invalidCAConfigMap.Name,
+ },
+ },
+ },
+ }
+
+ // GetStub looks up the CR first, so we need to return our fake stack
+ // return NotFound for everything else to trigger create.
+ k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error {
+ if r.Name == name.Name && r.Namespace == name.Namespace {
+ k.SetClientObject(object, stack)
+ return nil
+ }
+ if name.Name == defaultSecret.Name {
+ k.SetClientObject(object, &defaultSecret)
+ return nil
+ }
+
+ if name.Name == invalidCAConfigMap.Name {
+ k.SetClientObject(object, &invalidCAConfigMap)
+ return nil
+ }
+ return apierrors.NewNotFound(schema.GroupResource{}, "something is not found")
+ }
+
+ k.StatusStub = func() client.StatusWriter { return sw }
+
+ err := handlers.CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, flags)
+
+ // make sure error is returned
+ require.Error(t, err)
+ require.Equal(t, degradedErr, err)
+}
+
func TestCreateOrUpdateLokiStack_WhenInvalidTenantsConfiguration_SetDegraded(t *testing.T) {
sw := &k8sfakes.FakeStatusWriter{}
k := &k8sfakes.FakeClient{}
diff --git a/operator/internal/manifests/compactor.go b/operator/internal/manifests/compactor.go
index 5a49035e43b89..5958ef15e16c7 100644
--- a/operator/internal/manifests/compactor.go
+++ b/operator/internal/manifests/compactor.go
@@ -5,6 +5,7 @@ import (
"path"
"github.com/grafana/loki/operator/internal/manifests/internal/config"
+ "github.com/grafana/loki/operator/internal/manifests/storage"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
@@ -25,9 +26,7 @@ func BuildCompactor(opts Options) ([]client.Object, error) {
}
}
- storageType := opts.Stack.Storage.Secret.Type
- secretName := opts.Stack.Storage.Secret.Name
- if err := configureStatefulSetForStorageType(statefulSet, storageType, secretName); err != nil {
+ if err := storage.ConfigureStatefulSet(statefulSet, opts.ObjectStorage); err != nil {
return nil, err
}
diff --git a/operator/internal/manifests/indexgateway.go b/operator/internal/manifests/indexgateway.go
index 989b28289750b..959066e24e2e1 100644
--- a/operator/internal/manifests/indexgateway.go
+++ b/operator/internal/manifests/indexgateway.go
@@ -5,6 +5,7 @@ import (
"path"
"github.com/grafana/loki/operator/internal/manifests/internal/config"
+ "github.com/grafana/loki/operator/internal/manifests/storage"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
@@ -24,9 +25,7 @@ func BuildIndexGateway(opts Options) ([]client.Object, error) {
}
}
- storageType := opts.Stack.Storage.Secret.Type
- secretName := opts.Stack.Storage.Secret.Name
- if err := configureStatefulSetForStorageType(statefulSet, storageType, secretName); err != nil {
+ if err := storage.ConfigureStatefulSet(statefulSet, opts.ObjectStorage); err != nil {
return nil, err
}
diff --git a/operator/internal/manifests/ingester.go b/operator/internal/manifests/ingester.go
index 8f325ba1d404f..5551ce1ed7570 100644
--- a/operator/internal/manifests/ingester.go
+++ b/operator/internal/manifests/ingester.go
@@ -5,6 +5,7 @@ import (
"path"
"github.com/grafana/loki/operator/internal/manifests/internal/config"
+ "github.com/grafana/loki/operator/internal/manifests/storage"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
@@ -24,9 +25,7 @@ func BuildIngester(opts Options) ([]client.Object, error) {
}
}
- storageType := opts.Stack.Storage.Secret.Type
- secretName := opts.Stack.Storage.Secret.Name
- if err := configureStatefulSetForStorageType(statefulSet, storageType, secretName); err != nil {
+ if err := storage.ConfigureStatefulSet(statefulSet, opts.ObjectStorage); err != nil {
return nil, err
}
diff --git a/operator/internal/manifests/object_storage.go b/operator/internal/manifests/object_storage.go
deleted file mode 100644
index 0e2e9000979a2..0000000000000
--- a/operator/internal/manifests/object_storage.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package manifests
-
-import (
- lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1"
- "github.com/grafana/loki/operator/internal/manifests/storage"
- appsv1 "k8s.io/api/apps/v1"
-)
-
-func configureDeploymentForStorageType(
- d *appsv1.Deployment,
- t lokiv1beta1.ObjectStorageSecretType,
- secretName string) error {
- switch t {
- case lokiv1beta1.ObjectStorageSecretGCS:
- return storage.ConfigureDeployment(d, secretName)
- default:
- return nil
- }
-}
-
-func configureStatefulSetForStorageType(
- d *appsv1.StatefulSet,
- t lokiv1beta1.ObjectStorageSecretType,
- secretName string) error {
- switch t {
- case lokiv1beta1.ObjectStorageSecretGCS:
- return storage.ConfigureStatefulSet(d, secretName)
- default:
- return nil
- }
-}
diff --git a/operator/internal/manifests/querier.go b/operator/internal/manifests/querier.go
index 2c5f6588caba1..8f46602239be4 100644
--- a/operator/internal/manifests/querier.go
+++ b/operator/internal/manifests/querier.go
@@ -5,6 +5,7 @@ import (
"path"
"github.com/grafana/loki/operator/internal/manifests/internal/config"
+ "github.com/grafana/loki/operator/internal/manifests/storage"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -23,9 +24,7 @@ func BuildQuerier(opts Options) ([]client.Object, error) {
}
}
- storageType := opts.Stack.Storage.Secret.Type
- secretName := opts.Stack.Storage.Secret.Name
- if err := configureDeploymentForStorageType(deployment, storageType, secretName); err != nil {
+ if err := storage.ConfigureDeployment(deployment, opts.ObjectStorage); err != nil {
return nil, err
}
diff --git a/operator/internal/manifests/storage/configure.go b/operator/internal/manifests/storage/configure.go
index 7c59c2819a9a6..41fc78d64223a 100644
--- a/operator/internal/manifests/storage/configure.go
+++ b/operator/internal/manifests/storage/configure.go
@@ -1,9 +1,11 @@
package storage
import (
+ "fmt"
"path"
"github.com/ViaQ/logerr/v2/kverrors"
+ lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1"
"github.com/imdario/mergo"
appsv1 "k8s.io/api/apps/v1"
@@ -17,11 +19,56 @@ const (
GCSFileName = "key.json"
secretDirectory = "/etc/storage/secrets"
+ caDirectory = "/etc/storage/ca"
)
+// ConfigureDeployment appends additional pod volumes, container env vars, args and volume mounts
+// based on the object storage type. Currently supported amendments:
+// - GCS: ensure the env var GOOGLE_APPLICATION_CREDENTIALS is set in the container
+// - S3: mount the custom CA ConfigMap when a TLSConfig is given
+func ConfigureDeployment(
+ d *appsv1.Deployment,
+ opts Options,
+) error {
+ switch opts.SharedStore {
+ case lokiv1beta1.ObjectStorageSecretGCS:
+ return configureDeployment(d, opts.SecretName)
+ case lokiv1beta1.ObjectStorageSecretS3:
+ if opts.TLS == nil {
+ return nil
+ }
+
+ return configureDeploymentCA(d, opts.TLS.CA)
+ default:
+ return nil
+ }
+}
+
+// ConfigureStatefulSet appends additional pod volumes, container env vars, args and volume mounts
+// based on the object storage type. Currently supported amendments:
+// - GCS: ensure the env var GOOGLE_APPLICATION_CREDENTIALS is set in the container
+// - S3: mount the custom CA ConfigMap when a TLSConfig is given
+func ConfigureStatefulSet(
+ d *appsv1.StatefulSet,
+ opts Options,
+) error {
+ switch opts.SharedStore {
+ case lokiv1beta1.ObjectStorageSecretGCS:
+ return configureStatefulSet(d, opts.SecretName)
+ case lokiv1beta1.ObjectStorageSecretS3:
+ if opts.TLS == nil {
+ return nil
+ }
+
+ return configureStatefulSetCA(d, opts.TLS.CA)
+ default:
+ return nil
+ }
+}
+
// configureDeployment merges a GCS Object Storage volume into the deployment spec.
// With this, the deployment will expose an environment variable for Google authentication.
-func ConfigureDeployment(d *appsv1.Deployment, secretName string) error {
+func configureDeployment(d *appsv1.Deployment, secretName string) error {
p := ensureCredentialsForGCS(&d.Spec.Template.Spec, secretName)
if err := mergo.Merge(&d.Spec.Template.Spec, p, mergo.WithOverride); err != nil {
@@ -31,9 +78,20 @@ func ConfigureDeployment(d *appsv1.Deployment, secretName string) error {
return nil
}
+// configureDeploymentCA merges an S3 CA ConfigMap volume into the deployment spec.
+func configureDeploymentCA(d *appsv1.Deployment, cmName string) error {
+ p := ensureCAForS3(&d.Spec.Template.Spec, cmName)
+
+ if err := mergo.Merge(&d.Spec.Template.Spec, p, mergo.WithOverride); err != nil {
+ return kverrors.Wrap(err, "failed to merge s3 object storage ca options")
+ }
+
+ return nil
+}
+
// configureStatefulSet merges a GCS Object Storage volume into the statefulset spec.
// With this, the statefulset will expose an environment variable for Google authentication.
-func ConfigureStatefulSet(s *appsv1.StatefulSet, secretName string) error {
+func configureStatefulSet(s *appsv1.StatefulSet, secretName string) error {
p := ensureCredentialsForGCS(&s.Spec.Template.Spec, secretName)
if err := mergo.Merge(&s.Spec.Template.Spec, p, mergo.WithOverride); err != nil {
@@ -43,6 +101,17 @@ func ConfigureStatefulSet(s *appsv1.StatefulSet, secretName string) error {
return nil
}
+// configureStatefulSetCA merges an S3 CA ConfigMap volume into the statefulset spec.
+func configureStatefulSetCA(s *appsv1.StatefulSet, cmName string) error {
+ p := ensureCAForS3(&s.Spec.Template.Spec, cmName)
+
+ if err := mergo.Merge(&s.Spec.Template.Spec, p, mergo.WithOverride); err != nil {
+ return kverrors.Wrap(err, "failed to merge s3 object storage ca options")
+ }
+
+ return nil
+}
+
func ensureCredentialsForGCS(p *corev1.PodSpec, secretName string) corev1.PodSpec {
container := p.Containers[0].DeepCopy()
volumes := p.Volumes
@@ -74,3 +143,36 @@ func ensureCredentialsForGCS(p *corev1.PodSpec, secretName string) corev1.PodSpe
Volumes: volumes,
}
}
+
+func ensureCAForS3(p *corev1.PodSpec, cmName string) corev1.PodSpec {
+ container := p.Containers[0].DeepCopy()
+ volumes := p.Volumes
+
+ volumes = append(volumes, corev1.Volume{
+ Name: cmName,
+ VolumeSource: corev1.VolumeSource{
+ ConfigMap: &corev1.ConfigMapVolumeSource{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: cmName,
+ },
+ },
+ },
+ })
+
+ container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{
+ Name: cmName,
+ ReadOnly: false,
+ MountPath: caDirectory,
+ })
+
+ container.Args = append(container.Args,
+ fmt.Sprintf("-s3.http.ca-file=%s", path.Join(caDirectory, "service-ca.crt")),
+ )
+
+ return corev1.PodSpec{
+ Containers: []corev1.Container{
+ *container,
+ },
+ Volumes: volumes,
+ }
+}
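Putting the new dispatch together, a sketch of what a caller sees for S3 with a custom CA (the deployment and names are illustrative); the expected pod mutations match the test expectations below.

package storage_test

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"

	lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1"
	"github.com/grafana/loki/operator/internal/manifests/storage"
)

func exampleConfigure() error {
	d := &appsv1.Deployment{
		Spec: appsv1.DeploymentSpec{
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{Name: "loki-querier"}},
				},
			},
		},
	}
	opts := storage.Options{
		SecretName:  "my-s3-secret",
		SharedStore: lokiv1beta1.ObjectStorageSecretS3,
		TLS:         &storage.TLSConfig{CA: "my-ca-bundle"},
	}
	// Afterwards the pod mounts ConfigMap "my-ca-bundle" at /etc/storage/ca
	// and the container args gain
	// -s3.http.ca-file=/etc/storage/ca/service-ca.crt.
	return storage.ConfigureDeployment(d, opts)
}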
diff --git a/operator/internal/manifests/object_storage_test.go b/operator/internal/manifests/storage/configure_test.go
similarity index 51%
rename from operator/internal/manifests/object_storage_test.go
rename to operator/internal/manifests/storage/configure_test.go
index e902418a426e1..55c751396cd56 100644
--- a/operator/internal/manifests/object_storage_test.go
+++ b/operator/internal/manifests/storage/configure_test.go
@@ -1,10 +1,9 @@
-package manifests
+package storage_test
import (
"testing"
lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1"
- "github.com/grafana/loki/operator/internal/manifests/internal/config"
"github.com/grafana/loki/operator/internal/manifests/storage"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
@@ -13,18 +12,19 @@ import (
func TestConfigureDeploymentForStorageType(t *testing.T) {
type tt struct {
- desc string
- storageType lokiv1beta1.ObjectStorageSecretType
- secretName string
- dpl *appsv1.Deployment
- want *appsv1.Deployment
+ desc string
+ opts storage.Options
+ dpl *appsv1.Deployment
+ want *appsv1.Deployment
}
tc := []tt{
{
- desc: "object storage other than GCS",
- storageType: lokiv1beta1.ObjectStorageSecretS3,
- secretName: "test",
+ desc: "object storage other than GCS",
+ opts: storage.Options{
+ SecretName: "test",
+ SharedStore: lokiv1beta1.ObjectStorageSecretS3,
+ },
dpl: &appsv1.Deployment{
Spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
@@ -32,26 +32,6 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
Containers: []corev1.Container{
{
Name: "loki-ingester",
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: configVolumeName,
- ReadOnly: false,
- MountPath: config.LokiConfigMountDir,
- },
- },
- },
- },
- Volumes: []corev1.Volume{
- {
- Name: configVolumeName,
- VolumeSource: corev1.VolumeSource{
- ConfigMap: &corev1.ConfigMapVolumeSource{
- DefaultMode: &defaultConfigMapMode,
- LocalObjectReference: corev1.LocalObjectReference{
- Name: lokiConfigMapName("stack-name"),
- },
- },
- },
},
},
},
@@ -65,26 +45,6 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
Containers: []corev1.Container{
{
Name: "loki-ingester",
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: configVolumeName,
- ReadOnly: false,
- MountPath: config.LokiConfigMountDir,
- },
- },
- },
- },
- Volumes: []corev1.Volume{
- {
- Name: configVolumeName,
- VolumeSource: corev1.VolumeSource{
- ConfigMap: &corev1.ConfigMapVolumeSource{
- DefaultMode: &defaultConfigMapMode,
- LocalObjectReference: corev1.LocalObjectReference{
- Name: lokiConfigMapName("stack-name"),
- },
- },
- },
},
},
},
@@ -93,9 +53,11 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
},
},
{
- desc: "object storage GCS",
- storageType: lokiv1beta1.ObjectStorageSecretGCS,
- secretName: "test",
+ desc: "object storage GCS",
+ opts: storage.Options{
+ SecretName: "test",
+ SharedStore: lokiv1beta1.ObjectStorageSecretGCS,
+ },
dpl: &appsv1.Deployment{
Spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
@@ -103,26 +65,6 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
Containers: []corev1.Container{
{
Name: "loki-ingester",
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: configVolumeName,
- ReadOnly: false,
- MountPath: config.LokiConfigMountDir,
- },
- },
- },
- },
- Volumes: []corev1.Volume{
- {
- Name: configVolumeName,
- VolumeSource: corev1.VolumeSource{
- ConfigMap: &corev1.ConfigMapVolumeSource{
- DefaultMode: &defaultConfigMapMode,
- LocalObjectReference: corev1.LocalObjectReference{
- Name: lokiConfigMapName("stack-name"),
- },
- },
- },
},
},
},
@@ -137,11 +79,7 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
{
Name: "loki-ingester",
VolumeMounts: []corev1.VolumeMount{
- {
- Name: configVolumeName,
- ReadOnly: false,
- MountPath: config.LokiConfigMountDir,
- },
+
{
Name: "test",
ReadOnly: false,
@@ -157,17 +95,6 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
},
},
Volumes: []corev1.Volume{
- {
- Name: configVolumeName,
- VolumeSource: corev1.VolumeSource{
- ConfigMap: &corev1.ConfigMapVolumeSource{
- DefaultMode: &defaultConfigMapMode,
- LocalObjectReference: corev1.LocalObjectReference{
- Name: lokiConfigMapName("stack-name"),
- },
- },
- },
- },
{
Name: "test",
VolumeSource: corev1.VolumeSource{
@@ -188,7 +115,7 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
tc := tc
t.Run(tc.desc, func(t *testing.T) {
t.Parallel()
- err := configureDeploymentForStorageType(tc.dpl, tc.storageType, tc.secretName)
+ err := storage.ConfigureDeployment(tc.dpl, tc.opts)
require.NoError(t, err)
require.Equal(t, tc.want, tc.dpl)
})
@@ -197,18 +124,19 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
func TestConfigureStatefulSetForStorageType(t *testing.T) {
type tt struct {
- desc string
- storageType lokiv1beta1.ObjectStorageSecretType
- secretName string
- sts *appsv1.StatefulSet
- want *appsv1.StatefulSet
+ desc string
+ opts storage.Options
+ sts *appsv1.StatefulSet
+ want *appsv1.StatefulSet
}
tc := []tt{
{
- desc: "object storage other than GCS",
- storageType: lokiv1beta1.ObjectStorageSecretS3,
- secretName: "test",
+ desc: "object storage other than GCS",
+ opts: storage.Options{
+ SecretName: "test",
+ SharedStore: lokiv1beta1.ObjectStorageSecretS3,
+ },
sts: &appsv1.StatefulSet{
Spec: appsv1.StatefulSetSpec{
Template: corev1.PodTemplateSpec{
@@ -216,26 +144,39 @@ func TestConfigureStatefulSetForStorageType(t *testing.T) {
Containers: []corev1.Container{
{
Name: "loki-ingester",
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: configVolumeName,
- ReadOnly: false,
- MountPath: config.LokiConfigMountDir,
- },
- },
},
},
- Volumes: []corev1.Volume{
+ },
+ },
+ },
+ },
+ want: &appsv1.StatefulSet{
+ Spec: appsv1.StatefulSetSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
{
- Name: configVolumeName,
- VolumeSource: corev1.VolumeSource{
- ConfigMap: &corev1.ConfigMapVolumeSource{
- DefaultMode: &defaultConfigMapMode,
- LocalObjectReference: corev1.LocalObjectReference{
- Name: lokiConfigMapName("stack-name"),
- },
- },
- },
+ Name: "loki-ingester",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "object storage GCS",
+ opts: storage.Options{
+ SecretName: "test",
+ SharedStore: lokiv1beta1.ObjectStorageSecretGCS,
+ },
+ sts: &appsv1.StatefulSet{
+ Spec: appsv1.StatefulSetSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "loki-ingester",
},
},
},
@@ -251,22 +192,25 @@ func TestConfigureStatefulSetForStorageType(t *testing.T) {
Name: "loki-ingester",
VolumeMounts: []corev1.VolumeMount{
{
- Name: configVolumeName,
+ Name: "test",
ReadOnly: false,
- MountPath: config.LokiConfigMountDir,
+ MountPath: "/etc/storage/secrets",
+ },
+ },
+ Env: []corev1.EnvVar{
+ {
+ Name: storage.EnvGoogleApplicationCredentials,
+ Value: "/etc/storage/secrets/key.json",
},
},
},
},
Volumes: []corev1.Volume{
{
- Name: configVolumeName,
+ Name: "test",
VolumeSource: corev1.VolumeSource{
- ConfigMap: &corev1.ConfigMapVolumeSource{
- DefaultMode: &defaultConfigMapMode,
- LocalObjectReference: corev1.LocalObjectReference{
- Name: lokiConfigMapName("stack-name"),
- },
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: "test",
},
},
},
@@ -276,34 +220,109 @@ func TestConfigureStatefulSetForStorageType(t *testing.T) {
},
},
},
+ }
+
+ for _, tc := range tc {
+ tc := tc
+ t.Run(tc.desc, func(t *testing.T) {
+ t.Parallel()
+ err := storage.ConfigureStatefulSet(tc.sts, tc.opts)
+ require.NoError(t, err)
+ require.Equal(t, tc.want, tc.sts)
+ })
+ }
+}
+
+func TestConfigureDeploymentForStorageCA(t *testing.T) {
+ type tt struct {
+ desc string
+ opts storage.Options
+ dpl *appsv1.Deployment
+ want *appsv1.Deployment
+ }
+
+ tc := []tt{
{
- desc: "object storage GCS",
- storageType: lokiv1beta1.ObjectStorageSecretGCS,
- secretName: "test",
- sts: &appsv1.StatefulSet{
- Spec: appsv1.StatefulSetSpec{
+ desc: "object storage other than S3",
+ opts: storage.Options{
+ SecretName: "test",
+ SharedStore: lokiv1beta1.ObjectStorageSecretAzure,
+ },
+ dpl: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
- Name: "loki-ingester",
+ Name: "loki-querier",
+ },
+ },
+ },
+ },
+ },
+ },
+ want: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "loki-querier",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "object storage S3",
+ opts: storage.Options{
+ SecretName: "test",
+ SharedStore: lokiv1beta1.ObjectStorageSecretS3,
+ TLS: &storage.TLSConfig{
+ CA: "test",
+ },
+ },
+ dpl: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "loki-querier",
+ },
+ },
+ },
+ },
+ },
+ },
+ want: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "loki-querier",
VolumeMounts: []corev1.VolumeMount{
{
- Name: configVolumeName,
+ Name: "test",
ReadOnly: false,
- MountPath: config.LokiConfigMountDir,
+ MountPath: "/etc/storage/ca",
},
},
+ Args: []string{
+ "-s3.http.ca-file=/etc/storage/ca/service-ca.crt",
+ },
},
},
Volumes: []corev1.Volume{
{
- Name: configVolumeName,
+ Name: "test",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
- DefaultMode: &defaultConfigMapMode,
LocalObjectReference: corev1.LocalObjectReference{
- Name: lokiConfigMapName("stack-name"),
+ Name: "test",
},
},
},
@@ -313,6 +332,87 @@ func TestConfigureStatefulSetForStorageType(t *testing.T) {
},
},
},
+ },
+ }
+
+ for _, tc := range tc {
+ tc := tc
+ t.Run(tc.desc, func(t *testing.T) {
+ t.Parallel()
+ err := storage.ConfigureDeployment(tc.dpl, tc.opts)
+ require.NoError(t, err)
+ require.Equal(t, tc.want, tc.dpl)
+ })
+ }
+}
+
+func TestConfigureStatefulSetForStorageCA(t *testing.T) {
+ type tt struct {
+ desc string
+ opts storage.Options
+ sts *appsv1.StatefulSet
+ want *appsv1.StatefulSet
+ }
+
+ tc := []tt{
+ {
+ desc: "object storage other than S3",
+ opts: storage.Options{
+ SecretName: "test",
+ SharedStore: lokiv1beta1.ObjectStorageSecretAzure,
+ TLS: &storage.TLSConfig{
+ CA: "test",
+ },
+ },
+ sts: &appsv1.StatefulSet{
+ Spec: appsv1.StatefulSetSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "loki-ingester",
+ },
+ },
+ },
+ },
+ },
+ },
+ want: &appsv1.StatefulSet{
+ Spec: appsv1.StatefulSetSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "loki-ingester",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "object storage S3",
+ opts: storage.Options{
+ SecretName: "test",
+ SharedStore: lokiv1beta1.ObjectStorageSecretS3,
+ TLS: &storage.TLSConfig{
+ CA: "test",
+ },
+ },
+ sts: &appsv1.StatefulSet{
+ Spec: appsv1.StatefulSetSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "loki-ingester",
+ },
+ },
+ },
+ },
+ },
+ },
want: &appsv1.StatefulSet{
Spec: appsv1.StatefulSetSpec{
Template: corev1.PodTemplateSpec{
@@ -321,45 +421,28 @@ func TestConfigureStatefulSetForStorageType(t *testing.T) {
{
Name: "loki-ingester",
VolumeMounts: []corev1.VolumeMount{
- {
- Name: configVolumeName,
- ReadOnly: false,
- MountPath: config.LokiConfigMountDir,
- },
{
Name: "test",
ReadOnly: false,
- MountPath: "/etc/storage/secrets",
+ MountPath: "/etc/storage/ca",
},
},
- Env: []corev1.EnvVar{
- {
- Name: storage.EnvGoogleApplicationCredentials,
- Value: "/etc/storage/secrets/key.json",
- },
+ Args: []string{
+ "-s3.http.ca-file=/etc/storage/ca/service-ca.crt",
},
},
},
Volumes: []corev1.Volume{
{
- Name: configVolumeName,
+ Name: "test",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
- DefaultMode: &defaultConfigMapMode,
LocalObjectReference: corev1.LocalObjectReference{
- Name: lokiConfigMapName("stack-name"),
+ Name: "test",
},
},
},
},
- {
- Name: "test",
- VolumeSource: corev1.VolumeSource{
- Secret: &corev1.SecretVolumeSource{
- SecretName: "test",
- },
- },
- },
},
},
},
@@ -372,7 +455,7 @@ func TestConfigureStatefulSetForStorageType(t *testing.T) {
tc := tc
t.Run(tc.desc, func(t *testing.T) {
t.Parallel()
- err := configureStatefulSetForStorageType(tc.sts, tc.storageType, tc.secretName)
+ err := storage.ConfigureStatefulSet(tc.sts, tc.opts)
require.NoError(t, err)
require.Equal(t, tc.want, tc.sts)
})
diff --git a/operator/internal/manifests/storage/options.go b/operator/internal/manifests/storage/options.go
index 465e45424f845..a23451c780e44 100644
--- a/operator/internal/manifests/storage/options.go
+++ b/operator/internal/manifests/storage/options.go
@@ -7,11 +7,13 @@ import (
// Options is used to configure Loki to integrate with
// supported object storages.
type Options struct {
+ SecretName string
SharedStore lokiv1beta1.ObjectStorageSecretType
Azure *AzureStorageConfig
GCS *GCSStorageConfig
S3 *S3StorageConfig
Swift *SwiftStorageConfig
+ TLS *TLSConfig
}
// AzureStorageConfig for Azure storage config
@@ -53,3 +55,9 @@ type SwiftStorageConfig struct {
Region string
Container string
}
+
+// TLSConfig for object storage endpoints. Currently supported only by:
+// - S3
+type TLSConfig struct {
+ CA string
+}
|
operator
|
Add support for custom S3 CA (#6198)
|
2ca1ac66a3bcebe9b2eb139c6aecc6820c840df9
|
2024-06-19 13:58:50
|
Vladyslav Diachenko
|
feat: flush not owned streams (#13254)
| false
|
diff --git a/pkg/ingester/flush.go b/pkg/ingester/flush.go
index 81407abcb2e25..fbc571e6d14ce 100644
--- a/pkg/ingester/flush.go
+++ b/pkg/ingester/flush.go
@@ -33,11 +33,12 @@ const (
nameLabel = "__name__"
logsValue = "logs"
- flushReasonIdle = "idle"
- flushReasonMaxAge = "max_age"
- flushReasonForced = "forced"
- flushReasonFull = "full"
- flushReasonSynced = "synced"
+ flushReasonIdle = "idle"
+ flushReasonMaxAge = "max_age"
+ flushReasonForced = "forced"
+ flushReasonNotOwned = "not_owned"
+ flushReasonFull = "full"
+ flushReasonSynced = "synced"
)
// Note: this is called both during the WAL replay (zero or more times)
@@ -124,7 +125,7 @@ func (i *Ingester) sweepStream(instance *instance, stream *stream, immediate boo
lastChunk := stream.chunks[len(stream.chunks)-1]
shouldFlush, _ := i.shouldFlushChunk(&lastChunk)
- if len(stream.chunks) == 1 && !immediate && !shouldFlush {
+ if len(stream.chunks) == 1 && !immediate && !shouldFlush && !instance.ownedStreamsSvc.isStreamNotOwned(stream.fp) {
return
}
@@ -217,10 +218,14 @@ func (i *Ingester) collectChunksToFlush(instance *instance, fp model.Fingerprint
stream.chunkMtx.Lock()
defer stream.chunkMtx.Unlock()
+ notOwnedStream := instance.ownedStreamsSvc.isStreamNotOwned(fp)
var result []*chunkDesc
for j := range stream.chunks {
shouldFlush, reason := i.shouldFlushChunk(&stream.chunks[j])
+ if !shouldFlush && notOwnedStream {
+ shouldFlush, reason = true, flushReasonNotOwned
+ }
if immediate || shouldFlush {
// Ensure no more writes happen to this chunk.
if !stream.chunks[j].closed {
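Restated as a standalone predicate, the new per-chunk decision looks like this (a sketch in package ingester; the helper name is illustrative, only flushReasonNotOwned comes from the change):

// flushDecision mirrors collectChunksToFlush: a chunk that would not flush
// for any of the usual reasons is still flushed once its stream's
// fingerprint is marked as not owned by this ingester.
func flushDecision(shouldFlush bool, reason string, notOwned bool) (bool, string) {
	if !shouldFlush && notOwned {
		return true, flushReasonNotOwned
	}
	return shouldFlush, reason
}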
diff --git a/pkg/ingester/flush_test.go b/pkg/ingester/flush_test.go
index 460a50ffc8fac..1287be3d4bfdb 100644
--- a/pkg/ingester/flush_test.go
+++ b/pkg/ingester/flush_test.go
@@ -253,6 +253,56 @@ func TestFlushingCollidingLabels(t *testing.T) {
}
}
+func Test_flush_not_owned_stream(t *testing.T) {
+ cfg := defaultIngesterTestConfig(t)
+ cfg.FlushCheckPeriod = time.Millisecond * 100
+ cfg.MaxChunkAge = time.Minute
+ cfg.MaxChunkIdle = time.Hour
+
+ store, ing := newTestStore(t, cfg, nil)
+ defer store.Stop()
+
+ now := time.Unix(0, 0)
+
+ entries := []logproto.Entry{
+ {Timestamp: now.Add(time.Nanosecond), Line: "1"},
+ {Timestamp: now.Add(time.Minute), Line: "2"},
+ }
+
+ labelSet := model.LabelSet{"app": "l"}
+ req := &logproto.PushRequest{Streams: []logproto.Stream{
+ {Labels: labelSet.String(), Entries: entries},
+ }}
+
+ const userID = "testUser"
+ ctx := user.InjectOrgID(context.Background(), userID)
+
+ _, err := ing.Push(ctx, req)
+ require.NoError(t, err)
+
+ time.Sleep(2 * cfg.FlushCheckPeriod)
+
+ // ensure chunk is not flushed after flush period elapses
+ store.checkData(t, map[string][]logproto.Stream{})
+
+ instance, found := ing.getInstanceByID(userID)
+ require.True(t, found)
+ fingerprint := instance.getHashForLabels(labels.FromStrings("app", "l"))
+ require.Equal(t, model.Fingerprint(16794418009594958), fingerprint)
+ instance.ownedStreamsSvc.trackStreamOwnership(fingerprint, false)
+
+ time.Sleep(2 * cfg.FlushCheckPeriod)
+
+ // assert the stream with both entries is now flushed
+ store.checkData(t, map[string][]logproto.Stream{
+ userID: {
+ {Labels: labelSet.String(), Entries: entries},
+ },
+ })
+
+ require.NoError(t, services.StopAndAwaitTerminated(context.Background(), ing))
+}
+
func TestFlushMaxAge(t *testing.T) {
cfg := defaultIngesterTestConfig(t)
cfg.FlushCheckPeriod = time.Millisecond * 100
diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go
index 65389a3cb04a0..1d30e7e23ece4 100644
--- a/pkg/ingester/instance.go
+++ b/pkg/ingester/instance.go
@@ -357,7 +357,8 @@ func (i *instance) onStreamCreated(s *stream) {
i.streamsCreatedTotal.Inc()
i.addTailersToNewStream(s)
streamsCountStats.Add(1)
- i.ownedStreamsSvc.incOwnedStreamCount()
+ // we count a newly created stream as owned
+ i.ownedStreamsSvc.trackStreamOwnership(s.fp, true)
if i.configs.LogStreamCreation(i.instanceID) {
level.Debug(util_log.Logger).Log(
"msg", "successfully created stream",
@@ -421,7 +422,7 @@ func (i *instance) removeStream(s *stream) {
memoryStreams.WithLabelValues(i.instanceID).Dec()
memoryStreamsLabelsBytes.Sub(float64(len(s.labels.String())))
streamsCountStats.Add(-1)
- i.ownedStreamsSvc.decOwnedStreamCount()
+ i.ownedStreamsSvc.trackRemovedStream(s.fp)
}
}
@@ -1181,11 +1182,7 @@ func (i *instance) updateOwnedStreams(ownedTokenRange ring.TokenRanges) error {
i.streams.WithLock(func() {
i.ownedStreamsSvc.resetStreamCounts()
err = i.streams.ForEach(func(s *stream) (bool, error) {
- if ownedTokenRange.IncludesKey(uint32(s.fp)) {
- i.ownedStreamsSvc.incOwnedStreamCount()
- } else {
- i.ownedStreamsSvc.incNotOwnedStreamCount()
- }
+ i.ownedStreamsSvc.trackStreamOwnership(s.fp, ownedTokenRange.IncludesKey(uint32(s.fp)))
return true, nil
})
})
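The stream-ownership lifecycle after this change, condensed from the three call sites above into one sketch (the wrapper function is illustrative, not part of the commit):

package ingester

import (
	"github.com/grafana/dskit/ring"
	"github.com/prometheus/common/model"
)

// streamOwnershipLifecycle condenses the calls added in this change.
func streamOwnershipLifecycle(svc *ownedStreamService, fp model.Fingerprint, owned ring.TokenRanges) {
	// Creation: every new stream starts out as owned.
	svc.trackStreamOwnership(fp, true)

	// Recalculation: reset, then re-classify each stream by its token range.
	svc.resetStreamCounts()
	svc.trackStreamOwnership(fp, owned.IncludesKey(uint32(fp)))

	// Removal: decrement the correct counter based on fingerprint membership.
	svc.trackRemovedStream(fp)
}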
diff --git a/pkg/ingester/owned_streams.go b/pkg/ingester/owned_streams.go
index db9d0e94715fe..55f7eb480482b 100644
--- a/pkg/ingester/owned_streams.go
+++ b/pkg/ingester/owned_streams.go
@@ -5,6 +5,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
+ "github.com/prometheus/common/model"
"go.uber.org/atomic"
"github.com/grafana/loki/v3/pkg/util/constants"
@@ -17,19 +18,20 @@ var notOwnedStreamsMetric = promauto.NewGaugeVec(prometheus.GaugeOpts{
}, []string{"tenant"})
type ownedStreamService struct {
- tenantID string
- limiter *Limiter
- fixedLimit *atomic.Int32
- ownedStreamCount int
- notOwnedStreamCount int
- lock sync.RWMutex
+ tenantID string
+ limiter *Limiter
+ fixedLimit *atomic.Int32
+ ownedStreamCount int
+ lock sync.RWMutex
+ notOwnedStreams map[model.Fingerprint]any
}
func newOwnedStreamService(tenantID string, limiter *Limiter) *ownedStreamService {
svc := &ownedStreamService{
- tenantID: tenantID,
- limiter: limiter,
- fixedLimit: atomic.NewInt32(0),
+ tenantID: tenantID,
+ limiter: limiter,
+ fixedLimit: atomic.NewInt32(0),
+ notOwnedStreams: make(map[model.Fingerprint]any),
}
svc.updateFixedLimit()
@@ -51,25 +53,24 @@ func (s *ownedStreamService) getFixedLimit() int {
return int(s.fixedLimit.Load())
}
-func (s *ownedStreamService) incOwnedStreamCount() {
- s.lock.Lock()
- defer s.lock.Unlock()
- s.ownedStreamCount++
-}
-
-func (s *ownedStreamService) incNotOwnedStreamCount() {
+func (s *ownedStreamService) trackStreamOwnership(fp model.Fingerprint, owned bool) {
s.lock.Lock()
defer s.lock.Unlock()
+ if owned {
+ s.ownedStreamCount++
+ return
+ }
notOwnedStreamsMetric.WithLabelValues(s.tenantID).Inc()
- s.notOwnedStreamCount++
+ s.notOwnedStreams[fp] = nil
}
-func (s *ownedStreamService) decOwnedStreamCount() {
+func (s *ownedStreamService) trackRemovedStream(fp model.Fingerprint) {
s.lock.Lock()
defer s.lock.Unlock()
- if s.notOwnedStreamCount > 0 {
+
+ if _, notOwned := s.notOwnedStreams[fp]; notOwned {
notOwnedStreamsMetric.WithLabelValues(s.tenantID).Dec()
- s.notOwnedStreamCount--
+ delete(s.notOwnedStreams, fp)
return
}
s.ownedStreamCount--
@@ -79,6 +80,14 @@ func (s *ownedStreamService) resetStreamCounts() {
s.lock.Lock()
defer s.lock.Unlock()
s.ownedStreamCount = 0
- s.notOwnedStreamCount = 0
notOwnedStreamsMetric.WithLabelValues(s.tenantID).Set(0)
+ s.notOwnedStreams = make(map[model.Fingerprint]any)
+}
+
+func (s *ownedStreamService) isStreamNotOwned(fp model.Fingerprint) bool {
+ s.lock.RLock()
+ defer s.lock.RUnlock()
+
+ _, notOwned := s.notOwnedStreams[fp]
+ return notOwned
}
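A sketch of the per-call semantics of the reworked service (fingerprints and tenant name are illustrative; package ingester):

package ingester

import "github.com/prometheus/common/model"

func exampleOwnership(limiter *Limiter) {
	svc := newOwnedStreamService("tenant-a", limiter)

	svc.trackStreamOwnership(model.Fingerprint(1), true)  // owned count: 1
	svc.trackStreamOwnership(model.Fingerprint(2), false) // not-owned set and metric: 1

	_ = svc.isStreamNotOwned(model.Fingerprint(2)) // true; consulted by the flush loop

	svc.trackRemovedStream(model.Fingerprint(2)) // not-owned set and metric back to 0
	svc.trackRemovedStream(model.Fingerprint(1)) // owned count back to 0
}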
diff --git a/pkg/ingester/owned_streams_test.go b/pkg/ingester/owned_streams_test.go
index 876954b8579a0..7f114922fa447 100644
--- a/pkg/ingester/owned_streams_test.go
+++ b/pkg/ingester/owned_streams_test.go
@@ -4,6 +4,7 @@ import (
"sync"
"testing"
+ "github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/grafana/loki/v3/pkg/validation"
@@ -28,51 +29,60 @@ func Test_OwnedStreamService(t *testing.T) {
service.updateFixedLimit()
require.Equal(t, 100, service.getFixedLimit())
- service.incOwnedStreamCount()
- service.incOwnedStreamCount()
- service.incOwnedStreamCount()
+ service.trackStreamOwnership(model.Fingerprint(1), true)
+ service.trackStreamOwnership(model.Fingerprint(2), true)
+ service.trackStreamOwnership(model.Fingerprint(3), true)
require.Equal(t, 3, service.getOwnedStreamCount())
+ require.Len(t, service.notOwnedStreams, 0)
- service.incOwnedStreamCount()
- service.decOwnedStreamCount()
- service.notOwnedStreamCount = 1
- service.ownedStreamCount = 2
- require.Equal(t, 2, service.getOwnedStreamCount())
- require.Equal(t, 1, service.notOwnedStreamCount)
+ service.resetStreamCounts()
+ service.trackStreamOwnership(model.Fingerprint(3), true)
+ service.trackStreamOwnership(model.Fingerprint(3), false)
+ require.Equal(t, 1, service.getOwnedStreamCount(),
+ "owned streams count must not be changed because not owned stream can be reported only by recalculate_owned_streams job that resets the counters before checking all the streams")
+ require.Len(t, service.notOwnedStreams, 1)
+ require.True(t, service.isStreamNotOwned(model.Fingerprint(3)))
+
+ service.resetStreamCounts()
+ service.trackStreamOwnership(model.Fingerprint(1), true)
+ service.trackStreamOwnership(model.Fingerprint(2), true)
+ service.trackStreamOwnership(model.Fingerprint(3), false)
- service.decOwnedStreamCount()
- require.Equal(t, 2, service.getOwnedStreamCount(), "owned stream count must be decremented only when notOwnedStreamCount is set to 0")
- require.Equal(t, 0, service.notOwnedStreamCount)
+ service.trackRemovedStream(model.Fingerprint(3))
+ require.Equal(t, 2, service.getOwnedStreamCount(), "owned stream count must be decremented only when notOwnedStreams does not contain this fingerprint")
+ require.Len(t, service.notOwnedStreams, 0)
- service.decOwnedStreamCount()
+ service.trackRemovedStream(model.Fingerprint(2))
require.Equal(t, 1, service.getOwnedStreamCount())
- require.Equal(t, 0, service.notOwnedStreamCount, "notOwnedStreamCount must not be decremented lower than 0")
+ require.Len(t, service.notOwnedStreams, 0)
group := sync.WaitGroup{}
- group.Add(200)
+ group.Add(100)
for i := 0; i < 100; i++ {
- go func() {
+ go func(i int) {
defer group.Done()
- service.incOwnedStreamCount()
- }()
+ service.trackStreamOwnership(model.Fingerprint(i+1000), true)
+ }(i)
}
+ group.Wait()
+ group.Add(100)
for i := 0; i < 100; i++ {
- go func() {
+ go func(i int) {
defer group.Done()
- service.decOwnedStreamCount()
- }()
+ service.trackRemovedStream(model.Fingerprint(i + 1000))
+ }(i)
}
group.Wait()
require.Equal(t, 1, service.getOwnedStreamCount(), "owned stream count must not be changed")
// simulate the effect from the recalculation job
- service.notOwnedStreamCount = 1
- service.ownedStreamCount = 2
+ service.trackStreamOwnership(model.Fingerprint(44), false)
+ service.trackStreamOwnership(model.Fingerprint(45), true)
service.resetStreamCounts()
require.Equal(t, 0, service.getOwnedStreamCount())
- require.Equal(t, 0, service.notOwnedStreamCount)
+ require.Len(t, service.notOwnedStreams, 0)
}
diff --git a/pkg/ingester/recalculate_owned_streams_test.go b/pkg/ingester/recalculate_owned_streams_test.go
index d2752fbd76499..dda027ca2e443 100644
--- a/pkg/ingester/recalculate_owned_streams_test.go
+++ b/pkg/ingester/recalculate_owned_streams_test.go
@@ -96,7 +96,7 @@ func Test_recalculateOwnedStreams_recalculate(t *testing.T) {
createStream(t, tenant, 250)
require.Equal(t, 7, tenant.ownedStreamsSvc.ownedStreamCount)
- require.Equal(t, 0, tenant.ownedStreamsSvc.notOwnedStreamCount)
+ require.Len(t, tenant.ownedStreamsSvc.notOwnedStreams, 0)
mockTenantsSupplier := &mockTenantsSuplier{tenants: []*instance{tenant}}
@@ -110,7 +110,7 @@ func Test_recalculateOwnedStreams_recalculate(t *testing.T) {
require.Equal(t, 50, tenant.ownedStreamsSvc.getFixedLimit(), "fixed limit must be updated after recalculation")
}
require.Equal(t, testData.expectedOwnedStreamCount, tenant.ownedStreamsSvc.ownedStreamCount)
- require.Equal(t, testData.expectedNotOwnedStreamCount, tenant.ownedStreamsSvc.notOwnedStreamCount)
+ require.Len(t, tenant.ownedStreamsSvc.notOwnedStreams, testData.expectedNotOwnedStreamCount)
})
}
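The heart of this change is replacing a bare `notOwnedStreamCount` integer with a set of fingerprints, so that removing a stream can tell whether it was previously marked not-owned and decrement the right counter. A minimal sketch of that pattern, using only the standard library — the Prometheus gauge updates are omitted, and `ownershipTracker` is an illustrative name, not Loki's actual type:

```go
package main

import (
	"fmt"
	"sync"
)

// Fingerprint stands in for model.Fingerprint.
type Fingerprint uint64

type ownershipTracker struct {
	mu         sync.RWMutex
	ownedCount int
	notOwned   map[Fingerprint]struct{}
}

func newOwnershipTracker() *ownershipTracker {
	return &ownershipTracker{notOwned: make(map[Fingerprint]struct{})}
}

// track records a stream as owned or not owned by this ingester.
func (t *ownershipTracker) track(fp Fingerprint, owned bool) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if owned {
		t.ownedCount++
		return
	}
	t.notOwned[fp] = struct{}{}
}

// remove decrements the right counter: the set tells us whether the
// removed stream had been marked not-owned.
func (t *ownershipTracker) remove(fp Fingerprint) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if _, ok := t.notOwned[fp]; ok {
		delete(t.notOwned, fp)
		return
	}
	t.ownedCount--
}

func main() {
	tr := newOwnershipTracker()
	tr.track(1, true)
	tr.track(2, false)
	tr.remove(2)                                 // hits the not-owned set
	fmt.Println(tr.ownedCount, len(tr.notOwned)) // 1 0
}
```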
|
feat
|
flush not owned streams (#13254)
|
ddf3cfbba7a6529a6902036c486b523b588818e3
|
2025-02-26 14:42:01
|
Robert Jacob
|
fix(operator): Update maximum OpenShift version (#16443)
| false
|
diff --git a/operator/bundle/openshift/metadata/properties.yaml b/operator/bundle/openshift/metadata/properties.yaml
index 15834c6fb28f5..5adb262ebe822 100644
--- a/operator/bundle/openshift/metadata/properties.yaml
+++ b/operator/bundle/openshift/metadata/properties.yaml
@@ -1,3 +1,3 @@
properties:
- type: olm.maxOpenShiftVersion
- value: 4.18
+ value: 4.19
|
fix
|
Update maximum OpenShift version (#16443)
|
8b34751e170c26292c378b94706ea3207916e27d
|
2024-05-03 11:41:12
|
Owen Diehl
|
chore(blooms): additional spans for bloom read path (#12866)
| false
|
diff --git a/pkg/storage/stores/shipper/bloomshipper/client.go b/pkg/storage/stores/shipper/bloomshipper/client.go
index 1c2314691caef..56e81a4d3fe40 100644
--- a/pkg/storage/stores/shipper/bloomshipper/client.go
+++ b/pkg/storage/stores/shipper/bloomshipper/client.go
@@ -23,6 +23,7 @@ import (
"github.com/grafana/loki/v3/pkg/storage/config"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb"
"github.com/grafana/loki/v3/pkg/util/encoding"
+ "github.com/grafana/loki/v3/pkg/util/spanlogger"
)
const (
@@ -480,12 +481,25 @@ func newCachedListOpObjectClient(oc client.ObjectClient, ttl, interval time.Dura
}
func (c *cachedListOpObjectClient) List(ctx context.Context, prefix string, delimiter string) ([]client.StorageObject, []client.StorageCommonPrefix, error) {
+ var (
+ logger = spanlogger.FromContext(ctx)
+ start = time.Now()
+ cacheDur time.Duration
+ )
+ defer func() {
+ logger.LogKV(
+ "cache_duration", cacheDur,
+ "total_duration", time.Since(start),
+ )
+ }()
+
if delimiter != "" {
return nil, nil, fmt.Errorf("does not support LIST calls with delimiter: %s", delimiter)
}
c.mtx.RLock()
cached, found := c.cache[prefix]
c.mtx.RUnlock()
+ cacheDur = time.Since(start)
if found {
return cached.objects, cached.prefixes, nil
}
diff --git a/pkg/storage/stores/shipper/bloomshipper/fetcher.go b/pkg/storage/stores/shipper/bloomshipper/fetcher.go
index de02d1effba8c..69715158950e0 100644
--- a/pkg/storage/stores/shipper/bloomshipper/fetcher.go
+++ b/pkg/storage/stores/shipper/bloomshipper/fetcher.go
@@ -19,6 +19,7 @@ import (
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
"github.com/grafana/loki/v3/pkg/storage/chunk/cache"
"github.com/grafana/loki/v3/pkg/util/constants"
+ "github.com/grafana/loki/v3/pkg/util/spanlogger"
)
var downloadQueueCapacity = 10000
@@ -119,6 +120,8 @@ func (f *Fetcher) Close() {
// FetchMetas implements fetcher
func (f *Fetcher) FetchMetas(ctx context.Context, refs []MetaRef) ([]Meta, error) {
+ logger := spanlogger.FromContextWithFallback(ctx, f.logger)
+
if ctx.Err() != nil {
return nil, errors.Wrap(ctx.Err(), "fetch Metas")
}
@@ -127,9 +130,12 @@ func (f *Fetcher) FetchMetas(ctx context.Context, refs []MetaRef) ([]Meta, error
for _, ref := range refs {
keys = append(keys, f.client.Meta(ref).Addr())
}
+
+ cacheStart := time.Now()
cacheHits, cacheBufs, _, err := f.metasCache.Fetch(ctx, keys)
+ cacheDur := time.Since(cacheStart)
if err != nil {
- level.Error(f.logger).Log("msg", "failed to fetch metas from cache", "err", err)
+ level.Error(logger).Log("msg", "failed to fetch metas from cache", "err", err)
return nil, nil
}
@@ -138,16 +144,31 @@ func (f *Fetcher) FetchMetas(ctx context.Context, refs []MetaRef) ([]Meta, error
return nil, err
}
+ storageStart := time.Now()
fromStorage, err := f.client.GetMetas(ctx, missing)
+ storageDur := time.Since(storageStart)
if err != nil {
return nil, err
}
+ writeBackStart := time.Now()
err = f.writeBackMetas(ctx, fromStorage)
+ writeBackDur := time.Since(writeBackStart)
if err != nil {
return nil, err
}
+ logger.LogKV(
+ "phase", "fetch_metas",
+ "err", err,
+ "keys", len(keys),
+ "hits", len(cacheHits),
+ "misses", len(missing),
+ "cache_dur", cacheDur.String(),
+ "storage_dur", storageDur.String(),
+ "write_back_dur", writeBackDur.String(),
+ )
+
results := append(fromCache, fromStorage...)
f.metrics.metasFetched.Observe(float64(len(results)))
// TODO(chaudum): get metas size from storage
diff --git a/pkg/storage/stores/shipper/bloomshipper/store.go b/pkg/storage/stores/shipper/bloomshipper/store.go
index 5e1363d0cb731..9b18427bacf10 100644
--- a/pkg/storage/stores/shipper/bloomshipper/store.go
+++ b/pkg/storage/stores/shipper/bloomshipper/store.go
@@ -21,6 +21,7 @@ import (
"github.com/grafana/loki/v3/pkg/storage/chunk/client/util"
"github.com/grafana/loki/v3/pkg/storage/config"
"github.com/grafana/loki/v3/pkg/util/constants"
+ "github.com/grafana/loki/v3/pkg/util/spanlogger"
)
var (
@@ -114,7 +115,11 @@ func (b *bloomStoreEntry) ResolveMetas(ctx context.Context, params MetaSearchPar
// FetchMetas implements store.
func (b *bloomStoreEntry) FetchMetas(ctx context.Context, params MetaSearchParams) ([]Meta, error) {
+ logger := spanlogger.FromContext(ctx)
+
+ resolverStart := time.Now()
metaRefs, fetchers, err := b.ResolveMetas(ctx, params)
+ resolverDuration := time.Since(resolverStart)
if err != nil {
return nil, err
}
@@ -122,6 +127,16 @@ func (b *bloomStoreEntry) FetchMetas(ctx context.Context, params MetaSearchParam
return nil, errors.New("metaRefs and fetchers have unequal length")
}
+ var metaCt int
+ for i := range metaRefs {
+ metaCt += len(metaRefs[i])
+ }
+ logger.LogKV(
+ "msg", "resolved metas",
+ "metas", metaCt,
+ "duration", resolverDuration,
+ )
+
var metas []Meta
for i := range fetchers {
res, err := fetchers[i].FetchMetas(ctx, metaRefs[i])
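The instrumentation added here follows one pattern throughout: time each phase separately and emit all durations once, from a deferred call, so the log line is written even on early returns. A standalone sketch of that pattern, with the span logger swapped for the standard library logger and illustrative phase names:

```go
package main

import (
	"log"
	"time"
)

// fetchWithTiming times the cache and storage phases separately and logs
// them together when the function returns, even on an early error return.
func fetchWithTiming(fromCache, fromStorage func() error) error {
	var cacheDur, storageDur time.Duration
	start := time.Now()
	defer func() {
		log.Printf("cache_dur=%s storage_dur=%s total=%s",
			cacheDur, storageDur, time.Since(start))
	}()

	phase := time.Now()
	if err := fromCache(); err != nil {
		return err
	}
	cacheDur = time.Since(phase)

	phase = time.Now()
	if err := fromStorage(); err != nil {
		return err
	}
	storageDur = time.Since(phase)
	return nil
}

func main() {
	_ = fetchWithTiming(
		func() error { time.Sleep(5 * time.Millisecond); return nil },
		func() error { time.Sleep(10 * time.Millisecond); return nil },
	)
}
```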
|
chore
|
additional spans for bloom read path (#12866)
|
aa82a7c804edd6df99d3fddc581d02c3b7fa6774
|
2024-10-01 22:30:08
|
renovate[bot]
|
fix(deps): update module github.com/ibm/ibm-cos-sdk-go to v1.11.1 (#14342)
| false
|
diff --git a/go.mod b/go.mod
index 37430cea41ebd..62887962ff38b 100644
--- a/go.mod
+++ b/go.mod
@@ -114,7 +114,7 @@ require (
github.com/DataDog/sketches-go v1.4.6
github.com/DmitriyVTitov/size v1.5.0
github.com/IBM/go-sdk-core/v5 v5.17.5
- github.com/IBM/ibm-cos-sdk-go v1.11.0
+ github.com/IBM/ibm-cos-sdk-go v1.11.1
github.com/axiomhq/hyperloglog v0.0.0-20240507144631-af9851f82b27
github.com/buger/jsonparser v1.1.1
github.com/coder/quartz v0.1.0
diff --git a/go.sum b/go.sum
index 06070fbd14afc..7fbf5705ba189 100644
--- a/go.sum
+++ b/go.sum
@@ -252,8 +252,8 @@ github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob
github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
github.com/IBM/go-sdk-core/v5 v5.17.5 h1:AjGC7xNee5tgDIjndekBDW5AbypdERHSgib3EZ1KNsA=
github.com/IBM/go-sdk-core/v5 v5.17.5/go.mod h1:KsAAI7eStAWwQa4F96MLy+whYSh39JzNjklZRbN/8ns=
-github.com/IBM/ibm-cos-sdk-go v1.11.0 h1:Jp55NLN3OvBwucMGpP5wNybyjncsmTZ9+GPHai/1cE8=
-github.com/IBM/ibm-cos-sdk-go v1.11.0/go.mod h1:FnWOym0CvrPM0nHoXvceClOEvGVXecPpmVIO5RFjlFk=
+github.com/IBM/ibm-cos-sdk-go v1.11.1 h1:Pye61hmWA4ZVCfOfFLTJBjPka4HIGrLqmpZ2d2KlrCE=
+github.com/IBM/ibm-cos-sdk-go v1.11.1/go.mod h1:d8vET3w8wgmGwCsCVs+0y4V8+1hRNT6+pbpGaEHvSCI=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/MasslessParticle/azure-storage-blob-go v0.14.1-0.20240322194317-344980fda573 h1:DCPjdUAi+jcGnL7iN+A7uNY8xG584oMRuisYh/VE21E=
github.com/MasslessParticle/azure-storage-blob-go v0.14.1-0.20240322194317-344980fda573/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck=
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/version.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/version.go
index 12babed70e3d3..982d490db1aee 100644
--- a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/version.go
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/version.go
@@ -7,6 +7,6 @@ package aws
const SDKName = "ibm-cos-sdk-go"
// SDKVersion is the version of this SDK
-const SDKVersion = "1.11.0"
+const SDKVersion = "1.11.1"
// IBM COS SDK Code -- END
diff --git a/vendor/modules.txt b/vendor/modules.txt
index a6bda4744eb90..5bd662067c5c5 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -209,7 +209,7 @@ github.com/DmitriyVTitov/size
# github.com/IBM/go-sdk-core/v5 v5.17.5
## explicit; go 1.20
github.com/IBM/go-sdk-core/v5/core
-# github.com/IBM/ibm-cos-sdk-go v1.11.0
+# github.com/IBM/ibm-cos-sdk-go v1.11.1
## explicit; go 1.19
github.com/IBM/ibm-cos-sdk-go/aws
github.com/IBM/ibm-cos-sdk-go/aws/arn
|
fix
|
update module github.com/ibm/ibm-cos-sdk-go to v1.11.1 (#14342)
|
7a81d264a4ba54efdb1d79d382fd4188c036aaee
|
2024-04-16 01:05:48
|
Joshua Ford
|
fix: lambda-promtail, update s3 filename regex to allow finding of log files from AWS GovCloud regions (#12482)
| false
|
diff --git a/tools/lambda-promtail/lambda-promtail/s3.go b/tools/lambda-promtail/lambda-promtail/s3.go
index 5dca5cf7d6090..77694ba603432 100644
--- a/tools/lambda-promtail/lambda-promtail/s3.go
+++ b/tools/lambda-promtail/lambda-promtail/s3.go
@@ -75,9 +75,9 @@ var (
// source: https://docs.aws.amazon.com/waf/latest/developerguide/logging-s3.html
// format: aws-waf-logs-suffix[/prefix]/AWSLogs/aws-account-id/WAFLogs/region/webacl-name/year/month/day/hour/minute/aws-account-id_waflogs_region_webacl-name_timestamp_hash.log.gz
// example: aws-waf-logs-test/AWSLogs/11111111111/WAFLogs/us-east-1/TEST-WEBACL/2021/10/28/19/50/11111111111_waflogs_us-east-1_TEST-WEBACL_20211028T1950Z_e0ca43b5.log.gz
- defaultFilenameRegex = regexp.MustCompile(`AWSLogs\/(?P<account_id>\d+)\/(?P<type>[a-zA-Z0-9_\-]+)\/(?P<region>[\w-]+)\/(?P<year>\d+)\/(?P<month>\d+)\/(?P<day>\d+)\/\d+\_(?:elasticloadbalancing|vpcflowlogs)\_\w+-\w+-\d_(?:(?P<lb_type>app|net)\.*?)?(?P<src>[a-zA-Z0-9\-]+)`)
+ defaultFilenameRegex = regexp.MustCompile(`AWSLogs\/(?P<account_id>\d+)\/(?P<type>[a-zA-Z0-9_\-]+)\/(?P<region>[\w-]+)\/(?P<year>\d+)\/(?P<month>\d+)\/(?P<day>\d+)\/\d+\_(?:elasticloadbalancing|vpcflowlogs)_(?:\w+-\w+-(?:\w+-)?\d)_(?:(?P<lb_type>app|net)\.*?)?(?P<src>[a-zA-Z0-9\-]+)`)
defaultTimestampRegex = regexp.MustCompile(`(?P<timestamp>\d+-\d+-\d+T\d+:\d+:\d+(?:\.\d+Z)?)`)
- cloudtrailFilenameRegex = regexp.MustCompile(`AWSLogs\/(?P<organization_id>o-[a-z0-9]{10,32})?\/?(?P<account_id>\d+)\/(?P<type>[a-zA-Z0-9_\-]+)\/(?P<region>[\w-]+)\/(?P<year>\d+)\/(?P<month>\d+)\/(?P<day>\d+)\/\d+\_(?:CloudTrail|CloudTrail-Digest)\_\w+-\w+-\d_(?:(?:app|nlb|net)\.*?)?.+_(?P<src>[a-zA-Z0-9\-]+)`)
+ cloudtrailFilenameRegex = regexp.MustCompile(`AWSLogs\/(?P<organization_id>o-[a-z0-9]{10,32})?\/?(?P<account_id>\d+)\/(?P<type>[a-zA-Z0-9_\-]+)\/(?P<region>[\w-]+)\/(?P<year>\d+)\/(?P<month>\d+)\/(?P<day>\d+)\/\d+\_(?:CloudTrail|CloudTrail-Digest)_(?:\w+-\w+-(?:\w+-)?\d)_(?:(?:app|nlb|net)\.*?)?.+_(?P<src>[a-zA-Z0-9\-]+)`)
cloudfrontFilenameRegex = regexp.MustCompile(`(?P<prefix>.*)\/(?P<src>[A-Z0-9]+)\.(?P<year>\d+)-(?P<month>\d+)-(?P<day>\d+)-(.+)`)
cloudfrontTimestampRegex = regexp.MustCompile(`(?P<timestamp>\d+-\d+-\d+\s\d+:\d+:\d+)`)
wafFilenameRegex = regexp.MustCompile(`AWSLogs\/(?P<account_id>\d+)\/(?P<type>WAFLogs)\/(?P<region>[\w-]+)\/(?P<src>[\w-]+)\/(?P<year>\d+)\/(?P<month>\d+)\/(?P<day>\d+)\/(?P<hour>\d+)\/(?P<minute>\d+)\/\d+\_waflogs\_[\w-]+_[\w-]+_\d+T\d+Z_\w+`)
diff --git a/tools/lambda-promtail/lambda-promtail/s3_test.go b/tools/lambda-promtail/lambda-promtail/s3_test.go
index 60a22abba7a36..644ad12f17276 100644
--- a/tools/lambda-promtail/lambda-promtail/s3_test.go
+++ b/tools/lambda-promtail/lambda-promtail/s3_test.go
@@ -126,6 +126,39 @@ func Test_getLabels(t *testing.T) {
},
wantErr: false,
},
+ {
+ name: "s3_govcloud_flow_logs",
+ args: args{
+ record: events.S3EventRecord{
+ AWSRegion: "us-gov-east-1",
+ S3: events.S3Entity{
+ Bucket: events.S3Bucket{
+ Name: "vpc_logs_test",
+ OwnerIdentity: events.S3UserIdentity{
+ PrincipalID: "test",
+ },
+ },
+ Object: events.S3Object{
+ Key: "my-bucket/AWSLogs/123456789012/vpcflowlogs/us-gov-east-1/2022/01/24/123456789012_vpcflowlogs_us-gov-east-1_fl-1234abcd_20180620T1620Z_fe123456.log.gz",
+ },
+ },
+ },
+ },
+ want: map[string]string{
+ "account_id": "123456789012",
+ "bucket": "vpc_logs_test",
+ "bucket_owner": "test",
+ "bucket_region": "us-gov-east-1",
+ "day": "24",
+ "key": "my-bucket/AWSLogs/123456789012/vpcflowlogs/us-gov-east-1/2022/01/24/123456789012_vpcflowlogs_us-gov-east-1_fl-1234abcd_20180620T1620Z_fe123456.log.gz",
+ "month": "01",
+ "region": "us-gov-east-1",
+ "src": "fl-1234abcd",
+ "type": FLOW_LOG_TYPE,
+ "year": "2022",
+ },
+ wantErr: false,
+ },
{
name: "cloudtrail_digest_logs",
args: args{
@@ -192,6 +225,39 @@ func Test_getLabels(t *testing.T) {
},
wantErr: false,
},
+ {
+ name: "cloudtrail_govcloud_logs",
+ args: args{
+ record: events.S3EventRecord{
+ AWSRegion: "us-gov-east-1",
+ S3: events.S3Entity{
+ Bucket: events.S3Bucket{
+ Name: "cloudtrail_logs_test",
+ OwnerIdentity: events.S3UserIdentity{
+ PrincipalID: "test",
+ },
+ },
+ Object: events.S3Object{
+ Key: "my-bucket/AWSLogs/123456789012/CloudTrail/us-gov-east-1/2022/01/24/123456789012_CloudTrail_us-gov-east-1_20220124T0000Z_4jhzXFO2Jlvu2b3y.json.gz",
+ },
+ },
+ },
+ },
+ want: map[string]string{
+ "account_id": "123456789012",
+ "bucket": "cloudtrail_logs_test",
+ "bucket_owner": "test",
+ "bucket_region": "us-gov-east-1",
+ "day": "24",
+ "key": "my-bucket/AWSLogs/123456789012/CloudTrail/us-gov-east-1/2022/01/24/123456789012_CloudTrail_us-gov-east-1_20220124T0000Z_4jhzXFO2Jlvu2b3y.json.gz",
+ "month": "01",
+ "region": "us-gov-east-1",
+ "src": "4jhzXFO2Jlvu2b3y",
+ "type": CLOUDTRAIL_LOG_TYPE,
+ "year": "2022",
+ },
+ wantErr: false,
+ },
{
name: "organization_cloudtrail_logs",
args: args{
@@ -293,6 +359,41 @@ func Test_getLabels(t *testing.T) {
},
wantErr: false,
},
+ {
+ name: "s3_govcloud_waf",
+ args: args{
+ record: events.S3EventRecord{
+ AWSRegion: "us-gov-east-1",
+ S3: events.S3Entity{
+ Bucket: events.S3Bucket{
+ Name: "waf_logs_test",
+ OwnerIdentity: events.S3UserIdentity{
+ PrincipalID: "test",
+ },
+ },
+ Object: events.S3Object{
+ Key: "prefix/AWSLogs/11111111111/WAFLogs/us-gov-east-1/TEST-WEBACL/2021/10/28/19/50/11111111111_waflogs_us-gov-east-1_TEST-WEBACL_20211028T1950Z_e0ca43b5.log.gz",
+ },
+ },
+ },
+ },
+ want: map[string]string{
+ "account_id": "11111111111",
+ "bucket_owner": "test",
+ "bucket_region": "us-gov-east-1",
+ "bucket": "waf_logs_test",
+ "day": "28",
+ "hour": "19",
+ "key": "prefix/AWSLogs/11111111111/WAFLogs/us-gov-east-1/TEST-WEBACL/2021/10/28/19/50/11111111111_waflogs_us-gov-east-1_TEST-WEBACL_20211028T1950Z_e0ca43b5.log.gz",
+ "minute": "50",
+ "month": "10",
+ "region": "us-gov-east-1",
+ "src": "TEST-WEBACL",
+ "type": WAF_LOG_TYPE,
+ "year": "2021",
+ },
+ wantErr: false,
+ },
{
name: "missing_type",
args: args{
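The regex change itself is small: the region segment `\w+-\w+-\d` becomes `\w+-\w+-(?:\w+-)?\d`, so the extra token in GovCloud regions (`us-gov-east-1`) matches alongside `us-east-1`. The labels are then extracted via named capture groups; a cut-down sketch of that mechanism follows — the pattern keeps only the first three groups, while the real expressions in s3.go are much longer:

```go
package main

import (
	"fmt"
	"regexp"
)

// A reduced version of the filename pattern, with the GovCloud-capable
// region segment from the diff: (?:\w+-)? absorbs the extra "gov-" token.
var keyRegex = regexp.MustCompile(
	`AWSLogs/(?P<account_id>\d+)/(?P<type>[a-zA-Z0-9_\-]+)/(?P<region>\w+-\w+-(?:\w+-)?\d)/`)

// extractLabels maps each named capture group to its matched value.
func extractLabels(key string) map[string]string {
	match := keyRegex.FindStringSubmatch(key)
	if match == nil {
		return nil
	}
	labels := map[string]string{}
	for i, name := range keyRegex.SubexpNames() {
		if name != "" {
			labels[name] = match[i]
		}
	}
	return labels
}

func main() {
	key := "AWSLogs/123456789012/vpcflowlogs/us-gov-east-1/2022/01/24/flow.log.gz"
	fmt.Println(extractLabels(key))
	// map[account_id:123456789012 region:us-gov-east-1 type:vpcflowlogs]
}
```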
|
fix
|
lambda-promtail, update s3 filename regex to allow finding of log files from AWS GovCloud regions (#12482)
|
02416736d0d1b9dafe04e3217cecb167636e6447
|
2022-02-10 16:58:03
|
gotjosh
|
ruler: Rule group not found API message (#5362)
| false
|
diff --git a/pkg/ruler/rulestore/objectclient/rule_store.go b/pkg/ruler/rulestore/objectclient/rule_store.go
index 747cf728f6cc6..b945c298af0b9 100644
--- a/pkg/ruler/rulestore/objectclient/rule_store.go
+++ b/pkg/ruler/rulestore/objectclient/rule_store.go
@@ -54,7 +54,7 @@ func NewRuleStore(client chunk.ObjectClient, loadConcurrency int, logger log.Log
func (o *RuleStore) getRuleGroup(ctx context.Context, objectKey string, rg *rulespb.RuleGroupDesc) (*rulespb.RuleGroupDesc, error) {
reader, _, err := o.client.GetObject(ctx, objectKey)
if err != nil {
- if err.Error() == chunk.ErrStorageObjectNotFound.Error() {
+ if o.client.IsObjectNotFoundErr(err) {
level.Debug(o.logger).Log("msg", "rule group does not exist", "name", objectKey)
return nil, errors.Wrapf(rulestore.ErrGroupNotFound, "get rule group user=%q, namespace=%q, name=%q", rg.GetUser(), rg.GetNamespace(), rg.GetName())
}
@@ -214,7 +214,7 @@ func (o *RuleStore) SetRuleGroup(ctx context.Context, userID string, namespace s
func (o *RuleStore) DeleteRuleGroup(ctx context.Context, userID string, namespace string, groupName string) error {
objectKey := generateRuleObjectKey(userID, namespace, groupName)
err := o.client.DeleteObject(ctx, objectKey)
- if err == chunk.ErrStorageObjectNotFound {
+ if o.client.IsObjectNotFoundErr(err) {
return rulestore.ErrGroupNotFound
}
return err
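The old checks compared the returned error against `chunk.ErrStorageObjectNotFound` by value or by message, both of which break as soon as a storage client wraps the error with context; delegating to the client's own `IsObjectNotFoundErr` keeps the decision where the wrapping happens. A standalone sketch of the failure mode, with illustrative names standing in for the chunk package's types:

```go
package main

import (
	"errors"
	"fmt"
)

// ErrObjectNotFound stands in for chunk.ErrStorageObjectNotFound.
var ErrObjectNotFound = errors.New("storage object not found")

// isNotFound mirrors an IsObjectNotFoundErr helper: errors.Is unwraps,
// so the check survives added context.
func isNotFound(err error) bool {
	return errors.Is(err, ErrObjectNotFound)
}

func main() {
	wrapped := fmt.Errorf("get rule group %q: %w", "ns/group", ErrObjectNotFound)

	// Both old-style checks fail once the error carries context:
	fmt.Println(wrapped == ErrObjectNotFound)                 // false
	fmt.Println(wrapped.Error() == ErrObjectNotFound.Error()) // false

	// The dedicated predicate keeps working:
	fmt.Println(isNotFound(wrapped)) // true
}
```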
|
ruler
|
Rule group not found API message (#5362)
|
f0a16f971b1d198d998c97c577b86ca5cd85d7e8
|
2025-03-07 01:25:43
|
Trevor Whitney
|
chore: auto-merge npm updates (#16577)
| false
|
diff --git a/.github/renovate.json5 b/.github/renovate.json5
index 0bf7dbcb810cb..0f70418768db1 100644
--- a/.github/renovate.json5
+++ b/.github/renovate.json5
@@ -42,6 +42,14 @@
"matchPackageNames": ["tailwindcss"],
"enabled": false
},
+ {
+ // Auto-merge the rest of the npm updates
+ "matchManagers": ["npm"],
+ "matchPackageNames": ["!tailwindcss"],
+ "enabled": true,
+ "autoApprove": true,
+ "automerge": true
+ },
{
// Don't automatically merge GitHub Actions updates
"matchManagers": ["github-actions"],
|
chore
|
auto-merge npm updates (#16577)
|
034e2a63cf2e245d21734c159a48aa3768fccbae
|
2021-08-27 21:16:13
|
Arve Knudsen
|
makefile: Add format target (#4226)
| false
|
diff --git a/Makefile b/Makefile
index 7637549411493..1141623fe60d3 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,5 @@
.DEFAULT_GOAL := all
-.PHONY: all images check-generated-files logcli loki loki-debug promtail promtail-debug loki-canary lint test clean yacc protos touch-protobuf-sources touch-protos
+.PHONY: all images check-generated-files logcli loki loki-debug promtail promtail-debug loki-canary lint test clean yacc protos touch-protobuf-sources touch-protos format
.PHONY: docker-driver docker-driver-clean docker-driver-enable docker-driver-push
.PHONY: fluent-bit-image, fluent-bit-push, fluent-bit-test
.PHONY: fluentd-image, fluentd-push, fluentd-test
@@ -627,3 +627,9 @@ endif
test-fuzz:
go test -timeout 30s -tags dev,gofuzz -cpuprofile cpu.prof -memprofile mem.prof \
-run ^Test_Fuzz$$ github.com/grafana/loki/pkg/logql -v -count=1 -timeout=0s
+
+format:
+ find . $(DONT_FIND) -name '*.pb.go' -prune -o -name '*.y.go' -prune -o -name '*.rl.go' -prune -o \
+ -type f -name '*.go' -exec gofmt -w -s {} \;
+ find . $(DONT_FIND) -name '*.pb.go' -prune -o -name '*.y.go' -prune -o -name '*.rl.go' -prune -o \
+ -type f -name '*.go' -exec goimports -w -local github.com/grafana/loki {} \;
|
makefile
|
Add format target (#4226)
|
eb7dae4583b28ace35e5faa314738f7e011a7251
|
2023-06-07 21:02:36
|
Ivana Huckova
|
loki: Improve error message when step too low (#9641)
| false
|
diff --git a/pkg/loghttp/query.go b/pkg/loghttp/query.go
index acd37c3412835..bc3b8a1265ded 100644
--- a/pkg/loghttp/query.go
+++ b/pkg/loghttp/query.go
@@ -18,7 +18,7 @@ import (
var (
errEndBeforeStart = errors.New("end timestamp must not be before or equal to start time")
errNegativeStep = errors.New("zero or negative query resolution step widths are not accepted. Try a positive integer")
- errStepTooSmall = errors.New("exceeded maximum resolution of 11,000 points per timeseries. Try decreasing the query resolution (?step=XX)")
+ errStepTooSmall = errors.New("exceeded maximum resolution of 11,000 points per time series. Try increasing the value of the step parameter")
errNegativeInterval = errors.New("interval must be >= 0")
)
diff --git a/pkg/querier/queryrange/queryrangebase/query_range.go b/pkg/querier/queryrange/queryrangebase/query_range.go
index 5e7ad793ca29f..35b26b5df6a50 100644
--- a/pkg/querier/queryrange/queryrangebase/query_range.go
+++ b/pkg/querier/queryrange/queryrangebase/query_range.go
@@ -38,7 +38,7 @@ var (
}.Froze()
errEndBeforeStart = httpgrpc.Errorf(http.StatusBadRequest, "end timestamp must not be before start time")
errNegativeStep = httpgrpc.Errorf(http.StatusBadRequest, "zero or negative query resolution step widths are not accepted. Try a positive integer")
- errStepTooSmall = httpgrpc.Errorf(http.StatusBadRequest, "exceeded maximum resolution of 11,000 points per timeseries. Try decreasing the query resolution (?step=XX)")
+ errStepTooSmall = httpgrpc.Errorf(http.StatusBadRequest, "exceeded maximum resolution of 11,000 points per time series. Try increasing the value of the step parameter")
// PrometheusCodec is a codec to encode and decode Prometheus query range requests and responses.
PrometheusCodec Codec = &prometheusCodec{}
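The limit this message refers to follows from simple arithmetic: a range query produces roughly `(end - start) / step` evaluation points, so with a fixed range the only way under the 11,000-point ceiling is a larger step — which is what the reworded message now suggests. A small sketch of the presumed check (the exact formula in Loki's validation may differ slightly, e.g. by an off-by-one):

```go
package main

import (
	"fmt"
	"time"
)

const maxResolutionPoints = 11000

// pointsForRange approximates how many evaluation points a range query
// produces for a given step.
func pointsForRange(start, end time.Time, step time.Duration) int64 {
	return int64(end.Sub(start)/step) + 1
}

func main() {
	end := time.Now()
	start := end.Add(-24 * time.Hour)

	for _, step := range []time.Duration{5 * time.Second, 15 * time.Second} {
		p := pointsForRange(start, end, step)
		fmt.Printf("step=%-4s points=%-6d ok=%v\n", step, p, p <= maxResolutionPoints)
	}
	// step=5s  : 17281 points, rejected
	// step=15s : 5761 points, accepted
}
```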
|
loki
|
Improve error message when step too low (#9641)
|
4cae003ecedd474e4c15feab4ea2ef435afff83f
|
2024-03-02 01:19:49
|
Christian Haudum
|
chore(blooms): Make blocks cache preloading less verbose (#12107)
| false
|
diff --git a/pkg/storage/stores/shipper/bloomshipper/cache.go b/pkg/storage/stores/shipper/bloomshipper/cache.go
index e2a7ea4042d62..72668c308427f 100644
--- a/pkg/storage/stores/shipper/bloomshipper/cache.go
+++ b/pkg/storage/stores/shipper/bloomshipper/cache.go
@@ -67,13 +67,11 @@ func loadBlockDirectories(root string, logger log.Logger) (keys []string, values
}
if !dirEntry.IsDir() {
- level.Warn(logger).Log("msg", "skip directory entry", "err", "not a directory", "path", path)
return nil
}
ref, err := resolver.ParseBlockKey(key(path))
if err != nil {
- level.Warn(logger).Log("msg", "skip directory entry", "err", err, "path", path)
return nil
}
@@ -82,7 +80,7 @@ func loadBlockDirectories(root string, logger log.Logger) (keys []string, values
values = append(values, NewBlockDirectory(ref, path, logger))
level.Debug(logger).Log("msg", "found block directory", "ref", ref, "path", path)
} else {
- level.Warn(logger).Log("msg", "skip directory entry", "err", "not a block directory", "path", path)
+ level.Warn(logger).Log("msg", "skip directory entry", "err", "not a block directory containing blooms and series", "path", path)
_ = clean(path)
}
|
chore
|
Make blocks cache preloading less verbose (#12107)
|
50dc872dabf0c87084e5a7f90204328e3e0372c0
|
2025-03-18 06:04:39
|
renovate[bot]
|
chore(deps): update dependency @types/react to v19.0.11 (main) (#16796)
| false
|
diff --git a/pkg/ui/frontend/package-lock.json b/pkg/ui/frontend/package-lock.json
index 9fd68437c9ef0..ff163ebb8afd2 100644
--- a/pkg/ui/frontend/package-lock.json
+++ b/pkg/ui/frontend/package-lock.json
@@ -2768,9 +2768,9 @@
"license": "MIT"
},
"node_modules/@types/react": {
- "version": "19.0.10",
- "resolved": "https://registry.npmjs.org/@types/react/-/react-19.0.10.tgz",
- "integrity": "sha512-JuRQ9KXLEjaUNjTWpzuR231Z2WpIwczOkBEIvbHNCzQefFIT0L8IqE6NV6ULLyC1SI/i234JnDoMkfg+RjQj2g==",
+ "version": "19.0.11",
+ "resolved": "https://registry.npmjs.org/@types/react/-/react-19.0.11.tgz",
+ "integrity": "sha512-vrdxRZfo9ALXth6yPfV16PYTLZwsUWhVjjC+DkfE5t1suNSbBrWC9YqSuuxJZ8Ps6z1o2ycRpIqzZJIgklq4Tw==",
"devOptional": true,
"license": "MIT",
"dependencies": {
|
chore
|
update dependency @types/react to v19.0.11 (main) (#16796)
|
1fc278623e39d653ddb2079da9570fb8c02b72e7
|
2019-08-02 00:29:31
|
Edward Welch
|
fix(promtail): Improving promtail asset generation makefile targets
| false
|
diff --git a/Makefile b/Makefile
index 04c604420c9ce..1fffc8c9f4172 100644
--- a/Makefile
+++ b/Makefile
@@ -60,6 +60,9 @@ PROTO_GOS := $(patsubst %.proto,%.pb.go,$(PROTO_DEFS))
YACC_DEFS := $(shell find . $(DONT_FIND) -type f -name *.y -print)
YACC_GOS := $(patsubst %.y,%.y.go,$(YACC_DEFS))
+# Promtail UI files
+PROMTAIL_GENERATED_FILE := pkg/promtail/server/ui/assets_vfsdata.go
+PROMTAIL_UI_FILES := $(shell find ./pkg/promtail/server/ui -type f -name assets_vfsdata.go -prune -o -print)
##########
# Docker #
@@ -96,9 +99,9 @@ binfmt:
all: promtail logcli loki loki-canary check-generated-files
# This is really a check for the CI to make sure generated files are built and checked in manually
-check-generated-files: yacc protos
- @if ! (git diff --exit-code $(YACC_GOS) $(PROTO_GOS)); then \
- echo "\nChanges found in either generated protos or yaccs"; \
+check-generated-files: yacc protos pkg/promtail/server/ui/assets_vfsdata.go
+ @if ! (git diff --exit-code $(YACC_GOS) $(PROTO_GOS) $(PROMTAIL_GENERATED_FILE)); then \
+ echo "\nChanges found in generated files"; \
echo "Run 'make all' and commit the changes to fix this error."; \
echo "If you are actively developing these files you can ignore this error"; \
echo "(Don't forget to check in the generated files when finished)\n"; \
@@ -148,12 +151,15 @@ cmd/loki-canary/loki-canary: $(APP_GO_FILES) cmd/loki-canary/main.go
promtail: yacc cmd/promtail/promtail
promtail-debug: yacc cmd/promtail/promtail-debug
+promtail-clean-assets:
+ rm -rf pkg/promtail/server/ui/assets_vfsdata.go
+
# Rule to generate promtail static assets file
-pkg/promtail/server/ui/assets_vfsdata.go:
+$(PROMTAIL_GENERATED_FILE): $(PROMTAIL_UI_FILES)
@echo ">> writing assets"
GOOS=$(shell go env GOHOSTOS) go generate -x -v ./pkg/promtail/server/ui
-cmd/promtail/promtail: $(APP_GO_FILES) pkg/promtail/server/ui/assets_vfsdata.go cmd/promtail/main.go
+cmd/promtail/promtail: $(APP_GO_FILES) $(PROMTAIL_GENERATED_FILE) cmd/promtail/main.go
CGO_ENABLED=0 go build $(GO_FLAGS) -o $@ ./$(@D)
$(NETGO_CHECK)
@@ -180,7 +186,7 @@ test: all
#########
clean:
- rm -rf cmd/promtail/promtail pkg/promtail/server/ui/assets_vfsdata.go
+ rm -rf cmd/promtail/promtail
rm -rf cmd/loki/loki
rm -rf cmd/logcli/logcli
rm -rf cmd/loki-canary/loki-canary
|
fix
|
Improving promtail asset generation makefile targets
|
a1fbd8b137e775fabf2055c294d72c9c54420cfc
|
2025-03-19 17:31:40
|
Jack Baldry
|
chore: remove doc-validator workflow (#16819)
| false
|
diff --git a/.github/workflows/doc-validator.yml b/.github/workflows/doc-validator.yml
deleted file mode 100644
index 1d755f887324e..0000000000000
--- a/.github/workflows/doc-validator.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-name: "doc-validator"
-on:
- pull_request:
- paths: ["docs/sources/**"]
- workflow_dispatch:
-jobs:
- doc-validator:
- runs-on: "ubuntu-latest"
- container:
- image: "grafana/doc-validator:v5.2.0"
- steps:
- - name: "Checkout code"
- uses: "actions/checkout@v4"
- with:
- fetch-depth: 0
- - name: "Run doc-validator"
- run: |
- doc-validator \
- "--include=$(git config --global --add safe.directory $(realpath .); printf '^docs/sources/(%s)$' "$(git --no-pager diff --name-only --diff-filter=ACMRT origin/${{ github.event.pull_request.base.ref }}...${{ github.event.pull_request.head.sha }} -- docs/sources | sed 's/^docs\/sources\///' | awk -F'\n' '{if(NR == 1) {printf $0} else {printf "|"$0}}')")" \
- '--skip-checks=^image' \
- docs/sources \
- /docs/loki/latest \
- | reviewdog \
- -f=rdjsonl \
- --fail-on-error \
- --filter-mode=nofilter \
- --name=doc-validator \
- --reporter=github-pr-review
- env:
- REVIEWDOG_GITHUB_API_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
|
chore
|
remove doc-validator workflow (#16819)
|
7ae1588200396b73a16fadd2610670a5ce5fd747
|
2024-09-04 23:19:32
|
Periklis Tsirakidis
|
feat(operator): Update Loki operand to v3.1.1 (#14042)
| false
|
diff --git a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml
index 8468d36696af4..89db0644458c5 100644
--- a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml
@@ -150,7 +150,7 @@ metadata:
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: docker.io/grafana/loki-operator:0.6.1
- createdAt: "2024-07-05T08:22:21Z"
+ createdAt: "2024-09-04T13:09:06Z"
description: The Community Loki Operator provides Kubernetes native deployment
and management of Loki and related logging components.
features.operators.openshift.io/disconnected: "true"
diff --git a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml
index d0cd0a035dee8..c431273519e6f 100644
--- a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml
@@ -150,7 +150,7 @@ metadata:
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: docker.io/grafana/loki-operator:0.6.1
- createdAt: "2024-07-05T08:22:19Z"
+ createdAt: "2024-09-04T13:09:04Z"
description: The Community Loki Operator provides Kubernetes native deployment
and management of Loki and related logging components.
operators.operatorframework.io/builder: operator-sdk-unknown
@@ -1712,7 +1712,7 @@ spec:
- /manager
env:
- name: RELATED_IMAGE_LOKI
- value: docker.io/grafana/loki:3.1.0
+ value: docker.io/grafana/loki:3.1.1
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest
- name: RELATED_IMAGE_OPA
@@ -1824,7 +1824,7 @@ spec:
provider:
name: Grafana Loki SIG Operator
relatedImages:
- - image: docker.io/grafana/loki:3.1.0
+ - image: docker.io/grafana/loki:3.1.1
name: loki
- image: quay.io/observatorium/api:latest
name: gateway
diff --git a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
index f1fc360f9d4ea..f943d3921ff3a 100644
--- a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
@@ -150,7 +150,7 @@ metadata:
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: quay.io/openshift-logging/loki-operator:0.1.0
- createdAt: "2024-07-05T08:22:23Z"
+ createdAt: "2024-09-04T13:09:07Z"
description: |
The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging.
## Prerequisites and Requirements
@@ -1717,7 +1717,7 @@ spec:
- /manager
env:
- name: RELATED_IMAGE_LOKI
- value: quay.io/openshift-logging/loki:v3.1.0
+ value: quay.io/openshift-logging/loki:v3.1.1
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest
- name: RELATED_IMAGE_OPA
@@ -1841,7 +1841,7 @@ spec:
provider:
name: Red Hat
relatedImages:
- - image: quay.io/openshift-logging/loki:v3.1.0
+ - image: quay.io/openshift-logging/loki:v3.1.1
name: loki
- image: quay.io/observatorium/api:latest
name: gateway
diff --git a/operator/config/overlays/community/manager_related_image_patch.yaml b/operator/config/overlays/community/manager_related_image_patch.yaml
index 5e454e400cc87..52681b95a4b1c 100644
--- a/operator/config/overlays/community/manager_related_image_patch.yaml
+++ b/operator/config/overlays/community/manager_related_image_patch.yaml
@@ -9,7 +9,7 @@ spec:
- name: manager
env:
- name: RELATED_IMAGE_LOKI
- value: docker.io/grafana/loki:3.1.0
+ value: docker.io/grafana/loki:3.1.1
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest
- name: RELATED_IMAGE_OPA
diff --git a/operator/config/overlays/development/manager_related_image_patch.yaml b/operator/config/overlays/development/manager_related_image_patch.yaml
index 3ae62832b1500..21aa79f99a7d7 100644
--- a/operator/config/overlays/development/manager_related_image_patch.yaml
+++ b/operator/config/overlays/development/manager_related_image_patch.yaml
@@ -9,6 +9,6 @@ spec:
- name: manager
env:
- name: RELATED_IMAGE_LOKI
- value: docker.io/grafana/loki:3.1.0
+ value: docker.io/grafana/loki:3.1.1
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest
diff --git a/operator/config/overlays/openshift/manager_related_image_patch.yaml b/operator/config/overlays/openshift/manager_related_image_patch.yaml
index 675a31564035f..3bb2b65cf4617 100644
--- a/operator/config/overlays/openshift/manager_related_image_patch.yaml
+++ b/operator/config/overlays/openshift/manager_related_image_patch.yaml
@@ -9,7 +9,7 @@ spec:
- name: manager
env:
- name: RELATED_IMAGE_LOKI
- value: quay.io/openshift-logging/loki:v3.1.0
+ value: quay.io/openshift-logging/loki:v3.1.1
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest
- name: RELATED_IMAGE_OPA
diff --git a/operator/docs/operator/compatibility.md b/operator/docs/operator/compatibility.md
index 5887c7a68cfa4..382e7e6a7673f 100644
--- a/operator/docs/operator/compatibility.md
+++ b/operator/docs/operator/compatibility.md
@@ -27,17 +27,5 @@ Due to the use of apiextensions.k8s.io/v1 CustomResourceDefinitions, requires Ku
The versions of Loki compatible to be run with the Loki Operator are:
-* v2.7.1
-* v2.7.2
-* v2.7.3
-* v2.7.4
-* v2.8.0
-* v2.8.3
-* v2.9.0
-* v2.9.1
-* v2.9.2
-* v2.9.3
-* v2.9.4
-* v2.9.6
-* v2.9.8
* v3.1.0
+* v3.1.1
diff --git a/operator/hack/addons_dev.yaml b/operator/hack/addons_dev.yaml
index feb9781d269d4..320743bc37325 100644
--- a/operator/hack/addons_dev.yaml
+++ b/operator/hack/addons_dev.yaml
@@ -29,7 +29,7 @@ spec:
spec:
containers:
- name: logcli
- image: docker.io/grafana/logcli:3.1.0-amd64
+ image: docker.io/grafana/logcli:3.1.1-amd64
imagePullPolicy: IfNotPresent
command:
- /bin/sh
@@ -73,7 +73,7 @@ spec:
spec:
containers:
- name: promtail
- image: docker.io/grafana/promtail:3.1.0
+ image: docker.io/grafana/promtail:3.1.1
args:
- -config.file=/etc/promtail/promtail.yaml
- -log.level=info
diff --git a/operator/hack/addons_ocp.yaml b/operator/hack/addons_ocp.yaml
index 59b942c9a4050..6e49cbd847c8a 100644
--- a/operator/hack/addons_ocp.yaml
+++ b/operator/hack/addons_ocp.yaml
@@ -29,7 +29,7 @@ spec:
spec:
containers:
- name: logcli
- image: docker.io/grafana/logcli:3.1.0-amd64
+ image: docker.io/grafana/logcli:3.1.1-amd64
imagePullPolicy: IfNotPresent
command:
- /bin/sh
@@ -70,7 +70,7 @@ spec:
spec:
containers:
- name: promtail
- image: docker.io/grafana/promtail:3.1.0
+ image: docker.io/grafana/promtail:3.1.1
args:
- -config.file=/etc/promtail/promtail.yaml
- -log.level=info
diff --git a/operator/internal/manifests/var.go b/operator/internal/manifests/var.go
index b250734dd45f4..1fed3e1422982 100644
--- a/operator/internal/manifests/var.go
+++ b/operator/internal/manifests/var.go
@@ -59,7 +59,7 @@ const (
EnvRelatedImageGateway = "RELATED_IMAGE_GATEWAY"
// DefaultContainerImage declares the default fallback for loki image.
- DefaultContainerImage = "docker.io/grafana/loki:3.1.0"
+ DefaultContainerImage = "docker.io/grafana/loki:3.1.1"
// DefaultLokiStackGatewayImage declares the default image for lokiStack-gateway.
DefaultLokiStackGatewayImage = "quay.io/observatorium/api:latest"
|
feat
|
Update Loki operand to v3.1.1 (#14042)
|
a2ba4bc1817c13716929b6384d199dacbd45c3c7
|
2025-02-18 22:26:44
|
Jackson Coelho
|
chore: small fixes for #16348 (#16358)
| false
|
diff --git a/pkg/dataobj/consumer/config.go b/pkg/dataobj/consumer/config.go
index b76f9b6672d12..1e56153fd5370 100644
--- a/pkg/dataobj/consumer/config.go
+++ b/pkg/dataobj/consumer/config.go
@@ -30,9 +30,5 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
cfg.BuilderConfig.RegisterFlagsWithPrefix(prefix, f)
cfg.UploaderConfig.RegisterFlagsWithPrefix(prefix, f)
- if cfg.IdleFlushTimeout <= 0 {
- cfg.IdleFlushTimeout = 60 * 60 * time.Second // default to 1 hour
- }
-
- f.DurationVar(&cfg.IdleFlushTimeout, prefix+"idle-flush-timeout", cfg.IdleFlushTimeout, "The maximum amount of time to wait in seconds before flushing an object that is no longer receiving new writes")
+ f.DurationVar(&cfg.IdleFlushTimeout, prefix+"idle-flush-timeout", 60*60*time.Second, "The maximum amount of time to wait in seconds before flushing an object that is no longer receiving new writes")
}
diff --git a/pkg/dataobj/consumer/partition_processor.go b/pkg/dataobj/consumer/partition_processor.go
index b9030b354ed85..8b41e79f8299b 100644
--- a/pkg/dataobj/consumer/partition_processor.go
+++ b/pkg/dataobj/consumer/partition_processor.go
@@ -40,8 +40,8 @@ type partitionProcessor struct {
bufPool *sync.Pool
// Idle stream handling
- idleFlushTimout time.Duration
- lastFlush time.Time
+ idleFlushTimeout time.Duration
+ lastFlush time.Time
// Metrics
metrics *partitionOffsetMetrics
@@ -113,7 +113,7 @@ func newPartitionProcessor(
uploader: uploader,
metastoreManager: metastoreManager,
bufPool: bufPool,
- idleFlushTimout: idleFlushTimeout,
+ idleFlushTimeout: idleFlushTimeout,
lastFlush: time.Now(),
}
}
@@ -136,7 +136,7 @@ func (p *partitionProcessor) start() {
}
p.processRecord(record)
- case <-time.After(p.idleFlushTimout):
+ case <-time.After(p.idleFlushTimeout):
p.idleFlush()
}
}
@@ -294,8 +294,7 @@ func (p *partitionProcessor) idleFlush() {
return
}
- now := time.Now()
- if now.Sub(p.lastFlush) < p.idleFlushTimout {
+ if time.Since(p.lastFlush) < p.idleFlushTimeout {
return // Avoid checking too frequently
}
@@ -310,6 +309,6 @@ func (p *partitionProcessor) idleFlush() {
return
}
- p.lastFlush = now
+ p.lastFlush = time.Now()
}()
}
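The config.go fix is an instance of a general rule for flag registration: the default belongs in the `flag.DurationVar` call itself, not in a pre-registration mutation of the struct — `RegisterFlagsWithPrefix` runs before any user value exists, so the `<= 0` guard could never observe one. A minimal sketch with illustrative names:

```go
package main

import (
	"flag"
	"fmt"
	"time"
)

type Config struct {
	IdleFlushTimeout time.Duration
}

// RegisterFlags passes the default straight to DurationVar, as the diff
// now does; flag parsing (or later YAML unmarshalling) overrides it.
func (c *Config) RegisterFlags(f *flag.FlagSet) {
	f.DurationVar(&c.IdleFlushTimeout, "idle-flush-timeout", time.Hour,
		"Maximum time to wait before flushing an object that no longer receives writes")
}

func main() {
	var cfg Config
	fs := flag.NewFlagSet("consumer", flag.ContinueOnError)
	cfg.RegisterFlags(fs)

	_ = fs.Parse(nil) // no user flags: default applies
	fmt.Println(cfg.IdleFlushTimeout) // 1h0m0s

	_ = fs.Parse([]string{"-idle-flush-timeout", "30m"}) // user override
	fmt.Println(cfg.IdleFlushTimeout) // 30m0s
}
```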
|
chore
|
small fixes for #16348 (#16358)
|
3834c74966b307411732cd3cbaf66305008b10eb
|
2024-10-16 23:51:07
|
Robert Jacob
|
fix(operator): Disable automatic discovery of service name (#14506)
| false
|
diff --git a/operator/internal/manifests/internal/config/build_test.go b/operator/internal/manifests/internal/config/build_test.go
index 016e42253f21b..63799bffbd372 100644
--- a/operator/internal/manifests/internal/config/build_test.go
+++ b/operator/internal/manifests/internal/config/build_test.go
@@ -96,6 +96,7 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -353,6 +354,7 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -779,6 +781,7 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -1137,6 +1140,7 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -1496,6 +1500,7 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -1889,6 +1894,7 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -2224,6 +2230,7 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -2663,6 +2670,7 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -2987,6 +2995,7 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -3484,6 +3493,7 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -3745,6 +3755,7 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -4007,6 +4018,7 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -4270,6 +4282,7 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -4569,6 +4582,7 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -4866,6 +4880,7 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -5364,6 +5379,7 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -5540,6 +5556,7 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -5709,6 +5726,7 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -6101,6 +6119,7 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
diff --git a/operator/internal/manifests/internal/config/loki-config.yaml b/operator/internal/manifests/internal/config/loki-config.yaml
index 01ee6fe9b7075..e7204df71ce5b 100644
--- a/operator/internal/manifests/internal/config/loki-config.yaml
+++ b/operator/internal/manifests/internal/config/loki-config.yaml
@@ -202,6 +202,7 @@ limits_config:
max_streams_per_user: 0
max_line_size: {{ .Stack.Limits.Global.IngestionLimits.MaxLineSize }}
max_entries_limit_per_query: {{ .Stack.Limits.Global.QueryLimits.MaxEntriesLimitPerQuery }}
+ discover_service_name: []
max_global_streams_per_user: {{ .Stack.Limits.Global.IngestionLimits.MaxGlobalStreamsPerTenant }}
max_chunks_per_query: {{ .Stack.Limits.Global.QueryLimits.MaxChunksPerQuery }}
max_query_length: 721h
|
fix
|
Disable automatic discovery of service name (#14506)
|
837b70ac78fc1dc3e8d09f0966acb2c303dbbe35
|
2025-03-03 23:59:07
|
Salva Corts
|
feat(policies): Support global policy (#16439)
| false
|
diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md
index 9afa0e2dff7d3..a3eb2cd788a9f 100644
--- a/docs/sources/shared/configuration.md
+++ b/docs/sources/shared/configuration.md
@@ -3664,9 +3664,10 @@ otlp_config:
# drop them altogether
[log_attributes: <list of attributes_configs>]
-# Block ingestion for policy until the configured date. The time should be in
-# RFC3339 format. The policy is based on the policy_stream_mapping
-# configuration.
+# Block ingestion for policy until the configured date. The policy '*' is the
+# global policy, which is applied to all streams not matching a policy and can
+# be overridden by other policies. The time should be in RFC3339 format. The
+# policy is based on the policy_stream_mapping configuration.
[block_ingestion_policy_until: <map of string to Time>]
# Block ingestion until the configured date. The time should be in RFC3339
@@ -3686,7 +3687,8 @@ otlp_config:
# CLI flag: -validation.enforced-labels
[enforced_labels: <list of strings> | default = []]
-# Map of policies to enforced labels. Example:
+# Map of policies to enforced labels. The policy '*' is the global policy, which
+# is applied to all streams and can be extended by other policies. Example:
# policy_enforced_labels:
# policy1:
# - label1
@@ -3694,6 +3696,8 @@ otlp_config:
# policy2:
# - label3
# - label4
+# '*':
+# - label5
[policy_enforced_labels: <map of string to list of strings>]
# Map of policies to stream selectors with a priority. Experimental. Example:
diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
index 52ac75d1cd296..bd8bae6217196 100644
--- a/pkg/distributor/distributor.go
+++ b/pkg/distributor/distributor.go
@@ -776,16 +776,16 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
// It also returns the first label that is missing if any (for the case of multiple labels missing).
func (d *Distributor) missingEnforcedLabels(lbs labels.Labels, tenantID string, policy string) (bool, []string) {
perPolicyEnforcedLabels := d.validator.Limits.PolicyEnforcedLabels(tenantID, policy)
- globalEnforcedLabels := d.validator.Limits.EnforcedLabels(tenantID)
+ tenantEnforcedLabels := d.validator.Limits.EnforcedLabels(tenantID)
- requiredLbs := append(globalEnforcedLabels, perPolicyEnforcedLabels...)
+ requiredLbs := append(tenantEnforcedLabels, perPolicyEnforcedLabels...)
if len(requiredLbs) == 0 {
// no enforced labels configured.
return false, []string{}
}
// Use a map to deduplicate the required labels. Duplicates may happen if the same label is configured
- // in both global and per-policy enforced labels.
+ // in both the per-tenant and per-policy enforced labels.
seen := make(map[string]struct{})
missingLbs := []string{}
diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go
index 01b22b8423ec1..c39c0a645034b 100644
--- a/pkg/distributor/distributor_test.go
+++ b/pkg/distributor/distributor_test.go
@@ -431,10 +431,11 @@ func Test_MissingEnforcedLabels(t *testing.T) {
limits := &validation.Limits{}
flagext.DefaultValues(limits)
- limits.EnforcedLabels = []string{"app", "env"}
+ limits.EnforcedLabels = []string{"app"}
limits.PolicyEnforcedLabels = map[string][]string{
- "policy1": {"cluster", "namespace"},
- "policy2": {"namespace"},
+ "policy1": {"cluster", "namespace"},
+ "policy2": {"namespace"},
+ validation.GlobalPolicy: {"env"},
}
distributors, _ := prepare(t, 1, 5, limits, nil)
@@ -446,12 +447,18 @@ func Test_MissingEnforcedLabels(t *testing.T) {
assert.False(t, missing)
assert.Empty(t, missingLabels)
- // request missing the `app` label from global enforced labels and `cluster` label from policy enforced labels.
+ // request missing the `app` label from per-tenant enforced labels and `cluster` label from policy enforced labels.
lbs = labels.FromMap(map[string]string{"env": "prod", "namespace": "ns1"})
missing, missingLabels = distributors[0].missingEnforcedLabels(lbs, "test", "policy1")
assert.True(t, missing)
assert.EqualValues(t, []string{"app", "cluster"}, missingLabels)
+ // request missing the `env` label from global policy enforced labels and `cluster` label from policy1 enforced labels.
+ lbs = labels.FromMap(map[string]string{"app": "foo", "namespace": "ns1"})
+ missing, missingLabels = distributors[0].missingEnforcedLabels(lbs, "test", "policy1")
+ assert.True(t, missing)
+ assert.EqualValues(t, []string{"env", "cluster"}, missingLabels)
+
// request missing all required labels.
lbs = labels.FromMap(map[string]string{"pod": "distributor-abc"})
missing, missingLabels = distributors[0].missingEnforcedLabels(lbs, "test", "policy2")
diff --git a/pkg/distributor/validator.go b/pkg/distributor/validator.go
index 083473a5a9b4c..acff95641a546 100644
--- a/pkg/distributor/validator.go
+++ b/pkg/distributor/validator.go
@@ -200,9 +200,11 @@ func (v Validator) reportDiscardedDataWithTracker(ctx context.Context, reason st
}
// ShouldBlockIngestion returns whether ingestion should be blocked, until when and the status code.
+// Priority is: per-tenant block > named-policy block > global ('*') policy block.
func (v Validator) ShouldBlockIngestion(ctx validationContext, now time.Time, policy string) (bool, int, string, error) {
- if block, code, reason, err := v.shouldBlockGlobalPolicy(ctx, now); block {
- return block, code, reason, err
+ if block, until, code := v.shouldBlockTenant(ctx, now); block {
+ err := fmt.Errorf(validation.BlockedIngestionErrorMsg, ctx.userID, until.Format(time.RFC3339), code)
+ return true, code, validation.BlockedIngestion, err
}
if block, until, code := v.shouldBlockPolicy(ctx, policy, now); block {
@@ -213,27 +215,21 @@ func (v Validator) ShouldBlockIngestion(ctx validationContext, now time.Time, po
return false, 0, "", nil
}
-func (v Validator) shouldBlockGlobalPolicy(ctx validationContext, now time.Time) (bool, int, string, error) {
+func (v Validator) shouldBlockTenant(ctx validationContext, now time.Time) (bool, time.Time, int) {
if ctx.blockIngestionUntil.IsZero() {
- return false, 0, "", nil
+ return false, time.Time{}, 0
}
if now.Before(ctx.blockIngestionUntil) {
- err := fmt.Errorf(validation.BlockedIngestionErrorMsg, ctx.userID, ctx.blockIngestionUntil.Format(time.RFC3339), ctx.blockIngestionStatusCode)
- return true, ctx.blockIngestionStatusCode, validation.BlockedIngestion, err
+ return true, ctx.blockIngestionUntil, ctx.blockIngestionStatusCode
}
- return false, 0, "", nil
+ return false, time.Time{}, 0
}
// ShouldBlockPolicy checks if ingestion should be blocked for the given policy.
// It returns true if ingestion should be blocked, along with the block until time and status code.
-func (v *Validator) shouldBlockPolicy(ctx validationContext, policy string, now time.Time) (bool, time.Time, int) {
- // No policy provided, don't block
- if policy == "" {
- return false, time.Time{}, 0
- }
-
+func (v Validator) shouldBlockPolicy(ctx validationContext, policy string, now time.Time) (bool, time.Time, int) {
// Check if this policy is blocked in tenant configs
blockUntil := v.Limits.BlockIngestionPolicyUntil(ctx.userID, policy)
if blockUntil.IsZero() {
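The rewritten checks encode a fixed precedence: an active per-tenant block wins outright, a named policy consults only its own entry, and the `'*'` entry applies only when no named policy matched — which is why an expired named-policy entry is not rescued by an active global one. A standalone decision function capturing that order, with illustrative types:

```go
package main

import (
	"fmt"
	"time"
)

const globalPolicy = "*"

type limits struct {
	blockTenantUntil time.Time
	blockPolicyUntil map[string]time.Time
}

// shouldBlock mirrors the priority in the diff:
// per-tenant block > named-policy block > global ('*') policy block.
func shouldBlock(l limits, policy string, now time.Time) (bool, string) {
	if now.Before(l.blockTenantUntil) {
		return true, "tenant"
	}
	// An empty policy falls through to the '*' entry; a named policy
	// consults only its own entry, so '*' never overrides it.
	key := policy
	if key == "" {
		key = globalPolicy
	}
	if until, ok := l.blockPolicyUntil[key]; ok && now.Before(until) {
		return true, "policy " + key
	}
	return false, ""
}

func main() {
	now := time.Now()
	l := limits{blockPolicyUntil: map[string]time.Time{
		globalPolicy: now.Add(time.Hour),
	}}
	fmt.Println(shouldBlock(l, "", now))        // true "policy *"
	fmt.Println(shouldBlock(l, "policy1", now)) // false ""
}
```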
diff --git a/pkg/distributor/validator_test.go b/pkg/distributor/validator_test.go
index 73a9d1aa0cc38..114bcd09aa62f 100644
--- a/pkg/distributor/validator_test.go
+++ b/pkg/distributor/validator_test.go
@@ -238,6 +238,160 @@ func TestValidator_ValidateLabels(t *testing.T) {
}
}
+func TestShouldBlockIngestion(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ policy string
+ time time.Time
+ overrides validation.TenantLimits
+
+ expectBlock bool
+ expectStatusCode int
+ expectReason string
+ }{
+ {
+ name: "no block configured",
+ time: testTime,
+ overrides: fakeLimits{
+ &validation.Limits{},
+ },
+ },
+ {
+ name: "all configured tenant blocked priority",
+ time: testTime,
+ policy: "policy1",
+ overrides: fakeLimits{
+ &validation.Limits{
+ BlockIngestionUntil: flagext.Time(testTime.Add(time.Hour)),
+ BlockIngestionPolicyUntil: map[string]flagext.Time{
+ validation.GlobalPolicy: flagext.Time(testTime.Add(-2 * time.Hour)),
+ "policy1": flagext.Time(testTime.Add(-time.Hour)),
+ },
+ BlockIngestionStatusCode: 1234,
+ },
+ },
+ expectBlock: true,
+ expectStatusCode: 1234,
+ expectReason: validation.BlockedIngestion,
+ },
+ {
+ name: "named policy priority",
+ time: testTime,
+ policy: "policy1",
+ overrides: fakeLimits{
+ &validation.Limits{
+ BlockIngestionUntil: flagext.Time(testTime.Add(-2 * time.Hour)), // Not active anymore
+ BlockIngestionPolicyUntil: map[string]flagext.Time{
+ validation.GlobalPolicy: flagext.Time(testTime.Add(-time.Hour)),
+ "policy1": flagext.Time(testTime.Add(time.Hour)),
+ },
+ BlockIngestionStatusCode: 1234,
+ },
+ },
+ expectBlock: true,
+ expectStatusCode: 1234,
+ expectReason: validation.BlockedIngestionPolicy,
+ },
+ {
+ name: "global policy ignored",
+ time: testTime,
+ policy: "policy1",
+ overrides: fakeLimits{
+ &validation.Limits{
+ BlockIngestionUntil: flagext.Time(testTime.Add(-time.Hour)), // Not active anymore
+ BlockIngestionPolicyUntil: map[string]flagext.Time{
+ validation.GlobalPolicy: flagext.Time(testTime.Add(time.Hour)), // Won't apply since we have a named policy
+ },
+ BlockIngestionStatusCode: 1234,
+ },
+ },
+ expectBlock: false,
+ },
+ {
+ name: "global policy matched",
+ time: testTime,
+ policy: "", // matches global policy
+ overrides: fakeLimits{
+ &validation.Limits{
+ BlockIngestionPolicyUntil: map[string]flagext.Time{
+ validation.GlobalPolicy: flagext.Time(testTime.Add(time.Hour)),
+ },
+ BlockIngestionStatusCode: 1234,
+ },
+ },
+ expectBlock: true,
+ expectStatusCode: 1234,
+ expectReason: validation.BlockedIngestionPolicy,
+ },
+ {
+ name: "unknown policy not blocked by global policy",
+ time: testTime,
+ policy: "notExists",
+ overrides: fakeLimits{
+ &validation.Limits{
+ BlockIngestionPolicyUntil: map[string]flagext.Time{
+ validation.GlobalPolicy: flagext.Time(testTime.Add(time.Hour)),
+ "policy1": flagext.Time(testTime.Add(2 * time.Hour)),
+ },
+ BlockIngestionStatusCode: 1234,
+ },
+ },
+ expectBlock: false,
+ },
+ {
+ name: "named policy overrides global policy",
+ time: testTime,
+ policy: "policy1",
+ overrides: fakeLimits{
+ &validation.Limits{
+ BlockIngestionPolicyUntil: map[string]flagext.Time{
+ validation.GlobalPolicy: flagext.Time(testTime.Add(time.Hour)),
+ "policy1": flagext.Time(testTime.Add(-time.Hour)), // Not blocked overriding block from global quota
+ },
+ BlockIngestionStatusCode: 1234,
+ },
+ },
+ expectBlock: false,
+ },
+ {
+ name: "no matching policy",
+ time: testTime,
+ policy: "notExists",
+ overrides: fakeLimits{
+ &validation.Limits{
+ BlockIngestionPolicyUntil: map[string]flagext.Time{
+ "policy1": flagext.Time(testTime.Add(2 * time.Hour)),
+ },
+ BlockIngestionStatusCode: 1234,
+ },
+ },
+ expectBlock: false,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ l := &validation.Limits{}
+ flagext.DefaultValues(l)
+
+ o, err := validation.NewOverrides(*l, tc.overrides)
+ assert.NoError(t, err)
+ v, err := NewValidator(o, nil)
+ assert.NoError(t, err)
+
+ block, statusCode, reason, err := v.ShouldBlockIngestion(v.getValidationContextForTime(testTime, "fake"), testTime, tc.policy)
+ assert.Equal(t, tc.expectBlock, block)
+ if tc.expectBlock {
+ assert.Equal(t, tc.expectStatusCode, statusCode)
+ assert.Equal(t, tc.expectReason, reason)
+ assert.Error(t, err)
+ t.Logf("block: %v, statusCode: %d, reason: %s, err: %v", block, statusCode, reason, err)
+ } else {
+ assert.NoError(t, err)
+ }
+ })
+ }
+
+}
+
func mustParseLabels(s string) labels.Labels {
ls, err := syntax.ParseLabels(s)
if err != nil {
diff --git a/pkg/validation/ingestion_policies.go b/pkg/validation/ingestion_policies.go
index 2a614afaaa9a8..d5734f6324e5c 100644
--- a/pkg/validation/ingestion_policies.go
+++ b/pkg/validation/ingestion_policies.go
@@ -9,6 +9,10 @@ import (
"github.com/grafana/loki/v3/pkg/logql/syntax"
)
+const (
+ GlobalPolicy = "*"
+)
+
type PriorityStream struct {
Priority int `yaml:"priority" json:"priority" doc:"description=The larger the value, the higher the priority."`
Selector string `yaml:"selector" json:"selector" doc:"description=Stream selector expression."`
diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go
index 726d6a208bd3c..61c2ea61b0d05 100644
--- a/pkg/validation/limits.go
+++ b/pkg/validation/limits.go
@@ -227,11 +227,11 @@ type Limits struct {
OTLPConfig push.OTLPConfig `yaml:"otlp_config" json:"otlp_config" doc:"description=OTLP log ingestion configurations"`
GlobalOTLPConfig push.GlobalOTLPConfig `yaml:"-" json:"-"`
- BlockIngestionPolicyUntil map[string]dskit_flagext.Time `yaml:"block_ingestion_policy_until" json:"block_ingestion_policy_until" category:"experimental" doc:"description=Block ingestion for policy until the configured date. The time should be in RFC3339 format. The policy is based on the policy_stream_mapping configuration."`
+ BlockIngestionPolicyUntil map[string]dskit_flagext.Time `yaml:"block_ingestion_policy_until" json:"block_ingestion_policy_until" category:"experimental" doc:"description=Block ingestion for policy until the configured date. The policy '*' is the global policy, which is applied to all streams not matching a policy and can be overridden by other policies. The time should be in RFC3339 format. The policy is based on the policy_stream_mapping configuration."`
BlockIngestionUntil dskit_flagext.Time `yaml:"block_ingestion_until" json:"block_ingestion_until" category:"experimental"`
BlockIngestionStatusCode int `yaml:"block_ingestion_status_code" json:"block_ingestion_status_code"`
EnforcedLabels []string `yaml:"enforced_labels" json:"enforced_labels" category:"experimental"`
- PolicyEnforcedLabels map[string][]string `yaml:"policy_enforced_labels" json:"policy_enforced_labels" category:"experimental" doc:"description=Map of policies to enforced labels. Example:\n policy_enforced_labels: \n policy1: \n - label1 \n - label2 \n policy2: \n - label3 \n - label4"`
+ PolicyEnforcedLabels map[string][]string `yaml:"policy_enforced_labels" json:"policy_enforced_labels" category:"experimental" doc:"description=Map of policies to enforced labels. The policy '*' is the global policy, which is applied to all streams and can be extended by other policies. Example:\n policy_enforced_labels: \n policy1: \n - label1 \n - label2 \n policy2: \n - label3 \n - label4\n '*':\n - label5"`
PolicyStreamMapping PolicyStreamMapping `yaml:"policy_stream_mapping" json:"policy_stream_mapping" category:"experimental" doc:"description=Map of policies to stream selectors with a priority. Experimental. Example:\n policy_stream_mapping: \n finance: \n - selector: '{namespace=\"prod\", container=\"billing\"}' \n priority: 2 \n ops: \n - selector: '{namespace=\"prod\", container=\"ops\"}' \n priority: 1 \n staging: \n - selector: '{namespace=\"staging\"}' \n priority: 1"`
IngestionPartitionsTenantShardSize int `yaml:"ingestion_partitions_tenant_shard_size" json:"ingestion_partitions_tenant_shard_size" category:"experimental"`
@@ -1117,15 +1117,23 @@ func (o *Overrides) BlockIngestionStatusCode(userID string) int {
return o.getOverridesForUser(userID).BlockIngestionStatusCode
}
+// BlockIngestionPolicyUntil returns the time until the ingestion policy is blocked for a given user.
+// Order of priority is: named policy block > global policy block. The global policy block is enforced
+// only if the policy is empty.
func (o *Overrides) BlockIngestionPolicyUntil(userID string, policy string) time.Time {
limits := o.getOverridesForUser(userID)
- if limits == nil || limits.BlockIngestionPolicyUntil == nil {
- return time.Time{} // Zero time means no blocking
+
+ if forPolicy, ok := limits.BlockIngestionPolicyUntil[policy]; ok {
+ return time.Time(forPolicy)
}
- if blockUntil, ok := limits.BlockIngestionPolicyUntil[policy]; ok {
- return time.Time(blockUntil)
+ // We enforce the global policy on streams not matching any policy
+ if policy == "" {
+ if forPolicy, ok := limits.BlockIngestionPolicyUntil[GlobalPolicy]; ok {
+ return time.Time(forPolicy)
+ }
}
+
return time.Time{} // Zero time means no blocking
}
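
A minimal Go sketch of the resolution order implemented above, using a plain map[string]time.Time in place of Loki's per-tenant flagext.Time limits (blockUntil and its parameters are illustrative names, not Loki's API):

package main

import (
	"fmt"
	"time"
)

// blockUntil mirrors the resolution order above: a named policy entry always
// wins, even when it lifts a global block; the global policy "*" applies only
// to streams that matched no named policy (empty policy string).
func blockUntil(limits map[string]time.Time, policy string) time.Time {
	if t, ok := limits[policy]; ok {
		return t
	}
	if policy == "" {
		if t, ok := limits["*"]; ok {
			return t
		}
	}
	return time.Time{} // zero time means no blocking
}

func main() {
	now := time.Now()
	limits := map[string]time.Time{
		"*":       now.Add(time.Hour),  // global block still active
		"policy1": now.Add(-time.Hour), // named block already expired
	}
	fmt.Println(blockUntil(limits, "policy1").Before(now)) // true: named entry overrides the global block
	fmt.Println(blockUntil(limits, "").After(now))         // true: empty policy falls back to "*"
	fmt.Println(blockUntil(limits, "other").IsZero())      // true: "*" never blocks an unknown named policy
}

These three cases correspond directly to the "named policy overrides global policy", "global policy matched", and "unknown policy not blocked by global policy" test cases above.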
@@ -1133,8 +1141,11 @@ func (o *Overrides) EnforcedLabels(userID string) []string {
return o.getOverridesForUser(userID).EnforcedLabels
}
+// PolicyEnforcedLabels returns the labels enforced by the policy for a given user.
+// The output is the union of the global and policy specific labels.
func (o *Overrides) PolicyEnforcedLabels(userID string, policy string) []string {
- return o.getOverridesForUser(userID).PolicyEnforcedLabels[policy]
+ limits := o.getOverridesForUser(userID)
+ return append(limits.PolicyEnforcedLabels[GlobalPolicy], limits.PolicyEnforcedLabels[policy]...)
}
func (o *Overrides) PoliciesStreamMapping(userID string) PolicyStreamMapping {
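
The union semantics of PolicyEnforcedLabels above can be shown with a small self-contained sketch. Note the diff appends to the global slice directly; copying into a fresh slice, as here, is a defensive variation that avoids aliasing the global entry's backing array, not the shipped code:

package main

import "fmt"

// enforcedLabels mirrors PolicyEnforcedLabels above: the result is the union
// of the global "*" entry and the policy-specific entry.
func enforcedLabels(perPolicy map[string][]string, policy string) []string {
	out := make([]string, 0, len(perPolicy["*"])+len(perPolicy[policy]))
	out = append(out, perPolicy["*"]...)
	return append(out, perPolicy[policy]...)
}

func main() {
	perPolicy := map[string][]string{
		"*":       {"label5"},
		"policy1": {"label1", "label2"},
	}
	fmt.Println(enforcedLabels(perPolicy, "policy1")) // [label5 label1 label2]
	fmt.Println(enforcedLabels(perPolicy, "missing")) // [label5]
}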
| feat | Support global policy (#16439) |

| 96937d68188ee5e4d96100dff45641fa7be0c07e | 2025-03-01 02:55:31 | renovate[bot] | chore(deps): update terraform google to v6.23.0 (main) (#16508) | false |
diff --git a/tools/gcplog/main.tf b/tools/gcplog/main.tf
index 1831163c64940..c68b6a706eb29 100644
--- a/tools/gcplog/main.tf
+++ b/tools/gcplog/main.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
google = {
source = "hashicorp/google"
- version = "6.22.0"
+ version = "6.23.0"
}
}
}
| chore | update terraform google to v6.23.0 (main) (#16508) |

| 09a5a2cee31de2a973c00a7840dad3f5a24ced10 | 2022-01-28 19:42:11 | Ed Welch | logstash: Add config option which allows setting up an allowlist for labels to be mapped to Loki (#5244) | false |
diff --git a/clients/cmd/logstash/lib/logstash/outputs/loki.rb b/clients/cmd/logstash/lib/logstash/outputs/loki.rb
index 1b130399d9138..01f9a0aa2e792 100644
--- a/clients/cmd/logstash/lib/logstash/outputs/loki.rb
+++ b/clients/cmd/logstash/lib/logstash/outputs/loki.rb
@@ -47,6 +47,9 @@ class LogStash::Outputs::Loki < LogStash::Outputs::Base
## 'Backoff configuration. Initial backoff time between retries. Default 1s'
config :min_delay, :validate => :number, :default => 1, :required => false
+ ## 'An array of fields to map to labels, if defined only fields in this list will be mapped.'
+ config :include_fields, :validate => :array, :default => [], :required => false
+
## 'Backoff configuration. Maximum backoff time between retries. Default 300s'
config :max_delay, :validate => :number, :default => 300, :required => false
@@ -198,7 +201,7 @@ def is_batch_expired
## Receives logstash events
public
def receive(event)
- @entries << Entry.new(event, @message_field)
+ @entries << Entry.new(event, @message_field, @include_fields)
end
def close
diff --git a/clients/cmd/logstash/lib/logstash/outputs/loki/entry.rb b/clients/cmd/logstash/lib/logstash/outputs/loki/entry.rb
index dcfd70bb20563..26f04fa36ba58 100644
--- a/clients/cmd/logstash/lib/logstash/outputs/loki/entry.rb
+++ b/clients/cmd/logstash/lib/logstash/outputs/loki/entry.rb
@@ -5,7 +5,7 @@ def to_ns(s)
class Entry
include Loki
attr_reader :labels, :entry
- def initialize(event,message_field)
+ def initialize(event,message_field,include_fields)
@entry = {
"ts" => to_ns(event.get("@timestamp")),
"line" => event.get(message_field).to_s
@@ -18,6 +18,7 @@ def initialize(event,message_field)
event.to_hash.each { |key,value|
next if key.start_with?('@')
next if value.is_a?(Hash)
+ next if include_fields.length() > 0 and not include_fields.include?(key)
@labels[key] = value.to_s
}
end
diff --git a/clients/cmd/logstash/logstash-output-loki.gemspec b/clients/cmd/logstash/logstash-output-loki.gemspec
index d58c702d148d1..cbd66eb062127 100644
--- a/clients/cmd/logstash/logstash-output-loki.gemspec
+++ b/clients/cmd/logstash/logstash-output-loki.gemspec
@@ -1,6 +1,6 @@
Gem::Specification.new do |s|
s.name = 'logstash-output-loki'
- s.version = '1.0.4'
+ s.version = '1.1.0'
s.authors = ['Aditya C S','Cyril Tovena']
s.email = ['[email protected]','[email protected]']
diff --git a/clients/cmd/logstash/loki.conf b/clients/cmd/logstash/loki.conf
index f8ae19ba48eb9..a0ab6e062a0c0 100644
--- a/clients/cmd/logstash/loki.conf
+++ b/clients/cmd/logstash/loki.conf
@@ -12,6 +12,9 @@ output {
#message_field => "message" #default message
+ # If include_fields is set, only fields in this list will be sent to Loki as labels.
+ #include_fields => ["service","host","app","env"] #default empty array, all labels included.
+
#batch_wait => 1 ## in seconds #default 1 second
#batch_size => 102400 #bytes #default 102400 bytes
diff --git a/clients/cmd/logstash/spec/outputs/loki/entry_spec.rb b/clients/cmd/logstash/spec/outputs/loki/entry_spec.rb
index 4f545706c6753..615d873bccff7 100644
--- a/clients/cmd/logstash/spec/outputs/loki/entry_spec.rb
+++ b/clients/cmd/logstash/spec/outputs/loki/entry_spec.rb
@@ -27,18 +27,25 @@
}
it 'labels extracted should not contain object and metadata or timestamp' do
- entry = Entry.new(event,"message")
+ entry = Entry.new(event,"message", [])
expect(entry.labels).to eql({ 'agent' => 'filebeat', 'host' => '172.0.0.1', 'foo'=>'5'})
expect(entry.entry['ts']).to eql to_ns(event.get("@timestamp"))
expect(entry.entry['line']).to eql 'hello'
end
+
+ it 'labels extracted should only contain allowlisted labels' do
+ entry = Entry.new(event, "message", %w[agent foo])
+ expect(entry.labels).to eql({ 'agent' => 'filebeat', 'foo'=>'5'})
+ expect(entry.entry['ts']).to eql to_ns(event.get("@timestamp"))
+ expect(entry.entry['line']).to eql 'hello'
+ end
end
context 'test batch generation with label order' do
let (:entries) {[
- Entry.new(LogStash::Event.new({"message"=>"foobuzz","buzz"=>"bar","cluster"=>"us-central1","@timestamp"=>Time.at(1)}),"message"),
- Entry.new(LogStash::Event.new({"log"=>"foobar","bar"=>"bar","@timestamp"=>Time.at(2)}),"log"),
- Entry.new(LogStash::Event.new({"cluster"=>"us-central1","message"=>"foobuzz","buzz"=>"bar","@timestamp"=>Time.at(3)}),"message"),
+ Entry.new(LogStash::Event.new({"message"=>"foobuzz","buzz"=>"bar","cluster"=>"us-central1","@timestamp"=>Time.at(1)}),"message", []),
+ Entry.new(LogStash::Event.new({"log"=>"foobar","bar"=>"bar","@timestamp"=>Time.at(2)}),"log", []),
+ Entry.new(LogStash::Event.new({"cluster"=>"us-central1","message"=>"foobuzz","buzz"=>"bar","@timestamp"=>Time.at(3)}),"message", []),
]}
let (:expected) {
diff --git a/clients/cmd/logstash/spec/outputs/loki_spec.rb b/clients/cmd/logstash/spec/outputs/loki_spec.rb
index bf1d42ee6a706..8183798f23c1a 100644
--- a/clients/cmd/logstash/spec/outputs/loki_spec.rb
+++ b/clients/cmd/logstash/spec/outputs/loki_spec.rb
@@ -28,12 +28,15 @@
context 'when adding an entry to the batch' do
let (:simple_loki_config) {{'url' => 'http://localhost:3100'}}
- let (:entry) {Entry.new(LogStash::Event.new({"message"=>"foobuzz","buzz"=>"bar","cluster"=>"us-central1","@timestamp"=>Time.at(1)}),"message")}
- let (:lbs) { {"buzz"=>"bar","cluster"=>"us-central1"}.sort.to_h}
+ let (:entry) {Entry.new(LogStash::Event.new({"message"=>"foobuzz","buzz"=>"bar","cluster"=>"us-central1","@timestamp"=>Time.at(1)}),"message", [])}
+ let (:lbs) {{"buzz"=>"bar","cluster"=>"us-central1"}.sort.to_h}
+ let (:include_loki_config) {{ 'url' => 'http://localhost:3100', 'include_fields' => ["cluster"] }}
+ let (:include_entry) {Entry.new(LogStash::Event.new({"message"=>"foobuzz","buzz"=>"bar","cluster"=>"us-central1","@timestamp"=>Time.at(1)}),"message", ["cluster"])}
+ let (:include_lbs) {{"cluster"=>"us-central1"}.sort.to_h}
it 'should not add empty line' do
plugin = LogStash::Plugin.lookup("output", "loki").new(simple_loki_config)
- emptyEntry = Entry.new(LogStash::Event.new({"message"=>"foobuzz","buzz"=>"bar","cluster"=>"us-central1","@timestamp"=>Time.at(1)}),"foo")
+ emptyEntry = Entry.new(LogStash::Event.new({"message"=>"foobuzz","buzz"=>"bar","cluster"=>"us-central1","@timestamp"=>Time.at(1)}),"foo", [])
expect(plugin.add_entry_to_batch(emptyEntry)).to eql true
expect(plugin.batch).to eql nil
end
@@ -50,6 +53,18 @@
expect(plugin.batch.size_bytes).to eq 14
end
+ it 'should only allow labels defined in include_fields' do
+ plugin = LogStash::Plugin.lookup("output", "loki").new(include_loki_config)
+ expect(plugin.batch).to eql nil
+ expect(plugin.add_entry_to_batch(include_entry)).to eql true
+ expect(plugin.add_entry_to_batch(include_entry)).to eql true
+ expect(plugin.batch).not_to be_nil
+ expect(plugin.batch.streams.length).to eq 1
+ expect(plugin.batch.streams[include_lbs.to_s]['entries'].length).to eq 2
+ expect(plugin.batch.streams[include_lbs.to_s]['labels']).to eq include_lbs
+ expect(plugin.batch.size_bytes).to eq 14
+ end
+
it 'should not add if full' do
plugin = LogStash::Plugin.lookup("output", "loki").new(simple_loki_config.merge!({'batch_size'=>10}))
expect(plugin.batch).to eql nil
@@ -69,7 +84,7 @@
end
context 'batch expiration' do
- let (:entry) {Entry.new(LogStash::Event.new({"message"=>"foobuzz","buzz"=>"bar","cluster"=>"us-central1","@timestamp"=>Time.at(1)}),"message")}
+ let (:entry) {Entry.new(LogStash::Event.new({"message"=>"foobuzz","buzz"=>"bar","cluster"=>"us-central1","@timestamp"=>Time.at(1)}),"message", [])}
it 'should not expire if empty' do
loki = LogStash::Outputs::Loki.new(simple_loki_config.merge!({'batch_wait'=>0.5}))
@@ -138,7 +153,7 @@
end
context 'http requests' do
- let (:entry) {Entry.new(LogStash::Event.new({"message"=>"foobuzz","buzz"=>"bar","cluster"=>"us-central1","@timestamp"=>Time.at(1)}),"message")}
+ let (:entry) {Entry.new(LogStash::Event.new({"message"=>"foobuzz","buzz"=>"bar","cluster"=>"us-central1","@timestamp"=>Time.at(1)}),"message", [])}
it 'should send credentials' do
conf = {
diff --git a/docs/sources/clients/logstash/_index.md b/docs/sources/clients/logstash/_index.md
index da70791e476c5..7bdf2305ba746 100644
--- a/docs/sources/clients/logstash/_index.md
+++ b/docs/sources/clients/logstash/_index.md
@@ -57,6 +57,8 @@ output {
[tenant_id => string | default = nil | required=false]
[message_field => string | default = "message" | required=false]
+
+ [include_fields => array | default = [] | required=false]
[batch_wait => number | default = 1(s) | required=false]
@@ -106,6 +108,8 @@ Contains a `message` and `@timestamp` fields, which are respectively used to for
All other fields (except nested fields) will form the label set (key value pairs) attached to the log line. [This means you're responsible for mutating and dropping high cardinality labels](https://grafana.com/blog/2020/04/21/how-labels-in-loki-can-make-log-queries-faster-and-easier/) such as client IPs.
You can usually do so by using a [`mutate`](https://www.elastic.co/guide/en/logstash/current/plugins-filters-mutate.html) filter.
+**Note:** In version 1.1.0 and later of this plugin, you can also specify an allowlist of labels via the `include_fields` configuration.
+
For example the configuration below :
```conf
@@ -204,6 +208,10 @@ If using the [GrafanaLab's hosted Loki](https://grafana.com/products/cloud/), th
Message field to use for log lines. You can use logstash key accessor language to grab nested property, for example : `[log][message]`.
+#### include_fields
+
+An array of fields which will be mapped to labels and sent to Loki. When this list is configured, **only** these fields will be sent; all other fields will be ignored.
+
#### batch_wait
Interval in seconds to wait before pushing a batch of records to Loki. This means that even if the [batch size](#batch_size) is not reached after `batch_wait`, a partial batch will be sent; this ensures freshness of the data.
@@ -259,7 +267,7 @@ filter {
}
}
mutate {
- remove_field => ["tags"]
+ remove_field => ["tags"] # Note: with include_fields defined below this wouldn't be necessary
}
}
@@ -273,6 +281,7 @@ output {
min_delay => 3
max_delay => 500
message_field => "message"
+ include_fields => ["container_name","namespace","pod","host"]
}
# stdout { codec => rubydebug }
}
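
The shipped plugin is Ruby, but the allowlist rule it adds is simple enough to restate in a hedged Go sketch (filterLabels and its parameters are hypothetical names mirroring include_fields; the plugin's other exclusions, such as '@'-prefixed and nested fields, are omitted here):

package main

import "fmt"

// filterLabels applies the allowlist rule: an empty allowlist keeps every
// eligible field, a non-empty one keeps only the listed keys.
func filterLabels(fields map[string]string, includeFields []string) map[string]string {
	if len(includeFields) == 0 {
		return fields
	}
	allowed := make(map[string]bool, len(includeFields))
	for _, k := range includeFields {
		allowed[k] = true
	}
	out := make(map[string]string, len(fields))
	for k, v := range fields {
		if allowed[k] {
			out[k] = v
		}
	}
	return out
}

func main() {
	fields := map[string]string{"agent": "filebeat", "host": "172.0.0.1", "foo": "5"}
	fmt.Println(filterLabels(fields, []string{"agent", "foo"})) // map[agent:filebeat foo:5]
	fmt.Println(len(filterLabels(fields, nil)))                 // 3: empty allowlist keeps all fields
}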
| logstash | Add config option which allows setting up an allowlist for labels to be mapped to Loki (#5244) |

| b2fe044279a963c9d6b2c4ffdd39ab333be92b9f | 2019-08-16 01:44:51 | sh0rez | chore(ci/cd): fix grafanasaur credentials and CircleCI image build (#900) | false |
diff --git a/.drone/drone.jsonnet b/.drone/drone.jsonnet
index 366d1f6c47069..a54cad6ec2fcc 100644
--- a/.drone/drone.jsonnet
+++ b/.drone/drone.jsonnet
@@ -73,8 +73,8 @@ local manifest(apps) = pipeline('manifest') {
target: app,
spec: '.drone/docker-manifest.tmpl',
ignore_missing: true,
- username: { from_secret: 'docker_username' },
- password: { from_secret: 'docker_password' },
+ username: { from_secret: 'saur_username' },
+ password: { from_secret: 'saur_password' },
},
depends_on: ['clone'],
}
diff --git a/.drone/drone.yml b/.drone/drone.yml
index 10c77f0230661..05290af9fb847 100644
--- a/.drone/drone.yml
+++ b/.drone/drone.yml
@@ -357,11 +357,11 @@ steps:
settings:
ignore_missing: true
password:
- from_secret: docker_password
+ from_secret: saur_password
spec: .drone/docker-manifest.tmpl
target: promtail
username:
- from_secret: docker_username
+ from_secret: saur_username
- depends_on:
- clone
image: plugins/manifest
@@ -369,11 +369,11 @@ steps:
settings:
ignore_missing: true
password:
- from_secret: docker_password
+ from_secret: saur_password
spec: .drone/docker-manifest.tmpl
target: loki
username:
- from_secret: docker_username
+ from_secret: saur_username
- depends_on:
- clone
image: plugins/manifest
@@ -381,11 +381,11 @@ steps:
settings:
ignore_missing: true
password:
- from_secret: docker_password
+ from_secret: saur_password
spec: .drone/docker-manifest.tmpl
target: loki-canary
username:
- from_secret: docker_username
+ from_secret: saur_username
trigger:
ref:
include:
diff --git a/Makefile b/Makefile
index 37c2855dd579b..7b1664fa24e81 100644
--- a/Makefile
+++ b/Makefile
@@ -402,7 +402,7 @@ promtail-debug-image: OCI_PLATFORMS=
promtail-debug-image:
$(SUDO) $(BUILD_OCI) -t $(IMAGE_PREFIX)/promtail:$(IMAGE_TAG)-debug -f cmd/promtail/Dockerfile.debug .
-promtail-push: promtail-image
+promtail-push: promtail-image-cross
$(call push-image,promtail)
# loki
@@ -415,7 +415,7 @@ loki-debug-image: OCI_PLATFORMS=
loki-debug-image:
$(SUDO) $(BUILD_OCI) -t $(IMAGE_PREFIX)/loki:$(IMAGE_TAG)-debug -f cmd/loki/Dockerfile.debug .
-loki-push: loki-image
+loki-push: loki-image-cross
$(call push-image,loki)
# loki-canary
@@ -423,7 +423,7 @@ loki-canary-image:
$(SUDO) docker build -t $(IMAGE_PREFIX)/loki-canary:$(IMAGE_TAG) -f cmd/loki-canary/Dockerfile .
loki-canary-image-cross:
$(SUDO) $(BUILD_OCI) -t $(IMAGE_PREFIX)/loki-canary:$(IMAGE_TAG) -f cmd/loki-canary/Dockerfile.cross .
-loki-canary-push: loki-canary-image
+loki-canary-push: loki-canary-image-cross
$(SUDO) $(PUSH_OCI) $(IMAGE_PREFIX)/loki-canary:$(IMAGE_TAG)
# build-image (only amd64)
| chore | fix grafanasaur credentials and CircleCI image build (#900) |

| f49a510f38fae228c36118bf88441826809ef7bd | 2021-03-24 20:15:04 | Michel Hollands | loki: Update cortex version and fix resulting changes (#3532) | false |
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8a69efd3bf9c3..33e3447ba425e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,6 @@
## Master
+* [3532](https://github.com/grafana/loki/pull/3532) **MichelHollands**: Update vendored Cortex to 8a2e2c1eeb65
* [3446](https://github.com/grafana/loki/pull/3446) **pracucci, owen-d**: Remove deprecated config `querier.split-queries-by-day` in favor of `querier.split-queries-by-interval`
## 2.2.0 (2021/03/10)
diff --git a/go.mod b/go.mod
index 0b9f108d72c3a..2c081215e841c 100644
--- a/go.mod
+++ b/go.mod
@@ -12,7 +12,7 @@ require (
github.com/cespare/xxhash/v2 v2.1.1
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
- github.com/cortexproject/cortex v1.7.1-0.20210310133228-161f103ed5ba
+ github.com/cortexproject/cortex v1.7.1-0.20210323110114-8a2e2c1eeb65
github.com/davecgh/go-spew v1.1.1
github.com/docker/docker v20.10.3+incompatible
github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 // indirect
@@ -26,7 +26,7 @@ require (
github.com/go-logfmt/logfmt v0.5.0
github.com/gofrs/flock v0.7.1 // indirect
github.com/gogo/protobuf v1.3.2 // remember to update loki-build-image/Dockerfile too
- github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3
+ github.com/golang/snappy v0.0.3
github.com/gorilla/mux v1.7.3
github.com/gorilla/websocket v1.4.2
github.com/grpc-ecosystem/go-grpc-middleware v1.2.2
@@ -51,7 +51,7 @@ require (
github.com/prometheus/client_golang v1.9.0
github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.18.0
- github.com/prometheus/prometheus v1.8.2-0.20210215121130-6f488061dfb4
+ github.com/prometheus/prometheus v1.8.2-0.20210321183757-31a518faab18
github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b
github.com/segmentio/fasthash v1.0.2
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
diff --git a/go.sum b/go.sum
index e3ecdf1de292d..50f7f4036dbdf 100644
--- a/go.sum
+++ b/go.sum
@@ -333,8 +333,8 @@ github.com/cortexproject/cortex v1.5.1-0.20201111110551-ba512881b076/go.mod h1:z
github.com/cortexproject/cortex v1.6.1-0.20210108144208-6c2dab103f20/go.mod h1:fOsaeeFSyWrjd9nFJO8KVUpsikcxnYsjEzQyjURBoQk=
github.com/cortexproject/cortex v1.6.1-0.20210215155036-dfededd9f331/go.mod h1:8bRHNDawVx8te5lIqJ+/AcNTyfosYNC34Qah7+jX/8c=
github.com/cortexproject/cortex v1.7.1-0.20210224085859-66d6fb5b0d42/go.mod h1:u2dxcHInYbe45wxhLoWVdlFJyDhXewsMcxtnbq/QbH4=
-github.com/cortexproject/cortex v1.7.1-0.20210310133228-161f103ed5ba h1:dHqQ4mMapwPHCLwQJmvJvcZtG7wlH0oAJM1IkO8eZEc=
-github.com/cortexproject/cortex v1.7.1-0.20210310133228-161f103ed5ba/go.mod h1:2V5O+D4nzBRQteKKyHq7NVVOk1+wp2HvpP8n5Sv9bok=
+github.com/cortexproject/cortex v1.7.1-0.20210323110114-8a2e2c1eeb65 h1:5g+WHg3AO12lvwQ+Vt8RjU6fywHEm9hVtDjbT7lDZ4c=
+github.com/cortexproject/cortex v1.7.1-0.20210323110114-8a2e2c1eeb65/go.mod h1:mL94M9u0HyWk4Q7/de5nWAgbyujGjHgdwMYHcM3W36I=
github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U=
github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c=
github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
@@ -385,6 +385,8 @@ github.com/digitalocean/godo v1.52.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2x
github.com/digitalocean/godo v1.57.0 h1:uCpe0sRIZ/sJWxWDsJyBPBjUfSvxop+WHkHiSf+tjjM=
github.com/digitalocean/godo v1.57.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
+github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
+github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
@@ -573,6 +575,8 @@ github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LB
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
+github.com/go-zookeeper/zk v1.0.2 h1:4mx0EYENAdX/B/rbunjlt5+4RTA/a9SMHBRuSKdGxPM=
+github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
@@ -662,8 +666,9 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3 h1:ur2rms48b3Ep1dxh7aUV2FZEQ8jEVO2F6ILKx8ofkAg=
github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomodule/redigo v1.8.4/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0=
github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
@@ -1258,8 +1263,8 @@ github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2a
github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go=
github.com/prometheus/alertmanager v0.21.1-0.20200911160112-1fdff6b3f939/go.mod h1:imXRHOP6QTsE0fFsIsAV/cXimS32m7gVZOiUj11m6Ig=
github.com/prometheus/alertmanager v0.21.1-0.20201106142418-c39b78780054/go.mod h1:imXRHOP6QTsE0fFsIsAV/cXimS32m7gVZOiUj11m6Ig=
-github.com/prometheus/alertmanager v0.21.1-0.20210303154452-7866b9bb0927 h1:BLdqq8kRvpCWghcXjU32mi4pzJlyo8InM5hfmIqFyoc=
-github.com/prometheus/alertmanager v0.21.1-0.20210303154452-7866b9bb0927/go.mod h1:MTqVn+vIupE0dzdgo+sMcNCp37SCAi8vPrvKTTnTz9g=
+github.com/prometheus/alertmanager v0.21.1-0.20210310093010-0f9cab6991e6 h1:WeazuhFA+g8Xce5wgqskDP+b48oQKk7smH72dxO2beA=
+github.com/prometheus/alertmanager v0.21.1-0.20210310093010-0f9cab6991e6/go.mod h1:MTqVn+vIupE0dzdgo+sMcNCp37SCAi8vPrvKTTnTz9g=
github.com/prometheus/client_golang v0.0.0-20180328130430-f504d69affe1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
@@ -1340,8 +1345,9 @@ github.com/prometheus/prometheus v1.8.2-0.20201028100903-3245b3267b24/go.mod h1:
github.com/prometheus/prometheus v1.8.2-0.20201029103703-63be30dceed9/go.mod h1:MDRkz271loM/PrYN+wUNEaTMDGSP760MQzB0yEjdgSQ=
github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9/go.mod h1:1MDE/bXgu4gqd5w/otko6WQpXZX9vu8QX4KbitCmaPg=
github.com/prometheus/prometheus v1.8.2-0.20201119181812-c8f810083d3f/go.mod h1:1MDE/bXgu4gqd5w/otko6WQpXZX9vu8QX4KbitCmaPg=
-github.com/prometheus/prometheus v1.8.2-0.20210215121130-6f488061dfb4 h1:EbUBvqL6oYUwL6IAI4OzxM9GYbRE+/N+maV/w5+v6Ac=
github.com/prometheus/prometheus v1.8.2-0.20210215121130-6f488061dfb4/go.mod h1:NAYujktP0dmSSpeV155mtnwX2pndLpVVK/Ps68R01TA=
+github.com/prometheus/prometheus v1.8.2-0.20210321183757-31a518faab18 h1:8chKJNOWv10FApdXgQ8Td8oYFrfFTbiBp/QpBaxEMRA=
+github.com/prometheus/prometheus v1.8.2-0.20210321183757-31a518faab18/go.mod h1:MS/bpdil77lPbfQeKk6OqVQ9OLnpN3Rszd0hka0EOWE=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1/go.mod h1:JaY6n2sDr+z2WTsXkOmNRUfDy6FN0L6Nk7x06ndm4tY=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
@@ -1368,11 +1374,12 @@ github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0
github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
-github.com/samuel/go-zookeeper v0.0.0-20201211165307-7117e9ea2414 h1:AJNDS0kP60X8wwWFvbLPwDuojxubj9pbfK7pjHw0vKg=
github.com/samuel/go-zookeeper v0.0.0-20201211165307-7117e9ea2414/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4=
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44 h1:3egqo0Vut6daANFm7tOXdNAa8v5/uLU+sgCJrc88Meo=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e/go.mod h1:tm/wZFQ8e24NYaBGIlnO2WGCAi67re4HHuOm0sftE/M=
diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
index 6c04a72bcbfec..1f62eba8f17f1 100644
--- a/pkg/distributor/distributor.go
+++ b/pkg/distributor/distributor.go
@@ -235,7 +235,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
}
now := time.Now()
- if ok, _ := d.ingestionRateLimiter.AllowN(now, userID, validatedSamplesSize); !ok {
+ if !d.ingestionRateLimiter.AllowN(now, userID, validatedSamplesSize) {
// Return a 429 to indicate to the client they are being rate limited
validation.DiscardedSamples.WithLabelValues(validation.RateLimited, userID).Add(float64(validatedSamplesCount))
validation.DiscardedBytes.WithLabelValues(validation.RateLimited, userID).Add(float64(validatedSamplesSize))
@@ -253,9 +253,9 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
return nil, err
}
- streams[i].minSuccess = len(replicationSet.Ingesters) - replicationSet.MaxErrors
+ streams[i].minSuccess = len(replicationSet.Instances) - replicationSet.MaxErrors
streams[i].maxFailures = replicationSet.MaxErrors
- for _, ingester := range replicationSet.Ingesters {
+ for _, ingester := range replicationSet.Instances {
samplesByIngester[ingester.Addr] = append(samplesByIngester[ingester.Addr], &streams[i])
ingesterDescs[ingester.Addr] = ingester
}
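
The quorum arithmetic in the renamed replication-set fields above is worth spelling out once: minSuccess is the instance count minus the tolerated errors. A tiny sketch with assumed numbers:

package main

import "fmt"

func main() {
	// Assumed numbers: replication factor 3, at most 1 tolerated error.
	instances, maxErrors := 3, 1
	minSuccess := instances - maxErrors
	fmt.Println(minSuccess) // 2: a push must succeed on at least 2 ingesters
}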
diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go
index 70a9e427ada64..5c6356d956b2e 100644
--- a/pkg/distributor/distributor_test.go
+++ b/pkg/distributor/distributor_test.go
@@ -371,11 +371,11 @@ type mockRing struct {
func (r mockRing) Get(key uint32, op ring.Operation, buf []ring.InstanceDesc, _ []string, _ []string) (ring.ReplicationSet, error) {
result := ring.ReplicationSet{
MaxErrors: 1,
- Ingesters: buf[:0],
+ Instances: buf[:0],
}
for i := uint32(0); i < r.replicationFactor; i++ {
n := (key + i) % uint32(len(r.ingesters))
- result.Ingesters = append(result.Ingesters, r.ingesters[n])
+ result.Instances = append(result.Instances, r.ingesters[n])
}
return result, nil
}
@@ -386,7 +386,7 @@ func (r mockRing) GetAllHealthy(op ring.Operation) (ring.ReplicationSet, error)
func (r mockRing) GetReplicationSetForOperation(op ring.Operation) (ring.ReplicationSet, error) {
return ring.ReplicationSet{
- Ingesters: r.ingesters,
+ Instances: r.ingesters,
MaxErrors: 1,
}, nil
}
diff --git a/pkg/promtail/scrapeconfig/scrapeconfig_test.go b/pkg/promtail/scrapeconfig/scrapeconfig_test.go
index 2756c21acea16..850c5b633f322 100644
--- a/pkg/promtail/scrapeconfig/scrapeconfig_test.go
+++ b/pkg/promtail/scrapeconfig/scrapeconfig_test.go
@@ -3,6 +3,7 @@ package scrapeconfig
import (
"testing"
+ promConfig "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery/kubernetes"
"github.com/prometheus/prometheus/discovery/targetgroup"
@@ -104,6 +105,9 @@ func TestLoadSmallConfig(t *testing.T) {
KubernetesSDConfigs: []*kubernetes.SDConfig{
{
Role: "pod",
+ HTTPClientConfig: promConfig.HTTPClientConfig{
+ FollowRedirects: true,
+ },
},
},
StaticConfigs: []*targetgroup.Group{
diff --git a/pkg/querier/ingester_querier.go b/pkg/querier/ingester_querier.go
index dcc88b82b7b3f..2bc78c98934bf 100644
--- a/pkg/querier/ingester_querier.go
+++ b/pkg/querier/ingester_querier.go
@@ -177,7 +177,7 @@ func (q *IngesterQuerier) TailDisconnectedIngesters(ctx context.Context, req *lo
// Look for disconnected ingesters or new one we should (re)connect to
reconnectIngesters := []ring.InstanceDesc{}
- for _, ingester := range replicationSet.Ingesters {
+ for _, ingester := range replicationSet.Instances {
if _, ok := connected[ingester.Addr]; ok {
continue
}
@@ -195,7 +195,7 @@ func (q *IngesterQuerier) TailDisconnectedIngesters(ctx context.Context, req *lo
}
// Instance a tail client for each ingester to re(connect)
- reconnectClients, err := q.forGivenIngesters(ctx, ring.ReplicationSet{Ingesters: reconnectIngesters}, func(client logproto.QuerierClient) (interface{}, error) {
+ reconnectClients, err := q.forGivenIngesters(ctx, ring.ReplicationSet{Instances: reconnectIngesters}, func(client logproto.QuerierClient) (interface{}, error) {
return client.Tail(ctx, req)
})
if err != nil {
@@ -233,9 +233,9 @@ func (q *IngesterQuerier) TailersCount(ctx context.Context) ([]uint32, error) {
// we want to check count of active tailers with only active ingesters
ingesters := make([]ring.InstanceDesc, 0, 1)
- for i := range replicationSet.Ingesters {
- if replicationSet.Ingesters[i].State == ring.ACTIVE {
- ingesters = append(ingesters, replicationSet.Ingesters[i])
+ for i := range replicationSet.Instances {
+ if replicationSet.Instances[i].State == ring.ACTIVE {
+ ingesters = append(ingesters, replicationSet.Instances[i])
}
}
diff --git a/pkg/querier/querier_mock_test.go b/pkg/querier/querier_mock_test.go
index d9b9b9a27e5d0..2bd5be99369ed 100644
--- a/pkg/querier/querier_mock_test.go
+++ b/pkg/querier/querier_mock_test.go
@@ -290,7 +290,7 @@ type readRingMock struct {
func newReadRingMock(ingesters []ring.InstanceDesc) *readRingMock {
return &readRingMock{
replicationSet: ring.ReplicationSet{
- Ingesters: ingesters,
+ Instances: ingesters,
MaxErrors: 0,
},
}
@@ -309,7 +309,7 @@ func (r *readRingMock) Get(key uint32, op ring.Operation, buf []ring.InstanceDes
func (r *readRingMock) ShuffleShard(identifier string, size int) ring.ReadRing {
// pass by value to copy
return func(r readRingMock) *readRingMock {
- r.replicationSet.Ingesters = r.replicationSet.Ingesters[:size]
+ r.replicationSet.Instances = r.replicationSet.Instances[:size]
return &r
}(*r)
}
@@ -331,7 +331,7 @@ func (r *readRingMock) ReplicationFactor() int {
}
func (r *readRingMock) InstancesCount() int {
- return len(r.replicationSet.Ingesters)
+ return len(r.replicationSet.Instances)
}
func (r *readRingMock) Subring(key uint32, n int) ring.ReadRing {
@@ -339,7 +339,7 @@ func (r *readRingMock) Subring(key uint32, n int) ring.ReadRing {
}
func (r *readRingMock) HasInstance(instanceID string) bool {
- for _, ing := range r.replicationSet.Ingesters {
+ for _, ing := range r.replicationSet.Instances {
if ing.Addr != instanceID {
return true
}
diff --git a/pkg/ruler/manager/memstore.go b/pkg/ruler/manager/memstore.go
index 54daf2d94b1eb..f7853d79773ed 100644
--- a/pkg/ruler/manager/memstore.go
+++ b/pkg/ruler/manager/memstore.go
@@ -13,6 +13,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/pkg/exemplar"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/rules"
@@ -29,11 +30,14 @@ type NoopAppender struct{}
func (a NoopAppender) Appender(_ context.Context) storage.Appender { return a }
func (a NoopAppender) Add(l labels.Labels, t int64, v float64) (uint64, error) { return 0, nil }
-func (a NoopAppender) AddFast(ref uint64, t int64, v float64) error {
- return errors.New("unimplemented")
+func (a NoopAppender) Append(ref uint64, l labels.Labels, t int64, v float64) (uint64, error) {
+ return 0, errors.New("unimplemented")
}
func (a NoopAppender) Commit() error { return nil }
func (a NoopAppender) Rollback() error { return nil }
+func (a NoopAppender) AppendExemplar(ref uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) {
+ return 0, errors.New("unimplemented")
+}
func ForStateMetric(base labels.Labels, alertName string) labels.Labels {
b := labels.NewBuilder(base)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go
index e352b89b77f22..a370bfe46fde1 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go
@@ -46,25 +46,31 @@ import (
"github.com/cortexproject/cortex/pkg/util/services"
)
-const notificationLogMaintenancePeriod = 15 * time.Minute
+const (
+ // MaintenancePeriod is used for periodic storing of silences and notifications to local file.
+ maintenancePeriod = 15 * time.Minute
+
+ // Filenames used within tenant-directory
+ notificationLogSnapshot = "notifications"
+ silencesSnapshot = "silences"
+ templatesDir = "templates"
+)
// Config configures an Alertmanager.
type Config struct {
- UserID string
- // Used to persist notification logs and silences on disk.
- DataDir string
+ UserID string
Logger log.Logger
Peer *cluster.Peer
PeerTimeout time.Duration
Retention time.Duration
ExternalURL *url.URL
- ShardingEnabled bool
- ReplicationFactor int
- ReplicateStateFunc func(context.Context, string, *clusterpb.Part) error
- // The alertmanager replication protocol relies on a position related to other replicas.
- // This position is then used to identify who should notify about the alert first.
- GetPositionFunc func(userID string) int
+ // Tenant-specific local directory where AM can store its state (notifications, silences, templates). When AM is stopped, entire dir is removed.
+ TenantDataDir string
+
+ ShardingEnabled bool
+ ReplicationFactor int
+ Replicator Replicator
}
// An Alertmanager manages the alerts for one user.
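
The constants above imply a fixed per-tenant layout: one directory per tenant under the data dir, with fixed-name snapshot files replacing the old "nflog:<user>" / "silences:<user>" flat files. A sketch of the resulting paths (tenantPaths is an illustrative helper, shown with Unix-style paths):

package main

import (
	"fmt"
	"path/filepath"
)

// tenantPaths builds the per-tenant state file locations: each tenant gets
// its own directory under the data dir, holding fixed-name snapshot files.
func tenantPaths(dataDir, userID string) (nflog, silences, templates string) {
	tenantDir := filepath.Join(dataDir, userID)
	return filepath.Join(tenantDir, "notifications"),
		filepath.Join(tenantDir, "silences"),
		filepath.Join(tenantDir, "templates")
}

func main() {
	n, s, t := tenantPaths("/data/alertmanager", "tenant-1")
	fmt.Println(n) // /data/alertmanager/tenant-1/notifications
	fmt.Println(s) // /data/alertmanager/tenant-1/silences
	fmt.Println(t) // /data/alertmanager/tenant-1/templates
}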
@@ -113,11 +119,24 @@ func init() {
type State interface {
AddState(string, cluster.State, prometheus.Registerer) cluster.ClusterChannel
Position() int
- WaitReady()
+ WaitReady(context.Context) error
+}
+
+// Replicator is used to exchange state with peers via the ring when sharding is enabled.
+type Replicator interface {
+ // ReplicateStateForUser writes the given partial state to the necessary replicas.
+ ReplicateStateForUser(ctx context.Context, userID string, part *clusterpb.Part) error
+ // The alertmanager replication protocol relies on a position related to other replicas.
+ // This position is then used to identify who should notify about the alert first.
+ GetPositionForUser(userID string) int
}
// New creates a new Alertmanager.
func New(cfg *Config, reg *prometheus.Registry) (*Alertmanager, error) {
+ if cfg.TenantDataDir == "" {
+ return nil, fmt.Errorf("directory for tenant-specific AlertManager is not configured")
+ }
+
am := &Alertmanager{
cfg: cfg,
logger: log.With(cfg.Logger, "user", cfg.UserID),
@@ -140,7 +159,7 @@ func New(cfg *Config, reg *prometheus.Registry) (*Alertmanager, error) {
am.state = cfg.Peer
} else if cfg.ShardingEnabled {
level.Debug(am.logger).Log("msg", "starting tenant alertmanager with ring-based replication")
- state := newReplicatedStates(cfg.UserID, cfg.ReplicationFactor, cfg.ReplicateStateFunc, cfg.GetPositionFunc, am.logger, am.registry)
+ state := newReplicatedStates(cfg.UserID, cfg.ReplicationFactor, cfg.Replicator, am.logger, am.registry)
if err := state.Service.StartAsync(context.Background()); err != nil {
return nil, errors.Wrap(err, "failed to start ring-based replication service")
@@ -153,12 +172,11 @@ func New(cfg *Config, reg *prometheus.Registry) (*Alertmanager, error) {
}
am.wg.Add(1)
- nflogID := fmt.Sprintf("nflog:%s", cfg.UserID)
var err error
am.nflog, err = nflog.New(
nflog.WithRetention(cfg.Retention),
- nflog.WithSnapshot(filepath.Join(cfg.DataDir, nflogID)),
- nflog.WithMaintenance(notificationLogMaintenancePeriod, am.stop, am.wg.Done),
+ nflog.WithSnapshot(filepath.Join(cfg.TenantDataDir, notificationLogSnapshot)),
+ nflog.WithMaintenance(maintenancePeriod, am.stop, am.wg.Done),
nflog.WithMetrics(am.registry),
nflog.WithLogger(log.With(am.logger, "component", "nflog")),
)
@@ -171,9 +189,9 @@ func New(cfg *Config, reg *prometheus.Registry) (*Alertmanager, error) {
am.marker = types.NewMarker(am.registry)
- silencesID := fmt.Sprintf("silences:%s", cfg.UserID)
+ silencesFile := filepath.Join(cfg.TenantDataDir, silencesSnapshot)
am.silences, err = silence.New(silence.Options{
- SnapshotFile: filepath.Join(cfg.DataDir, silencesID),
+ SnapshotFile: silencesFile,
Retention: cfg.Retention,
Logger: log.With(am.logger, "component", "silences"),
Metrics: am.registry,
@@ -189,7 +207,7 @@ func New(cfg *Config, reg *prometheus.Registry) (*Alertmanager, error) {
am.wg.Add(1)
go func() {
- am.silences.Maintenance(15*time.Minute, filepath.Join(cfg.DataDir, silencesID), am.stop)
+ am.silences.Maintenance(maintenancePeriod, silencesFile, am.stop)
am.wg.Done()
}()
@@ -249,7 +267,7 @@ func (am *Alertmanager) ApplyConfig(userID string, conf *config.Config, rawCfg s
templateFiles := make([]string, len(conf.Templates))
if len(conf.Templates) > 0 {
for i, t := range conf.Templates {
- templateFiles[i] = filepath.Join(am.cfg.DataDir, "templates", userID, t)
+ templateFiles[i] = filepath.Join(am.cfg.TenantDataDir, templatesDir, t)
}
}
@@ -349,7 +367,10 @@ func (am *Alertmanager) StopAndWait() {
}
func (am *Alertmanager) mergePartialExternalState(part *clusterpb.Part) error {
- return am.state.(*state).MergePartialState(part)
+ if state, ok := am.state.(*state); ok {
+ return state.MergePartialState(part)
+ }
+ return errors.New("ring-based sharding not enabled")
}
// buildIntegrationsMap builds a map of name to the list of integration notifiers off of a
@@ -426,11 +447,11 @@ func md5HashAsMetricValue(data []byte) float64 {
// In a multi-tenant environment, we choose not to expose these to tenants and thus are not implemented.
type NilPeer struct{}
-func (p *NilPeer) Name() string { return "" }
-func (p *NilPeer) Status() string { return "ready" }
-func (p *NilPeer) Peers() []cluster.ClusterMember { return nil }
-func (p *NilPeer) Position() int { return 0 }
-func (p *NilPeer) WaitReady() {}
+func (p *NilPeer) Name() string { return "" }
+func (p *NilPeer) Status() string { return "ready" }
+func (p *NilPeer) Peers() []cluster.ClusterMember { return nil }
+func (p *NilPeer) Position() int { return 0 }
+func (p *NilPeer) WaitReady(context.Context) error { return nil }
func (p *NilPeer) AddState(string, cluster.State, prometheus.Registerer) cluster.ClusterChannel {
return &NilChannel{}
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go
index 4d2bbb7f7d1ac..2da8c817945dc 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go
@@ -155,19 +155,19 @@ func newAlertmanagerMetrics() *alertmanagerMetrics {
partialMerges: prometheus.NewDesc(
"cortex_alertmanager_partial_state_merges_total",
"Number of times we have received a partial state to merge for a key.",
- []string{"key"}, nil),
+ []string{"user"}, nil),
partialMergesFailed: prometheus.NewDesc(
"cortex_alertmanager_partial_state_merges_failed_total",
"Number of times we have failed to merge a partial state received for a key.",
- []string{"key"}, nil),
+ []string{"user"}, nil),
replicationTotal: prometheus.NewDesc(
"cortex_alertmanager_state_replication_total",
"Number of times we have tried to replicate a state to other alertmanagers",
- []string{"key"}, nil),
+ []string{"user"}, nil),
replicationFailed: prometheus.NewDesc(
"cortex_alertmanager_state_replication_failed_total",
"Number of times we have failed to replicate a state to other alertmanagers",
- []string{"key"}, nil),
+ []string{"user"}, nil),
}
}
@@ -244,8 +244,8 @@ func (m *alertmanagerMetrics) Collect(out chan<- prometheus.Metric) {
data.SendMaxOfGaugesPerUser(out, m.configHashValue, "alertmanager_config_hash")
- data.SendSumOfCountersWithLabels(out, m.partialMerges, "alertmanager_partial_state_merges_total", "key")
- data.SendSumOfCountersWithLabels(out, m.partialMergesFailed, "alertmanager_partial_state_merges_failed_total", "key")
- data.SendSumOfCountersWithLabels(out, m.replicationTotal, "alertmanager_state_replication_total", "key")
- data.SendSumOfCountersWithLabels(out, m.replicationFailed, "alertmanager_state_replication_failed_total", "key")
+ data.SendSumOfCountersPerUser(out, m.partialMerges, "alertmanager_partial_state_merges_total")
+ data.SendSumOfCountersPerUser(out, m.partialMergesFailed, "alertmanager_partial_state_merges_failed_total")
+ data.SendSumOfCountersPerUser(out, m.replicationTotal, "alertmanager_state_replication_total")
+ data.SendSumOfCountersPerUser(out, m.replicationFailed, "alertmanager_state_replication_failed_total")
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/api.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/api.go
index 95b2a9d2e7281..74c8dbaf854fc 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/api.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/api.go
@@ -153,14 +153,14 @@ func validateUserConfig(logger log.Logger, cfg alertspb.AlertConfigDesc) error {
// not to configured data dir, and on the flipside, it'll fail if we can't write
// to tmpDir. Ignoring both cases for now as they're ultra rare but will revisit if
// we see this in the wild.
- tmpDir, err := ioutil.TempDir("", "validate-config")
+ userTempDir, err := ioutil.TempDir("", "validate-config-"+cfg.User)
if err != nil {
return err
}
- defer os.RemoveAll(tmpDir)
+ defer os.RemoveAll(userTempDir)
for _, tmpl := range cfg.Templates {
- _, err := createTemplateFile(tmpDir, cfg.User, tmpl.Filename, tmpl.Body)
+ _, err := storeTemplateFile(userTempDir, tmpl.Filename, tmpl.Body)
if err != nil {
level.Error(logger).Log("msg", "unable to create template file", "err", err, "user", cfg.User)
return fmt.Errorf("unable to create template file '%s'", tmpl.Filename)
@@ -169,7 +169,7 @@ func validateUserConfig(logger log.Logger, cfg alertspb.AlertConfigDesc) error {
templateFiles := make([]string, len(amCfg.Templates))
for i, t := range amCfg.Templates {
- templateFiles[i] = filepath.Join(tmpDir, "templates", cfg.User, t)
+ templateFiles[i] = filepath.Join(userTempDir, t)
}
_, err = template.FromGlobs(templateFiles...)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/distributor.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/distributor.go
index ef5279b69d0d8..e90489bde8192 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/distributor.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/distributor.go
@@ -189,7 +189,7 @@ func (d *Distributor) doRead(userID string, w http.ResponseWriter, r *http.Reque
defer sp.Finish()
// Until we have a mechanism to combine the results from multiple alertmanagers,
// we forward the request to only one of the alertmanagers.
- amDesc := replicationSet.Ingesters[rand.Intn(len(replicationSet.Ingesters))]
+ amDesc := replicationSet.Instances[rand.Intn(len(replicationSet.Instances))]
resp, err := d.doRequest(ctx, amDesc, req)
if err != nil {
respondFromError(err, w, logger)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go
index 23f7ef2dccb7e..6fd62961fbafc 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go
@@ -10,6 +10,7 @@ import (
"net/url"
"os"
"path/filepath"
+ "strings"
"sync"
"time"
@@ -21,6 +22,7 @@ import (
amconfig "github.com/prometheus/alertmanager/config"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
+ tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/weaveworks/common/httpgrpc"
"github.com/weaveworks/common/httpgrpc/server"
"github.com/weaveworks/common/user"
@@ -103,11 +105,6 @@ type MultitenantAlertmanagerConfig struct {
PollInterval time.Duration `yaml:"poll_interval"`
MaxRecvMsgSize int64 `yaml:"max_recv_msg_size"`
- DeprecatedClusterBindAddr string `yaml:"cluster_bind_address"`
- DeprecatedClusterAdvertiseAddr string `yaml:"cluster_advertise_address"`
- DeprecatedPeers flagext.StringSlice `yaml:"peers"`
- DeprecatedPeerTimeout time.Duration `yaml:"peer_timeout"`
-
// Enable sharding for the Alertmanager
ShardingEnabled bool `yaml:"sharding_enabled"`
ShardingRing RingConfig `yaml:"sharding_ring"`
@@ -150,13 +147,6 @@ func (cfg *MultitenantAlertmanagerConfig) RegisterFlags(f *flag.FlagSet) {
f.StringVar(&cfg.AutoWebhookRoot, "alertmanager.configs.auto-webhook-root", "", "Root of URL to generate if config is "+autoWebhookURL)
f.DurationVar(&cfg.PollInterval, "alertmanager.configs.poll-interval", 15*time.Second, "How frequently to poll Cortex configs")
- // Flags prefixed with `cluster` are deprecated in favor of their `alertmanager` prefix equivalent.
- // TODO: New flags introduced in Cortex 1.7, remove old ones in Cortex 1.9
- f.StringVar(&cfg.DeprecatedClusterBindAddr, "cluster.listen-address", defaultClusterAddr, "Deprecated. Use -alertmanager.cluster.listen-address instead.")
- f.StringVar(&cfg.DeprecatedClusterAdvertiseAddr, "cluster.advertise-address", "", "Deprecated. Use -alertmanager.cluster.advertise-address instead.")
- f.Var(&cfg.DeprecatedPeers, "cluster.peer", "Deprecated. Use -alertmanager.cluster.peers instead.")
- f.DurationVar(&cfg.DeprecatedPeerTimeout, "cluster.peer-timeout", time.Second*15, "Deprecated. Use -alertmanager.cluster.peer-timeout instead.")
-
f.BoolVar(&cfg.EnableAPI, "experimental.alertmanager.enable-api", false, "Enable the experimental alertmanager config api.")
f.BoolVar(&cfg.ShardingEnabled, "alertmanager.sharding-enabled", false, "Shard tenants across multiple alertmanager instances.")
@@ -178,33 +168,6 @@ func (cfg *ClusterConfig) RegisterFlags(f *flag.FlagSet) {
f.DurationVar(&cfg.PushPullInterval, prefix+"push-pull-interval", cluster.DefaultPushPullInterval, "The interval between gossip state syncs. Setting this interval lower (more frequent) will increase convergence speeds across larger clusters at the expense of increased bandwidth usage.")
}
-// SupportDeprecatedFlagset ensures we support the previous set of cluster flags that are now deprecated.
-func (cfg *ClusterConfig) SupportDeprecatedFlagset(amCfg *MultitenantAlertmanagerConfig, logger log.Logger) {
- if amCfg.DeprecatedClusterBindAddr != defaultClusterAddr {
- flagext.DeprecatedFlagsUsed.Inc()
- level.Warn(logger).Log("msg", "running with DEPRECATED flag -cluster.listen-address, use -alertmanager.cluster.listen-address instead.")
- cfg.ListenAddr = amCfg.DeprecatedClusterBindAddr
- }
-
- if amCfg.DeprecatedClusterAdvertiseAddr != "" {
- flagext.DeprecatedFlagsUsed.Inc()
- level.Warn(logger).Log("msg", "running with DEPRECATED flag -cluster.advertise-address, use -alertmanager.cluster.advertise-address instead.")
- cfg.AdvertiseAddr = amCfg.DeprecatedClusterAdvertiseAddr
- }
-
- if len(amCfg.DeprecatedPeers) > 0 {
- flagext.DeprecatedFlagsUsed.Inc()
- level.Warn(logger).Log("msg", "running with DEPRECATED flag -cluster.peer, use -alertmanager.cluster.peers instead.")
- cfg.Peers = []string(amCfg.DeprecatedPeers)
- }
-
- if amCfg.DeprecatedPeerTimeout != defaultPeerTimeout {
- flagext.DeprecatedFlagsUsed.Inc()
- level.Warn(logger).Log("msg", "running with DEPRECATED flag -cluster.peer-timeout, use -alertmanager.cluster.peer-timeout instead.")
- cfg.PeerTimeout = amCfg.DeprecatedPeerTimeout
- }
-}
-
// Validate config and returns error on failure
func (cfg *MultitenantAlertmanagerConfig) Validate() error {
if err := cfg.Store.Validate(); err != nil {
@@ -309,8 +272,6 @@ func NewMultitenantAlertmanager(cfg *MultitenantAlertmanagerConfig, store alerts
}
}
- cfg.Cluster.SupportDeprecatedFlagset(cfg, logger)
-
var peer *cluster.Peer
// We need to take this case into account to support our legacy upstream clustering.
if cfg.Cluster.ListenAddr != "" && !cfg.ShardingEnabled {
@@ -447,6 +408,11 @@ func (h *handlerForGRPCServer) ServeHTTP(w http.ResponseWriter, req *http.Reques
}
func (am *MultitenantAlertmanager) starting(ctx context.Context) (err error) {
+ err = am.migrateStateFilesToPerTenantDirectories()
+ if err != nil {
+ return err
+ }
+
defer func() {
if err == nil || am.subservices == nil {
return
@@ -500,6 +466,119 @@ func (am *MultitenantAlertmanager) starting(ctx context.Context) (err error) {
return nil
}
+// migrateStateFilesToPerTenantDirectories migrates any existing state files from the old flat layout to the new per-tenant hierarchy.
+// TODO: Remove in Cortex 1.11.
+func (am *MultitenantAlertmanager) migrateStateFilesToPerTenantDirectories() error {
+ migrate := func(from, to string) error {
+ level.Info(am.logger).Log("msg", "migrating alertmanager state", "from", from, "to", to)
+ err := os.Rename(from, to)
+ return errors.Wrapf(err, "failed to migrate alertmanager state from %v to %v", from, to)
+ }
+
+ st, err := am.getObsoleteFilesPerUser()
+ if err != nil {
+ return errors.Wrap(err, "failed to migrate alertmanager state files")
+ }
+
+ for userID, files := range st {
+ tenantDir := am.getTenantDirectory(userID)
+ err := os.MkdirAll(tenantDir, 0777)
+ if err != nil {
+ return errors.Wrapf(err, "failed to create per-tenant directory %v", tenantDir)
+ }
+
+ errs := tsdb_errors.NewMulti()
+
+ if files.notificationLogSnapshot != "" {
+ errs.Add(migrate(files.notificationLogSnapshot, filepath.Join(tenantDir, notificationLogSnapshot)))
+ }
+
+ if files.silencesSnapshot != "" {
+ errs.Add(migrate(files.silencesSnapshot, filepath.Join(tenantDir, silencesSnapshot)))
+ }
+
+ if files.templatesDir != "" {
+ errs.Add(migrate(files.templatesDir, filepath.Join(tenantDir, templatesDir)))
+ }
+
+ if err := errs.Err(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type obsoleteStateFiles struct {
+ notificationLogSnapshot string
+ silencesSnapshot string
+ templatesDir string
+}
+
+// getObsoleteFilesPerUser returns per-user set of files that should be migrated from old structure to new structure.
+func (am *MultitenantAlertmanager) getObsoleteFilesPerUser() (map[string]obsoleteStateFiles, error) {
+ files, err := ioutil.ReadDir(am.cfg.DataDir)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to list dir %v", am.cfg.DataDir)
+ }
+
+ // old names
+ const (
+ notificationLogPrefix = "nflog:"
+ silencesPrefix = "silences:"
+ templates = "templates"
+ )
+
+ result := map[string]obsoleteStateFiles{}
+
+ for _, f := range files {
+ fullPath := filepath.Join(am.cfg.DataDir, f.Name())
+
+ if f.IsDir() {
+ // Process templates dir.
+ if f.Name() != templates {
+ // Ignore other files -- those are likely per tenant directories.
+ continue
+ }
+
+ templateDirs, err := ioutil.ReadDir(fullPath)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to list dir %v", fullPath)
+ }
+
+			// Previously, the templates directory contained per-tenant subdirectories.
+ for _, d := range templateDirs {
+ if d.IsDir() {
+ v := result[d.Name()]
+ v.templatesDir = filepath.Join(fullPath, d.Name())
+ result[d.Name()] = v
+ } else {
+ level.Warn(am.logger).Log("msg", "ignoring unknown local file while migrating local alertmanager state files", "file", filepath.Join(fullPath, d.Name()))
+ }
+ }
+ continue
+ }
+
+ switch {
+ case strings.HasPrefix(f.Name(), notificationLogPrefix):
+ userID := strings.TrimPrefix(f.Name(), notificationLogPrefix)
+ v := result[userID]
+ v.notificationLogSnapshot = fullPath
+ result[userID] = v
+
+ case strings.HasPrefix(f.Name(), silencesPrefix):
+ userID := strings.TrimPrefix(f.Name(), silencesPrefix)
+ v := result[userID]
+ v.silencesSnapshot = fullPath
+ result[userID] = v
+
+ default:
+ level.Warn(am.logger).Log("msg", "ignoring unknown local data file while migrating local alertmanager state files", "file", fullPath)
+ }
+ }
+
+ return result, nil
+}
+
func (am *MultitenantAlertmanager) run(ctx context.Context) error {
tick := time.NewTicker(am.cfg.PollInterval)
defer tick.Stop()
@@ -551,6 +630,8 @@ func (am *MultitenantAlertmanager) loadAndSyncConfigs(ctx context.Context, syncR
}
am.syncConfigs(cfgs)
+ am.deleteUnusedLocalUserState()
+
return nil
}
@@ -636,20 +717,27 @@ func (am *MultitenantAlertmanager) syncConfigs(cfgs map[string]alertspb.AlertCon
am.multitenantMetrics.lastReloadSuccessfulTimestamp.WithLabelValues(user).SetToCurrentTime()
}
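+	// Collect the alertmanagers to deactivate while holding the lock; they are stopped afterwards, outside the lock.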
+ userAlertmanagersToStop := map[string]*Alertmanager{}
+
am.alertmanagersMtx.Lock()
- defer am.alertmanagersMtx.Unlock()
for userID, userAM := range am.alertmanagers {
if _, exists := cfgs[userID]; !exists {
- level.Info(am.logger).Log("msg", "deactivating per-tenant alertmanager", "user", userID)
- userAM.Stop()
+ userAlertmanagersToStop[userID] = userAM
delete(am.alertmanagers, userID)
delete(am.cfgs, userID)
am.multitenantMetrics.lastReloadSuccessful.DeleteLabelValues(userID)
am.multitenantMetrics.lastReloadSuccessfulTimestamp.DeleteLabelValues(userID)
am.alertmanagerMetrics.removeUserRegistry(userID)
- level.Info(am.logger).Log("msg", "deactivated per-tenant alertmanager", "user", userID)
}
}
+ am.alertmanagersMtx.Unlock()
+
+ // Now stop alertmanagers and wait until they are really stopped, without holding lock.
+ for userID, userAM := range userAlertmanagersToStop {
+ level.Info(am.logger).Log("msg", "deactivating per-tenant alertmanager", "user", userID)
+ userAM.StopAndWait()
+ level.Info(am.logger).Log("msg", "deactivated per-tenant alertmanager", "user", userID)
+ }
}
// setConfig applies the given configuration to the alertmanager for `userID`,
@@ -660,7 +748,7 @@ func (am *MultitenantAlertmanager) setConfig(cfg alertspb.AlertConfigDesc) error
var hasTemplateChanges bool
for _, tmpl := range cfg.Templates {
- hasChanged, err := createTemplateFile(am.cfg.DataDir, cfg.User, tmpl.Filename, tmpl.Body)
+ hasChanged, err := storeTemplateFile(filepath.Join(am.getTenantDirectory(cfg.User), templatesDir), tmpl.Filename, tmpl.Body)
if err != nil {
return err
}
@@ -742,21 +830,30 @@ func (am *MultitenantAlertmanager) setConfig(cfg alertspb.AlertConfigDesc) error
return nil
}
+func (am *MultitenantAlertmanager) getTenantDirectory(userID string) string {
+ return filepath.Join(am.cfg.DataDir, userID)
+}
+
func (am *MultitenantAlertmanager) newAlertmanager(userID string, amConfig *amconfig.Config, rawCfg string) (*Alertmanager, error) {
reg := prometheus.NewRegistry()
+ tenantDir := am.getTenantDirectory(userID)
+ err := os.MkdirAll(tenantDir, 0777)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to create per-tenant directory %v", tenantDir)
+ }
+
newAM, err := New(&Config{
- UserID: userID,
- DataDir: am.cfg.DataDir,
- Logger: util_log.Logger,
- Peer: am.peer,
- PeerTimeout: am.cfg.Cluster.PeerTimeout,
- Retention: am.cfg.Retention,
- ExternalURL: am.cfg.ExternalURL.URL,
- ShardingEnabled: am.cfg.ShardingEnabled,
- ReplicateStateFunc: am.replicateStateForUser,
- GetPositionFunc: am.getPositionFor,
- ReplicationFactor: am.cfg.ShardingRing.ReplicationFactor,
+ UserID: userID,
+ TenantDataDir: tenantDir,
+ Logger: util_log.Logger,
+ Peer: am.peer,
+ PeerTimeout: am.cfg.Cluster.PeerTimeout,
+ Retention: am.cfg.Retention,
+ ExternalURL: am.cfg.ExternalURL.URL,
+ ShardingEnabled: am.cfg.ShardingEnabled,
+ Replicator: am,
+ ReplicationFactor: am.cfg.ShardingRing.ReplicationFactor,
}, reg)
if err != nil {
return nil, fmt.Errorf("unable to start Alertmanager for user %v: %v", userID, err)
@@ -770,8 +867,8 @@ func (am *MultitenantAlertmanager) newAlertmanager(userID string, amConfig *amco
return newAM, nil
}
-// getPositionFor returns the position this Alertmanager instance holds in the ring related to its other replicas for an specific user.
-func (am *MultitenantAlertmanager) getPositionFor(userID string) int {
+// GetPositionForUser returns the position this Alertmanager instance holds in the ring relative to its other replicas for a specific user.
+func (am *MultitenantAlertmanager) GetPositionForUser(userID string) int {
// If we have a replication factor of 1 or less we don't need to do any work and can immediately return.
if am.ring == nil || am.ring.ReplicationFactor() <= 1 {
return 0
@@ -786,7 +883,7 @@ func (am *MultitenantAlertmanager) getPositionFor(userID string) int {
}
var position int
- for i, instance := range set.Ingesters {
+ for i, instance := range set.Instances {
if instance.Addr == am.ringLifecycler.GetInstanceAddr() {
position = i
break
@@ -877,8 +974,8 @@ func (am *MultitenantAlertmanager) GetStatusHandler() StatusHandler {
}
}
-// replicateStateForUser attempts to replicate a partial state sent by an alertmanager to its other replicas through the ring.
-func (am *MultitenantAlertmanager) replicateStateForUser(ctx context.Context, userID string, part *clusterpb.Part) error {
+// ReplicateStateForUser attempts to replicate a partial state sent by an alertmanager to its other replicas through the ring.
+func (am *MultitenantAlertmanager) ReplicateStateForUser(ctx context.Context, userID string, part *clusterpb.Part) error {
level.Debug(am.logger).Log("msg", "message received for replication", "user", userID, "key", part.Key)
selfAddress := am.ringLifecycler.GetInstanceAddr()
@@ -918,7 +1015,7 @@ func (am *MultitenantAlertmanager) replicateStateForUser(ctx context.Context, us
func (am *MultitenantAlertmanager) UpdateState(ctx context.Context, part *clusterpb.Part) (*alertmanagerpb.UpdateStateResponse, error) {
userID, err := tenant.TenantID(ctx)
if err != nil {
- return nil, fmt.Errorf("no user id found in context")
+ return nil, err
}
am.alertmanagersMtx.Lock()
@@ -944,6 +1041,54 @@ func (am *MultitenantAlertmanager) UpdateState(ctx context.Context, part *cluste
return &alertmanagerpb.UpdateStateResponse{Status: alertmanagerpb.OK}, nil
}
+// deleteUnusedLocalUserState deletes local state for users that are no longer needed.
+func (am *MultitenantAlertmanager) deleteUnusedLocalUserState() {
+ userDirs := am.getPerUserDirectories()
+
+	// Delete the remaining directories, skipping users whose alertmanager is still running.
+ for userID, dir := range userDirs {
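+		// Take the lock only to look up the per-user alertmanager; directory removal happens outside it.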
+ am.alertmanagersMtx.Lock()
+ userAM := am.alertmanagers[userID]
+ am.alertmanagersMtx.Unlock()
+
+ // Don't delete directory if AM for user still exists.
+ if userAM != nil {
+ continue
+ }
+
+ err := os.RemoveAll(dir)
+ if err != nil {
+ level.Warn(am.logger).Log("msg", "failed to delete directory for user", "dir", dir, "user", userID, "err", err)
+ } else {
+ level.Info(am.logger).Log("msg", "deleted local directory for user", "dir", dir, "user", userID)
+ }
+ }
+}
+
+// getPerUserDirectories returns a map of users to their directories (full path). Only users with a local
+// directory are returned.
+func (am *MultitenantAlertmanager) getPerUserDirectories() map[string]string {
+ files, err := ioutil.ReadDir(am.cfg.DataDir)
+ if err != nil {
+ level.Warn(am.logger).Log("msg", "failed to list local dir", "dir", am.cfg.DataDir, "err", err)
+ return nil
+ }
+
+ result := map[string]string{}
+
+ for _, f := range files {
+ fullPath := filepath.Join(am.cfg.DataDir, f.Name())
+
+ if !f.IsDir() {
+ level.Warn(am.logger).Log("msg", "ignoring unexpected file while scanning local alertmanager configs", "file", fullPath)
+ continue
+ }
+
+ result[f.Name()] = fullPath
+ }
+ return result
+}
+
// StatusHandler shows the status of the alertmanager.
type StatusHandler struct {
am *MultitenantAlertmanager
@@ -957,21 +1102,26 @@ func (s StatusHandler) ServeHTTP(w http.ResponseWriter, _ *http.Request) {
}
}
-func createTemplateFile(dataDir, userID, fn, content string) (bool, error) {
- if fn != filepath.Base(fn) {
- return false, fmt.Errorf("template file name '%s' is not not valid", fn)
+// storeTemplateFile stores a template file with the given content into the specified directory.
+// Since templateFileName is provided by the end user, it is verified that it cannot be used for path traversal.
+// Returns true if the file content has changed (new or updated file), false if a file with the same name
+// and content was already stored locally.
+func storeTemplateFile(dir, templateFileName, content string) (bool, error) {
+ if templateFileName != filepath.Base(templateFileName) {
+		return false, fmt.Errorf("template file name '%s' is not valid", templateFileName)
}
- dir := filepath.Join(dataDir, "templates", userID, filepath.Dir(fn))
err := os.MkdirAll(dir, 0755)
if err != nil {
return false, fmt.Errorf("unable to create Alertmanager templates directory %q: %s", dir, err)
}
- file := filepath.Join(dir, fn)
+ file := filepath.Join(dir, templateFileName)
// Check if the template file already exists and if it has changed
if tmpl, err := ioutil.ReadFile(file); err == nil && string(tmpl) == content {
return false, nil
+ } else if err != nil && !os.IsNotExist(err) {
+ return false, err
}
if err := ioutil.WriteFile(file, []byte(content), 0644); err != nil {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/state_replication.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/state_replication.go
index d5f3e4f60b508..17fc686ee5d29 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/state_replication.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/state_replication.go
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"sync"
- "time"
"github.com/prometheus/client_golang/prometheus/promauto"
@@ -28,32 +27,28 @@ type state struct {
mtx sync.Mutex
states map[string]cluster.State
- replicationFactor int
- replicateStateFunc func(context.Context, string, *clusterpb.Part) error
- positionFunc func(string) int
+ replicationFactor int
+ replicator Replicator
partialStateMergesTotal *prometheus.CounterVec
partialStateMergesFailed *prometheus.CounterVec
stateReplicationTotal *prometheus.CounterVec
stateReplicationFailed *prometheus.CounterVec
- msgc chan *clusterpb.Part
- readyc chan struct{}
+ msgc chan *clusterpb.Part
}
// newReplicatedStates creates a new state struct, which manages state to be replicated between alertmanagers.
-func newReplicatedStates(userID string, rf int, f func(context.Context, string, *clusterpb.Part) error, pf func(string) int, l log.Logger, r prometheus.Registerer) *state {
+func newReplicatedStates(userID string, rf int, re Replicator, l log.Logger, r prometheus.Registerer) *state {
s := &state{
- logger: l,
- userID: userID,
- replicateStateFunc: f,
- replicationFactor: rf,
- positionFunc: pf,
- states: make(map[string]cluster.State, 2), // we use two, one for the notifications and one for silences.
- msgc: make(chan *clusterpb.Part),
- readyc: make(chan struct{}),
- reg: r,
+ logger: l,
+ userID: userID,
+ replicationFactor: rf,
+ replicator: re,
+ states: make(map[string]cluster.State, 2), // we use two, one for the notifications and one for silences.
+ msgc: make(chan *clusterpb.Part),
+ reg: r,
partialStateMergesTotal: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
Name: "alertmanager_partial_state_merges_total",
Help: "Number of times we have received a partial state to merge for a key.",
@@ -72,7 +67,7 @@ func newReplicatedStates(userID string, rf int, f func(context.Context, string,
}, []string{"key"}),
}
- s.Service = services.NewBasicService(nil, s.running, nil)
+ s.Service = services.NewBasicService(s.starting, s.running, nil)
return s
}
@@ -116,33 +111,27 @@ func (s *state) MergePartialState(p *clusterpb.Part) error {
}
// Position helps in determining how long we should wait before sending a notification, based on the number of replicas.
-func (s *state) Position() int { return s.positionFunc(s.userID) }
+func (s *state) Position() int {
+ return s.replicator.GetPositionForUser(s.userID)
+}
-// Settle waits until the alertmanagers are ready (and sets the appropriate internal state when it is).
+// starting waits until the alertmanagers are ready (and sets the appropriate internal state once they are).
// The idea is that we don't want to start "working" before we get a chance to know most of the notifications and/or silences.
-func (s *state) Settle(ctx context.Context, _ time.Duration) {
+func (s *state) starting(ctx context.Context) error {
level.Info(s.logger).Log("msg", "Waiting for notification and silences to settle...")
// TODO: Make sure that the state is fully synchronised at this point.
// We can check other alertmanager(s) and explicitly ask them to propagate their state to us if available.
- close(s.readyc)
+ return nil
}
// WaitReady is needed for the pipeline builder to know whenever we've settled and the state is up to date.
-func (s *state) WaitReady() {
- //TODO: At the moment, we settle in a separate go-routine (see multitenant.go as we create the Peer) we should
- // mimic that behaviour here once we have full state replication.
- s.Settle(context.Background(), time.Second)
- <-s.readyc
+func (s *state) WaitReady(ctx context.Context) error {
+ return s.Service.AwaitRunning(ctx)
}
func (s *state) Ready() bool {
- select {
- case <-s.readyc:
- return true
- default:
- }
- return false
+ return s.Service.State() == services.Running
}
func (s *state) running(ctx context.Context) error {
@@ -155,7 +144,7 @@ func (s *state) running(ctx context.Context) error {
}
s.stateReplicationTotal.WithLabelValues(p.Key).Inc()
- if err := s.replicateStateFunc(ctx, s.userID, p); err != nil {
+ if err := s.replicator.ReplicateStateForUser(ctx, s.userID, p); err != nil {
s.stateReplicationFailed.WithLabelValues(p.Key).Inc()
level.Error(s.logger).Log("msg", "failed to replicate state to other alertmanagers", "user", s.userID, "key", p.Key, "err", err)
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/api/handlers.go b/vendor/github.com/cortexproject/cortex/pkg/api/handlers.go
index 81e81142b3a4f..2de57df33c306 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/api/handlers.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/api/handlers.go
@@ -10,8 +10,6 @@ import (
"github.com/go-kit/kit/log"
"github.com/gorilla/mux"
- "github.com/opentracing-contrib/go-stdlib/nethttp"
- "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
@@ -191,6 +189,7 @@ func NewQuerierHandler(
engine,
errorTranslateQueryable{queryable}, // Translate errors to errors expected by API.
nil, // No remote write support.
+ nil, // No exemplars support.
func(context.Context) v1.TargetRetriever { return &querier.DummyTargetRetriever{} },
func(context.Context) v1.AlertmanagerRetriever { return &querier.DummyAlertmanagerRetriever{} },
func() config.Config { return config.Config{} },
@@ -208,6 +207,7 @@ func NewQuerierHandler(
&v1.PrometheusVersion{},
// This is used for the stats API which we should not support. Or find other ways to.
prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { return nil, nil }),
+ reg,
)
router := mux.NewRouter()
@@ -259,11 +259,6 @@ func NewQuerierHandler(
router.Path(legacyPrefix+"/api/v1/series").Methods("GET", "POST", "DELETE").Handler(legacyPromRouter)
router.Path(legacyPrefix + "/api/v1/metadata").Methods("GET").Handler(legacyPromRouter)
- // Add a middleware to extract the trace context and add a header.
- handler := nethttp.MiddlewareFunc(opentracing.GlobalTracer(), router.ServeHTTP, nethttp.OperationNameFunc(func(r *http.Request) string {
- return "internalQuerier"
- }))
-
// Track execution time.
- return stats.NewWallTimeMiddleware().Wrap(handler)
+ return stats.NewWallTimeMiddleware().Wrap(router)
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go
index 5f2fa0dc4f664..c60d624e407e2 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go
@@ -59,7 +59,7 @@ type Memcached struct {
logger log.Logger
}
-// NewMemcached makes a new Memcache.
+// NewMemcached makes a new Memcached.
func NewMemcached(cfg MemcachedConfig, client MemcachedClient, name string, reg prometheus.Registerer, logger log.Logger) *Memcached {
c := &Memcached{
cfg: cfg,
@@ -71,7 +71,7 @@ func NewMemcached(cfg MemcachedConfig, client MemcachedClient, name string, reg
Namespace: "cortex",
Name: "memcache_request_duration_seconds",
Help: "Total time spent in seconds doing memcache requests.",
- // Memecache requests are very quick: smallest bucket is 16us, biggest is 1s
+ // Memcached requests are very quick: smallest bucket is 16us, biggest is 1s
Buckets: prometheus.ExponentialBuckets(0.000016, 4, 8),
ConstLabels: prometheus.Labels{"name": name},
}, []string{"method", "status_code"}),
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go
index b0826d9bfd820..021e02404abcb 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go
@@ -52,10 +52,13 @@ type memcachedClient struct {
cbTimeout time.Duration
cbInterval time.Duration
+ maxItemSize int
+
quit chan struct{}
wait sync.WaitGroup
numServers prometheus.Gauge
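+	// skipped counts Set operations dropped because the value exceeded maxItemSize.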
+ skipped prometheus.Counter
logger log.Logger
}
@@ -67,6 +70,7 @@ type MemcachedClientConfig struct {
Addresses string `yaml:"addresses"` // EXPERIMENTAL.
Timeout time.Duration `yaml:"timeout"`
MaxIdleConns int `yaml:"max_idle_conns"`
+ MaxItemSize int `yaml:"max_item_size"`
UpdateInterval time.Duration `yaml:"update_interval"`
ConsistentHash bool `yaml:"consistent_hash"`
CBFailures uint `yaml:"circuit_breaker_consecutive_failures"`
@@ -86,6 +90,7 @@ func (cfg *MemcachedClientConfig) RegisterFlagsWithPrefix(prefix, description st
f.UintVar(&cfg.CBFailures, prefix+"memcached.circuit-breaker-consecutive-failures", 10, description+"Trip circuit-breaker after this number of consecutive dial failures (if zero then circuit-breaker is disabled).")
f.DurationVar(&cfg.CBTimeout, prefix+"memcached.circuit-breaker-timeout", 10*time.Second, description+"Duration circuit-breaker remains open after tripping (if zero then 60 seconds is used).")
f.DurationVar(&cfg.CBInterval, prefix+"memcached.circuit-breaker-interval", 10*time.Second, description+"Reset circuit-breaker counts after this long (if zero then never reset).")
+ f.IntVar(&cfg.MaxItemSize, prefix+"memcached.max-item-size", 0, description+"The maximum size of an item stored in memcached. Bigger items are not stored. If set to 0, no maximum size is enforced.")
}
// NewMemcachedClient creates a new MemcacheClient that gets its server list
@@ -107,18 +112,19 @@ func NewMemcachedClient(cfg MemcachedClientConfig, name string, r prometheus.Reg
}, r))
newClient := &memcachedClient{
- name: name,
- Client: client,
- serverList: selector,
- hostname: cfg.Host,
- service: cfg.Service,
- logger: logger,
- provider: dns.NewProvider(logger, dnsProviderRegisterer, dns.GolangResolverType),
- cbs: make(map[string]*gobreaker.CircuitBreaker),
- cbFailures: cfg.CBFailures,
- cbInterval: cfg.CBInterval,
- cbTimeout: cfg.CBTimeout,
- quit: make(chan struct{}),
+ name: name,
+ Client: client,
+ serverList: selector,
+ hostname: cfg.Host,
+ service: cfg.Service,
+ logger: logger,
+ provider: dns.NewProvider(logger, dnsProviderRegisterer, dns.GolangResolverType),
+ cbs: make(map[string]*gobreaker.CircuitBreaker),
+ cbFailures: cfg.CBFailures,
+ cbInterval: cfg.CBInterval,
+ cbTimeout: cfg.CBTimeout,
+ maxItemSize: cfg.MaxItemSize,
+ quit: make(chan struct{}),
numServers: promauto.With(r).NewGauge(prometheus.GaugeOpts{
Namespace: "cortex",
@@ -126,6 +132,13 @@ func NewMemcachedClient(cfg MemcachedClientConfig, name string, r prometheus.Reg
Help: "The number of memcache servers discovered.",
ConstLabels: prometheus.Labels{"name": name},
}),
+
+ skipped: promauto.With(r).NewCounter(prometheus.CounterOpts{
+ Namespace: "cortex",
+ Name: "memcache_client_set_skip_total",
+		Help:        "Total number of set operations skipped because the value was larger than max-item-size.",
+ ConstLabels: prometheus.Labels{"name": name},
+ }),
}
if cfg.CBFailures > 0 {
newClient.Client.DialTimeout = newClient.dialViaCircuitBreaker
@@ -183,6 +196,12 @@ func (c *memcachedClient) Stop() {
}
func (c *memcachedClient) Set(item *memcache.Item) error {
+ // Skip hitting memcached at all if the item is bigger than the max allowed size.
+ if c.maxItemSize > 0 && len(item.Value) > c.maxItemSize {
+ c.skipped.Inc()
+ return nil
+ }
+
err := c.Client.Set(item)
if err == nil {
return nil
diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go
index e15e5f4787b5f..98c4864d9f137 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go
@@ -721,11 +721,11 @@ func (c *Compactor) ownUser(userID string) (bool, error) {
return false, err
}
- if len(rs.Ingesters) != 1 {
- return false, fmt.Errorf("unexpected number of compactors in the shard (expected 1, got %d)", len(rs.Ingesters))
+ if len(rs.Instances) != 1 {
+ return false, fmt.Errorf("unexpected number of compactors in the shard (expected 1, got %d)", len(rs.Instances))
}
- return rs.Ingesters[0].Addr == c.ringLifecycler.Addr, nil
+ return rs.Instances[0].Addr == c.ringLifecycler.Addr, nil
}
func isAllowedUser(enabledUsers, disabledUsers map[string]struct{}, userID string) bool {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/test.go b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/test.go
index b57ee9ab68165..2bfa874b7daab 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/test.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/test.go
@@ -292,7 +292,7 @@ func (cmd *loadCmd) append(a storage.Appender) error {
m := cmd.metrics[h]
for _, s := range smpls {
- if _, err := a.Add(m, s.T, s.V); err != nil {
+ if _, err := a.Append(0, m, s.T, s.V); err != nil {
return err
}
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go
index 5f4d927775dbb..df180214c7b0e 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go
@@ -4,11 +4,14 @@ import (
"context"
"flag"
"fmt"
+ "net/http"
"os"
"time"
"github.com/NYTimes/gziphandler"
"github.com/go-kit/kit/log/level"
+ "github.com/opentracing-contrib/go-stdlib/nethttp"
+ "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/promql"
@@ -324,6 +327,11 @@ func (t *Cortex) initQuerier() (serv services.Service, err error) {
t.Cfg.Worker.FrontendAddress = address
}
+ // Add a middleware to extract the trace context and add a header.
+ internalQuerierRouter = nethttp.MiddlewareFunc(opentracing.GlobalTracer(), internalQuerierRouter.ServeHTTP, nethttp.OperationNameFunc(func(r *http.Request) string {
+ return "internalQuerier"
+ }))
+
// If queries are processed using the external HTTP Server, we need to wrap the internal querier with
// HTTP router with middleware to parse the tenant ID from the HTTP header and inject it into the
// request context.
@@ -642,7 +650,7 @@ func (t *Cortex) initRulerStorage() (serv services.Service, err error) {
if !t.Cfg.Ruler.StoreConfig.IsDefaults() {
t.RulerStorage, err = ruler.NewLegacyRuleStore(t.Cfg.Ruler.StoreConfig, rules.FileLoader{}, util_log.Logger)
} else {
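+		// Pass an explicit FileLoader, matching the legacy rule store above.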
- t.RulerStorage, err = ruler.NewRuleStore(context.Background(), t.Cfg.RulerStorage, t.Overrides, util_log.Logger, prometheus.DefaultRegisterer)
+ t.RulerStorage, err = ruler.NewRuleStore(context.Background(), t.Cfg.RulerStorage, t.Overrides, rules.FileLoader{}, util_log.Logger, prometheus.DefaultRegisterer)
}
return
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go
index 73c524ce66948..13007fa0453a4 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go
@@ -410,7 +410,7 @@ func (d *Distributor) checkSample(ctx context.Context, userID, cluster, replica
// Validates a single series from a write request. Will remove labels if
// any are configured to be dropped for the user ID.
// Returns the validated series with its labels/samples, and any error.
-func (d *Distributor) validateSeries(ts cortexpb.PreallocTimeseries, userID string, skipLabelNameValidation bool) (cortexpb.PreallocTimeseries, error) {
+func (d *Distributor) validateSeries(ts cortexpb.PreallocTimeseries, userID string, skipLabelNameValidation bool) (cortexpb.PreallocTimeseries, validation.ValidationError) {
d.labelsHistogram.Observe(float64(len(ts.Labels)))
if err := validation.ValidateLabels(d.limits, userID, ts.Labels, skipLabelNameValidation); err != nil {
return emptyPreallocSeries, err
@@ -544,12 +544,12 @@ func (d *Distributor) Push(ctx context.Context, req *cortexpb.WriteRequest) (*co
}
skipLabelNameValidation := d.cfg.SkipLabelNameValidation || req.GetSkipLabelNameValidation()
- validatedSeries, err := d.validateSeries(ts, userID, skipLabelNameValidation)
+ validatedSeries, validationErr := d.validateSeries(ts, userID, skipLabelNameValidation)
// Errors in validation are considered non-fatal, as one series in a request may contain
// invalid data but all the remaining series could be perfectly valid.
- if err != nil && firstPartialErr == nil {
- firstPartialErr = err
+ if validationErr != nil && firstPartialErr == nil {
+ firstPartialErr = httpgrpc.Errorf(http.StatusBadRequest, validationErr.Error())
}
// validateSeries would have returned an emptyPreallocSeries if there were no valid samples.
@@ -588,8 +588,7 @@ func (d *Distributor) Push(ctx context.Context, req *cortexpb.WriteRequest) (*co
}
totalN := validatedSamples + len(validatedMetadata)
- rateOK, rateReservation := d.ingestionRateLimiter.AllowN(now, userID, totalN)
- if !rateOK {
+ if !d.ingestionRateLimiter.AllowN(now, userID, totalN) {
// Ensure the request slice is reused if the request is rate limited.
cortexpb.ReuseSlice(req.Timeseries)
@@ -641,8 +640,6 @@ func (d *Distributor) Push(ctx context.Context, req *cortexpb.WriteRequest) (*co
return d.send(localCtx, ingester, timeseries, metadata, req.Source)
}, func() { cortexpb.ReuseSlice(req.Timeseries) })
if err != nil {
- // Ingestion failed, so roll-back the reservation from the rate limiter.
- rateReservation.CancelAt(now)
return nil, err
}
return &cortexpb.WriteResponse{}, firstPartialErr
@@ -912,7 +909,7 @@ func (d *Distributor) AllUserStats(ctx context.Context) ([]UserIDStats, error) {
if err != nil {
return nil, err
}
- for _, ingester := range replicationSet.Ingesters {
+ for _, ingester := range replicationSet.Instances {
client, err := d.ingesterPool.GetClientFor(ingester.Addr)
if err != nil {
return nil, err
diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/transport/roundtripper.go b/vendor/github.com/cortexproject/cortex/pkg/frontend/transport/roundtripper.go
index 065d7fdca6e1c..45cfb22225cf3 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/frontend/transport/roundtripper.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/frontend/transport/roundtripper.go
@@ -36,9 +36,10 @@ func (a *grpcRoundTripperAdapter) RoundTrip(r *http.Request) (*http.Response, er
}
httpResp := &http.Response{
- StatusCode: int(resp.Code),
- Body: ioutil.NopCloser(bytes.NewReader(resp.Body)),
- Header: http.Header{},
+ StatusCode: int(resp.Code),
+ Body: ioutil.NopCloser(bytes.NewReader(resp.Body)),
+ Header: http.Header{},
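+		// Setting ContentLength lets downstream readers (e.g. the query-range response decoder) preallocate their buffers.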
+ ContentLength: int64(len(resp.Body)),
}
for _, h := range resp.Headers {
httpResp.Header[h.Key] = h.Values
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/errors.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/errors.go
index 1afa639d47b64..c08cefeb433ff 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/errors.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/errors.go
@@ -67,7 +67,7 @@ func grpcForwardableError(userID string, code int, e error) error {
})
}
-// Note: does not retain a reference to `err`
+// wrapWithUser prepends the user to the error. It does not retain a reference to err.
func wrapWithUser(err error, userID string) error {
return fmt.Errorf("user=%s: %s", userID, err)
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go
index d7ef551a09a9f..25d3b18dd8893 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go
@@ -49,6 +49,8 @@ const (
var (
// This is initialised if the WAL is enabled and the records are fetched from this pool.
recordPool sync.Pool
+
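+	// errIngesterStopping is returned for requests that arrive while the ingester is shutting down.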
+ errIngesterStopping = errors.New("ingester stopping")
)
// Config for an Ingester.
@@ -452,7 +454,7 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte
userID, err := tenant.TenantID(ctx)
if err != nil {
- return nil, fmt.Errorf("no user id")
+ return nil, err
}
// Given metadata is a best-effort approach, and we don't halt on errors
@@ -535,7 +537,7 @@ func (i *Ingester) append(ctx context.Context, userID string, labels labelPairs,
}
}()
if i.stopped {
- return fmt.Errorf("ingester stopping")
+ return errIngesterStopping
}
// getOrCreateSeries copies the memory for `labels`, except on the error path.
@@ -624,7 +626,7 @@ func (i *Ingester) appendMetadata(userID string, m *cortexpb.MetricMetadata) err
i.userStatesMtx.RLock()
if i.stopped {
i.userStatesMtx.RUnlock()
- return fmt.Errorf("ingester stopping")
+ return errIngesterStopping
}
i.userStatesMtx.RUnlock()
@@ -955,7 +957,7 @@ func (i *Ingester) MetricsMetadata(ctx context.Context, req *client.MetricsMetad
userID, err := tenant.TenantID(ctx)
if err != nil {
- return nil, fmt.Errorf("no user id")
+ return nil, err
}
userMetadata := i.getUserMetadata(userID)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go
index e6cdbacb24426..a038a22619b9d 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go
@@ -216,7 +216,7 @@ func (u *userTSDB) PreCreation(metric labels.Labels) error {
// Total series limit.
if err := u.limiter.AssertMaxSeriesPerUser(u.userID, int(u.Head().NumSeries())); err != nil {
- return makeLimitError(perUserSeriesLimit, err)
+ return err
}
// Series per metric name limit.
@@ -225,7 +225,7 @@ func (u *userTSDB) PreCreation(metric labels.Labels) error {
return err
}
if err := u.seriesInMetric.canAddSeriesFor(u.userID, metricName); err != nil {
- return makeMetricLimitError(perMetricSeriesLimit, metric, err)
+ return err
}
return nil
@@ -693,7 +693,7 @@ func (i *Ingester) v2Push(ctx context.Context, req *cortexpb.WriteRequest) (*cor
userID, err := tenant.TenantID(ctx)
if err != nil {
- return nil, fmt.Errorf("no user id")
+ return nil, err
}
db, err := i.getOrCreateTSDB(userID, false)
@@ -705,7 +705,7 @@ func (i *Ingester) v2Push(ctx context.Context, req *cortexpb.WriteRequest) (*cor
i.userStatesMtx.RLock()
if i.stopped {
i.userStatesMtx.RUnlock()
- return nil, fmt.Errorf("ingester stopping")
+ return nil, errIngesterStopping
}
i.userStatesMtx.RUnlock()
@@ -720,22 +720,31 @@ func (i *Ingester) v2Push(ctx context.Context, req *cortexpb.WriteRequest) (*cor
// Keep track of some stats which are tracked only if the samples will be
// successfully committed
- succeededSamplesCount := 0
- failedSamplesCount := 0
- startAppend := time.Now()
+ var (
+ succeededSamplesCount = 0
+ failedSamplesCount = 0
+ startAppend = time.Now()
+ sampleOutOfBoundsCount = 0
+ sampleOutOfOrderCount = 0
+ newValueForTimestampCount = 0
+ perUserSeriesLimitCount = 0
+ perMetricSeriesLimitCount = 0
+
+ updateFirstPartial = func(errFn func() error) {
+ if firstPartialErr == nil {
+ firstPartialErr = errFn()
+ }
+ }
+ )
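+	// The per-category counters above let DiscardedSamples be incremented once per category after the append loop, rather than once per failed sample.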
// Walk the samples, appending them to the users database
app := db.Appender(ctx)
for _, ts := range req.Timeseries {
- // Keeps a reference to labels copy, if it was needed. This is to avoid making a copy twice,
- // once for TSDB/refcache, and second time for activeSeries map.
- var copiedLabels []labels.Label
-
// Check if we already have a cached reference for this series. Be aware
// that even if we have a reference it's not guaranteed to be still valid.
// The labels must be sorted (in our case, it's guaranteed a write request
// has sorted labels once hit the ingester).
- cachedRef, cachedRefExists := db.refCache.Ref(startAppend, cortexpb.FromLabelAdaptersToLabels(ts.Labels))
+ cachedRef, copiedLabels, cachedRefExists := db.refCache.Ref(startAppend, cortexpb.FromLabelAdaptersToLabels(ts.Labels))
// To find out if any sample was added to this series, we keep old value.
oldSucceededSamplesCount := succeededSamplesCount
@@ -745,26 +754,26 @@ func (i *Ingester) v2Push(ctx context.Context, req *cortexpb.WriteRequest) (*cor
// If the cached reference exists, we try to use it.
if cachedRefExists {
- if err = app.AddFast(cachedRef, s.TimestampMs, s.Value); err == nil {
+ var ref uint64
+ if ref, err = app.Append(cachedRef, copiedLabels, s.TimestampMs, s.Value); err == nil {
succeededSamplesCount++
+				// The returned reference may differ from the cached one, in which case we update our cache.
+ if ref != cachedRef {
+ db.refCache.SetRef(startAppend, copiedLabels, ref)
+ }
continue
}
- if errors.Cause(err) == storage.ErrNotFound {
- cachedRefExists = false
- err = nil
- }
- }
-
- // If the cached reference doesn't exist, we (re)try without using the reference.
- if !cachedRefExists {
+ } else {
var ref uint64
// Copy the label set because both TSDB and the cache may retain it.
copiedLabels = cortexpb.FromLabelAdaptersToLabelsWithCopy(ts.Labels)
- if ref, err = app.Add(copiedLabels, s.TimestampMs, s.Value); err == nil {
+ if ref, err = app.Append(0, copiedLabels, s.TimestampMs, s.Value); err == nil {
db.refCache.SetRef(startAppend, copiedLabels, ref)
+
+ // Set these in case there are multiple samples for the series.
cachedRef = ref
cachedRefExists = true
@@ -779,31 +788,32 @@ func (i *Ingester) v2Push(ctx context.Context, req *cortexpb.WriteRequest) (*cor
// of it, so that we can return it back to the distributor, which will return a
// 400 error to the client. The client (Prometheus) will not retry on 400, and
// we actually ingested all samples which haven't failed.
- cause := errors.Cause(err)
- if cause == storage.ErrOutOfBounds || cause == storage.ErrOutOfOrderSample || cause == storage.ErrDuplicateSampleForTimestamp {
- if firstPartialErr == nil {
- firstPartialErr = wrappedTSDBIngestErr(err, model.Time(s.TimestampMs), ts.Labels)
- }
+ switch cause := errors.Cause(err); cause {
+ case storage.ErrOutOfBounds:
+ sampleOutOfBoundsCount++
+ updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(s.TimestampMs), ts.Labels) })
+ continue
- switch cause {
- case storage.ErrOutOfBounds:
- validation.DiscardedSamples.WithLabelValues(sampleOutOfBounds, userID).Inc()
- case storage.ErrOutOfOrderSample:
- validation.DiscardedSamples.WithLabelValues(sampleOutOfOrder, userID).Inc()
- case storage.ErrDuplicateSampleForTimestamp:
- validation.DiscardedSamples.WithLabelValues(newValueForTimestamp, userID).Inc()
- }
+ case storage.ErrOutOfOrderSample:
+ sampleOutOfOrderCount++
+ updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(s.TimestampMs), ts.Labels) })
+ continue
+ case storage.ErrDuplicateSampleForTimestamp:
+ newValueForTimestampCount++
+ updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(s.TimestampMs), ts.Labels) })
continue
- }
- var ve *validationError
- if errors.As(cause, &ve) {
- // Caused by limits.
- if firstPartialErr == nil {
- firstPartialErr = ve
- }
- validation.DiscardedSamples.WithLabelValues(ve.errorType, userID).Inc()
+ case errMaxSeriesPerUserLimitExceeded:
+ perUserSeriesLimitCount++
+ updateFirstPartial(func() error { return makeLimitError(perUserSeriesLimit, i.limiter.FormatError(userID, cause)) })
+ continue
+
+ case errMaxSeriesPerMetricLimitExceeded:
+ perMetricSeriesLimitCount++
+ updateFirstPartial(func() error {
+ return makeMetricLimitError(perMetricSeriesLimit, copiedLabels, i.limiter.FormatError(userID, cause))
+ })
continue
}
@@ -846,6 +856,22 @@ func (i *Ingester) v2Push(ctx context.Context, req *cortexpb.WriteRequest) (*cor
i.metrics.ingestedSamples.Add(float64(succeededSamplesCount))
i.metrics.ingestedSamplesFail.Add(float64(failedSamplesCount))
+ if sampleOutOfBoundsCount > 0 {
+ validation.DiscardedSamples.WithLabelValues(sampleOutOfBounds, userID).Add(float64(sampleOutOfBoundsCount))
+ }
+ if sampleOutOfOrderCount > 0 {
+ validation.DiscardedSamples.WithLabelValues(sampleOutOfOrder, userID).Add(float64(sampleOutOfOrderCount))
+ }
+ if newValueForTimestampCount > 0 {
+ validation.DiscardedSamples.WithLabelValues(newValueForTimestamp, userID).Add(float64(newValueForTimestampCount))
+ }
+ if perUserSeriesLimitCount > 0 {
+ validation.DiscardedSamples.WithLabelValues(perUserSeriesLimit, userID).Add(float64(perUserSeriesLimitCount))
+ }
+ if perMetricSeriesLimitCount > 0 {
+ validation.DiscardedSamples.WithLabelValues(perMetricSeriesLimit, userID).Add(float64(perMetricSeriesLimitCount))
+ }
+
switch req.Source {
case cortexpb.RULE:
db.ingestedRuleSamples.add(int64(succeededSamplesCount))
@@ -1898,9 +1924,16 @@ func (i *Ingester) closeAndDeleteUserTSDBIfIdle(userID string) tsdbCloseCheckRes
// This will prevent going back to "active" state in deferred statement.
userDB.casState(closing, closed)
- i.userStatesMtx.Lock()
- delete(i.TSDBState.dbs, userID)
- i.userStatesMtx.Unlock()
+	// Only remove the user from TSDBState once everything is cleaned up.
+	// This prevents concurrency problems when Cortex tries to open a new TSDB for the same tenant
+	// (i.e. a new request comes in) while the old one is being closed.
+	// If that happened now, the push would be rejected because it could not acquire the lock
+	// while the TSDB is in the closed state.
+ defer func() {
+ i.userStatesMtx.Lock()
+ delete(i.TSDBState.dbs, userID)
+ i.userStatesMtx.Unlock()
+ }()
i.metrics.memUsers.Dec()
i.TSDBState.tsdbMetrics.removeRegistryForUser(userID)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/limiter.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/limiter.go
index 03d3d8a1b6397..c2293cceaf6b2 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/limiter.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/limiter.go
@@ -4,16 +4,18 @@ import (
"fmt"
"math"
+ "github.com/pkg/errors"
+
"github.com/cortexproject/cortex/pkg/util"
util_math "github.com/cortexproject/cortex/pkg/util/math"
"github.com/cortexproject/cortex/pkg/util/validation"
)
-const (
- errMaxSeriesPerMetricLimitExceeded = "per-metric series limit of %d exceeded, please contact administrator to raise it. (local limit: %d global limit: %d actual local limit: %d)"
- errMaxSeriesPerUserLimitExceeded = "per-user series limit of %d exceeded, please contact administrator to raise it. (local limit: %d global limit: %d actual local limit: %d)"
- errMaxMetadataPerMetricLimitExceeded = "per-metric metadata limit of %d exceeded, please contact administrator to raise it. (local limit: %d global limit: %d actual local limit: %d)"
- errMaxMetadataPerUserLimitExceeded = "per-user metric metadata limit of %d exceeded, please contact administrator to raise it. (local limit: %d global limit: %d actual local limit: %d)"
+var (
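+	// These sentinel errors carry no limit details; Limiter.FormatError enriches them with the actual per-user limits when they are surfaced.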
+ errMaxSeriesPerMetricLimitExceeded = errors.New("per-metric series limit exceeded")
+ errMaxMetadataPerMetricLimitExceeded = errors.New("per-metric metadata limit exceeded")
+ errMaxSeriesPerUserLimitExceeded = errors.New("per-user series limit exceeded")
+ errMaxMetadataPerUserLimitExceeded = errors.New("per-user metric metadata limit exceeded")
)
// RingCount is the interface exposed by a ring implementation which allows
@@ -56,59 +58,41 @@ func NewLimiter(
// AssertMaxSeriesPerMetric limit has not been reached compared to the current
// number of series in input and returns an error if so.
func (l *Limiter) AssertMaxSeriesPerMetric(userID string, series int) error {
- actualLimit := l.maxSeriesPerMetric(userID)
- if series < actualLimit {
+ if actualLimit := l.maxSeriesPerMetric(userID); series < actualLimit {
return nil
}
- localLimit := l.limits.MaxLocalSeriesPerMetric(userID)
- globalLimit := l.limits.MaxGlobalSeriesPerMetric(userID)
-
- return fmt.Errorf(errMaxSeriesPerMetricLimitExceeded, minNonZero(localLimit, globalLimit), localLimit, globalLimit, actualLimit)
+ return errMaxSeriesPerMetricLimitExceeded
}
// AssertMaxMetadataPerMetric limit has not been reached compared to the current
// number of metadata per metric in input and returns an error if so.
func (l *Limiter) AssertMaxMetadataPerMetric(userID string, metadata int) error {
- actualLimit := l.maxMetadataPerMetric(userID)
-
- if metadata < actualLimit {
+ if actualLimit := l.maxMetadataPerMetric(userID); metadata < actualLimit {
return nil
}
- localLimit := l.limits.MaxLocalMetadataPerMetric(userID)
- globalLimit := l.limits.MaxGlobalMetadataPerMetric(userID)
-
- return fmt.Errorf(errMaxMetadataPerMetricLimitExceeded, minNonZero(localLimit, globalLimit), localLimit, globalLimit, actualLimit)
+ return errMaxMetadataPerMetricLimitExceeded
}
// AssertMaxSeriesPerUser limit has not been reached compared to the current
// number of series in input and returns an error if so.
func (l *Limiter) AssertMaxSeriesPerUser(userID string, series int) error {
- actualLimit := l.maxSeriesPerUser(userID)
- if series < actualLimit {
+ if actualLimit := l.maxSeriesPerUser(userID); series < actualLimit {
return nil
}
- localLimit := l.limits.MaxLocalSeriesPerUser(userID)
- globalLimit := l.limits.MaxGlobalSeriesPerUser(userID)
-
- return fmt.Errorf(errMaxSeriesPerUserLimitExceeded, minNonZero(localLimit, globalLimit), localLimit, globalLimit, actualLimit)
+ return errMaxSeriesPerUserLimitExceeded
}
// AssertMaxMetricsWithMetadataPerUser limit has not been reached compared to the current
// number of metrics with metadata in input and returns an error if so.
func (l *Limiter) AssertMaxMetricsWithMetadataPerUser(userID string, metrics int) error {
- actualLimit := l.maxMetadataPerUser(userID)
-
- if metrics < actualLimit {
+ if actualLimit := l.maxMetadataPerUser(userID); metrics < actualLimit {
return nil
}
- localLimit := l.limits.MaxLocalMetricsWithMetadataPerUser(userID)
- globalLimit := l.limits.MaxGlobalMetricsWithMetadataPerUser(userID)
-
- return fmt.Errorf(errMaxMetadataPerUserLimitExceeded, minNonZero(localLimit, globalLimit), localLimit, globalLimit, actualLimit)
+ return errMaxMetadataPerUserLimitExceeded
}
// MaxSeriesPerQuery returns the maximum number of series a query is allowed to hit.
@@ -116,6 +100,59 @@ func (l *Limiter) MaxSeriesPerQuery(userID string) int {
return l.limits.MaxSeriesPerQuery(userID)
}
+// FormatError returns the input error enriched with the actual limits for the given user.
+// It acts as a pass-through if the input error is unknown.
+func (l *Limiter) FormatError(userID string, err error) error {
+ switch err {
+ case errMaxSeriesPerUserLimitExceeded:
+ return l.formatMaxSeriesPerUserError(userID)
+ case errMaxSeriesPerMetricLimitExceeded:
+ return l.formatMaxSeriesPerMetricError(userID)
+ case errMaxMetadataPerUserLimitExceeded:
+ return l.formatMaxMetadataPerUserError(userID)
+ case errMaxMetadataPerMetricLimitExceeded:
+ return l.formatMaxMetadataPerMetricError(userID)
+ default:
+ return err
+ }
+}
+
+func (l *Limiter) formatMaxSeriesPerUserError(userID string) error {
+ actualLimit := l.maxSeriesPerUser(userID)
+ localLimit := l.limits.MaxLocalSeriesPerUser(userID)
+ globalLimit := l.limits.MaxGlobalSeriesPerUser(userID)
+
+ return fmt.Errorf("per-user series limit of %d exceeded, please contact administrator to raise it (local limit: %d global limit: %d actual local limit: %d)",
+ minNonZero(localLimit, globalLimit), localLimit, globalLimit, actualLimit)
+}
+
+func (l *Limiter) formatMaxSeriesPerMetricError(userID string) error {
+ actualLimit := l.maxSeriesPerMetric(userID)
+ localLimit := l.limits.MaxLocalSeriesPerMetric(userID)
+ globalLimit := l.limits.MaxGlobalSeriesPerMetric(userID)
+
+ return fmt.Errorf("per-metric series limit of %d exceeded, please contact administrator to raise it (local limit: %d global limit: %d actual local limit: %d)",
+ minNonZero(localLimit, globalLimit), localLimit, globalLimit, actualLimit)
+}
+
+func (l *Limiter) formatMaxMetadataPerUserError(userID string) error {
+ actualLimit := l.maxMetadataPerUser(userID)
+ localLimit := l.limits.MaxLocalMetricsWithMetadataPerUser(userID)
+ globalLimit := l.limits.MaxGlobalMetricsWithMetadataPerUser(userID)
+
+ return fmt.Errorf("per-user metric metadata limit of %d exceeded, please contact administrator to raise it (local limit: %d global limit: %d actual local limit: %d)",
+ minNonZero(localLimit, globalLimit), localLimit, globalLimit, actualLimit)
+}
+
+func (l *Limiter) formatMaxMetadataPerMetricError(userID string) error {
+ actualLimit := l.maxMetadataPerMetric(userID)
+ localLimit := l.limits.MaxLocalMetadataPerMetric(userID)
+ globalLimit := l.limits.MaxGlobalMetadataPerMetric(userID)
+
+ return fmt.Errorf("per-metric metadata limit of %d exceeded, please contact administrator to raise it (local limit: %d global limit: %d actual local limit: %d)",
+ minNonZero(localLimit, globalLimit), localLimit, globalLimit, actualLimit)
+}
+
func (l *Limiter) maxSeriesPerMetric(userID string) int {
localLimit := l.limits.MaxLocalSeriesPerMetric(userID)
globalLimit := l.limits.MaxGlobalSeriesPerMetric(userID)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/user_metrics_metadata.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/user_metrics_metadata.go
index 4aa3c2c1f5522..ea64df9f8efab 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/user_metrics_metadata.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/user_metrics_metadata.go
@@ -41,7 +41,7 @@ func (mm *userMetricsMetadata) add(metric string, metadata *cortexpb.MetricMetad
// Verify that the user can create more metric metadata given we don't have a set for that metric name.
if err := mm.limiter.AssertMaxMetricsWithMetadataPerUser(mm.userID, len(mm.metricToMetadata)); err != nil {
validation.DiscardedMetadata.WithLabelValues(mm.userID, perUserMetadataLimit).Inc()
- return makeLimitError(perUserMetadataLimit, err)
+ return makeLimitError(perUserMetadataLimit, mm.limiter.FormatError(mm.userID, err))
}
set = metricMetadataSet{}
mm.metricToMetadata[metric] = set
@@ -49,7 +49,7 @@ func (mm *userMetricsMetadata) add(metric string, metadata *cortexpb.MetricMetad
if err := mm.limiter.AssertMaxMetadataPerMetric(mm.userID, len(set)); err != nil {
validation.DiscardedMetadata.WithLabelValues(mm.userID, perMetricMetadataLimit).Inc()
- return makeLimitError(perMetricMetadataLimit, err)
+ return makeLimitError(perMetricMetadataLimit, mm.limiter.FormatError(mm.userID, err))
}
// if we have seen this metadata before, it is a no-op and we don't need to change our metrics.
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/user_state.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/user_state.go
index e28062304db20..3617d65315ce5 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/user_state.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/user_state.go
@@ -2,7 +2,6 @@ package ingester
import (
"context"
- "fmt"
"net/http"
"sync"
"time"
@@ -181,7 +180,7 @@ func (us *userStates) teardown() {
func (us *userStates) getViaContext(ctx context.Context) (*userState, bool, error) {
userID, err := tenant.TenantID(ctx)
if err != nil {
- return nil, false, fmt.Errorf("no user id")
+ return nil, false, err
}
state, ok := us.get(userID)
return state, ok, nil
@@ -230,7 +229,7 @@ func (u *userState) createSeriesWithFingerprint(fp model.Fingerprint, metric lab
if !recovery {
if err := u.limiter.AssertMaxSeriesPerUser(u.userID, u.fpToSeries.length()); err != nil {
- return nil, makeLimitError(perUserSeriesLimit, err)
+ return nil, makeLimitError(perUserSeriesLimit, u.limiter.FormatError(u.userID, err))
}
}
@@ -244,7 +243,7 @@ func (u *userState) createSeriesWithFingerprint(fp model.Fingerprint, metric lab
// Check if the per-metric limit has been exceeded
if err = u.seriesInMetric.canAddSeriesFor(u.userID, metricName); err != nil {
// WARNING: returns a reference to `metric`
- return nil, makeMetricLimitError(perMetricSeriesLimit, cortexpb.FromLabelAdaptersToLabels(metric), err)
+ return nil, makeMetricLimitError(perMetricSeriesLimit, cortexpb.FromLabelAdaptersToLabels(metric), u.limiter.FormatError(u.userID, err))
}
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_replicated_set.go b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_replicated_set.go
index 8aaf7536c5de0..faf70f6970c91 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_replicated_set.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_replicated_set.go
@@ -144,12 +144,12 @@ func (s *blocksStoreReplicationSet) GetClientsFor(userID string, blockIDs []ulid
func getNonExcludedInstanceAddr(set ring.ReplicationSet, exclude []string, balancingStrategy loadBalancingStrategy) string {
if balancingStrategy == randomLoadBalancing {
// Randomize the list of instances to not always query the same one.
- rand.Shuffle(len(set.Ingesters), func(i, j int) {
- set.Ingesters[i], set.Ingesters[j] = set.Ingesters[j], set.Ingesters[i]
+ rand.Shuffle(len(set.Instances), func(i, j int) {
+ set.Instances[i], set.Instances[j] = set.Instances[j], set.Instances[i]
})
}
- for _, instance := range set.Ingesters {
+ for _, instance := range set.Instances {
if !util.StringsContain(exclude, instance.Addr) {
return instance.Addr
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/query_range.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/query_range.go
index 2223121cd32de..d68d2e595ffe2 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/query_range.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/query_range.go
@@ -31,8 +31,12 @@ import (
const StatusSuccess = "success"
var (
- matrix = model.ValMatrix.String()
- json = jsoniter.ConfigCompatibleWithStandardLibrary
+ matrix = model.ValMatrix.String()
+ json = jsoniter.Config{
+ EscapeHTML: false, // No HTML in our responses.
+ SortMapKeys: true,
+ ValidateJsonRawMessage: true,
+ }.Froze()
errEndBeforeStart = httpgrpc.Errorf(http.StatusBadRequest, "end timestamp must not be before start time")
errNegativeStep = httpgrpc.Errorf(http.StatusBadRequest, "zero or negative query resolution step widths are not accepted. Try a positive integer")
errStepTooSmall = httpgrpc.Errorf(http.StatusBadRequest, "exceeded maximum resolution of 11,000 points per timeseries. Try decreasing the query resolution (?step=XX)")
@@ -262,16 +266,20 @@ func (prometheusCodec) DecodeResponse(ctx context.Context, r *http.Response, _ R
log, ctx := spanlogger.New(ctx, "ParseQueryRangeResponse") //nolint:ineffassign,staticcheck
defer log.Finish()
- buf, err := ioutil.ReadAll(r.Body)
- if err != nil {
+ // Preallocate the buffer with the exact size so we don't waste allocations
+ // while progressively growing an initial small buffer. The buffer capacity
+ // is increased by MinRead to avoid extra allocations due to how ReadFrom()
+ // internally works.
+ buf := bytes.NewBuffer(make([]byte, 0, r.ContentLength+bytes.MinRead))
+ if _, err := buf.ReadFrom(r.Body); err != nil {
log.Error(err)
return nil, httpgrpc.Errorf(http.StatusInternalServerError, "error decoding response: %v", err)
}
- log.LogFields(otlog.Int("bytes", len(buf)))
+ log.LogFields(otlog.Int("bytes", buf.Len()))
var resp PrometheusResponse
- if err := json.Unmarshal(buf, &resp); err != nil {
+ if err := json.Unmarshal(buf.Bytes(), &resp); err != nil {
return nil, httpgrpc.Errorf(http.StatusInternalServerError, "error decoding response: %v", err)
}
@@ -303,8 +311,9 @@ func (prometheusCodec) EncodeResponse(ctx context.Context, res Response) (*http.
Header: http.Header{
"Content-Type": []string{"application/json"},
},
- Body: ioutil.NopCloser(bytes.NewBuffer(b)),
- StatusCode: http.StatusOK,
+ Body: ioutil.NopCloser(bytes.NewBuffer(b)),
+ StatusCode: http.StatusOK,
+ ContentLength: int64(len(b)),
}
return &resp, nil
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go b/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go
index c24dc200deea0..2528569fb35a2 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go
@@ -56,10 +56,10 @@ func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callb
if err != nil {
return err
}
- itemTrackers[i].minSuccess = len(replicationSet.Ingesters) - replicationSet.MaxErrors
+ itemTrackers[i].minSuccess = len(replicationSet.Instances) - replicationSet.MaxErrors
itemTrackers[i].maxFailures = replicationSet.MaxErrors
- for _, desc := range replicationSet.Ingesters {
+ for _, desc := range replicationSet.Instances {
curr, found := instances[desc.Addr]
if !found {
curr.itemTrackers = make([]*itemTracker, 0, expectedTrackers)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/client/ring_service_discovery.go b/vendor/github.com/cortexproject/cortex/pkg/ring/client/ring_service_discovery.go
index 1706edb2a175e..797b171c0740b 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/client/ring_service_discovery.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/client/ring_service_discovery.go
@@ -17,7 +17,7 @@ func NewRingServiceDiscovery(r ring.ReadRing) PoolServiceDiscovery {
}
var addrs []string
- for _, instance := range replicationSet.Ingesters {
+ for _, instance := range replicationSet.Instances {
addrs = append(addrs, instance.Addr)
}
return addrs, nil
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go
index 391773dff1503..253a7feb1a889 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go
@@ -6,10 +6,10 @@ import (
"time"
)
-// ReplicationSet describes the ingesters to talk to for a given key, and how
+// ReplicationSet describes the instances to talk to for a given key, and how
// many errors to tolerate.
type ReplicationSet struct {
- Ingesters []InstanceDesc
+ Instances []InstanceDesc
// Maximum number of tolerated failing instances. Max errors and max unavailable zones are
// mutually exclusive.
@@ -32,23 +32,23 @@ func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, f func(cont
// Initialise the result tracker, which is used to keep track of successes and failures.
var tracker replicationSetResultTracker
if r.MaxUnavailableZones > 0 {
- tracker = newZoneAwareResultTracker(r.Ingesters, r.MaxUnavailableZones)
+ tracker = newZoneAwareResultTracker(r.Instances, r.MaxUnavailableZones)
} else {
- tracker = newDefaultResultTracker(r.Ingesters, r.MaxErrors)
+ tracker = newDefaultResultTracker(r.Instances, r.MaxErrors)
}
var (
- ch = make(chan instanceResult, len(r.Ingesters))
+ ch = make(chan instanceResult, len(r.Instances))
forceStart = make(chan struct{}, r.MaxErrors)
)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// Spawn a goroutine for each instance.
- for i := range r.Ingesters {
+ for i := range r.Instances {
go func(i int, ing *InstanceDesc) {
// Wait to send extra requests. Works only when zone-awareness is disabled.
- if delay > 0 && r.MaxUnavailableZones == 0 && i >= len(r.Ingesters)-r.MaxErrors {
+ if delay > 0 && r.MaxUnavailableZones == 0 && i >= len(r.Instances)-r.MaxErrors {
after := time.NewTimer(delay)
defer after.Stop()
select {
@@ -64,10 +64,10 @@ func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, f func(cont
err: err,
instance: ing,
}
- }(i, &r.Ingesters[i])
+ }(i, &r.Instances[i])
}
- results := make([]interface{}, 0, len(r.Ingesters))
+ results := make([]interface{}, 0, len(r.Instances))
for !tracker.succeeded() {
select {
@@ -96,7 +96,7 @@ func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, f func(cont
// Includes returns whether the replication set includes the replica with the provided addr.
func (r ReplicationSet) Includes(addr string) bool {
- for _, instance := range r.Ingesters {
+ for _, instance := range r.Instances {
if instance.GetAddr() == addr {
return true
}
@@ -108,8 +108,8 @@ func (r ReplicationSet) Includes(addr string) bool {
// GetAddresses returns the addresses of all instances within the replication set. Returned slice
// order is not guaranteed.
func (r ReplicationSet) GetAddresses() []string {
- addrs := make([]string, 0, len(r.Ingesters))
- for _, desc := range r.Ingesters {
+ addrs := make([]string, 0, len(r.Instances))
+ for _, desc := range r.Instances {
addrs = append(addrs, desc.Addr)
}
return addrs
@@ -118,8 +118,8 @@ func (r ReplicationSet) GetAddresses() []string {
// HasReplicationSetChanged returns true if two replications sets are the same (with possibly different timestamps),
// false if they differ in any way (number of instances, instance states, tokens, zones, ...).
func HasReplicationSetChanged(before, after ReplicationSet) bool {
- beforeInstances := before.Ingesters
- afterInstances := after.Ingesters
+ beforeInstances := before.Instances
+ afterInstances := after.Instances
if len(beforeInstances) != len(afterInstances) {
return true
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go
index c28a246ea31a8..76d25e97dc9b3 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go
@@ -368,7 +368,7 @@ func (r *Ring) Get(key uint32, op Operation, bufDescs []InstanceDesc, bufHosts,
}
return ReplicationSet{
- Ingesters: healthyInstances,
+ Instances: healthyInstances,
MaxErrors: maxFailure,
}, nil
}
@@ -391,7 +391,7 @@ func (r *Ring) GetAllHealthy(op Operation) (ReplicationSet, error) {
}
return ReplicationSet{
- Ingesters: instances,
+ Instances: instances,
MaxErrors: 0,
}, nil
}
@@ -472,7 +472,7 @@ func (r *Ring) GetReplicationSetForOperation(op Operation) (ReplicationSet, erro
}
return ReplicationSet{
- Ingesters: healthyInstances,
+ Instances: healthyInstances,
MaxErrors: maxErrors,
MaxUnavailableZones: maxUnavailableZones,
}, nil
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go
index fdf6726169a8c..05be8ff1193a6 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go
@@ -21,7 +21,7 @@ import (
"gopkg.in/yaml.v3"
"github.com/cortexproject/cortex/pkg/cortexpb"
- store "github.com/cortexproject/cortex/pkg/ruler/rulespb"
+ "github.com/cortexproject/cortex/pkg/ruler/rulespb"
"github.com/cortexproject/cortex/pkg/ruler/rulestore"
"github.com/cortexproject/cortex/pkg/tenant"
util_log "github.com/cortexproject/cortex/pkg/util/log"
@@ -405,7 +405,7 @@ func (a *API) ListRules(w http.ResponseWriter, req *http.Request) {
return
}
- err = a.store.LoadRuleGroups(req.Context(), map[string]rulestore.RuleGroupList{userID: rgs})
+ err = a.store.LoadRuleGroups(req.Context(), map[string]rulespb.RuleGroupList{userID: rgs})
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
@@ -435,7 +435,7 @@ func (a *API) GetRuleGroup(w http.ResponseWriter, req *http.Request) {
return
}
- formatted := store.FromProto(rg)
+ formatted := rulespb.FromProto(rg)
marshalAndSend(formatted, w, logger)
}
@@ -495,7 +495,7 @@ func (a *API) CreateRuleGroup(w http.ResponseWriter, req *http.Request) {
return
}
- rgProto := store.ToProto(userID, namespace, rg)
+ rgProto := rulespb.ToProto(userID, namespace, rg)
level.Debug(logger).Log("msg", "attempting to store rulegroup", "userID", userID, "group", rgProto.String())
err = a.store.SetRuleGroup(req.Context(), userID, namespace, rgProto)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go
index 5bfa71fcbee79..cc76cfadc2fd0 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go
@@ -2,11 +2,13 @@ package ruler
import (
"context"
+ "errors"
"time"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/notifier"
+ "github.com/prometheus/prometheus/pkg/exemplar"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/value"
"github.com/prometheus/prometheus/promql"
@@ -31,7 +33,7 @@ type pusherAppender struct {
evaluationDelay time.Duration
}
-func (a *pusherAppender) Add(l labels.Labels, t int64, v float64) (uint64, error) {
+func (a *pusherAppender) Append(_ uint64, l labels.Labels, t int64, v float64) (uint64, error) {
a.labels = append(a.labels, l)
// Adapt staleness markers for ruler evaluation delay. As the upstream code
@@ -50,8 +52,8 @@ func (a *pusherAppender) Add(l labels.Labels, t int64, v float64) (uint64, error
return 0, nil
}
-func (a *pusherAppender) AddFast(_ uint64, _ int64, _ float64) error {
- return storage.ErrNotFound
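+// AppendExemplar implements the storage.Appender interface; exemplars are not supported by the ruler's pusher.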
+func (a *pusherAppender) AppendExemplar(_ uint64, _ labels.Labels, _ exemplar.Exemplar) (uint64, error) {
+ return 0, errors.New("exemplars are unsupported")
}
func (a *pusherAppender) Commit() error {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/manager.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/manager.go
index c901a33295f0b..83e6818bf6ec3 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ruler/manager.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/manager.go
@@ -19,7 +19,7 @@ import (
"github.com/weaveworks/common/user"
"golang.org/x/net/context/ctxhttp"
- "github.com/cortexproject/cortex/pkg/ruler/rulestore"
+ "github.com/cortexproject/cortex/pkg/ruler/rulespb"
)
type DefaultMultiTenantManager struct {
@@ -91,7 +91,7 @@ func NewDefaultMultiTenantManager(cfg Config, managerFactory ManagerFactory, reg
}, nil
}
-func (r *DefaultMultiTenantManager) SyncRuleGroups(ctx context.Context, ruleGroups map[string]rulestore.RuleGroupList) {
+func (r *DefaultMultiTenantManager) SyncRuleGroups(ctx context.Context, ruleGroups map[string]rulespb.RuleGroupList) {
// A lock is taken to ensure that if this function is called concurrently, each call
// returns only after it has mapped the rule files and checked for updates.
r.userManagerMtx.Lock()
@@ -121,7 +121,7 @@ func (r *DefaultMultiTenantManager) SyncRuleGroups(ctx context.Context, ruleGrou
// syncRulesToManager maps the rule files to disk, detects any changes and will create/update
// the user's Prometheus Rules Manager.
-func (r *DefaultMultiTenantManager) syncRulesToManager(ctx context.Context, user string, groups rulestore.RuleGroupList) {
+func (r *DefaultMultiTenantManager) syncRulesToManager(ctx context.Context, user string, groups rulespb.RuleGroupList) {
// Map the files to disk and return the file names to be passed to the user's manager if they
// have been updated
update, files, err := r.mapper.MapRules(user, groups.Formatted())
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go
index 92d128dbeac21..d99eac97eb777 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go
@@ -72,7 +72,7 @@ type Config struct {
// How frequently to poll for updated rules.
PollInterval time.Duration `yaml:"poll_interval"`
// Rule Storage and Polling configuration.
- StoreConfig RuleStoreConfig `yaml:"storage"`
+ StoreConfig RuleStoreConfig `yaml:"storage" doc:"description=Deprecated. Use -ruler-storage.* CLI flags and their respective YAML config options instead."`
// Path to store rule files for prom manager.
RulePath string `yaml:"rule_path"`
@@ -170,7 +170,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
type MultiTenantManager interface {
// SyncRuleGroups is used to sync the Manager with rules from the RuleStore.
// If existing user is missing in the ruleGroups map, its ruler manager will be stopped.
- SyncRuleGroups(ctx context.Context, ruleGroups map[string]rulestore.RuleGroupList)
+ SyncRuleGroups(ctx context.Context, ruleGroups map[string]rulespb.RuleGroupList)
// GetRules fetches rules for a particular tenant (userID).
GetRules(userID string) []*promRules.Group
// Stop stops all Manager components.
@@ -385,7 +385,7 @@ func instanceOwnsRuleGroup(r ring.ReadRing, g *rulespb.RuleGroupDesc, instanceAd
return false, errors.Wrap(err, "error reading ring to verify rule group ownership")
}
- return rlrs.Ingesters[0].Addr == instanceAddr, nil
+ return rlrs.Instances[0].Addr == instanceAddr, nil
}
func (r *Ruler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
@@ -470,7 +470,7 @@ func (r *Ruler) syncRules(ctx context.Context, reason string) {
r.manager.SyncRuleGroups(ctx, configs)
}
-func (r *Ruler) listRules(ctx context.Context) (map[string]rulestore.RuleGroupList, error) {
+func (r *Ruler) listRules(ctx context.Context) (map[string]rulespb.RuleGroupList, error) {
switch {
case !r.cfg.EnableSharding:
return r.listRulesNoSharding(ctx)
@@ -486,17 +486,17 @@ func (r *Ruler) listRules(ctx context.Context) (map[string]rulestore.RuleGroupLi
}
}
-func (r *Ruler) listRulesNoSharding(ctx context.Context) (map[string]rulestore.RuleGroupList, error) {
+func (r *Ruler) listRulesNoSharding(ctx context.Context) (map[string]rulespb.RuleGroupList, error) {
return r.store.ListAllRuleGroups(ctx)
}
-func (r *Ruler) listRulesShardingDefault(ctx context.Context) (map[string]rulestore.RuleGroupList, error) {
+func (r *Ruler) listRulesShardingDefault(ctx context.Context) (map[string]rulespb.RuleGroupList, error) {
configs, err := r.store.ListAllRuleGroups(ctx)
if err != nil {
return nil, err
}
- filteredConfigs := make(map[string]rulestore.RuleGroupList)
+ filteredConfigs := make(map[string]rulespb.RuleGroupList)
for userID, groups := range configs {
filtered := filterRuleGroups(userID, groups, r.ring, r.lifecycler.GetInstanceAddr(), r.logger, r.ringCheckErrors)
if len(filtered) > 0 {
@@ -506,7 +506,7 @@ func (r *Ruler) listRulesShardingDefault(ctx context.Context) (map[string]rulest
return filteredConfigs, nil
}
-func (r *Ruler) listRulesShuffleSharding(ctx context.Context) (map[string]rulestore.RuleGroupList, error) {
+func (r *Ruler) listRulesShuffleSharding(ctx context.Context) (map[string]rulespb.RuleGroupList, error) {
users, err := r.store.ListAllUsers(ctx)
if err != nil {
return nil, errors.Wrap(err, "unable to list users of ruler")
@@ -540,7 +540,7 @@ func (r *Ruler) listRulesShuffleSharding(ctx context.Context) (map[string]rulest
close(userCh)
mu := sync.Mutex{}
- result := map[string]rulestore.RuleGroupList{}
+ result := map[string]rulespb.RuleGroupList{}
concurrency := loadRulesConcurrency
if len(userRings) < concurrency {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulespb/custom.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rulespb/custom.go
new file mode 100644
index 0000000000000..b0043092829a4
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/rulespb/custom.go
@@ -0,0 +1,21 @@
+package rulespb
+
+import "github.com/prometheus/prometheus/pkg/rulefmt"
+
+// RuleGroupList contains a set of rule groups
+type RuleGroupList []*RuleGroupDesc
+
+// Formatted returns the rule group list as a set of formatted rule groups mapped
+// by namespace
+func (l RuleGroupList) Formatted() map[string][]rulefmt.RuleGroup {
+ ruleMap := map[string][]rulefmt.RuleGroup{}
+ for _, g := range l {
+ if _, exists := ruleMap[g.Namespace]; !exists {
+ ruleMap[g.Namespace] = []rulefmt.RuleGroup{FromProto(g)}
+ continue
+ }
+ ruleMap[g.Namespace] = append(ruleMap[g.Namespace], FromProto(g))
+
+ }
+ return ruleMap
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/bucketclient/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/bucketclient/bucket_client.go
index a655302c3a80d..9f996307a13b0 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/bucketclient/bucket_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/bucketclient/bucket_client.go
@@ -97,8 +97,8 @@ func (b *BucketRuleStore) ListAllUsers(ctx context.Context) ([]string, error) {
}
// ListAllRuleGroups implements rules.RuleStore.
-func (b *BucketRuleStore) ListAllRuleGroups(ctx context.Context) (map[string]rulestore.RuleGroupList, error) {
- out := map[string]rulestore.RuleGroupList{}
+func (b *BucketRuleStore) ListAllRuleGroups(ctx context.Context) (map[string]rulespb.RuleGroupList, error) {
+ out := map[string]rulespb.RuleGroupList{}
// List rule groups for all tenants.
err := b.bucket.Iter(ctx, "", func(key string) error {
@@ -126,10 +126,10 @@ func (b *BucketRuleStore) ListAllRuleGroups(ctx context.Context) (map[string]rul
}
// ListRuleGroupsForUserAndNamespace implements rules.RuleStore.
-func (b *BucketRuleStore) ListRuleGroupsForUserAndNamespace(ctx context.Context, userID string, namespace string) (rulestore.RuleGroupList, error) {
+func (b *BucketRuleStore) ListRuleGroupsForUserAndNamespace(ctx context.Context, userID string, namespace string) (rulespb.RuleGroupList, error) {
userBucket := bucket.NewUserBucketClient(userID, b.bucket, b.cfgProvider)
- groupList := rulestore.RuleGroupList{}
+ groupList := rulespb.RuleGroupList{}
// The prefix to list objects depends on whether the namespace has been
// specified in the request.
@@ -162,7 +162,7 @@ func (b *BucketRuleStore) ListRuleGroupsForUserAndNamespace(ctx context.Context,
}
// LoadRuleGroups implements rules.RuleStore.
-func (b *BucketRuleStore) LoadRuleGroups(ctx context.Context, groupsToLoad map[string]rulestore.RuleGroupList) error {
+func (b *BucketRuleStore) LoadRuleGroups(ctx context.Context, groupsToLoad map[string]rulespb.RuleGroupList) error {
ch := make(chan *rulespb.RuleGroupDesc)
// Given that we store one file per rule group, we create a pool of workers that will
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/config.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/config.go
index 2e28b67848568..d44843bab8eb0 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/config.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/config.go
@@ -4,25 +4,24 @@ import (
"flag"
"github.com/cortexproject/cortex/pkg/configs/client"
+ "github.com/cortexproject/cortex/pkg/ruler/rulestore/configdb"
+ "github.com/cortexproject/cortex/pkg/ruler/rulestore/local"
"github.com/cortexproject/cortex/pkg/storage/bucket"
)
-const (
- ConfigDB = "configdb"
-
- Name = "ruler-storage"
- prefix = "ruler-storage."
-)
-
// Config configures a rule store.
type Config struct {
bucket.Config `yaml:",inline"`
ConfigDB client.Config `yaml:"configdb"`
+ Local local.Config `yaml:"local"`
}
// RegisterFlags registers the backend storage config.
func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
- cfg.ExtraBackends = []string{ConfigDB}
+ prefix := "ruler-storage."
+
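+	// configdb and local are accepted as backends in addition to the object-store ones.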
+ cfg.ExtraBackends = []string{configdb.Name, local.Name}
cfg.ConfigDB.RegisterFlagsWithPrefix(prefix, f)
+ cfg.Local.RegisterFlagsWithPrefix(prefix, f)
cfg.RegisterFlagsWithPrefix(prefix, f)
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/configdb/store.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/configdb/store.go
index 6f1a8e7dcee8c..5d125a920d943 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/configdb/store.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/configdb/store.go
@@ -7,14 +7,17 @@ import (
"github.com/cortexproject/cortex/pkg/configs/client"
"github.com/cortexproject/cortex/pkg/configs/userconfig"
"github.com/cortexproject/cortex/pkg/ruler/rulespb"
- "github.com/cortexproject/cortex/pkg/ruler/rulestore"
+)
+
+const (
+ Name = "configdb"
)
// ConfigRuleStore is a concrete implementation of RuleStore that sources rules from the config service
type ConfigRuleStore struct {
configClient client.Client
since userconfig.ID
- ruleGroupList map[string]rulestore.RuleGroupList
+ ruleGroupList map[string]rulespb.RuleGroupList
}
func (c *ConfigRuleStore) SupportsModifications() bool {
@@ -26,7 +29,7 @@ func NewConfigRuleStore(c client.Client) *ConfigRuleStore {
return &ConfigRuleStore{
configClient: c,
since: 0,
- ruleGroupList: make(map[string]rulestore.RuleGroupList),
+ ruleGroupList: make(map[string]rulespb.RuleGroupList),
}
}
@@ -43,7 +46,7 @@ func (c *ConfigRuleStore) ListAllUsers(ctx context.Context) ([]string, error) {
}
// ListAllRuleGroups implements RuleStore
-func (c *ConfigRuleStore) ListAllRuleGroups(ctx context.Context) (map[string]rulestore.RuleGroupList, error) {
+func (c *ConfigRuleStore) ListAllRuleGroups(ctx context.Context) (map[string]rulespb.RuleGroupList, error) {
configs, err := c.configClient.GetRules(ctx, c.since)
if err != nil {
@@ -51,7 +54,7 @@ func (c *ConfigRuleStore) ListAllRuleGroups(ctx context.Context) (map[string]rul
}
for user, cfg := range configs {
- userRules := rulestore.RuleGroupList{}
+ userRules := rulespb.RuleGroupList{}
if cfg.IsDeleted() {
delete(c.ruleGroupList, user)
continue
@@ -85,7 +88,7 @@ func getLatestConfigID(cfgs map[string]userconfig.VersionedRulesConfig, latest u
return ret
}
-func (c *ConfigRuleStore) ListRuleGroupsForUserAndNamespace(ctx context.Context, userID string, namespace string) (rulestore.RuleGroupList, error) {
+func (c *ConfigRuleStore) ListRuleGroupsForUserAndNamespace(ctx context.Context, userID string, namespace string) (rulespb.RuleGroupList, error) {
r, err := c.ListAllRuleGroups(ctx)
if err != nil {
return nil, err
@@ -107,7 +110,7 @@ func (c *ConfigRuleStore) ListRuleGroupsForUserAndNamespace(ctx context.Context,
return list, nil
}
-func (c *ConfigRuleStore) LoadRuleGroups(ctx context.Context, groupsToLoad map[string]rulestore.RuleGroupList) error {
+func (c *ConfigRuleStore) LoadRuleGroups(ctx context.Context, groupsToLoad map[string]rulespb.RuleGroupList) error {
// Since ConfigRuleStore already Loads the rules in the List methods, there is nothing left to do here.
return nil
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/local/local.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/local/local.go
index 2daf79e6a0647..f3061e1f280c3 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/local/local.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/local/local.go
@@ -11,7 +11,10 @@ import (
promRules "github.com/prometheus/prometheus/rules"
"github.com/cortexproject/cortex/pkg/ruler/rulespb"
- "github.com/cortexproject/cortex/pkg/ruler/rulestore"
+)
+
+const (
+ Name = "local"
)
type Config struct {
@@ -70,13 +73,13 @@ func (l *Client) ListAllUsers(ctx context.Context) ([]string, error) {
}
// ListAllRuleGroups implements rules.RuleStore. This method also loads the rules.
-func (l *Client) ListAllRuleGroups(ctx context.Context) (map[string]rulestore.RuleGroupList, error) {
+func (l *Client) ListAllRuleGroups(ctx context.Context) (map[string]rulespb.RuleGroupList, error) {
users, err := l.ListAllUsers(ctx)
if err != nil {
return nil, err
}
- lists := make(map[string]rulestore.RuleGroupList)
+ lists := make(map[string]rulespb.RuleGroupList)
for _, user := range users {
list, err := l.loadAllRulesGroupsForUser(ctx, user)
if err != nil {
@@ -90,7 +93,7 @@ func (l *Client) ListAllRuleGroups(ctx context.Context) (map[string]rulestore.Ru
}
// ListRuleGroupsForUserAndNamespace implements rules.RuleStore. This method also loads the rules.
-func (l *Client) ListRuleGroupsForUserAndNamespace(ctx context.Context, userID string, namespace string) (rulestore.RuleGroupList, error) {
+func (l *Client) ListRuleGroupsForUserAndNamespace(ctx context.Context, userID string, namespace string) (rulespb.RuleGroupList, error) {
if namespace != "" {
return l.loadAllRulesGroupsForUserAndNamespace(ctx, userID, namespace)
}
@@ -98,7 +101,7 @@ func (l *Client) ListRuleGroupsForUserAndNamespace(ctx context.Context, userID s
return l.loadAllRulesGroupsForUser(ctx, userID)
}
-func (l *Client) LoadRuleGroups(_ context.Context, _ map[string]rulestore.RuleGroupList) error {
+func (l *Client) LoadRuleGroups(_ context.Context, _ map[string]rulespb.RuleGroupList) error {
// This Client already loads the rules in its List methods, there is nothing left to do here.
return nil
}
@@ -123,8 +126,8 @@ func (l *Client) DeleteNamespace(ctx context.Context, userID, namespace string)
return errors.New("DeleteNamespace unsupported in rule local store")
}
-func (l *Client) loadAllRulesGroupsForUser(ctx context.Context, userID string) (rulestore.RuleGroupList, error) {
- var allLists rulestore.RuleGroupList
+func (l *Client) loadAllRulesGroupsForUser(ctx context.Context, userID string) (rulespb.RuleGroupList, error) {
+ var allLists rulespb.RuleGroupList
root := filepath.Join(l.cfg.Directory, userID)
infos, err := ioutil.ReadDir(root)
@@ -159,7 +162,7 @@ func (l *Client) loadAllRulesGroupsForUser(ctx context.Context, userID string) (
return allLists, nil
}
-func (l *Client) loadAllRulesGroupsForUserAndNamespace(_ context.Context, userID string, namespace string) (rulestore.RuleGroupList, error) {
+func (l *Client) loadAllRulesGroupsForUserAndNamespace(_ context.Context, userID string, namespace string) (rulespb.RuleGroupList, error) {
filename := filepath.Join(l.cfg.Directory, userID, namespace)
rulegroups, allErrors := l.loader.Load(filename)
@@ -167,7 +170,7 @@ func (l *Client) loadAllRulesGroupsForUserAndNamespace(_ context.Context, userID
return nil, errors.Wrapf(allErrors[0], "error parsing %s", filename)
}
- var list rulestore.RuleGroupList
+ var list rulespb.RuleGroupList
for _, group := range rulegroups.Groups {
desc := rulespb.ToProto(userID, namespace, group)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/objectclient/rule_store.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/objectclient/rule_store.go
index d68485c98363d..a61ebba411b19 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/objectclient/rule_store.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/objectclient/rule_store.go
@@ -104,7 +104,7 @@ func (o *RuleStore) ListAllUsers(ctx context.Context) ([]string, error) {
}
// ListAllRuleGroups implements rules.RuleStore.
-func (o *RuleStore) ListAllRuleGroups(ctx context.Context) (map[string]rulestore.RuleGroupList, error) {
+func (o *RuleStore) ListAllRuleGroups(ctx context.Context) (map[string]rulespb.RuleGroupList, error) {
// No delimiter to get *all* rule groups for all users and namespaces.
ruleGroupObjects, _, err := o.client.List(ctx, rulePrefix, "")
if err != nil {
@@ -114,7 +114,7 @@ func (o *RuleStore) ListAllRuleGroups(ctx context.Context) (map[string]rulestore
return convertRuleGroupObjectsToMap(ruleGroupObjects), nil
}
-func (o *RuleStore) ListRuleGroupsForUserAndNamespace(ctx context.Context, userID, namespace string) (rulestore.RuleGroupList, error) {
+func (o *RuleStore) ListRuleGroupsForUserAndNamespace(ctx context.Context, userID, namespace string) (rulespb.RuleGroupList, error) {
ruleGroupObjects, _, err := o.client.List(ctx, generateRuleObjectKey(userID, namespace, ""), "")
if err != nil {
return nil, err
@@ -123,7 +123,7 @@ func (o *RuleStore) ListRuleGroupsForUserAndNamespace(ctx context.Context, userI
return convertRuleGroupObjectsToMap(ruleGroupObjects)[userID], nil
}
-func (o *RuleStore) LoadRuleGroups(ctx context.Context, groupsToLoad map[string]rulestore.RuleGroupList) error {
+func (o *RuleStore) LoadRuleGroups(ctx context.Context, groupsToLoad map[string]rulespb.RuleGroupList) error {
ch := make(chan *rulespb.RuleGroupDesc)
// Given that we store one file per rule group, we create a pool of workers that will
@@ -176,8 +176,8 @@ outer:
return g.Wait()
}
-func convertRuleGroupObjectsToMap(ruleGroupObjects []chunk.StorageObject) map[string]rulestore.RuleGroupList {
- result := map[string]rulestore.RuleGroupList{}
+func convertRuleGroupObjectsToMap(ruleGroupObjects []chunk.StorageObject) map[string]rulespb.RuleGroupList {
+ result := map[string]rulespb.RuleGroupList{}
for _, rg := range ruleGroupObjects {
user, namespace, group := decomposeRuleObjectKey(rg.Key)
if user == "" || namespace == "" || group == "" {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/store.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/store.go
index 4bc443659d70f..d8b97ed05a779 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/store.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/store.go
@@ -4,8 +4,6 @@ import (
"context"
"errors"
- "github.com/prometheus/prometheus/pkg/rulefmt"
-
"github.com/cortexproject/cortex/pkg/ruler/rulespb"
)
@@ -26,16 +24,16 @@ type RuleStore interface {
ListAllUsers(ctx context.Context) ([]string, error)
// ListAllRuleGroups returns all rule groups for all users.
- ListAllRuleGroups(ctx context.Context) (map[string]RuleGroupList, error)
+ ListAllRuleGroups(ctx context.Context) (map[string]rulespb.RuleGroupList, error)
// ListRuleGroupsForUserAndNamespace returns all the active rule groups for a user from given namespace.
// If namespace is empty, groups from all namespaces are returned.
- ListRuleGroupsForUserAndNamespace(ctx context.Context, userID string, namespace string) (RuleGroupList, error)
+ ListRuleGroupsForUserAndNamespace(ctx context.Context, userID string, namespace string) (rulespb.RuleGroupList, error)
// LoadRuleGroups loads rules for each rule group in the map.
// The parameter with the groups to load *MUST* come from one of the List methods.
// The reason is that some implementations do nothing, since their List method already loads the rules.
- LoadRuleGroups(ctx context.Context, groupsToLoad map[string]RuleGroupList) error
+ LoadRuleGroups(ctx context.Context, groupsToLoad map[string]rulespb.RuleGroupList) error
GetRuleGroup(ctx context.Context, userID, namespace, group string) (*rulespb.RuleGroupDesc, error)
SetRuleGroup(ctx context.Context, userID, namespace string, group *rulespb.RuleGroupDesc) error
@@ -47,21 +45,3 @@ type RuleStore interface {
// If namespace is empty, deletes all rule groups for user.
DeleteNamespace(ctx context.Context, userID, namespace string) error
}
-
-// RuleGroupList contains a set of rule groups
-type RuleGroupList []*rulespb.RuleGroupDesc
-
-// Formatted returns the rule group list as a set of formatted rule groups mapped
-// by namespace
-func (l RuleGroupList) Formatted() map[string][]rulefmt.RuleGroup {
- ruleMap := map[string][]rulefmt.RuleGroup{}
- for _, g := range l {
- if _, exists := ruleMap[g.Namespace]; !exists {
- ruleMap[g.Namespace] = []rulefmt.RuleGroup{rulespb.FromProto(g)}
- continue
- }
- ruleMap[g.Namespace] = append(ruleMap[g.Namespace], rulespb.FromProto(g))
-
- }
- return ruleMap
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/storage.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/storage.go
index 642871192e385..ef008f5325513 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ruler/storage.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/storage.go
@@ -26,6 +26,7 @@ import (
)
// RuleStoreConfig configures a rule store.
+// TODO remove this legacy config in Cortex 1.11.
type RuleStoreConfig struct {
Type string `yaml:"type"`
ConfigDB client.Config `yaml:"configdb"`
@@ -115,8 +116,8 @@ func NewLegacyRuleStore(cfg RuleStoreConfig, loader promRules.GroupLoader, logge
}
// NewRuleStore returns a rule store backend client based on the provided cfg.
-func NewRuleStore(ctx context.Context, cfg rulestore.Config, cfgProvider bucket.TenantConfigProvider, logger log.Logger, reg prometheus.Registerer) (rulestore.RuleStore, error) {
- if cfg.Backend == rulestore.ConfigDB {
+func NewRuleStore(ctx context.Context, cfg rulestore.Config, cfgProvider bucket.TenantConfigProvider, loader promRules.GroupLoader, logger log.Logger, reg prometheus.Registerer) (rulestore.RuleStore, error) {
+ if cfg.Backend == configdb.Name {
c, err := client.New(cfg.ConfigDB)
if err != nil {
@@ -126,7 +127,11 @@ func NewRuleStore(ctx context.Context, cfg rulestore.Config, cfgProvider bucket.
return configdb.NewConfigRuleStore(c), nil
}
- bucketClient, err := bucket.NewClient(ctx, cfg.Config, rulestore.Name, logger, reg)
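+	// The local backend reads rule groups directly from disk via the provided group loader, so no bucket client is needed.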
+ if cfg.Backend == local.Name {
+ return local.NewLocalRulesClient(cfg.Local, loader)
+ }
+
+ bucketClient, err := bucket.NewClient(ctx, cfg.Config, "ruler-storage", logger, reg)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/ref_cache.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/ref_cache.go
index 27f625b24c77d..62e061a31297e 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/ref_cache.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/ref_cache.go
@@ -57,8 +57,8 @@ func NewRefCache() *RefCache {
}
// Ref returns the cached series reference, and guarantees the input labels set
-// is NOT retained.
-func (c *RefCache) Ref(now time.Time, series labels.Labels) (uint64, bool) {
+// is NOT retained. We also output the copied set of labels which are safe to use.
+func (c *RefCache) Ref(now time.Time, series labels.Labels) (uint64, labels.Labels, bool) {
fp := client.Fingerprint(series)
stripeID := util.HashFP(fp) % numRefCacheStripes
@@ -81,24 +81,25 @@ func (c *RefCache) Purge(keepUntil time.Time) {
}
}
-func (s *refCacheStripe) ref(now time.Time, series labels.Labels, fp model.Fingerprint) (uint64, bool) {
+func (s *refCacheStripe) ref(now time.Time, series labels.Labels, fp model.Fingerprint) (uint64, labels.Labels, bool) {
s.refsMu.RLock()
defer s.refsMu.RUnlock()
entries, ok := s.refs[fp]
if !ok {
- return 0, false
+ return 0, nil, false
}
for ix := range entries {
- if labels.Equal(entries[ix].lbs, series) {
+ lbs := entries[ix].lbs
+ if labels.Equal(lbs, series) {
// Since we use read-only lock, we need to use atomic update.
entries[ix].touchedAt.Store(now.UnixNano())
- return entries[ix].ref, true
+ return entries[ix].ref, lbs, true
}
}
- return 0, false
+ return 0, nil, false
}
func (s *refCacheStripe) setRef(now time.Time, series labels.Labels, fp model.Fingerprint, ref uint64) {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/limiter/rate_limiter.go b/vendor/github.com/cortexproject/cortex/pkg/util/limiter/rate_limiter.go
index f2fa22b117824..48fc6a4262375 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/limiter/rate_limiter.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/limiter/rate_limiter.go
@@ -26,17 +26,6 @@ type RateLimiter struct {
tenants map[string]*tenantLimiter
}
-// Reservation is similar to rate.Reservation but excludes interfaces which do
-// not make sense to expose, because we are following the semantics of AllowN,
-// being an immediate reservation, i.e. not delayed into the future.
-type Reservation interface {
- // CancelAt returns the reservation to the rate limiter for use by other
- // requests. Note that typically the reservation should be canceled with
- // the same timestamp it was requested with, or not all the tokens
- // consumed will be returned.
- CancelAt(now time.Time)
-}
-
type tenantLimiter struct {
limiter *rate.Limiter
recheckAt time.Time
@@ -53,29 +42,9 @@ func NewRateLimiter(strategy RateLimiterStrategy, recheckPeriod time.Duration) *
}
}
-// AllowN reports whether n tokens may be consumed happen at time now. The
-// reservation of tokens can be canceled using CancelAt on the returned object.
-func (l *RateLimiter) AllowN(now time.Time, tenantID string, n int) (bool, Reservation) {
-
- // Using ReserveN allows cancellation of the reservation, but
- // the semantics are subtly different to AllowN.
- r := l.getTenantLimiter(now, tenantID).ReserveN(now, n)
- if !r.OK() {
- return false, nil
- }
-
- // ReserveN will still return OK if the necessary tokens are
- // available in the future, and tells us this time delay. In
- // order to mimic the semantics of AllowN, we must check that
- // there is no delay before we can use them.
- if r.DelayFrom(now) > 0 {
- // Having decided not to use the reservation, return the
- // tokens to the rate limiter.
- r.CancelAt(now)
- return false, nil
- }
-
- return true, r
+// AllowN reports whether n tokens may be consumed at time now.
+func (l *RateLimiter) AllowN(now time.Time, tenantID string, n int) bool {
+ return l.getTenantLimiter(now, tenantID).AllowN(now, n)
}
// Limit returns the currently configured maximum overall tokens rate.
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/errors.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/errors.go
new file mode 100644
index 0000000000000..dbc6a0a213a6d
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/errors.go
@@ -0,0 +1,162 @@
+package validation
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/prometheus/common/model"
+
+ "github.com/cortexproject/cortex/pkg/cortexpb"
+)
+
+// ValidationError is an error returned by series validation.
+//
+// nolint:golint ignore stutter warning
+type ValidationError error
+
+// genericValidationError is a basic implementation of ValidationError which can be used when the
+// error format only contains the cause and the series.
+type genericValidationError struct {
+ message string
+ cause string
+ series []cortexpb.LabelAdapter
+}
+
+func (e *genericValidationError) Error() string {
+ return fmt.Sprintf(e.message, e.cause, formatLabelSet(e.series))
+}
+
+func newLabelNameTooLongError(series []cortexpb.LabelAdapter, labelName string) ValidationError {
+ return &genericValidationError{
+ message: "label name too long: %.200q metric %.200q",
+ cause: labelName,
+ series: series,
+ }
+}
+
+func newLabelValueTooLongError(series []cortexpb.LabelAdapter, labelValue string) ValidationError {
+ return &genericValidationError{
+ message: "label value too long: %.200q metric %.200q",
+ cause: labelValue,
+ series: series,
+ }
+}
+
+func newInvalidLabelError(series []cortexpb.LabelAdapter, labelName string) ValidationError {
+ return &genericValidationError{
+ message: "sample invalid label: %.200q metric %.200q",
+ cause: labelName,
+ series: series,
+ }
+}
+
+func newDuplicatedLabelError(series []cortexpb.LabelAdapter, labelName string) ValidationError {
+ return &genericValidationError{
+ message: "duplicate label name: %.200q metric %.200q",
+ cause: labelName,
+ series: series,
+ }
+}
+
+func newLabelsNotSortedError(series []cortexpb.LabelAdapter, labelName string) ValidationError {
+ return &genericValidationError{
+ message: "labels not sorted: %.200q metric %.200q",
+ cause: labelName,
+ series: series,
+ }
+}
+
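+// tooManyLabelsError is a ValidationError reporting that a series exceeds the configured label count limit.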
+type tooManyLabelsError struct {
+ series []cortexpb.LabelAdapter
+ limit int
+}
+
+func newTooManyLabelsError(series []cortexpb.LabelAdapter, limit int) ValidationError {
+ return &tooManyLabelsError{
+ series: series,
+ limit: limit,
+ }
+}
+
+func (e *tooManyLabelsError) Error() string {
+ return fmt.Sprintf(
+ "series has too many labels (actual: %d, limit: %d) series: '%s'",
+ len(e.series), e.limit, cortexpb.FromLabelAdaptersToMetric(e.series).String())
+}
+
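+// noMetricNameError is a ValidationError reporting that a series is missing its metric name.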
+type noMetricNameError struct{}
+
+func newNoMetricNameError() ValidationError {
+ return &noMetricNameError{}
+}
+
+func (e *noMetricNameError) Error() string {
+ return "sample missing metric name"
+}
+
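+// invalidMetricNameError is a ValidationError reporting that the metric name is not valid.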
+type invalidMetricNameError struct {
+ metricName string
+}
+
+func newInvalidMetricNameError(metricName string) ValidationError {
+ return &invalidMetricNameError{
+ metricName: metricName,
+ }
+}
+
+func (e *invalidMetricNameError) Error() string {
+ return fmt.Sprintf("sample invalid metric name: %.200q", e.metricName)
+}
+
+// sampleValidationError is a ValidationError implementation suitable for sample validation errors.
+type sampleValidationError struct {
+ message string
+ metricName string
+ timestamp int64
+}
+
+func (e *sampleValidationError) Error() string {
+ return fmt.Sprintf(e.message, e.timestamp, e.metricName)
+}
+
+func newSampleTimestampTooOldError(metricName string, timestamp int64) ValidationError {
+ return &sampleValidationError{
+ message: "timestamp too old: %d metric: %.200q",
+ metricName: metricName,
+ timestamp: timestamp,
+ }
+}
+
+func newSampleTimestampTooNewError(metricName string, timestamp int64) ValidationError {
+ return &sampleValidationError{
+ message: "timestamp too new: %d metric: %.200q",
+ metricName: metricName,
+ timestamp: timestamp,
+ }
+}
+
+// formatLabelSet formats label adapters as a metric name with labels, while preserving
+// label order, and keeping duplicates. If there are multiple "__name__" labels, only
+// the first one is used as the metric name; the others are included as regular labels.
+func formatLabelSet(ls []cortexpb.LabelAdapter) string {
+ metricName, hasMetricName := "", false
+
+ labelStrings := make([]string, 0, len(ls))
+ for _, l := range ls {
+ if l.Name == model.MetricNameLabel && !hasMetricName && l.Value != "" {
+ metricName = l.Value
+ hasMetricName = true
+ } else {
+ labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", l.Name, l.Value))
+ }
+ }
+
+ if len(labelStrings) == 0 {
+ if hasMetricName {
+ return metricName
+ }
+ return "{}"
+ }
+
+ return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go
index 8e4b154bdaeb9..10f1f42b121e8 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go
@@ -1,7 +1,6 @@
package validation
import (
- "fmt"
"net/http"
"strings"
"time"
@@ -31,17 +30,6 @@ const (
helpTooLong = "help_too_long"
unitTooLong = "unit_too_long"
- errMissingMetricName = "sample missing metric name"
- errInvalidMetricName = "sample invalid metric name: %.200q"
- errInvalidLabel = "sample invalid label: %.200q metric %.200q"
- errLabelNameTooLong = "label name too long: %.200q metric %.200q"
- errLabelValueTooLong = "label value too long: %.200q metric %.200q"
- errTooManyLabels = "series has too many labels (actual: %d, limit: %d) series: '%s'"
- errTooOld = "sample for '%s' has timestamp too old: %d"
- errTooNew = "sample for '%s' has timestamp too new: %d"
- errDuplicateLabelName = "duplicate label name: %.200q metric %.200q"
- errLabelsNotSorted = "labels not sorted: %.200q metric %.200q"
-
// ErrQueryTooLong is used in chunk store, querier and query frontend.
ErrQueryTooLong = "the query time range exceeds the limit (query length: %s, limit: %s)"
@@ -95,15 +83,15 @@ type SampleValidationConfig interface {
}
// ValidateSample returns an err if the sample is invalid.
-func ValidateSample(cfg SampleValidationConfig, userID string, metricName string, s cortexpb.Sample) error {
+func ValidateSample(cfg SampleValidationConfig, userID string, metricName string, s cortexpb.Sample) ValidationError {
if cfg.RejectOldSamples(userID) && model.Time(s.TimestampMs) < model.Now().Add(-cfg.RejectOldSamplesMaxAge(userID)) {
DiscardedSamples.WithLabelValues(greaterThanMaxSampleAge, userID).Inc()
- return httpgrpc.Errorf(http.StatusBadRequest, errTooOld, metricName, model.Time(s.TimestampMs))
+ return newSampleTimestampTooOldError(metricName, s.TimestampMs)
}
if model.Time(s.TimestampMs) > model.Now().Add(cfg.CreationGracePeriod(userID)) {
DiscardedSamples.WithLabelValues(tooFarInFuture, userID).Inc()
- return httpgrpc.Errorf(http.StatusBadRequest, errTooNew, metricName, model.Time(s.TimestampMs))
+ return newSampleTimestampTooNewError(metricName, s.TimestampMs)
}
return nil
@@ -118,60 +106,49 @@ type LabelValidationConfig interface {
}
// ValidateLabels returns an err if the labels are invalid.
-func ValidateLabels(cfg LabelValidationConfig, userID string, ls []cortexpb.LabelAdapter, skipLabelNameValidation bool) error {
+func ValidateLabels(cfg LabelValidationConfig, userID string, ls []cortexpb.LabelAdapter, skipLabelNameValidation bool) ValidationError {
if cfg.EnforceMetricName(userID) {
metricName, err := extract.MetricNameFromLabelAdapters(ls)
if err != nil {
DiscardedSamples.WithLabelValues(missingMetricName, userID).Inc()
- return httpgrpc.Errorf(http.StatusBadRequest, errMissingMetricName)
+ return newNoMetricNameError()
}
if !model.IsValidMetricName(model.LabelValue(metricName)) {
DiscardedSamples.WithLabelValues(invalidMetricName, userID).Inc()
- return httpgrpc.Errorf(http.StatusBadRequest, errInvalidMetricName, metricName)
+ return newInvalidMetricNameError(metricName)
}
}
numLabelNames := len(ls)
if numLabelNames > cfg.MaxLabelNamesPerSeries(userID) {
DiscardedSamples.WithLabelValues(maxLabelNamesPerSeries, userID).Inc()
- return httpgrpc.Errorf(http.StatusBadRequest, errTooManyLabels, numLabelNames, cfg.MaxLabelNamesPerSeries(userID), cortexpb.FromLabelAdaptersToMetric(ls).String())
+ return newTooManyLabelsError(ls, cfg.MaxLabelNamesPerSeries(userID))
}
maxLabelNameLength := cfg.MaxLabelNameLength(userID)
maxLabelValueLength := cfg.MaxLabelValueLength(userID)
lastLabelName := ""
for _, l := range ls {
- var errTemplate string
- var reason string
- var cause interface{}
if !skipLabelNameValidation && !model.LabelName(l.Name).IsValid() {
- reason = invalidLabel
- errTemplate = errInvalidLabel
- cause = l.Name
+ DiscardedSamples.WithLabelValues(invalidLabel, userID).Inc()
+ return newInvalidLabelError(ls, l.Name)
} else if len(l.Name) > maxLabelNameLength {
- reason = labelNameTooLong
- errTemplate = errLabelNameTooLong
- cause = l.Name
+ DiscardedSamples.WithLabelValues(labelNameTooLong, userID).Inc()
+ return newLabelNameTooLongError(ls, l.Name)
} else if len(l.Value) > maxLabelValueLength {
- reason = labelValueTooLong
- errTemplate = errLabelValueTooLong
- cause = l.Value
+ DiscardedSamples.WithLabelValues(labelValueTooLong, userID).Inc()
+ return newLabelValueTooLongError(ls, l.Value)
} else if cmp := strings.Compare(lastLabelName, l.Name); cmp >= 0 {
if cmp == 0 {
- reason = duplicateLabelNames
- errTemplate = errDuplicateLabelName
- cause = l.Name
- } else {
- reason = labelsNotSorted
- errTemplate = errLabelsNotSorted
- cause = l.Name
+ DiscardedSamples.WithLabelValues(duplicateLabelNames, userID).Inc()
+ return newDuplicatedLabelError(ls, l.Name)
}
+
+ DiscardedSamples.WithLabelValues(labelsNotSorted, userID).Inc()
+ return newLabelsNotSortedError(ls, l.Name)
}
- if errTemplate != "" {
- DiscardedSamples.WithLabelValues(reason, userID).Inc()
- return httpgrpc.Errorf(http.StatusBadRequest, errTemplate, cause, formatLabelSet(ls))
- }
+
lastLabelName = l.Name
}
return nil
@@ -216,32 +193,6 @@ func ValidateMetadata(cfg MetadataValidationConfig, userID string, metadata *cor
return nil
}
-// this function formats label adapters as a metric name with labels, while preserving
-// label order, and keeping duplicates. If there are multiple "__name__" labels, only
-// first one is used as metric name, other ones will be included as regular labels.
-func formatLabelSet(ls []cortexpb.LabelAdapter) string {
- metricName, hasMetricName := "", false
-
- labelStrings := make([]string, 0, len(ls))
- for _, l := range ls {
- if l.Name == model.MetricNameLabel && !hasMetricName && l.Value != "" {
- metricName = l.Value
- hasMetricName = true
- } else {
- labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", l.Name, l.Value))
- }
- }
-
- if len(labelStrings) == 0 {
- if hasMetricName {
- return metricName
- }
- return "{}"
- }
-
- return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
-}
-
func DeletePerUserValidationMetrics(userID string, log log.Logger) {
filter := map[string]string{"user": userID}
diff --git a/vendor/github.com/go-zookeeper/zk/.codecov.yaml b/vendor/github.com/go-zookeeper/zk/.codecov.yaml
new file mode 100644
index 0000000000000..98475205d2868
--- /dev/null
+++ b/vendor/github.com/go-zookeeper/zk/.codecov.yaml
@@ -0,0 +1,8 @@
+coverage:
+ status:
+ patch:
+ default:
+ target: 75%
+ project:
+ default:
+ threshold: 1%
diff --git a/vendor/github.com/go-zookeeper/zk/.gitignore b/vendor/github.com/go-zookeeper/zk/.gitignore
new file mode 100644
index 0000000000000..9c83a9667c6f7
--- /dev/null
+++ b/vendor/github.com/go-zookeeper/zk/.gitignore
@@ -0,0 +1,8 @@
+.vscode/
+.DS_Store
+profile.cov
+zookeeper
+zookeeper-*/
+zookeeper-*.tar.gz
+apache-zookeeper-*/
+apache-zookeeper-*.tar.gz
diff --git a/vendor/github.com/go-zookeeper/zk/CONTRIBUTION.md b/vendor/github.com/go-zookeeper/zk/CONTRIBUTION.md
new file mode 100644
index 0000000000000..b7c6e258d7a5f
--- /dev/null
+++ b/vendor/github.com/go-zookeeper/zk/CONTRIBUTION.md
@@ -0,0 +1,57 @@
+# How to contribute to the go zookeeper library
+
+## **Did you find a bug?**
+
+* **Ensure the bug was not already reported** by searching on GitHub under [Issues](https://github.com/go-zookeeper/zk/issues).
+
+* If you're unable to find an open issue addressing the problem, open a new one.
+ * Be sure to include a title and clear description.
+ * Be sure to include the actual behavior vs the expected.
+  * Include as much relevant information as possible: a code sample or an executable test case demonstrating the expected vs actual behavior.
+
+## Did you write a patch that fixes a bug?
+
+* Ensure that all bugs are first reported as an issue. This will help others in finding fixes through issues first.
+
+* Open a PR referencing the issue for the bug.
+
+## Pull Requests
+
+We are open to all Pull Requests; it's best to accompany a request with an issue.
+
+* The PR requires the GitHub Actions checks to pass.
+
+* Requires at least one maintainer to approve the PR to merge to master.
+
+While the above must be satisfied prior to having your pull request reviewed, the reviewer(s) may ask you to complete additional design work, tests, or other changes before your pull request can be ultimately accepted.
+
+## Versioned Releases
+
+Since this library is a core client for interacting with Zookeeper, we do [SemVer](https://semver.org/) releases to ensure predictable changes for users.
+
+Zookeeper itself maintains a compatibility check on the main codebase and preserves backwards compatibility across all major releases; this core library will try to uphold similar release standards.
+
+* Code that is merged into master should be ready for release at any given time.
+  * That is to say, code should not be merged into master if it is not complete and ready for production use.
+
+* If a fix needs to be released ahead of normal operations, file an issue explaining the urgency and impact of the bug.
+
+## Coding guidelines
+
+Some good external resources for style:
+
+1. [Effective Go](https://golang.org/doc/effective_go.html)
+2. [The Go common mistakes guide](https://github.com/golang/go/wiki/CodeReviewComments)
+
+All code should be error-free when run through `golint` and `go vet`. We
+recommend setting up your editor to:
+
+* Run `goimports` on save
+* Run `golint` and `go vet` to check for errors
+
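+For example, a quick local check might look like this (a minimal sketch, assuming the tools are already installed):
+
+```sh
+goimports -w .
+golint ./...
+go vet ./...
+```
+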
+You can find information in editor support for Go tools here:
+<https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins>
+
+## Additional information
+
+* We have zero external dependencies, and would like to maintain this. Use of any external Go library should be limited to tests.
diff --git a/vendor/github.com/samuel/go-zookeeper/LICENSE b/vendor/github.com/go-zookeeper/zk/LICENSE
similarity index 100%
rename from vendor/github.com/samuel/go-zookeeper/LICENSE
rename to vendor/github.com/go-zookeeper/zk/LICENSE
diff --git a/vendor/github.com/go-zookeeper/zk/Makefile b/vendor/github.com/go-zookeeper/zk/Makefile
new file mode 100644
index 0000000000000..f0b7965cdde40
--- /dev/null
+++ b/vendor/github.com/go-zookeeper/zk/Makefile
@@ -0,0 +1,51 @@
+# Makefile to hold the build and test setup logic
+ZK_VERSION ?= 3.5.6
+
+# Apache changed the name of the archive in version 3.5.x and separated out
+# src and binary packages
+ZK_MINOR_VER=$(word 2, $(subst ., ,$(ZK_VERSION)))
+ifeq ($(shell test $(ZK_MINOR_VER) -le 4; echo $$?),0)
+ ZK = zookeeper-$(ZK_VERSION)
+else
+ ZK = apache-zookeeper-$(ZK_VERSION)-bin
+endif
+ZK_URL = "https://archive.apache.org/dist/zookeeper/zookeeper-$(ZK_VERSION)/$(ZK).tar.gz"
+
+PACKAGES := $(shell go list ./... | grep -v examples)
+
+.DEFAULT_GOAL := test
+
+$(ZK):
+ wget $(ZK_URL)
+ tar -zxf $(ZK).tar.gz
+ rm $(ZK).tar.gz
+
+zookeeper: $(ZK)
+	# We link to a standard directory path so the tests don't need to locate the
+	# install based on version. This allows backward-compatible testing.
+ ln -s $(ZK) zookeeper
+
+.PHONY: setup
+setup: zookeeper
+
+.PHONY: lint
+lint:
+ go fmt ./...
+ go vet ./...
+
+.PHONY: build
+build:
+ go build ./...
+
+.PHONY: test
+test: build zookeeper
+ go test -timeout 500s -v -race -covermode atomic -coverprofile=profile.cov $(PACKAGES)
+
+.PHONY: clean
+clean:
+ rm -f apache-zookeeper-*.tar.gz
+ rm -f zookeeper-*.tar.gz
+ rm -rf apache-zookeeper-*/
+ rm -rf zookeeper-*/
+ rm -f zookeeper
+ rm -f profile.cov
diff --git a/vendor/github.com/go-zookeeper/zk/README.md b/vendor/github.com/go-zookeeper/zk/README.md
new file mode 100644
index 0000000000000..0028096f37d55
--- /dev/null
+++ b/vendor/github.com/go-zookeeper/zk/README.md
@@ -0,0 +1,11 @@
+Native Go Zookeeper Client Library
+===================================
+
+[](https://godoc.org/github.com/go-zookeeper/zk)
+[](https://github.com/go-zookeeper/zk/actions?query=branch%3Amaster)
+[](https://codecov.io/gh/go-zookeeper/zk/branch/master)
+
+License
+-------
+
+3-clause BSD. See LICENSE file.
diff --git a/vendor/github.com/samuel/go-zookeeper/zk/conn.go b/vendor/github.com/go-zookeeper/zk/conn.go
similarity index 85%
rename from vendor/github.com/samuel/go-zookeeper/zk/conn.go
rename to vendor/github.com/go-zookeeper/zk/conn.go
index da9503a271619..97377eceaec62 100644
--- a/vendor/github.com/samuel/go-zookeeper/zk/conn.go
+++ b/vendor/github.com/go-zookeeper/zk/conn.go
@@ -10,13 +10,13 @@ Possible watcher events:
*/
import (
+ "context"
"crypto/rand"
"encoding/binary"
"errors"
"fmt"
"io"
"net"
- "strconv"
"strings"
"sync"
"sync/atomic"
@@ -82,6 +82,7 @@ type Conn struct {
eventChan chan Event
eventCallback EventCallback // may be nil
shouldQuit chan struct{}
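+	// shouldQuitOnce ensures shouldQuit is closed at most once, making Close safe to call repeatedly.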
+ shouldQuitOnce sync.Once
pingInterval time.Duration
recvTimeout time.Duration
connectTimeout time.Duration
@@ -101,9 +102,10 @@ type Conn struct {
reconnectLatch chan struct{}
setWatchLimit int
setWatchCallback func([]*setWatchesRequest)
+
// Debug (for recurring re-auth hang)
debugCloseRecvLoop bool
- debugReauthDone chan struct{}
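+	// resendZkAuthFn re-submits stored credentials after a reconnect; it defaults to resendZkAuth.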
+ resendZkAuthFn func(context.Context, *Conn) error
logger Logger
logInfo bool // true if information messages are logged; false if only errors are logged
@@ -177,15 +179,7 @@ func Connect(servers []string, sessionTimeout time.Duration, options ...connOpti
return nil, nil, errors.New("zk: server list must not be empty")
}
- srvs := make([]string, len(servers))
-
- for i, addr := range servers {
- if strings.Contains(addr, ":") {
- srvs[i] = addr
- } else {
- srvs[i] = addr + ":" + strconv.Itoa(DefaultPort)
- }
- }
+ srvs := FormatServers(servers)
// Randomize the order of the servers to avoid creating hotspots
stringShuffle(srvs)
@@ -206,6 +200,7 @@ func Connect(servers []string, sessionTimeout time.Duration, options ...connOpti
logger: DefaultLogger,
logInfo: true, // default is true for backwards compatibility
buf: make([]byte, bufferSize),
+ resendZkAuthFn: resendZkAuth,
}
// Set provided options.
@@ -218,9 +213,11 @@ func Connect(servers []string, sessionTimeout time.Duration, options ...connOpti
}
conn.setTimeouts(int32(sessionTimeout / time.Millisecond))
+ // TODO: This context should be passed in by the caller to be the connection lifecycle context.
+ ctx := context.Background()
go func() {
- conn.loop()
+ conn.loop(ctx)
conn.flushRequests(ErrClosing)
conn.invalidateWatches(ErrClosing)
close(conn.eventChan)
@@ -309,13 +306,17 @@ func WithMaxConnBufferSize(maxBufferSize int) connOption {
}
}
+// Close will submit a close request with ZK and signal the connection to stop
+// sending and receiving packets.
func (c *Conn) Close() {
- close(c.shouldQuit)
+ c.shouldQuitOnce.Do(func() {
+ close(c.shouldQuit)
- select {
- case <-c.queueRequest(opClose, &closeRequest{}, &closeResponse{}, nil):
- case <-time.After(time.Second):
- }
+ select {
+ case <-c.queueRequest(opClose, &closeRequest{}, &closeResponse{}, nil):
+ case <-time.After(time.Second):
+ }
+ })
}
// State returns the current state of the connection.
@@ -364,7 +365,9 @@ func (c *Conn) connect() error {
c.serverMu.Lock()
c.server, retryStart = c.hostProvider.Next()
c.serverMu.Unlock()
+
c.setState(StateConnecting)
+
if retryStart {
c.flushUnsentRequests(ErrNoServer)
select {
@@ -382,70 +385,12 @@ func (c *Conn) connect() error {
c.conn = zkConn
c.setState(StateConnected)
if c.logInfo {
- c.logger.Printf("Connected to %s", c.Server())
+ c.logger.Printf("connected to %s", c.Server())
}
return nil
}
- c.logger.Printf("Failed to connect to %s: %+v", c.Server(), err)
- }
-}
-
-func (c *Conn) resendZkAuth(reauthReadyChan chan struct{}) {
- shouldCancel := func() bool {
- select {
- case <-c.shouldQuit:
- return true
- case <-c.closeChan:
- return true
- default:
- return false
- }
- }
-
- c.credsMu.Lock()
- defer c.credsMu.Unlock()
-
- defer close(reauthReadyChan)
-
- if c.logInfo {
- c.logger.Printf("re-submitting `%d` credentials after reconnect", len(c.creds))
- }
-
- for _, cred := range c.creds {
- if shouldCancel() {
- return
- }
- resChan, err := c.sendRequest(
- opSetAuth,
- &setAuthRequest{Type: 0,
- Scheme: cred.scheme,
- Auth: cred.auth,
- },
- &setAuthResponse{},
- nil)
-
- if err != nil {
- c.logger.Printf("call to sendRequest failed during credential resubmit: %s", err)
- // FIXME(prozlach): lets ignore errors for now
- continue
- }
-
- var res response
- select {
- case res = <-resChan:
- case <-c.closeChan:
- c.logger.Printf("recv closed, cancel re-submitting credentials")
- return
- case <-c.shouldQuit:
- c.logger.Printf("should quit, cancel re-submitting credentials")
- return
- }
- if res.err != nil {
- c.logger.Printf("credential re-submit failed: %s", res.err)
- // FIXME(prozlach): lets ignore errors for now
- continue
- }
+ c.logger.Printf("failed to connect to %s: %v", c.Server(), err)
}
}
@@ -474,7 +419,7 @@ func (c *Conn) sendRequest(
return rq.recvChan, nil
}
-func (c *Conn) loop() {
+func (c *Conn) loop(ctx context.Context) {
for {
if err := c.connect(); err != nil {
// c.Close() was called
@@ -495,25 +440,29 @@ func (c *Conn) loop() {
}
c.hostProvider.Connected() // mark success
c.closeChan = make(chan struct{}) // channel to tell send loop stop
- reauthChan := make(chan struct{}) // channel to tell send loop that authdata has been resubmitted
var wg sync.WaitGroup
+
wg.Add(1)
go func() {
- <-reauthChan
- if c.debugCloseRecvLoop {
- close(c.debugReauthDone)
+ defer c.conn.Close() // causes recv loop to EOF/exit
+ defer wg.Done()
+
+ if err := c.resendZkAuthFn(ctx, c); err != nil {
+ c.logger.Printf("error in resending auth creds: %v", err)
+ return
}
- err := c.sendLoop()
- if err != nil || c.logInfo {
- c.logger.Printf("send loop terminated: err=%v", err)
+
+ if err := c.sendLoop(); err != nil || c.logInfo {
+ c.logger.Printf("send loop terminated: %v", err)
}
- c.conn.Close() // causes recv loop to EOF/exit
- wg.Done()
}()
wg.Add(1)
go func() {
+ defer close(c.closeChan) // tell send loop to exit
+ defer wg.Done()
+
var err error
if c.debugCloseRecvLoop {
err = errors.New("DEBUG: close recv loop")
@@ -521,17 +470,13 @@ func (c *Conn) loop() {
err = c.recvLoop(c.conn)
}
if err != io.EOF || c.logInfo {
- c.logger.Printf("recv loop terminated: err=%v", err)
+ c.logger.Printf("recv loop terminated: %v", err)
}
if err == nil {
panic("zk: recvLoop should never return nil error")
}
- close(c.closeChan) // tell send loop to exit
- wg.Done()
}()
- c.resendZkAuth(reauthChan)
-
c.sendSetWatches()
wg.Wait()
}
@@ -671,7 +616,7 @@ func (c *Conn) sendSetWatches() {
for _, req := range reqs {
_, err := c.request(opSetWatches, req, res, nil)
if err != nil {
- c.logger.Printf("Failed to set previous watches: %s", err.Error())
+ c.logger.Printf("Failed to set previous watches: %v", err)
break
}
}
@@ -695,28 +640,20 @@ func (c *Conn) authenticate() error {
binary.BigEndian.PutUint32(buf[:4], uint32(n))
- if err := c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout * 10)); err != nil {
- return err
- }
+ c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout * 10))
_, err = c.conn.Write(buf[:n+4])
+ c.conn.SetWriteDeadline(time.Time{})
if err != nil {
return err
}
- if err := c.conn.SetWriteDeadline(time.Time{}); err != nil {
- return err
- }
// Receive and decode a connect response.
- if err := c.conn.SetReadDeadline(time.Now().Add(c.recvTimeout * 10)); err != nil {
- return err
- }
+ c.conn.SetReadDeadline(time.Now().Add(c.recvTimeout * 10))
_, err = io.ReadFull(c.conn, buf[:4])
+ c.conn.SetReadDeadline(time.Time{})
if err != nil {
return err
}
- if err := c.conn.SetReadDeadline(time.Time{}); err != nil {
- return err
- }
blen := int(binary.BigEndian.Uint32(buf[:4]))
if cap(buf) < blen {
@@ -778,18 +715,14 @@ func (c *Conn) sendData(req *request) error {
c.requests[req.xid] = req
c.requestsLock.Unlock()
- if err := c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout)); err != nil {
- return err
- }
+ c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout))
_, err = c.conn.Write(c.buf[:n+4])
+ c.conn.SetWriteDeadline(time.Time{})
if err != nil {
req.recvChan <- response{-1, err}
c.conn.Close()
return err
}
- if err := c.conn.SetWriteDeadline(time.Time{}); err != nil {
- return err
- }
return nil
}
@@ -812,17 +745,13 @@ func (c *Conn) sendLoop() error {
binary.BigEndian.PutUint32(c.buf[:4], uint32(n))
- if err := c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout)); err != nil {
- return err
- }
+ c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout))
_, err = c.conn.Write(c.buf[:n+4])
+ c.conn.SetWriteDeadline(time.Time{})
if err != nil {
c.conn.Close()
return err
}
- if err := c.conn.SetWriteDeadline(time.Time{}); err != nil {
- return err
- }
case <-c.closeChan:
return nil
}
@@ -854,12 +783,10 @@ func (c *Conn) recvLoop(conn net.Conn) error {
}
_, err = io.ReadFull(conn, buf[:blen])
+ conn.SetReadDeadline(time.Time{})
if err != nil {
return err
}
- if err := conn.SetReadDeadline(time.Time{}); err != nil {
- return err
- }
res := responseHeader{}
_, err = decodePacket(buf[:16], &res)
@@ -892,7 +819,7 @@ func (c *Conn) recvLoop(conn net.Conn) error {
c.watchersLock.Lock()
for _, t := range wTypes {
wpt := watchPathType{res.Path, t}
- if watchers, ok := c.watchers[wpt]; ok {
+ if watchers := c.watchers[wpt]; len(watchers) > 0 {
for _, ch := range watchers {
ch <- ev
close(ch)
@@ -957,16 +884,51 @@ func (c *Conn) queueRequest(opcode int32, req interface{}, res interface{}, recv
opcode: opcode,
pkt: req,
recvStruct: res,
- recvChan: make(chan response, 1),
+ recvChan: make(chan response, 2),
recvFunc: recvFunc,
}
- c.sendChan <- rq
+
+ switch opcode {
+ case opClose:
+ // always attempt to send close ops.
+ select {
+ case c.sendChan <- rq:
+ case <-time.After(c.connectTimeout * 2):
+ c.logger.Printf("gave up trying to send opClose to server")
+ rq.recvChan <- response{-1, ErrConnectionClosed}
+ }
+ default:
+ // otherwise avoid deadlocks for dumb clients who aren't aware that
+ // the ZK connection is closed yet.
+ select {
+ case <-c.shouldQuit:
+ rq.recvChan <- response{-1, ErrConnectionClosed}
+ case c.sendChan <- rq:
+ // check for a tie
+ select {
+ case <-c.shouldQuit:
+ // maybe the caller gets this, maybe not; we tried.
+ rq.recvChan <- response{-1, ErrConnectionClosed}
+ default:
+ }
+ }
+ }
return rq.recvChan
}
func (c *Conn) request(opcode int32, req interface{}, res interface{}, recvFunc func(*request, *responseHeader, error)) (int64, error) {
r := <-c.queueRequest(opcode, req, res, recvFunc)
- return r.zxid, r.err
+ select {
+ case <-c.shouldQuit:
+ // queueRequest() can be racy, double-check for the race here and avoid
+ // a potential data race. Otherwise the client of this func may try to
+ // access `res` fields concurrently with the async response processor.
+ // NOTE: callers of this func should check for (at least) ErrConnectionClosed
+ // and avoid accessing fields of the response object if such error is present.
+ return -1, ErrConnectionClosed
+ default:
+ return r.zxid, r.err
+ }
}
func (c *Conn) AddAuth(scheme string, auth []byte) error {
@@ -1002,6 +964,9 @@ func (c *Conn) Children(path string) ([]string, *Stat, error) {
res := &getChildren2Response{}
_, err := c.request(opGetChildren2, &getChildren2Request{Path: path, Watch: false}, res, nil)
+ if err == ErrConnectionClosed {
+ return nil, nil, err
+ }
return res.Children, &res.Stat, err
}
@@ -1030,6 +995,9 @@ func (c *Conn) Get(path string) ([]byte, *Stat, error) {
res := &getDataResponse{}
_, err := c.request(opGetData, &getDataRequest{Path: path, Watch: false}, res, nil)
+ if err == ErrConnectionClosed {
+ return nil, nil, err
+ }
return res.Data, &res.Stat, err
}
@@ -1059,6 +1027,9 @@ func (c *Conn) Set(path string, data []byte, version int32) (*Stat, error) {
res := &setDataResponse{}
_, err := c.request(opSetData, &SetDataRequest{path, data, version}, res, nil)
+ if err == ErrConnectionClosed {
+ return nil, err
+ }
return &res.Stat, err
}
@@ -1069,6 +1040,35 @@ func (c *Conn) Create(path string, data []byte, flags int32, acl []ACL) (string,
res := &createResponse{}
_, err := c.request(opCreate, &CreateRequest{path, data, acl, flags}, res, nil)
+ if err == ErrConnectionClosed {
+ return "", err
+ }
+ return res.Path, err
+}
+
+func (c *Conn) CreateContainer(path string, data []byte, flags int32, acl []ACL) (string, error) {
+ if err := validatePath(path, flags&FlagSequence == FlagSequence); err != nil {
+ return "", err
+ }
+ if flags&FlagTTL != FlagTTL {
+ return "", ErrInvalidFlags
+ }
+
+ res := &createResponse{}
+ _, err := c.request(opCreateContainer, &CreateContainerRequest{path, data, acl, flags}, res, nil)
+ return res.Path, err
+}
+
+func (c *Conn) CreateTTL(path string, data []byte, flags int32, acl []ACL, ttl time.Duration) (string, error) {
+ if err := validatePath(path, flags&FlagSequence == FlagSequence); err != nil {
+ return "", err
+ }
+ if flags&FlagTTL != FlagTTL {
+ return "", ErrInvalidFlags
+ }
+
+ res := &createResponse{}
+ _, err := c.request(opCreateTTL, &CreateTTLRequest{path, data, acl, flags, ttl.Milliseconds()}, res, nil)
return res.Path, err
}
@@ -1137,6 +1137,9 @@ func (c *Conn) Exists(path string) (bool, *Stat, error) {
res := &existsResponse{}
_, err := c.request(opExists, &existsRequest{Path: path, Watch: false}, res, nil)
+ if err == ErrConnectionClosed {
+ return false, nil, err
+ }
exists := true
if err == ErrNoNode {
exists = false
@@ -1177,6 +1180,9 @@ func (c *Conn) GetACL(path string) ([]ACL, *Stat, error) {
res := &getAclResponse{}
_, err := c.request(opGetAcl, &getAclRequest{Path: path}, res, nil)
+ if err == ErrConnectionClosed {
+ return nil, nil, err
+ }
return res.Acl, &res.Stat, err
}
func (c *Conn) SetACL(path string, acl []ACL, version int32) (*Stat, error) {
@@ -1186,6 +1192,9 @@ func (c *Conn) SetACL(path string, acl []ACL, version int32) (*Stat, error) {
res := &setAclResponse{}
_, err := c.request(opSetAcl, &setAclRequest{Path: path, Acl: acl, Version: version}, res, nil)
+ if err == ErrConnectionClosed {
+ return nil, err
+ }
return &res.Stat, err
}
@@ -1196,6 +1205,9 @@ func (c *Conn) Sync(path string) (string, error) {
res := &syncResponse{}
_, err := c.request(opSync, &syncRequest{Path: path}, res, nil)
+ if err == ErrConnectionClosed {
+ return "", err
+ }
return res.Path, err
}
@@ -1231,6 +1243,9 @@ func (c *Conn) Multi(ops ...interface{}) ([]MultiResponse, error) {
}
res := &multiResponse{}
_, err := c.request(opMulti, req, res, nil)
+ if err == ErrConnectionClosed {
+ return nil, err
+ }
mr := make([]MultiResponse, len(res.Ops))
for i, op := range res.Ops {
mr[i] = MultiResponse{Stat: op.Stat, String: op.String, Error: op.Err.toError()}
@@ -1239,8 +1254,11 @@ func (c *Conn) Multi(ops ...interface{}) ([]MultiResponse, error) {
}
// IncrementalReconfig is the zookeeper reconfiguration api that allows adding and removing servers
-// by lists of members.
-// Return the new configuration stats.
+// by lists of members. For more info refer to the ZK documentation.
+//
+// An optional version allows for conditional reconfigurations, -1 ignores the condition.
+//
+// Returns the new configuration znode stat.
func (c *Conn) IncrementalReconfig(joining, leaving []string, version int64) (*Stat, error) {
// TODO: validate the shape of the member string to give early feedback.
request := &reconfigRequest{
@@ -1252,9 +1270,12 @@ func (c *Conn) IncrementalReconfig(joining, leaving []string, version int64) (*S
return c.internalReconfig(request)
}
-// Reconfig is the non-incremental update functionality for Zookeeper where the list preovided
-// is the entire new member list.
-// the optional version allows for conditional reconfigurations, -1 ignores the condition.
+// Reconfig is the non-incremental update functionality for Zookeeper where the list provided
+// is the entire new member list. For more info refer to the ZK documentation.
+//
+// An optional version allows for conditional reconfigurations, -1 ignores the condition.
+//
+// Returns the new configuration znode stat.
func (c *Conn) Reconfig(members []string, version int64) (*Stat, error) {
request := &reconfigRequest{
NewMembers: []byte(strings.Join(members, ",")),
@@ -1276,3 +1297,62 @@ func (c *Conn) Server() string {
defer c.serverMu.Unlock()
return c.server
}
+
+func resendZkAuth(ctx context.Context, c *Conn) error {
+ shouldCancel := func() bool {
+ select {
+ case <-c.shouldQuit:
+ return true
+ case <-c.closeChan:
+ return true
+ default:
+ return false
+ }
+ }
+
+ c.credsMu.Lock()
+ defer c.credsMu.Unlock()
+
+ if c.logInfo {
+ c.logger.Printf("re-submitting `%d` credentials after reconnect", len(c.creds))
+ }
+
+ for _, cred := range c.creds {
+ // return early before attempting to send request.
+ if shouldCancel() {
+ return nil
+ }
+ // do not use the public API for auth since it depends on the send/recv loops
+ // that are waiting for this to return
+ resChan, err := c.sendRequest(
+ opSetAuth,
+ &setAuthRequest{Type: 0,
+ Scheme: cred.scheme,
+ Auth: cred.auth,
+ },
+ &setAuthResponse{},
+ nil, /* recvFunc*/
+ )
+ if err != nil {
+ return fmt.Errorf("failed to send auth request: %v", err)
+ }
+
+ var res response
+ select {
+ case res = <-resChan:
+ case <-c.closeChan:
+ c.logger.Printf("recv closed, cancel re-submitting credentials")
+ return nil
+ case <-c.shouldQuit:
+ c.logger.Printf("should quit, cancel re-submitting credentials")
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ if res.err != nil {
+ return fmt.Errorf("failed conneciton setAuth request: %v", res.err)
+ }
+ }
+
+ return nil
+}
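With the double-check added to request() above, a caller that races with Close() can now observe ErrConnectionClosed and, per the NOTE, must not read the response fields. A minimal caller-side sketch, assuming a local server and a hypothetical znode path:

```go
package main

import (
	"log"
	"time"

	"github.com/go-zookeeper/zk"
)

func main() {
	conn, _, err := zk.Connect([]string{"127.0.0.1:2181"}, time.Second)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	data, stat, err := conn.Get("/some/znode")
	if err == zk.ErrConnectionClosed {
		// Per the NOTE in request(): the response fields may be unset
		// when the request raced with Close(), so bail out here.
		log.Println("connection closed mid-request")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("data=%q version=%d", data, stat.Version)
}
```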
diff --git a/vendor/github.com/samuel/go-zookeeper/zk/constants.go b/vendor/github.com/go-zookeeper/zk/constants.go
similarity index 82%
rename from vendor/github.com/samuel/go-zookeeper/zk/constants.go
rename to vendor/github.com/go-zookeeper/zk/constants.go
index ccafcfc977ab6..d914301f24ec2 100644
--- a/vendor/github.com/samuel/go-zookeeper/zk/constants.go
+++ b/vendor/github.com/go-zookeeper/zk/constants.go
@@ -12,25 +12,27 @@ const (
)
const (
- opNotify = 0
- opCreate = 1
- opDelete = 2
- opExists = 3
- opGetData = 4
- opSetData = 5
- opGetAcl = 6
- opSetAcl = 7
- opGetChildren = 8
- opSync = 9
- opPing = 11
- opGetChildren2 = 12
- opCheck = 13
- opMulti = 14
- opReconfig = 16
- opClose = -11
- opSetAuth = 100
- opSetWatches = 101
- opError = -1
+ opNotify = 0
+ opCreate = 1
+ opDelete = 2
+ opExists = 3
+ opGetData = 4
+ opSetData = 5
+ opGetAcl = 6
+ opSetAcl = 7
+ opGetChildren = 8
+ opSync = 9
+ opPing = 11
+ opGetChildren2 = 12
+ opCheck = 13
+ opMulti = 14
+ opReconfig = 16
+ opCreateContainer = 19
+ opCreateTTL = 21
+ opClose = -11
+ opSetAuth = 100
+ opSetWatches = 101
+ opError = -1
// Not in protocol, used internally
opWatcherEvent = -2
)
@@ -72,6 +74,7 @@ const (
const (
FlagEphemeral = 1
FlagSequence = 2
+ FlagTTL = 4
)
var (
@@ -94,7 +97,7 @@ func (s State) String() string {
if name := stateNames[s]; name != "" {
return name
}
- return "unknown state"
+ return "Unknown"
}
type ErrCode int32
@@ -111,6 +114,7 @@ var (
ErrNotEmpty = errors.New("zk: node has children")
ErrSessionExpired = errors.New("zk: session has been expired by the server")
ErrInvalidACL = errors.New("zk: invalid ACL specified")
+ ErrInvalidFlags = errors.New("zk: invalid flags specified")
ErrAuthFailed = errors.New("zk: client authentication failed")
ErrClosing = errors.New("zk: zookeeper is closing")
ErrNothing = errors.New("zk: no server responses to process")
@@ -191,24 +195,26 @@ const (
var (
emptyPassword = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
opNames = map[int32]string{
- opNotify: "notify",
- opCreate: "create",
- opDelete: "delete",
- opExists: "exists",
- opGetData: "getData",
- opSetData: "setData",
- opGetAcl: "getACL",
- opSetAcl: "setACL",
- opGetChildren: "getChildren",
- opSync: "sync",
- opPing: "ping",
- opGetChildren2: "getChildren2",
- opCheck: "check",
- opMulti: "multi",
- opReconfig: "reconfig",
- opClose: "close",
- opSetAuth: "setAuth",
- opSetWatches: "setWatches",
+ opNotify: "notify",
+ opCreate: "create",
+ opCreateContainer: "createContainer",
+ opCreateTTL: "createTTL",
+ opDelete: "delete",
+ opExists: "exists",
+ opGetData: "getData",
+ opSetData: "setData",
+ opGetAcl: "getACL",
+ opSetAcl: "setACL",
+ opGetChildren: "getChildren",
+ opSync: "sync",
+ opPing: "ping",
+ opGetChildren2: "getChildren2",
+ opCheck: "check",
+ opMulti: "multi",
+ opReconfig: "reconfig",
+ opClose: "close",
+ opSetAuth: "setAuth",
+ opSetWatches: "setWatches",
opWatcherEvent: "watcherEvent",
}
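The new opCreateContainer/opCreateTTL codes and FlagTTL back the CreateTTL method added above, which refuses to proceed unless FlagTTL is set. A hedged sketch, assuming a ZooKeeper 3.5.3+ server with TTL nodes enabled and an already-connected *zk.Conn; the path and payload are hypothetical:

```go
package jobs

import (
	"time"

	"github.com/go-zookeeper/zk"
)

// createJobNode creates a TTL node that the server may reap once it has
// been idle (no children, no writes) for an hour. Omitting zk.FlagTTL
// makes CreateTTL fail fast with ErrInvalidFlags.
func createJobNode(conn *zk.Conn) (string, error) {
	return conn.CreateTTL(
		"/jobs/job-0001", // hypothetical path
		[]byte(`{"state":"queued"}`),
		zk.FlagTTL,
		zk.WorldACL(zk.PermAll),
		time.Hour,
	)
}
```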
diff --git a/vendor/github.com/samuel/go-zookeeper/zk/dnshostprovider.go b/vendor/github.com/go-zookeeper/zk/dnshostprovider.go
similarity index 100%
rename from vendor/github.com/samuel/go-zookeeper/zk/dnshostprovider.go
rename to vendor/github.com/go-zookeeper/zk/dnshostprovider.go
diff --git a/vendor/github.com/samuel/go-zookeeper/zk/flw.go b/vendor/github.com/go-zookeeper/zk/flw.go
similarity index 96%
rename from vendor/github.com/samuel/go-zookeeper/zk/flw.go
rename to vendor/github.com/go-zookeeper/zk/flw.go
index 1fb8b2aed0206..0ccc486618022 100644
--- a/vendor/github.com/samuel/go-zookeeper/zk/flw.go
+++ b/vendor/github.com/go-zookeeper/zk/flw.go
@@ -24,7 +24,7 @@ func FLWSrvr(servers []string, timeout time.Duration) ([]*ServerStats, bool) {
// different parts of the regular expression that are required to parse the srvr output
const (
zrVer = `^Zookeeper version: ([A-Za-z0-9\.\-]+), built on (\d\d/\d\d/\d\d\d\d \d\d:\d\d [A-Za-z0-9:\+\-]+)`
- zrLat = `^Latency min/avg/max: (\d+)/(\d+)/(\d+)`
+ zrLat = `^Latency min/avg/max: (\d+)/([0-9.]+)/(\d+)`
zrNet = `^Received: (\d+).*\n^Sent: (\d+).*\n^Connections: (\d+).*\n^Outstanding: (\d+)`
zrState = `^Zxid: (0x[A-Za-z0-9]+).*\n^Mode: (\w+).*\n^Node count: (\d+)`
)
@@ -97,7 +97,7 @@ func FLWSrvr(servers []string, timeout time.Duration) ([]*ServerStats, bool) {
// within the regex above, these values must be numerical
// so we can avoid useless checking of the error return value
minLatency, _ := strconv.ParseInt(match[2], 0, 64)
- avgLatency, _ := strconv.ParseInt(match[3], 0, 64)
+ avgLatency, _ := strconv.ParseFloat(match[3], 64)
maxLatency, _ := strconv.ParseInt(match[4], 0, 64)
recv, _ := strconv.ParseInt(match[5], 0, 64)
sent, _ := strconv.ParseInt(match[6], 0, 64)
@@ -255,16 +255,12 @@ func fourLetterWord(server, command string, timeout time.Duration) ([]byte, erro
// once the command has been processed, but better safe than sorry
defer conn.Close()
- if err := conn.SetWriteDeadline(time.Now().Add(timeout)); err != nil {
- return nil, err
- }
+ conn.SetWriteDeadline(time.Now().Add(timeout))
_, err = conn.Write([]byte(command))
if err != nil {
return nil, err
}
- if err := conn.SetReadDeadline(time.Now().Add(timeout)); err != nil {
- return nil, err
- }
+ conn.SetReadDeadline(time.Now().Add(timeout))
return ioutil.ReadAll(conn)
}
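With AvgLatency parsed via ParseFloat, srvr output such as `Latency min/avg/max: 0/0.5/12` (which newer servers emit) no longer breaks the four-letter-word stats. A short sketch of reading the field, assuming a local ensemble member:

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-zookeeper/zk"
)

func main() {
	// FLWSrvr reports ok=false if any server failed to answer "srvr".
	stats, ok := zk.FLWSrvr([]string{"127.0.0.1:2181"}, time.Second)
	if !ok {
		fmt.Println("warning: at least one server did not respond cleanly")
	}
	for _, s := range stats {
		if s.Error != nil {
			fmt.Println("error:", s.Error)
			continue
		}
		// AvgLatency is a float64 after this change, so fractional
		// averages survive instead of failing the regexp match.
		fmt.Printf("mode=%v avg_latency=%.2fms\n", s.Mode, s.AvgLatency)
	}
}
```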
diff --git a/vendor/github.com/go-zookeeper/zk/go.mod b/vendor/github.com/go-zookeeper/zk/go.mod
new file mode 100644
index 0000000000000..a2662730be783
--- /dev/null
+++ b/vendor/github.com/go-zookeeper/zk/go.mod
@@ -0,0 +1,3 @@
+module github.com/go-zookeeper/zk
+
+go 1.13
diff --git a/vendor/github.com/samuel/go-zookeeper/zk/lock.go b/vendor/github.com/go-zookeeper/zk/lock.go
similarity index 82%
rename from vendor/github.com/samuel/go-zookeeper/zk/lock.go
rename to vendor/github.com/go-zookeeper/zk/lock.go
index 3c35a427c8dee..33a6ecda30ab7 100644
--- a/vendor/github.com/samuel/go-zookeeper/zk/lock.go
+++ b/vendor/github.com/go-zookeeper/zk/lock.go
@@ -36,13 +36,23 @@ func NewLock(c *Conn, path string, acl []ACL) *Lock {
func parseSeq(path string) (int, error) {
parts := strings.Split(path, "-")
+ // the Python client uses a __LOCK__ prefix
+ if len(parts) == 1 {
+ parts = strings.Split(path, "__")
+ }
return strconv.Atoi(parts[len(parts)-1])
}
-// Lock attempts to acquire the lock. It will wait to return until the lock
-// is acquired or an error occurs. If this instance already has the lock
-// then ErrDeadlock is returned.
+// Lock attempts to acquire the lock. It works like LockWithData, but it doesn't
+// write any data to the lock node.
func (l *Lock) Lock() error {
+ return l.LockWithData([]byte{})
+}
+
+// LockWithData attempts to acquire the lock, writing data into the lock node.
+// It will wait to return until the lock is acquired or an error occurs. If
+// this instance already has the lock then ErrDeadlock is returned.
+func (l *Lock) LockWithData(data []byte) error {
if l.lockPath != "" {
return ErrDeadlock
}
@@ -52,7 +62,7 @@ func (l *Lock) Lock() error {
path := ""
var err error
for i := 0; i < 3; i++ {
- path, err = l.c.CreateProtectedEphemeralSequential(prefix, []byte{}, l.acl)
+ path, err = l.c.CreateProtectedEphemeralSequential(prefix, data, l.acl)
if err == ErrNoNode {
// Create parent node.
parts := strings.Split(l.path, "/")
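LockWithData stores caller-supplied bytes in the ephemeral lock node, which is handy for recording the current holder. A minimal sketch, assuming a local server and a hypothetical lock path:

```go
package main

import (
	"log"
	"os"
	"time"

	"github.com/go-zookeeper/zk"
)

func main() {
	conn, _, err := zk.Connect([]string{"127.0.0.1:2181"}, time.Second)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	l := zk.NewLock(conn, "/locks/migration", zk.WorldACL(zk.PermAll))

	// Store the holder's hostname in the lock node so an operator can
	// identify the owner with a plain read of the znode.
	host, _ := os.Hostname()
	if err := l.LockWithData([]byte(host)); err != nil {
		log.Fatal(err)
	}
	defer l.Unlock()

	// ... critical section ...
}
```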
diff --git a/vendor/github.com/samuel/go-zookeeper/zk/structs.go b/vendor/github.com/go-zookeeper/zk/structs.go
similarity index 97%
rename from vendor/github.com/samuel/go-zookeeper/zk/structs.go
rename to vendor/github.com/go-zookeeper/zk/structs.go
index 9400c3c0b7089..e41d8c527c538 100644
--- a/vendor/github.com/samuel/go-zookeeper/zk/structs.go
+++ b/vendor/github.com/go-zookeeper/zk/structs.go
@@ -78,7 +78,7 @@ type ServerStats struct {
Received int64
NodeCount int64
MinLatency int64
- AvgLatency int64
+ AvgLatency float64
MaxLatency int64
Connections int64
Outstanding int64
@@ -165,6 +165,16 @@ type CreateRequest struct {
Flags int32
}
+type CreateContainerRequest CreateRequest
+
+type CreateTTLRequest struct {
+ Path string
+ Data []byte
+ Acl []ACL
+ Flags int32
+ Ttl int64 // ms
+}
+
type createResponse pathResponse
type DeleteRequest PathVersionRequest
type deleteResponse struct{}
@@ -589,6 +599,10 @@ func requestStructForOp(op int32) interface{} {
return &closeRequest{}
case opCreate:
return &CreateRequest{}
+ case opCreateContainer:
+ return &CreateContainerRequest{}
+ case opCreateTTL:
+ return &CreateTTLRequest{}
case opDelete:
return &DeleteRequest{}
case opExists:
diff --git a/vendor/github.com/samuel/go-zookeeper/zk/util.go b/vendor/github.com/go-zookeeper/zk/util.go
similarity index 93%
rename from vendor/github.com/samuel/go-zookeeper/zk/util.go
rename to vendor/github.com/go-zookeeper/zk/util.go
index f40a5b15612c2..5a92b66baf353 100644
--- a/vendor/github.com/samuel/go-zookeeper/zk/util.go
+++ b/vendor/github.com/go-zookeeper/zk/util.go
@@ -38,12 +38,15 @@ func DigestACL(perms int32, user, password string) []ACL {
// that resembles <addr>:<port>. If the server has no port provided, the
// DefaultPort constant is added to the end.
func FormatServers(servers []string) []string {
- for i := range servers {
- if !strings.Contains(servers[i], ":") {
- servers[i] = servers[i] + ":" + strconv.Itoa(DefaultPort)
+ srvs := make([]string, len(servers))
+ for i, addr := range servers {
+ if strings.Contains(addr, ":") {
+ srvs[i] = addr
+ } else {
+ srvs[i] = addr + ":" + strconv.Itoa(DefaultPort)
}
}
- return servers
+ return srvs
}
// stringShuffle performs a Fisher-Yates shuffle on a slice of strings
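FormatServers now returns a fresh slice instead of appending the default port into the caller's backing array, which used to be a surprising mutation. A small sketch:

```go
package main

import (
	"fmt"

	"github.com/go-zookeeper/zk"
)

func main() {
	seed := []string{"zk1.example.com", "zk2.example.com:2182"}
	srvs := zk.FormatServers(seed)

	// The input slice is no longer mutated in place.
	fmt.Println(seed) // [zk1.example.com zk2.example.com:2182]
	fmt.Println(srvs) // [zk1.example.com:2181 zk2.example.com:2182]
}
```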
diff --git a/vendor/github.com/prometheus/alertmanager/cluster/cluster.go b/vendor/github.com/prometheus/alertmanager/cluster/cluster.go
index 04e66789bc239..a12beb95752b4 100644
--- a/vendor/github.com/prometheus/alertmanager/cluster/cluster.go
+++ b/vendor/github.com/prometheus/alertmanager/cluster/cluster.go
@@ -584,8 +584,13 @@ func (p *Peer) Ready() bool {
}
// Wait until Settle() has finished.
-func (p *Peer) WaitReady() {
- <-p.readyc
+func (p *Peer) WaitReady(ctx context.Context) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-p.readyc:
+ return nil
+ }
}
// Return a status string representing the peer state.
diff --git a/vendor/github.com/prometheus/alertmanager/notify/notify.go b/vendor/github.com/prometheus/alertmanager/notify/notify.go
index 3dcc30f6ec14f..2f2205c9e8b07 100644
--- a/vendor/github.com/prometheus/alertmanager/notify/notify.go
+++ b/vendor/github.com/prometheus/alertmanager/notify/notify.go
@@ -44,7 +44,7 @@ type ResolvedSender interface {
// Peer represents the cluster node from where we are the sending the notification.
type Peer interface {
// WaitReady waits until the node silences and notifications have settled before attempting to send a notification.
- WaitReady()
+ WaitReady(context.Context) error
}
// MinTimeout is the minimum timeout that is set for the context of a call
@@ -430,7 +430,9 @@ func NewGossipSettleStage(p Peer) *GossipSettleStage {
func (n *GossipSettleStage) Exec(ctx context.Context, _ log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
if n.peer != nil {
- n.peer.WaitReady()
+ if err := n.peer.WaitReady(ctx); err != nil {
+ return ctx, nil, err
+ }
}
return ctx, alerts, nil
}
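Because Peer.WaitReady now takes a context, waiting for gossip to settle can be abandoned when the notification pipeline's context expires instead of blocking forever. A self-contained sketch of the new contract; fakePeer is a hypothetical stand-in (the real cluster.Peer closes its ready channel once Settle() finishes):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// fakePeer settles after a delay by closing its ready channel.
type fakePeer struct {
	ready chan struct{}
}

// WaitReady honors cancellation, matching the updated Peer interface.
func (p *fakePeer) WaitReady(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-p.ready:
		return nil
	}
}

func main() {
	p := &fakePeer{ready: make(chan struct{})}
	time.AfterFunc(50*time.Millisecond, func() { close(p.ready) })

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	fmt.Println(p.WaitReady(ctx)) // <nil> once the peer settles in time
}
```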
diff --git a/vendor/github.com/prometheus/prometheus/NOTICE b/vendor/github.com/prometheus/prometheus/NOTICE
index 5e4f509896b86..7c0e4c1020726 100644
--- a/vendor/github.com/prometheus/prometheus/NOTICE
+++ b/vendor/github.com/prometheus/prometheus/NOTICE
@@ -92,7 +92,7 @@ Copyright (c) 2015,2016 Damian Gryski <[email protected]>
See https://github.com/dgryski/go-tsz/blob/master/LICENSE for license details.
We also use code from a large number of npm packages. For details, see:
-- https://github.com/prometheus/prometheus/blob/master/web/ui/react-app/package.json
-- https://github.com/prometheus/prometheus/blob/master/web/ui/react-app/package-lock.json
+- https://github.com/prometheus/prometheus/blob/main/web/ui/react-app/package.json
+- https://github.com/prometheus/prometheus/blob/main/web/ui/react-app/package-lock.json
- The individual package licenses as copied from the node_modules directory can be found in
the npm_licenses.tar.bz2 archive in release tarballs and Docker images.
diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go
index 89e2a6d2e0156..419c26a945ecf 100644
--- a/vendor/github.com/prometheus/prometheus/config/config.go
+++ b/vendor/github.com/prometheus/prometheus/config/config.go
@@ -33,21 +33,29 @@ import (
)
var (
- patRulePath = regexp.MustCompile(`^[^*]*(\*[^/]*)?$`)
- unchangeableHeaders = map[string]struct{}{
+ patRulePath = regexp.MustCompile(`^[^*]*(\*[^/]*)?$`)
+ reservedHeaders = map[string]struct{}{
// NOTE: authorization is checked specially,
// see RemoteWriteConfig.UnmarshalYAML.
// "authorization": {},
"host": {},
"content-encoding": {},
+ "content-length": {},
"content-type": {},
- "x-prometheus-remote-write-version": {},
"user-agent": {},
"connection": {},
"keep-alive": {},
"proxy-authenticate": {},
"proxy-authorization": {},
"www-authenticate": {},
+ "accept-encoding": {},
+ "x-prometheus-remote-write-version": {},
+ "x-prometheus-remote-read-version": {},
+
+ // Added by SigV4.
+ "x-amz-date": {},
+ "x-amz-security-token": {},
+ "x-amz-content-sha256": {},
}
)
@@ -98,24 +106,27 @@ var (
DefaultScrapeConfig = ScrapeConfig{
// ScrapeTimeout and ScrapeInterval default to the
// configured globals.
- MetricsPath: "/metrics",
- Scheme: "http",
- HonorLabels: false,
- HonorTimestamps: true,
+ MetricsPath: "/metrics",
+ Scheme: "http",
+ HonorLabels: false,
+ HonorTimestamps: true,
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
}
// DefaultAlertmanagerConfig is the default alertmanager configuration.
DefaultAlertmanagerConfig = AlertmanagerConfig{
- Scheme: "http",
- Timeout: model.Duration(10 * time.Second),
- APIVersion: AlertmanagerAPIVersionV1,
+ Scheme: "http",
+ Timeout: model.Duration(10 * time.Second),
+ APIVersion: AlertmanagerAPIVersionV2,
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
}
// DefaultRemoteWriteConfig is the default remote write configuration.
DefaultRemoteWriteConfig = RemoteWriteConfig{
- RemoteTimeout: model.Duration(30 * time.Second),
- QueueConfig: DefaultQueueConfig,
- MetadataConfig: DefaultMetadataConfig,
+ RemoteTimeout: model.Duration(30 * time.Second),
+ QueueConfig: DefaultQueueConfig,
+ MetadataConfig: DefaultMetadataConfig,
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
}
// DefaultQueueConfig is the default remote queue configuration.
@@ -145,7 +156,8 @@ var (
// DefaultRemoteReadConfig is the default remote read configuration.
DefaultRemoteReadConfig = RemoteReadConfig{
- RemoteTimeout: model.Duration(1 * time.Minute),
+ RemoteTimeout: model.Duration(1 * time.Minute),
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
}
)
@@ -594,6 +606,7 @@ type RemoteWriteConfig struct {
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
QueueConfig QueueConfig `yaml:"queue_config,omitempty"`
MetadataConfig MetadataConfig `yaml:"metadata_config,omitempty"`
+ SigV4Config *SigV4Config `yaml:"sigv4,omitempty"`
}
// SetDirectory joins any relative file paths with dir.
@@ -616,19 +629,37 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
return errors.New("empty or null relabeling rule in remote write config")
}
}
- for header := range c.Headers {
- if strings.ToLower(header) == "authorization" {
- return errors.New("authorization header must be changed via the basic_auth, bearer_token, or bearer_token_file parameter")
- }
- if _, ok := unchangeableHeaders[strings.ToLower(header)]; ok {
- return errors.Errorf("%s is an unchangeable header", header)
- }
+ if err := validateHeaders(c.Headers); err != nil {
+ return err
}
// The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer.
// We cannot make it a pointer as the parser panics for inlined pointer structs.
// Thus we just do its validation here.
- return c.HTTPClientConfig.Validate()
+ if err := c.HTTPClientConfig.Validate(); err != nil {
+ return err
+ }
+
+ httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil ||
+ c.HTTPClientConfig.Authorization != nil
+
+ if httpClientConfigAuthEnabled && c.SigV4Config != nil {
+ return fmt.Errorf("at most one of basic_auth, authorization, & sigv4 must be configured")
+ }
+
+ return nil
+}
+
+func validateHeaders(headers map[string]string) error {
+ for header := range headers {
+ if strings.ToLower(header) == "authorization" {
+ return errors.New("authorization header must be changed via the basic_auth or authorization parameter")
+ }
+ if _, ok := reservedHeaders[strings.ToLower(header)]; ok {
+ return errors.Errorf("%s is a reserved header. It must not be changed", header)
+ }
+ }
+ return nil
}
// QueueConfig is the configuration for the queue used to write to remote
@@ -651,8 +682,9 @@ type QueueConfig struct {
BatchSendDeadline model.Duration `yaml:"batch_send_deadline,omitempty"`
// On recoverable errors, backoff exponentially.
- MinBackoff model.Duration `yaml:"min_backoff,omitempty"`
- MaxBackoff model.Duration `yaml:"max_backoff,omitempty"`
+ MinBackoff model.Duration `yaml:"min_backoff,omitempty"`
+ MaxBackoff model.Duration `yaml:"max_backoff,omitempty"`
+ RetryOnRateLimit bool `yaml:"retry_on_http_429,omitempty"`
}
// MetadataConfig is the configuration for sending metadata to remote
@@ -664,12 +696,24 @@ type MetadataConfig struct {
SendInterval model.Duration `yaml:"send_interval"`
}
+// SigV4Config is the configuration for signing remote write requests with
+// AWS's SigV4 verification process. Empty values will be retrieved using the
+// AWS default credentials chain.
+type SigV4Config struct {
+ Region string `yaml:"region,omitempty"`
+ AccessKey string `yaml:"access_key,omitempty"`
+ SecretKey config.Secret `yaml:"secret_key,omitempty"`
+ Profile string `yaml:"profile,omitempty"`
+ RoleARN string `yaml:"role_arn,omitempty"`
+}
+
// RemoteReadConfig is the configuration for reading from remote storage.
type RemoteReadConfig struct {
- URL *config.URL `yaml:"url"`
- RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"`
- ReadRecent bool `yaml:"read_recent,omitempty"`
- Name string `yaml:"name,omitempty"`
+ URL *config.URL `yaml:"url"`
+ RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"`
+ Headers map[string]string `yaml:"headers,omitempty"`
+ ReadRecent bool `yaml:"read_recent,omitempty"`
+ Name string `yaml:"name,omitempty"`
// We cannot do proper Go type embedding below as the parser will then parse
// values arbitrarily into the overflow maps of further-down types.
@@ -695,6 +739,9 @@ func (c *RemoteReadConfig) UnmarshalYAML(unmarshal func(interface{}) error) erro
if c.URL == nil {
return errors.New("url for remote_read is empty")
}
+ if err := validateHeaders(c.Headers); err != nil {
+ return err
+ }
// The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer.
// We cannot make it a pointer as the parser panics for inlined pointer structs.
// Thus we just do its validation here.
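UnmarshalYAML now enforces both the widened reserved-header list and the mutual exclusion between SigV4 and the other authentication mechanisms. A hedged sketch, assuming the vendored gopkg.in/yaml.v2 and the config package path from this diff:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"

	"github.com/prometheus/prometheus/config"
)

func main() {
	raw := `
url: http://remote.example:9201/api/v1/write
basic_auth:
  username: writer
  password: secret
sigv4:
  region: us-east-2
`
	var rw config.RemoteWriteConfig
	// Unmarshalling triggers the new validation, so this combination is
	// expected to be rejected with the "at most one of basic_auth,
	// authorization, & sigv4" error.
	fmt.Println(yaml.Unmarshal([]byte(raw), &rw))
}
```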
diff --git a/vendor/github.com/prometheus/prometheus/discovery/README.md b/vendor/github.com/prometheus/prometheus/discovery/README.md
index f7d7120ddd5b3..9431876c7ef1e 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/README.md
+++ b/vendor/github.com/prometheus/prometheus/discovery/README.md
@@ -259,3 +259,9 @@ Here are some non-obvious parts of adding service discoveries that need to be ve
`<alertmanager_config>` in `docs/configuration/configuration.md`.
<!-- TODO: Add best-practices -->
+
+### Examples of Service Discovery pull requests
+
+The examples given might become out of date but should give a good impression of the areas touched by a new service discovery.
+
+- [Eureka](https://github.com/prometheus/prometheus/pull/3369)
diff --git a/vendor/github.com/prometheus/prometheus/discovery/digitalocean/digitalocean.go b/vendor/github.com/prometheus/prometheus/discovery/digitalocean/digitalocean.go
index 25436e2e41b60..412191758e2ac 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/digitalocean/digitalocean.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/digitalocean/digitalocean.go
@@ -38,6 +38,7 @@ const (
doLabelID = doLabel + "droplet_id"
doLabelName = doLabel + "droplet_name"
doLabelImage = doLabel + "image"
+ doLabelImageName = doLabel + "image_name"
doLabelPrivateIPv4 = doLabel + "private_ipv4"
doLabelPublicIPv4 = doLabel + "public_ipv4"
doLabelPublicIPv6 = doLabel + "public_ipv6"
@@ -51,8 +52,9 @@ const (
// DefaultSDConfig is the default DigitalOcean SD configuration.
var DefaultSDConfig = SDConfig{
- Port: 80,
- RefreshInterval: model.Duration(60 * time.Second),
+ Port: 80,
+ RefreshInterval: model.Duration(60 * time.Second),
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
}
func init() {
@@ -88,7 +90,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err != nil {
return err
}
- return nil
+ return c.HTTPClientConfig.Validate()
}
// Discovery periodically performs DigitalOcean requests. It implements
@@ -161,6 +163,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
doLabelID: model.LabelValue(fmt.Sprintf("%d", droplet.ID)),
doLabelName: model.LabelValue(droplet.Name),
doLabelImage: model.LabelValue(droplet.Image.Slug),
+ doLabelImageName: model.LabelValue(droplet.Image.Name),
doLabelPrivateIPv4: model.LabelValue(privateIPv4),
doLabelPublicIPv4: model.LabelValue(publicIPv4),
doLabelPublicIPv6: model.LabelValue(publicIPv6),
diff --git a/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/dockerswarm.go b/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/dockerswarm.go
index 2e0b477cd7785..c9cf1d115a137 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/dockerswarm.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/dockerswarm.go
@@ -40,9 +40,10 @@ var userAgent = fmt.Sprintf("Prometheus/%s", version.Version)
// DefaultSDConfig is the default Docker Swarm SD configuration.
var DefaultSDConfig = SDConfig{
- RefreshInterval: model.Duration(60 * time.Second),
- Port: 80,
- Filters: []Filter{},
+ RefreshInterval: model.Duration(60 * time.Second),
+ Port: 80,
+ Filters: []Filter{},
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
}
func init() {
@@ -102,7 +103,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
default:
return fmt.Errorf("invalid role %s, expected tasks, services, or nodes", c.Role)
}
- return nil
+ return c.HTTPClientConfig.Validate()
}
// Discovery periodically performs Docker Swarm requests. It implements
diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go
index 0183d061f315c..d7595b5024cbc 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go
@@ -66,7 +66,9 @@ var (
[]string{"role", "event"},
)
// DefaultSDConfig is the default Kubernetes SD configuration
- DefaultSDConfig = SDConfig{}
+ DefaultSDConfig = SDConfig{
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
+ }
)
func init() {
@@ -152,7 +154,7 @@ type resourceSelector struct {
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
- *c = SDConfig{}
+ *c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))
if err != nil {
@@ -165,7 +167,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err != nil {
return err
}
- if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config.HTTPClientConfig{}) {
+ if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) {
return errors.Errorf("to use custom HTTP client configuration please provide the 'api_server' URL explicitly")
}
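Several of these SD configs now seed themselves from their package-level default inside UnmarshalYAML (note the `*c = DefaultSDConfig` change above), so keys absent from the YAML keep their defaults rather than decaying to Go zero values. A simplified, self-contained sketch of the pattern; the field names are illustrative, not the real SDConfig:

```go
package sdexample

// SDConfig is a toy stand-in for the real discovery configs.
type SDConfig struct {
	Port            int    `yaml:"port"`
	RefreshInterval string `yaml:"refresh_interval"`
}

var DefaultSDConfig = SDConfig{Port: 80, RefreshInterval: "60s"}

// UnmarshalYAML seeds the receiver with the defaults first, so any key
// absent from the YAML keeps its default instead of the zero value.
// The plain alias drops the method set to avoid infinite recursion.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
	*c = DefaultSDConfig
	type plain SDConfig
	return unmarshal((*plain)(c))
}
```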
diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/pod.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/pod.go
index 86fa31f7f6adf..751bc515a7fa4 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/pod.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/pod.go
@@ -124,12 +124,12 @@ func (p *Pod) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool
send(ctx, ch, &targetgroup.Group{Source: podSourceFromNamespaceAndName(namespace, name)})
return true
}
- eps, err := convertToPod(o)
+ pod, err := convertToPod(o)
if err != nil {
level.Error(p.logger).Log("msg", "converting to Pod object failed", "err", err)
return true
}
- send(ctx, ch, p.buildPod(eps))
+ send(ctx, ch, p.buildPod(pod))
return true
}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/marathon/marathon.go b/vendor/github.com/prometheus/prometheus/discovery/marathon/marathon.go
index efd4769e3afc6..016906910e4c2 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/marathon/marathon.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/marathon/marathon.go
@@ -61,7 +61,8 @@ const (
// DefaultSDConfig is the default Marathon SD configuration.
var DefaultSDConfig = SDConfig{
- RefreshInterval: model.Duration(30 * time.Second),
+ RefreshInterval: model.Duration(30 * time.Second),
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
}
func init() {
@@ -111,6 +112,9 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if (len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0) && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) {
return errors.New("marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured")
}
+ if c.HTTPClientConfig.Authorization != nil && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) {
+ return errors.New("marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured")
+ }
return c.HTTPClientConfig.Validate()
}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/zookeeper/zookeeper.go b/vendor/github.com/prometheus/prometheus/discovery/zookeeper/zookeeper.go
index 09edf6e0e780d..856ae35fdfa2c 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/zookeeper/zookeeper.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/zookeeper/zookeeper.go
@@ -23,9 +23,9 @@ import (
"time"
"github.com/go-kit/kit/log"
+ "github.com/go-zookeeper/zk"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
- "github.com/samuel/go-zookeeper/zk"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup"
diff --git a/vendor/github.com/prometheus/prometheus/pkg/exemplar/exemplar.go b/vendor/github.com/prometheus/prometheus/pkg/exemplar/exemplar.go
index c6ea0db94da19..8e3e01b5c90f2 100644
--- a/vendor/github.com/prometheus/prometheus/pkg/exemplar/exemplar.go
+++ b/vendor/github.com/prometheus/prometheus/pkg/exemplar/exemplar.go
@@ -22,3 +22,25 @@ type Exemplar struct {
HasTs bool
Ts int64
}
+
+type QueryResult struct {
+ SeriesLabels labels.Labels `json:"seriesLabels"`
+ Exemplars []Exemplar `json:"exemplars"`
+}
+
+// Equals compares if the exemplar e is the same as e2. Note that if HasTs is false for
+// both exemplars then the timestamps will be ignored for the comparison. This can come up
+// when an exemplar is exported without its own timestamp, in which case the scrape timestamp
+// is assigned to the Ts field. However, we still want to treat the same exemplar, scraped without
+// an exported timestamp, as a duplicate of itself for each subsequent scrape.
+func (e Exemplar) Equals(e2 Exemplar) bool {
+ if !labels.Equal(e.Labels, e2.Labels) {
+ return false
+ }
+
+ if (e.HasTs || e2.HasTs) && e.Ts != e2.Ts {
+ return false
+ }
+
+ return e.Value == e2.Value
+}
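The Equals semantics above are what make deduplication of timestamp-less exemplars possible: the scrape-assigned Ts differs between scrapes, but it is ignored while both HasTs flags are false. A small sketch, assuming the vendored package paths:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/pkg/exemplar"
	"github.com/prometheus/prometheus/pkg/labels"
)

func main() {
	lbls := labels.FromStrings("trace_id", "abc123")

	// The same exemplar scraped twice without a native timestamp: the
	// scrape timestamps differ, but both HasTs are false, so Ts is ignored.
	a := exemplar.Exemplar{Labels: lbls, Value: 0.5, Ts: 1000, HasTs: false}
	b := exemplar.Exemplar{Labels: lbls, Value: 0.5, Ts: 2000, HasTs: false}
	fmt.Println(a.Equals(b)) // true

	// An exported timestamp makes Ts significant again.
	b.HasTs = true
	fmt.Println(a.Equals(b)) // false
}
```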
diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go
index 2ed1014419056..5c9caebada372 100644
--- a/vendor/github.com/prometheus/prometheus/promql/engine.go
+++ b/vendor/github.com/prometheus/prometheus/promql/engine.go
@@ -19,6 +19,7 @@ import (
"context"
"fmt"
"math"
+ "reflect"
"regexp"
"runtime"
"sort"
@@ -211,6 +212,9 @@ type EngineOpts struct {
// EnableAtModifier if true enables @ modifier. Disabled otherwise.
EnableAtModifier bool
+
+ // EnableNegativeOffset if true enables negative (-) offset values. Disabled otherwise.
+ EnableNegativeOffset bool
}
// Engine handles the lifetime of queries from beginning to end.
@@ -226,6 +230,7 @@ type Engine struct {
lookbackDelta time.Duration
noStepSubqueryIntervalFn func(rangeMillis int64) int64
enableAtModifier bool
+ enableNegativeOffset bool
}
// NewEngine returns a new engine.
@@ -307,6 +312,7 @@ func NewEngine(opts EngineOpts) *Engine {
lookbackDelta: opts.LookbackDelta,
noStepSubqueryIntervalFn: opts.NoStepSubqueryIntervalFn,
enableAtModifier: opts.EnableAtModifier,
+ enableNegativeOffset: opts.EnableNegativeOffset,
}
}
@@ -388,34 +394,53 @@ func (ng *Engine) newQuery(q storage.Queryable, expr parser.Expr, start, end tim
}
var ErrValidationAtModifierDisabled = errors.New("@ modifier is disabled")
+var ErrValidationNegativeOffsetDisabled = errors.New("negative offset is disabled")
func (ng *Engine) validateOpts(expr parser.Expr) error {
- if ng.enableAtModifier {
+ if ng.enableAtModifier && ng.enableNegativeOffset {
return nil
}
+ var atModifierUsed, negativeOffsetUsed bool
+
var validationErr error
parser.Inspect(expr, func(node parser.Node, path []parser.Node) error {
switch n := node.(type) {
case *parser.VectorSelector:
if n.Timestamp != nil || n.StartOrEnd == parser.START || n.StartOrEnd == parser.END {
- validationErr = ErrValidationAtModifierDisabled
- return validationErr
+ atModifierUsed = true
+ }
+ if n.OriginalOffset < 0 {
+ negativeOffsetUsed = true
}
case *parser.MatrixSelector:
vs := n.VectorSelector.(*parser.VectorSelector)
if vs.Timestamp != nil || vs.StartOrEnd == parser.START || vs.StartOrEnd == parser.END {
- validationErr = ErrValidationAtModifierDisabled
- return validationErr
+ atModifierUsed = true
+ }
+ if vs.OriginalOffset < 0 {
+ negativeOffsetUsed = true
}
case *parser.SubqueryExpr:
if n.Timestamp != nil || n.StartOrEnd == parser.START || n.StartOrEnd == parser.END {
- validationErr = ErrValidationAtModifierDisabled
- return validationErr
+ atModifierUsed = true
+ }
+ if n.OriginalOffset < 0 {
+ negativeOffsetUsed = true
}
}
+
+ if atModifierUsed && !ng.enableAtModifier {
+ validationErr = ErrValidationAtModifierDisabled
+ return validationErr
+ }
+ if negativeOffsetUsed && !ng.enableNegativeOffset {
+ validationErr = ErrValidationNegativeOffsetDisabled
+ return validationErr
+ }
+
return nil
})
@@ -877,6 +902,12 @@ func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws storage.Warnings
return v, ws, nil
}
+// EvalSeriesHelper stores extra information about a series.
+type EvalSeriesHelper struct {
+ // The grouping key used by aggregation.
+ groupingKey uint64
+}
+
// EvalNodeHelper stores extra information and caches for evaluating a single node across steps.
type EvalNodeHelper struct {
// Evaluation timestamp.
@@ -937,10 +968,12 @@ func (enh *EvalNodeHelper) signatureFunc(on bool, names ...string) func(labels.L
}
// rangeEval evaluates the given expressions, and then for each step calls
-// the given function with the values computed for each expression at that
-// step. The return value is the combination into time series of all the
+// the given funcCall with the values computed for each expression at that
+// step. The return value is the combination into time series of all the
// function call results.
-func (ev *evaluator) rangeEval(funcCall func([]parser.Value, *EvalNodeHelper) (Vector, storage.Warnings), exprs ...parser.Expr) (Matrix, storage.Warnings) {
+// The prepSeries function (if provided) can be used to prepare the helper
+// for each series; the prepared helpers are then passed to each funcCall invocation.
+func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, storage.Warnings), exprs ...parser.Expr) (Matrix, storage.Warnings) {
numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1
matrixes := make([]Matrix, len(exprs))
origMatrixes := make([]Matrix, len(exprs))
@@ -976,6 +1009,30 @@ func (ev *evaluator) rangeEval(funcCall func([]parser.Value, *EvalNodeHelper) (V
enh := &EvalNodeHelper{Out: make(Vector, 0, biggestLen)}
seriess := make(map[uint64]Series, biggestLen) // Output series by series hash.
tempNumSamples := ev.currentSamples
+
+ var (
+ seriesHelpers [][]EvalSeriesHelper
+ bufHelpers [][]EvalSeriesHelper // Buffer updated on each step
+ )
+
+ // If the series preparation function is provided, we should run it for
+ // every single series in the matrix.
+ if prepSeries != nil {
+ seriesHelpers = make([][]EvalSeriesHelper, len(exprs))
+ bufHelpers = make([][]EvalSeriesHelper, len(exprs))
+
+ for i := range exprs {
+ seriesHelpers[i] = make([]EvalSeriesHelper, len(matrixes[i]))
+ bufHelpers[i] = make([]EvalSeriesHelper, len(matrixes[i]))
+
+ for si, series := range matrixes[i] {
+ h := seriesHelpers[i][si]
+ prepSeries(series.Metric, &h)
+ seriesHelpers[i][si] = h
+ }
+ }
+ }
+
for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval {
if err := contextDone(ev.ctx, "expression evaluation"); err != nil {
ev.error(err)
@@ -985,11 +1042,20 @@ func (ev *evaluator) rangeEval(funcCall func([]parser.Value, *EvalNodeHelper) (V
// Gather input vectors for this timestamp.
for i := range exprs {
vectors[i] = vectors[i][:0]
+
+ if prepSeries != nil {
+ bufHelpers[i] = bufHelpers[i][:0]
+ }
+
for si, series := range matrixes[i] {
for _, point := range series.Points {
if point.T == ts {
if ev.currentSamples < ev.maxSamples {
vectors[i] = append(vectors[i], Sample{Metric: series.Metric, Point: point})
+ if prepSeries != nil {
+ bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si])
+ }
+
// Move input vectors forward so we don't have to re-scan the same
// past points at the next step.
matrixes[i][si].Points = series.Points[1:]
@@ -1003,9 +1069,10 @@ func (ev *evaluator) rangeEval(funcCall func([]parser.Value, *EvalNodeHelper) (V
}
args[i] = vectors[i]
}
+
// Make the function call.
enh.Ts = ts
- result, ws := funcCall(args, enh)
+ result, ws := funcCall(args, bufHelpers, enh)
if result.ContainsSameLabelset() {
ev.errorf("vector cannot contain metrics with the same labelset")
}
@@ -1101,20 +1168,35 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
}
numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1
+ // Create a new span to help investigate inner evaluation performances.
+ span, _ := opentracing.StartSpanFromContext(ev.ctx, stats.InnerEvalTime.SpanOperation()+" eval "+reflect.TypeOf(expr).String())
+ defer span.Finish()
+
switch e := expr.(type) {
case *parser.AggregateExpr:
+ // Grouping labels must be sorted (expected both by generateGroupingKey() and aggregation()).
+ sortedGrouping := e.Grouping
+ sort.Strings(sortedGrouping)
+
+ // Prepare a function to initialise series helpers with the grouping key.
+ buf := make([]byte, 0, 1024)
+ initSeries := func(series labels.Labels, h *EvalSeriesHelper) {
+ h.groupingKey, buf = generateGroupingKey(series, sortedGrouping, e.Without, buf)
+ }
+
unwrapParenExpr(&e.Param)
if s, ok := unwrapStepInvariantExpr(e.Param).(*parser.StringLiteral); ok {
- return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
- return ev.aggregation(e.Op, e.Grouping, e.Without, s.Val, v[0].(Vector), enh), nil
+ return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+ return ev.aggregation(e.Op, sortedGrouping, e.Without, s.Val, v[0].(Vector), sh[0], enh), nil
}, e.Expr)
}
- return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+
+ return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
var param float64
if e.Param != nil {
param = v[0].(Vector)[0].V
}
- return ev.aggregation(e.Op, e.Grouping, e.Without, param, v[1].(Vector), enh), nil
+ return ev.aggregation(e.Op, sortedGrouping, e.Without, param, v[1].(Vector), sh[1], enh), nil
}, e.Param, e.Expr)
case *parser.Call:
@@ -1127,7 +1209,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
arg := unwrapStepInvariantExpr(e.Args[0])
vs, ok := arg.(*parser.VectorSelector)
if ok {
- return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+ return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
if vs.Timestamp != nil {
// This is a special case only for "timestamp" since the offset
// needs to be adjusted for every point.
@@ -1171,7 +1253,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
}
if !matrixArg {
// Does not have a matrix argument.
- return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+ return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return call(v, e.Args, enh), warnings
}, e.Args...)
}
@@ -1216,11 +1298,16 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
ev.currentSamples -= len(points)
points = points[:0]
it.Reset(s.Iterator())
+ metric := selVS.Series[i].Labels()
+ // The last_over_time function acts like offset; thus, it
+ // should keep the metric name. For all the other range
+ // vector functions, the only change needed is to drop the
+ // metric name in the output.
+ if e.Func.Name != "last_over_time" {
+ metric = dropMetricName(metric)
+ }
ss := Series{
- // For all range vector functions, the only change to the
- // output labels is dropping the metric name so just do
- // it once here.
- Metric: dropMetricName(selVS.Series[i].Labels()),
+ Metric: metric,
Points: getPointSlice(numSteps),
}
inMatrix[0].Metric = selVS.Series[i].Labels()
@@ -1333,43 +1420,43 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
case *parser.BinaryExpr:
switch lt, rt := e.LHS.Type(), e.RHS.Type(); {
case lt == parser.ValueTypeScalar && rt == parser.ValueTypeScalar:
- return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+ return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
val := scalarBinop(e.Op, v[0].(Vector)[0].Point.V, v[1].(Vector)[0].Point.V)
return append(enh.Out, Sample{Point: Point{V: val}}), nil
}, e.LHS, e.RHS)
case lt == parser.ValueTypeVector && rt == parser.ValueTypeVector:
switch e.Op {
case parser.LAND:
- return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+ return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh), nil
}, e.LHS, e.RHS)
case parser.LOR:
- return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+ return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh), nil
}, e.LHS, e.RHS)
case parser.LUNLESS:
- return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+ return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh), nil
}, e.LHS, e.RHS)
default:
- return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+ return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, enh), nil
}, e.LHS, e.RHS)
}
case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar:
- return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+ return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].Point.V}, false, e.ReturnBool, enh), nil
}, e.LHS, e.RHS)
case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector:
- return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+ return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].Point.V}, true, e.ReturnBool, enh), nil
}, e.LHS, e.RHS)
}
case *parser.NumberLiteral:
- return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+ return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return append(enh.Out, Sample{Point: Point{V: e.Val}}), nil
})
@@ -1382,7 +1469,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
ev.error(errWithWarnings{errors.Wrap(err, "expanding series"), ws})
}
mat := make(Matrix, 0, len(e.Series))
- it := storage.NewBuffer(durationMilliseconds(ev.lookbackDelta))
+ it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta))
for i, s := range e.Series {
it.Reset(s.Iterator())
ss := Series{
@@ -1513,7 +1600,7 @@ func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) (Vect
ev.error(errWithWarnings{errors.Wrap(err, "expanding series"), ws})
}
vec := make(Vector, 0, len(node.Series))
- it := storage.NewBuffer(durationMilliseconds(ev.lookbackDelta))
+ it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta))
for i, s := range node.Series {
it.Reset(s.Iterator())
@@ -1535,7 +1622,7 @@ func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) (Vect
}
// vectorSelectorSingle evaluates a instant vector for the iterator of one time series.
-func (ev *evaluator) vectorSelectorSingle(it *storage.BufferedSeriesIterator, node *parser.VectorSelector, ts int64) (int64, float64, bool) {
+func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, node *parser.VectorSelector, ts int64) (int64, float64, bool) {
refTime := ts - durationMilliseconds(node.Offset)
var t int64
var v float64
@@ -1552,7 +1639,7 @@ func (ev *evaluator) vectorSelectorSingle(it *storage.BufferedSeriesIterator, no
}
if !ok || t > refTime {
- t, v, ok = it.PeekBack(1)
+ t, v, ok = it.PeekPrev()
if !ok || t < refTime-durationMilliseconds(ev.lookbackDelta) {
return 0, 0, false
}
@@ -2033,8 +2120,9 @@ type groupedAggregation struct {
reverseHeap vectorByReverseValueHeap
}
-// aggregation evaluates an aggregation operation on a Vector.
-func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without bool, param interface{}, vec Vector, enh *EvalNodeHelper) Vector {
+// aggregation evaluates an aggregation operation on a Vector. The provided grouping labels
+// must be sorted.
+func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without bool, param interface{}, vec Vector, seriesHelper []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
result := map[uint64]*groupedAggregation{}
var k int64
@@ -2053,35 +2141,43 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
q = param.(float64)
}
var valueLabel string
+ var recomputeGroupingKey bool
if op == parser.COUNT_VALUES {
valueLabel = param.(string)
if !model.LabelName(valueLabel).IsValid() {
ev.errorf("invalid label name %q", valueLabel)
}
if !without {
+ // We're changing the grouping labels so we have to ensure they're still sorted
+ // and we have to flag to recompute the grouping key. Considering the count_values()
+ // operator is less frequently used than other aggregations, we're fine having to
+ // re-compute the grouping key on each step for this case.
grouping = append(grouping, valueLabel)
+ sort.Strings(grouping)
+ recomputeGroupingKey = true
}
}
- sort.Strings(grouping)
lb := labels.NewBuilder(nil)
- buf := make([]byte, 0, 1024)
- for _, s := range vec {
+ var buf []byte
+ for si, s := range vec {
metric := s.Metric
if op == parser.COUNT_VALUES {
lb.Reset(metric)
lb.Set(valueLabel, strconv.FormatFloat(s.V, 'f', -1, 64))
metric = lb.Labels()
+
+ // We've changed the metric so we have to recompute the grouping key.
+ recomputeGroupingKey = true
}
- var (
- groupingKey uint64
- )
- if without {
- groupingKey, buf = metric.HashWithoutLabels(buf, grouping...)
+ // We can use the pre-computed grouping key unless grouping labels have changed.
+ var groupingKey uint64
+ if !recomputeGroupingKey {
+ groupingKey = seriesHelper[si].groupingKey
} else {
- groupingKey, buf = metric.HashForLabels(buf, grouping...)
+ groupingKey, buf = generateGroupingKey(metric, grouping, without, buf)
}
group, ok := result[groupingKey]
@@ -2268,6 +2364,21 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
return enh.Out
}
+// generateGroupingKey builds and returns the grouping key for the given metric and
+// grouping labels.
+func generateGroupingKey(metric labels.Labels, grouping []string, without bool, buf []byte) (uint64, []byte) {
+ if without {
+ return metric.HashWithoutLabels(buf, grouping...)
+ }
+
+ if len(grouping) == 0 {
+ // No need to generate any hash if there are no grouping labels.
+ return 0, buf
+ }
+
+ return metric.HashForLabels(buf, grouping...)
+}
+
// btos returns 1 if b is true, 0 otherwise.
func btos(b bool) float64 {
if b {
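
For readers following the grouping-key change above: the key is a hash over the grouping labels (or over everything except them for `without`), computed once per series, cached in `EvalSeriesHelper`, and reused on every step; only `count_values()` forces per-step recomputation because it rewrites the metric. A minimal runnable sketch of the hashing rule, assuming the `github.com/prometheus/prometheus/pkg/labels` package; the sample metric below is illustrative only:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/pkg/labels"
)

// groupingKey mirrors the generateGroupingKey helper introduced above:
// hash only the grouping labels for `by`, hash everything else for
// `without`, and short-circuit when there is nothing to group on.
// The grouping label names must already be sorted, as the hunk notes.
func groupingKey(metric labels.Labels, grouping []string, without bool, buf []byte) (uint64, []byte) {
	if without {
		return metric.HashWithoutLabels(buf, grouping...)
	}
	if len(grouping) == 0 {
		// Every series falls into the single output group.
		return 0, buf
	}
	return metric.HashForLabels(buf, grouping...)
}

func main() {
	m := labels.FromStrings("__name__", "http_requests_total", "instance", "a", "job", "api")
	var buf []byte
	// Computed once per series and cached across evaluation steps.
	key, _ := groupingKey(m, []string{"job"}, false, buf)
	fmt.Println(key)
}
```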
diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go
index 3a96a9ecec0a4..e497be364be29 100644
--- a/vendor/github.com/prometheus/prometheus/promql/functions.go
+++ b/vendor/github.com/prometheus/prometheus/promql/functions.go
@@ -70,17 +70,17 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
if len(samples.Points) < 2 {
return enh.Out
}
- var (
- counterCorrection float64
- lastValue float64
- )
- for _, sample := range samples.Points {
- if isCounter && sample.V < lastValue {
- counterCorrection += lastValue
+
+ resultValue := samples.Points[len(samples.Points)-1].V - samples.Points[0].V
+ if isCounter {
+ var lastValue float64
+ for _, sample := range samples.Points {
+ if sample.V < lastValue {
+ resultValue += lastValue
+ }
+ lastValue = sample.V
}
- lastValue = sample.V
}
- resultValue := lastValue - samples.Points[0].V + counterCorrection
// Duration between first/last samples and boundary of range.
durationToStart := float64(samples.Points[0].T-rangeStart) / 1000
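
The rework above changes how counter resets enter the result: the raw delta is now taken as last minus first up front, and each detected reset adds back the value observed just before the drop. A small self-contained sketch of that arithmetic; `Point` is a stand-in for `promql.Point`:

```go
package main

import "fmt"

// Point stands in for promql.Point: a timestamped float sample.
type Point struct {
	T int64
	V float64
}

// counterDelta mirrors the reworked extrapolatedRate core: start from
// last-first, then add back the pre-reset value for every counter reset.
func counterDelta(points []Point) float64 {
	result := points[len(points)-1].V - points[0].V
	var last float64
	for _, p := range points {
		if p.V < last { // counter reset detected
			result += last
		}
		last = p.V
	}
	return result
}

func main() {
	// 10 -> 20 -> 3 (reset) -> 8: raw delta is -2, corrected delta is 18.
	fmt.Println(counterDelta([]Point{{0, 10}, {1, 20}, {2, 3}, {3, 8}}))
}
```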
@@ -278,6 +278,23 @@ func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel
return Vector(byValueSorter)
}
+// === clamp(Vector parser.ValueTypeVector, min, max Scalar) Vector ===
+func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+ vec := vals[0].(Vector)
+ min := vals[1].(Vector)[0].Point.V
+ max := vals[2].(Vector)[0].Point.V
+ if max < min {
+ return enh.Out
+ }
+ for _, el := range vec {
+ enh.Out = append(enh.Out, Sample{
+ Metric: enh.DropMetricName(el.Metric),
+ Point: Point{V: math.Max(min, math.Min(max, el.V))},
+ })
+ }
+ return enh.Out
+}
+
// === clamp_max(Vector parser.ValueTypeVector, max Scalar) Vector ===
func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
vec := vals[0].(Vector)
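
The per-sample rule behind the new `clamp()` above is a double pin into `[min, max]`; note that `funcClamp` returns an empty vector when `max < min`, so callers never see partially clamped output. A tiny sketch of the scalar logic:

```go
package main

import (
	"fmt"
	"math"
)

// clamp pins v into [lo, hi], matching the per-sample expression used by
// funcClamp; the lo <= hi precondition is checked by the caller there.
func clamp(v, lo, hi float64) float64 {
	return math.Max(lo, math.Min(hi, v))
}

func main() {
	for _, v := range []float64{-5, 0.3, 42} {
		fmt.Println(clamp(v, 0, 1)) // 0, 0.3, 1
	}
}
```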
@@ -383,7 +400,16 @@ func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNo
})
}
-// === floor(Vector parser.ValueTypeVector) Vector ===
+// === last_over_time(Matrix parser.ValueTypeMatrix) Vector ===
+func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+ el := vals[0].(Matrix)[0]
+
+ return append(enh.Out, Sample{
+ Metric: el.Metric,
+ Point: Point{V: el.Points[len(el.Points)-1].V},
+ })
+}
+
// === max_over_time(Matrix parser.ValueTypeMatrix) Vector ===
func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
return aggrOverTime(vals, enh, func(values []Point) float64 {
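
For the new `last_over_time()` above, the whole computation is picking the most recent point in the selected range while keeping the series labels intact. A minimal sketch, with `Point` again standing in for `promql.Point`:

```go
package main

import "fmt"

// Point stands in for promql.Point.
type Point struct {
	T int64
	V float64
}

// lastOverTime mirrors funcLastOverTime: the result is simply the value of
// the newest point in the selected range.
func lastOverTime(points []Point) float64 {
	return points[len(points)-1].V
}

func main() {
	fmt.Println(lastOverTime([]Point{{10, 1.5}, {20, 2.5}, {30, 0.5}})) // 0.5
}
```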
@@ -537,6 +563,18 @@ func funcLog10(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
return simpleFunc(vals, enh, math.Log10)
}
+// === sgn(Vector parser.ValueTypeVector) Vector ===
+func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+ return simpleFunc(vals, enh, func(v float64) float64 {
+ if v < 0 {
+ return -1
+ } else if v > 0 {
+ return 1
+ }
+ return v
+ })
+}
+
// === timestamp(Vector parser.ValueTypeVector) Vector ===
func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
vec := vals[0].(Vector)
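
The `sgn()` helper above returns the input itself when it is neither negative nor positive, which makes it propagate both 0 and NaN unchanged. A standalone sketch of that behavior:

```go
package main

import (
	"fmt"
	"math"
)

// sgn mirrors funcSgn: -1 for negative values, 1 for positive values, and
// the input itself otherwise, so 0 and NaN pass through unchanged.
func sgn(v float64) float64 {
	if v < 0 {
		return -1
	}
	if v > 0 {
		return 1
	}
	return v
}

func main() {
	fmt.Println(sgn(-3.2), sgn(0), sgn(7), sgn(math.NaN())) // -1 0 1 NaN
}
```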
@@ -893,6 +931,7 @@ var FunctionCalls = map[string]FunctionCall{
"avg_over_time": funcAvgOverTime,
"ceil": funcCeil,
"changes": funcChanges,
+ "clamp": funcClamp,
"clamp_max": funcClampMax,
"clamp_min": funcClampMin,
"count_over_time": funcCountOverTime,
@@ -914,6 +953,7 @@ var FunctionCalls = map[string]FunctionCall{
"ln": funcLn,
"log10": funcLog10,
"log2": funcLog2,
+ "last_over_time": funcLastOverTime,
"max_over_time": funcMaxOverTime,
"min_over_time": funcMinOverTime,
"minute": funcMinute,
@@ -924,6 +964,7 @@ var FunctionCalls = map[string]FunctionCall{
"resets": funcResets,
"round": funcRound,
"scalar": funcScalar,
+ "sgn": funcSgn,
"sort": funcSort,
"sort_desc": funcSortDesc,
"sqrt": funcSqrt,
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go
index a8754f2405e95..776242564087c 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go
@@ -316,6 +316,18 @@ func Walk(v Visitor, node Node, path []Node) error {
return err
}
+func ExtractSelectors(expr Expr) [][]*labels.Matcher {
+ var selectors [][]*labels.Matcher
+ Inspect(expr, func(node Node, _ []Node) error {
+ vs, ok := node.(*VectorSelector)
+ if ok {
+ selectors = append(selectors, vs.LabelMatchers)
+ }
+ return nil
+ })
+ return selectors
+}
+
type inspector func(Node, []Node) error
func (f inspector) Visit(node Node, path []Node) (Visitor, error) {
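
`ExtractSelectors` above walks the AST with `Inspect` and collects the matchers of every vector selector it finds. A hedged usage sketch, assuming this `promql/parser` package is on the module path; the query string is illustrative:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	expr, err := parser.ParseExpr(`sum(rate(http_requests_total{job="api"}[5m])) / sum(up{job="api"})`)
	if err != nil {
		panic(err)
	}
	// One matcher slice per vector selector found in the expression.
	for _, sel := range parser.ExtractSelectors(expr) {
		for _, m := range sel {
			fmt.Print(m.String(), " ")
		}
		fmt.Println()
	}
}
```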
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go
index 4516829e551e2..a127cd28a4f8d 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go
@@ -54,6 +54,11 @@ var Functions = map[string]*Function{
ArgTypes: []ValueType{ValueTypeMatrix},
ReturnType: ValueTypeVector,
},
+ "clamp": {
+ Name: "clamp",
+ ArgTypes: []ValueType{ValueTypeVector, ValueTypeScalar, ValueTypeScalar},
+ ReturnType: ValueTypeVector,
+ },
"clamp_max": {
Name: "clamp_max",
ArgTypes: []ValueType{ValueTypeVector, ValueTypeScalar},
@@ -149,6 +154,11 @@ var Functions = map[string]*Function{
Variadic: -1,
ReturnType: ValueTypeVector,
},
+ "last_over_time": {
+ Name: "last_over_time",
+ ArgTypes: []ValueType{ValueTypeMatrix},
+ ReturnType: ValueTypeVector,
+ },
"ln": {
Name: "ln",
ArgTypes: []ValueType{ValueTypeVector},
@@ -217,6 +227,11 @@ var Functions = map[string]*Function{
ArgTypes: []ValueType{ValueTypeVector},
ReturnType: ValueTypeScalar,
},
+ "sgn": {
+ Name: "sgn",
+ ArgTypes: []ValueType{ValueTypeVector},
+ ReturnType: ValueTypeVector,
+ },
"sort": {
Name: "sort",
ArgTypes: []ValueType{ValueTypeVector},
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y
index 6f398addfdc12..3f914e4ac3df1 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y
@@ -386,6 +386,11 @@ offset_expr: expr OFFSET duration
yylex.(*parser).addOffset($1, $3)
$$ = $1
}
+ | expr OFFSET SUB duration
+ {
+ yylex.(*parser).addOffset($1, -$4)
+ $$ = $1
+ }
| expr OFFSET error
{ yylex.(*parser).unexpected("offset", "duration"); $$ = $1 }
;
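
The new `expr OFFSET SUB duration` production lets a leading minus through to `addOffset`, which stores a negative `OriginalOffset` on the selector. A hedged sketch of what the parser now accepts (evaluating a negative offset may still be gated behind an engine option):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// With the new grammar rule this parses instead of erroring out.
	expr, err := parser.ParseExpr(`http_requests_total offset -5m`)
	if err != nil {
		panic(err)
	}
	vs := expr.(*parser.VectorSelector)
	fmt.Println(vs.OriginalOffset) // -5m0s
}
```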
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go
index 26005236bbcbc..e0c5ceac57778 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go
@@ -189,13 +189,14 @@ var yyToknames = [...]string{
"START_METRIC_SELECTOR",
"startSymbolsEnd",
}
+
var yyStatenames = [...]string{}
const yyEofCode = 1
const yyErrCode = 2
const yyInitialStackSize = 16
-//line generated_parser.y:742
+//line generated_parser.y:747
//line yacctab:1
var yyExca = [...]int{
@@ -203,301 +204,300 @@ var yyExca = [...]int{
1, -1,
-2, 0,
-1, 33,
- 1, 127,
- 10, 127,
- 22, 127,
+ 1, 128,
+ 10, 128,
+ 22, 128,
-2, 0,
-1, 56,
- 2, 139,
- 15, 139,
- 61, 139,
- 67, 139,
- -2, 95,
- -1, 57,
2, 140,
15, 140,
61, 140,
67, 140,
-2, 96,
- -1, 58,
+ -1, 57,
2, 141,
15, 141,
61, 141,
67, 141,
- -2, 98,
- -1, 59,
+ -2, 97,
+ -1, 58,
2, 142,
15, 142,
61, 142,
67, 142,
-2, 99,
- -1, 60,
+ -1, 59,
2, 143,
15, 143,
61, 143,
67, 143,
-2, 100,
- -1, 61,
+ -1, 60,
2, 144,
15, 144,
61, 144,
67, 144,
- -2, 105,
- -1, 62,
+ -2, 101,
+ -1, 61,
2, 145,
15, 145,
61, 145,
67, 145,
- -2, 107,
- -1, 63,
+ -2, 106,
+ -1, 62,
2, 146,
15, 146,
61, 146,
67, 146,
- -2, 109,
- -1, 64,
+ -2, 108,
+ -1, 63,
2, 147,
15, 147,
61, 147,
67, 147,
-2, 110,
- -1, 65,
+ -1, 64,
2, 148,
15, 148,
61, 148,
67, 148,
-2, 111,
- -1, 66,
+ -1, 65,
2, 149,
15, 149,
61, 149,
67, 149,
-2, 112,
- -1, 67,
+ -1, 66,
2, 150,
15, 150,
61, 150,
67, 150,
-2, 113,
- -1, 185,
- 12, 192,
- 13, 192,
- 16, 192,
- 17, 192,
- 23, 192,
- 26, 192,
- 32, 192,
- 33, 192,
- 36, 192,
- 42, 192,
- 46, 192,
- 47, 192,
- 48, 192,
- 49, 192,
- 50, 192,
- 51, 192,
- 52, 192,
- 53, 192,
- 54, 192,
- 55, 192,
- 56, 192,
- 57, 192,
- 61, 192,
- 65, 192,
- 67, 192,
- -2, 0,
+ -1, 67,
+ 2, 151,
+ 15, 151,
+ 61, 151,
+ 67, 151,
+ -2, 114,
-1, 186,
- 12, 192,
- 13, 192,
- 16, 192,
- 17, 192,
- 23, 192,
- 26, 192,
- 32, 192,
- 33, 192,
- 36, 192,
- 42, 192,
- 46, 192,
- 47, 192,
- 48, 192,
- 49, 192,
- 50, 192,
- 51, 192,
- 52, 192,
- 53, 192,
- 54, 192,
- 55, 192,
- 56, 192,
- 57, 192,
- 61, 192,
- 65, 192,
- 67, 192,
+ 12, 193,
+ 13, 193,
+ 16, 193,
+ 17, 193,
+ 23, 193,
+ 26, 193,
+ 32, 193,
+ 33, 193,
+ 36, 193,
+ 42, 193,
+ 46, 193,
+ 47, 193,
+ 48, 193,
+ 49, 193,
+ 50, 193,
+ 51, 193,
+ 52, 193,
+ 53, 193,
+ 54, 193,
+ 55, 193,
+ 56, 193,
+ 57, 193,
+ 61, 193,
+ 65, 193,
+ 67, 193,
-2, 0,
- -1, 205,
- 19, 190,
+ -1, 187,
+ 12, 193,
+ 13, 193,
+ 16, 193,
+ 17, 193,
+ 23, 193,
+ 26, 193,
+ 32, 193,
+ 33, 193,
+ 36, 193,
+ 42, 193,
+ 46, 193,
+ 47, 193,
+ 48, 193,
+ 49, 193,
+ 50, 193,
+ 51, 193,
+ 52, 193,
+ 53, 193,
+ 54, 193,
+ 55, 193,
+ 56, 193,
+ 57, 193,
+ 61, 193,
+ 65, 193,
+ 67, 193,
-2, 0,
- -1, 252,
+ -1, 207,
19, 191,
-2, 0,
+ -1, 254,
+ 19, 192,
+ -2, 0,
}
const yyPrivate = 57344
-const yyLast = 640
+const yyLast = 638
var yyAct = [...]int{
-
- 258, 35, 209, 138, 248, 247, 145, 110, 75, 99,
- 98, 143, 6, 101, 183, 123, 184, 100, 261, 102,
- 185, 186, 243, 144, 149, 242, 148, 97, 49, 70,
- 103, 51, 22, 50, 55, 149, 160, 237, 250, 52,
- 150, 118, 68, 206, 262, 259, 148, 205, 18, 19,
- 236, 150, 20, 105, 93, 106, 96, 201, 69, 104,
- 204, 119, 56, 57, 58, 59, 60, 61, 62, 63,
- 64, 65, 66, 67, 241, 175, 101, 13, 95, 146,
- 147, 24, 102, 30, 2, 3, 4, 5, 70, 107,
- 97, 157, 103, 256, 31, 240, 174, 7, 255, 112,
- 263, 151, 80, 81, 156, 161, 155, 158, 153, 111,
- 154, 254, 10, 90, 91, 238, 8, 93, 94, 96,
- 33, 182, 72, 173, 79, 181, 187, 188, 189, 190,
- 191, 192, 193, 194, 195, 196, 197, 198, 199, 200,
- 32, 95, 180, 124, 125, 126, 127, 128, 129, 130,
- 131, 132, 133, 134, 135, 136, 137, 97, 117, 77,
- 116, 165, 115, 167, 253, 168, 164, 114, 1, 76,
- 81, 140, 239, 202, 203, 172, 152, 163, 113, 251,
- 90, 91, 45, 140, 93, 244, 96, 44, 245, 246,
- 170, 34, 249, 49, 70, 112, 51, 22, 50, 139,
- 169, 171, 77, 177, 52, 111, 140, 68, 95, 252,
- 179, 109, 76, 18, 19, 148, 46, 20, 74, 43,
- 42, 122, 71, 69, 149, 41, 40, 56, 57, 58,
- 59, 60, 61, 62, 63, 64, 65, 66, 67, 257,
- 150, 39, 13, 120, 260, 159, 24, 38, 30, 49,
- 70, 121, 51, 22, 50, 37, 36, 47, 265, 141,
- 52, 54, 266, 68, 9, 9, 178, 78, 176, 18,
- 19, 207, 73, 20, 142, 53, 210, 166, 48, 69,
- 108, 0, 0, 56, 57, 58, 59, 60, 61, 62,
- 63, 64, 65, 66, 67, 211, 0, 0, 13, 0,
- 0, 0, 24, 0, 30, 221, 0, 0, 0, 227,
- 0, 0, 0, 264, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 223, 224, 0, 0, 225,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 212,
- 214, 216, 217, 218, 226, 228, 231, 232, 233, 234,
- 235, 211, 0, 213, 215, 219, 220, 222, 229, 230,
- 0, 221, 0, 0, 0, 227, 0, 0, 0, 208,
+ 260, 35, 211, 138, 250, 249, 146, 110, 75, 99,
+ 98, 101, 144, 184, 6, 185, 123, 102, 140, 100,
+ 186, 187, 55, 145, 245, 141, 150, 149, 263, 244,
+ 49, 70, 103, 51, 22, 50, 150, 118, 161, 252,
+ 243, 52, 151, 239, 68, 264, 261, 112, 149, 203,
+ 18, 19, 151, 105, 20, 106, 238, 111, 139, 104,
+ 69, 242, 119, 240, 56, 57, 58, 59, 60, 61,
+ 62, 63, 64, 65, 66, 67, 176, 107, 101, 13,
+ 147, 148, 158, 24, 102, 30, 2, 3, 4, 5,
+ 97, 258, 103, 7, 208, 157, 257, 175, 207, 166,
+ 70, 152, 80, 81, 165, 162, 156, 159, 154, 256,
+ 155, 206, 31, 90, 91, 164, 141, 93, 94, 96,
+ 117, 183, 116, 174, 265, 182, 188, 189, 190, 191,
+ 192, 193, 194, 195, 196, 197, 198, 199, 200, 201,
+ 79, 95, 181, 202, 124, 125, 126, 127, 128, 129,
+ 130, 131, 132, 133, 134, 135, 136, 137, 255, 10,
+ 168, 77, 169, 153, 54, 141, 112, 9, 9, 72,
+ 141, 76, 32, 241, 204, 205, 111, 173, 34, 97,
+ 49, 70, 109, 51, 22, 50, 246, 171, 1, 247,
+ 248, 52, 81, 251, 68, 8, 253, 170, 172, 33,
+ 18, 19, 90, 91, 20, 97, 93, 46, 96, 45,
+ 69, 254, 44, 71, 56, 57, 58, 59, 60, 61,
+ 62, 63, 64, 65, 66, 67, 43, 77, 42, 13,
+ 95, 122, 93, 24, 96, 30, 41, 76, 40, 39,
+ 120, 259, 178, 74, 160, 38, 262, 49, 70, 180,
+ 51, 22, 50, 121, 149, 37, 95, 115, 52, 36,
+ 267, 68, 114, 150, 268, 47, 142, 18, 19, 179,
+ 78, 20, 177, 113, 209, 73, 143, 69, 53, 151,
+ 212, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 213, 167, 48, 13, 108, 0, 0,
+ 24, 0, 30, 223, 0, 0, 0, 229, 0, 0,
+ 0, 266, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 225, 226, 0, 0, 227, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 214, 216, 218,
+ 219, 220, 228, 230, 233, 234, 235, 236, 237, 213,
+ 0, 215, 217, 221, 222, 224, 231, 232, 0, 223,
+ 0, 0, 0, 229, 0, 0, 0, 210, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 225,
+ 226, 0, 0, 227, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 214, 216, 218, 219, 220, 228, 230,
+ 233, 234, 235, 236, 237, 0, 0, 215, 217, 221,
+ 222, 224, 231, 232, 17, 70, 0, 0, 22, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 223, 224, 0, 0, 225, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 212, 214, 216, 217, 218,
- 226, 228, 231, 232, 233, 234, 235, 0, 0, 213,
- 215, 219, 220, 222, 229, 230, 17, 70, 0, 0,
- 22, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 18, 19, 0, 0,
- 20, 0, 17, 31, 0, 0, 22, 0, 0, 0,
- 11, 12, 14, 15, 16, 21, 23, 25, 26, 27,
- 28, 29, 18, 19, 0, 13, 20, 0, 0, 24,
- 0, 30, 0, 0, 0, 0, 11, 12, 14, 15,
- 16, 21, 23, 25, 26, 27, 28, 29, 97, 0,
- 0, 13, 0, 0, 162, 24, 0, 30, 0, 0,
- 80, 81, 82, 0, 83, 84, 85, 86, 87, 88,
- 89, 90, 91, 92, 0, 93, 94, 96, 0, 0,
- 0, 0, 0, 0, 97, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 80, 81, 82, 95,
- 83, 84, 85, 86, 87, 88, 89, 90, 91, 92,
+ 0, 0, 0, 0, 18, 19, 0, 0, 20, 0,
+ 17, 31, 0, 0, 22, 0, 0, 0, 11, 12,
+ 14, 15, 16, 21, 23, 25, 26, 27, 28, 29,
+ 18, 19, 0, 13, 20, 0, 0, 24, 0, 30,
+ 0, 0, 0, 0, 11, 12, 14, 15, 16, 21,
+ 23, 25, 26, 27, 28, 29, 97, 0, 0, 13,
+ 0, 0, 163, 24, 0, 30, 0, 0, 80, 81,
+ 82, 0, 83, 84, 85, 86, 87, 88, 89, 90,
+ 91, 92, 0, 93, 94, 96, 0, 0, 0, 0,
+ 0, 0, 97, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 80, 81, 82, 95, 83, 84,
+ 85, 86, 87, 88, 89, 90, 91, 92, 0, 93,
+ 94, 96, 0, 0, 97, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 80, 81, 82, 0,
+ 83, 84, 85, 95, 87, 88, 89, 90, 91, 92,
0, 93, 94, 96, 0, 0, 97, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 80, 81,
- 82, 0, 83, 84, 85, 95, 87, 88, 89, 90,
- 91, 92, 0, 93, 94, 96, 0, 0, 97, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 80, 81, 82, 0, 83, 84, 0, 95, 87, 88,
- 0, 90, 91, 92, 0, 93, 94, 96, 0, 0,
+ 82, 0, 83, 84, 0, 95, 87, 88, 0, 90,
+ 91, 92, 0, 93, 94, 96, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 95,
+ 0, 0, 0, 0, 0, 0, 0, 95,
}
-var yyPact = [...]int{
- 10, 87, 430, 430, 181, 404, -1000, -1000, -1000, 81,
+var yyPact = [...]int{
+ 12, 83, 428, 428, 168, 402, -1000, -1000, -1000, 99,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, 200, -1000, 122, -1000, 510, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 15, 77,
- -1000, 237, -1000, 237, 75, -1000, -1000, -1000, -1000, -1000,
+ -1000, 225, -1000, 138, -1000, 508, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 17, 77,
+ -1000, 235, -1000, 235, 87, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- 193, -1000, -1000, 160, -1000, -1000, 156, -1000, 19, -1000,
- -45, -45, -45, -45, -45, -45, -45, -45, -45, -45,
- -45, -45, -45, -45, -45, 197, 9, 174, 77, -48,
- -1000, 89, 89, 16, -1000, 474, 13, -1000, 159, -1000,
- -1000, 161, -1000, -1000, 157, -1000, 73, -1000, 198, 237,
- -1000, -50, -42, -1000, 237, 237, 237, 237, 237, 237,
- 237, 237, 237, 237, 237, 237, 237, 237, -1000, -1000,
- -1000, -1000, 42, -1000, -1000, -1000, -1000, -1000, -1000, 29,
- 29, 41, -1000, -1000, -1000, -1000, 349, -1000, -1000, 30,
- -1000, 510, -1000, -1000, 97, -1000, 72, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1, -2, -1000,
- -1000, -1000, 143, 89, 89, 89, 89, 13, 76, 76,
- 76, 574, 542, 76, 76, 574, 13, 13, 76, 13,
- 143, 18, -1000, -1000, -1000, 162, -1000, 91, -1000, -1000,
+ 164, -1000, -1000, 255, -1000, -1000, 118, -1000, 15, -1000,
+ -44, -44, -44, -44, -44, -44, -44, -44, -44, -44,
+ -44, -44, -44, -44, -44, 16, 10, 161, 77, -50,
+ -1000, 80, 80, 18, -1000, 472, 191, -1000, 97, -1000,
+ -1000, 158, -1000, -1000, 159, -1000, 74, -1000, 237, 235,
+ -1000, -51, -42, -1000, 235, 235, 235, 235, 235, 235,
+ 235, 235, 235, 235, 235, 235, 235, 235, -1000, 107,
+ -1000, -1000, -1000, 34, -1000, -1000, -1000, -1000, -1000, -1000,
+ 31, 31, 92, -1000, -1000, -1000, -1000, 347, -1000, -1000,
+ 36, -1000, 508, -1000, -1000, 45, -1000, 38, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 5, 0,
+ -1000, -1000, -1000, 165, 80, 80, 80, 80, 191, 76,
+ 76, 76, 572, 540, 76, 76, 572, 191, 191, 76,
+ 191, 165, -1000, 19, -1000, -1000, -1000, 156, -1000, 89,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, 237, -1000, -1000,
- -1000, -1000, 28, 28, -6, -1000, -1000, -1000, -1000, -1000,
- -1000, 25, 98, -1000, -1000, 293, -1000, 510, -1000, -1000,
- -1000, 28, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 235,
+ -1000, -1000, -1000, -1000, 29, 29, 4, -1000, -1000, -1000,
+ -1000, -1000, -1000, 26, 122, -1000, -1000, 291, -1000, 508,
+ -1000, -1000, -1000, 29, -1000, -1000, -1000, -1000, -1000,
}
-var yyPgo = [...]int{
- 0, 280, 7, 278, 2, 277, 276, 261, 275, 274,
- 112, 272, 116, 8, 271, 4, 5, 268, 267, 0,
- 23, 266, 6, 259, 257, 256, 10, 61, 255, 251,
- 1, 247, 245, 9, 243, 34, 241, 226, 225, 221,
- 220, 219, 187, 182, 216, 3, 179, 168, 140,
+var yyPgo = [...]int{
+ 0, 297, 7, 295, 2, 294, 280, 164, 278, 276,
+ 159, 275, 195, 8, 274, 4, 5, 272, 270, 0,
+ 23, 269, 6, 266, 265, 259, 10, 62, 255, 253,
+ 1, 245, 244, 9, 240, 22, 239, 238, 236, 231,
+ 228, 226, 212, 209, 207, 3, 196, 188, 172,
}
-var yyR1 = [...]int{
+var yyR1 = [...]int{
0, 47, 47, 47, 47, 47, 47, 47, 30, 30,
30, 30, 30, 30, 30, 30, 30, 30, 30, 30,
25, 25, 25, 25, 26, 26, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 27, 29, 29, 39, 39, 34, 34, 34, 34,
15, 15, 15, 15, 14, 14, 14, 4, 4, 31,
- 33, 33, 32, 32, 32, 40, 38, 38, 24, 24,
- 24, 9, 9, 36, 42, 42, 42, 42, 42, 43,
- 44, 44, 44, 35, 35, 35, 1, 1, 1, 2,
- 2, 2, 2, 12, 12, 7, 7, 7, 7, 7,
+ 33, 33, 32, 32, 32, 40, 38, 38, 38, 24,
+ 24, 24, 9, 9, 36, 42, 42, 42, 42, 42,
+ 43, 44, 44, 44, 35, 35, 35, 1, 1, 1,
+ 2, 2, 2, 2, 12, 12, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 10, 10, 10, 10, 11,
- 11, 11, 13, 13, 13, 13, 48, 18, 18, 18,
- 18, 17, 17, 17, 17, 17, 21, 21, 21, 3,
+ 7, 7, 7, 7, 7, 7, 10, 10, 10, 10,
+ 11, 11, 11, 13, 13, 13, 13, 48, 18, 18,
+ 18, 18, 17, 17, 17, 17, 17, 21, 21, 21,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 3, 3, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
- 6, 6, 6, 6, 6, 8, 8, 5, 5, 5,
- 5, 37, 20, 22, 22, 23, 23, 19, 45, 41,
- 46, 46, 16, 16,
+ 6, 6, 6, 6, 6, 6, 8, 8, 5, 5,
+ 5, 5, 37, 20, 22, 22, 23, 23, 19, 45,
+ 41, 46, 46, 16, 16,
}
-var yyR2 = [...]int{
+var yyR2 = [...]int{
0, 2, 2, 2, 2, 2, 2, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
3, 3, 2, 2, 2, 2, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 1, 0, 1, 3, 3, 1, 1, 3, 3,
3, 4, 2, 1, 3, 1, 2, 1, 1, 2,
- 3, 2, 3, 1, 2, 3, 3, 3, 3, 5,
- 3, 1, 1, 4, 6, 6, 5, 4, 3, 2,
- 2, 1, 1, 3, 4, 2, 3, 1, 2, 3,
- 3, 2, 1, 2, 1, 1, 1, 1, 1, 1,
+ 3, 2, 3, 1, 2, 3, 3, 4, 3, 3,
+ 5, 3, 1, 1, 4, 6, 6, 5, 4, 3,
+ 2, 2, 1, 1, 3, 4, 2, 3, 1, 2,
+ 3, 3, 2, 1, 2, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 3, 4, 2, 0, 3,
- 1, 2, 3, 3, 2, 1, 2, 0, 3, 2,
- 1, 1, 3, 1, 3, 4, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 3, 4, 2, 0,
+ 3, 1, 2, 3, 3, 2, 1, 2, 0, 3,
+ 2, 1, 1, 3, 1, 3, 4, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 2, 2, 1, 1, 1, 1, 1,
- 0, 1, 0, 1,
+ 1, 1, 1, 1, 2, 2, 1, 1, 1, 1,
+ 1, 0, 1, 0, 1,
}
-var yyChk = [...]int{
+var yyChk = [...]int{
-1000, -47, 74, 75, 76, 77, 2, 10, -12, -7,
-10, 46, 47, 61, 48, 49, 50, 12, 32, 33,
36, 51, 16, 52, 65, 53, 54, 55, 56, 57,
@@ -511,57 +511,57 @@ var yyChk = [...]int{
2, 61, 67, 15, -33, -30, -30, -35, -1, 18,
-2, 12, 2, 18, 7, 2, 4, 2, 22, -27,
-34, -29, -39, 60, -27, -27, -27, -27, -27, -27,
- -27, -27, -27, -27, -27, -27, -27, -27, -45, 2,
- 9, -23, -9, 2, -20, -22, 70, 71, 17, 26,
- 42, -45, 2, -33, -26, -15, 15, 2, -15, -32,
- 20, -30, 20, 18, 7, 2, -5, 2, 4, 39,
- 29, 40, 18, -13, 23, 2, -17, 5, -21, 12,
- -20, -22, -30, 64, 66, 62, 63, -30, -30, -30,
+ -27, -27, -27, -27, -27, -27, -27, -27, -45, 42,
+ 2, 9, -23, -9, 2, -20, -22, 70, 71, 17,
+ 26, 42, -45, 2, -33, -26, -15, 15, 2, -15,
+ -32, 20, -30, 20, 18, 7, 2, -5, 2, 4,
+ 39, 29, 40, 18, -13, 23, 2, -17, 5, -21,
+ 12, -20, -22, -30, 64, 66, 62, 63, -30, -30,
-30, -30, -30, -30, -30, -30, -30, -30, -30, -30,
- -30, 15, -20, -20, 19, 6, 2, -14, 20, -4,
- -6, 2, 46, 60, 47, 61, 48, 49, 50, 62,
- 63, 12, 64, 32, 33, 36, 51, 16, 52, 65,
- 66, 53, 54, 55, 56, 57, 20, 7, 18, -2,
- 23, 2, 24, 24, -22, -15, -15, -16, -15, -16,
- 20, -46, -45, 2, 20, 7, 2, -30, -19, 17,
- -19, 24, 19, 2, 20, -4, -19,
+ -30, -30, -45, 15, -20, -20, 19, 6, 2, -14,
+ 20, -4, -6, 2, 46, 60, 47, 61, 48, 49,
+ 50, 62, 63, 12, 64, 32, 33, 36, 51, 16,
+ 52, 65, 66, 53, 54, 55, 56, 57, 20, 7,
+ 18, -2, 23, 2, 24, 24, -22, -15, -15, -16,
+ -15, -16, 20, -46, -45, 2, 20, 7, 2, -30,
+ -19, 17, -19, 24, 19, 2, 20, -4, -19,
}
-var yyDef = [...]int{
- 0, -2, 118, 118, 0, 0, 7, 6, 1, 118,
- 94, 95, 96, 97, 98, 99, 100, 101, 102, 103,
- 104, 105, 106, 107, 108, 109, 110, 111, 112, 113,
- 114, 0, 2, -2, 3, 4, 8, 9, 10, 11,
- 12, 13, 14, 15, 16, 17, 18, 19, 0, 101,
- 181, 0, 189, 0, 81, 82, -2, -2, -2, -2,
- -2, -2, -2, -2, -2, -2, -2, -2, 175, 176,
- 0, 5, 93, 0, 117, 120, 0, 125, 126, 130,
+var yyDef = [...]int{
+ 0, -2, 119, 119, 0, 0, 7, 6, 1, 119,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
+ 115, 0, 2, -2, 3, 4, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 0, 102,
+ 182, 0, 190, 0, 82, 83, -2, -2, -2, -2,
+ -2, -2, -2, -2, -2, -2, -2, -2, 176, 177,
+ 0, 5, 94, 0, 118, 121, 0, 126, 127, 131,
42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
42, 42, 42, 42, 42, 0, 0, 0, 0, 22,
- 23, 0, 0, 0, 59, 0, 79, 80, 0, 85,
- 87, 0, 92, 115, 0, 121, 0, 124, 129, 0,
+ 23, 0, 0, 0, 59, 0, 80, 81, 0, 86,
+ 88, 0, 93, 116, 0, 122, 0, 125, 130, 0,
41, 46, 47, 43, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 66, 67,
- 188, 68, 0, 70, 185, 186, 71, 72, 182, 0,
- 0, 0, 78, 20, 21, 24, 0, 53, 25, 0,
- 61, 63, 65, 83, 0, 88, 0, 91, 177, 178,
- 179, 180, 116, 119, 122, 123, 128, 131, 133, 136,
- 137, 138, 26, 0, 0, -2, -2, 27, 28, 29,
- 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
- 40, 0, 183, 184, 73, -2, 77, 0, 52, 55,
- 57, 58, 151, 152, 153, 154, 155, 156, 157, 158,
- 159, 160, 161, 162, 163, 164, 165, 166, 167, 168,
- 169, 170, 171, 172, 173, 174, 60, 64, 84, 86,
- 89, 90, 0, 0, 0, 44, 45, 48, 193, 49,
- 69, 0, -2, 76, 50, 0, 56, 62, 132, 187,
- 134, 0, 74, 75, 51, 54, 135,
+ 0, 0, 0, 0, 0, 0, 0, 0, 66, 0,
+ 68, 189, 69, 0, 71, 186, 187, 72, 73, 183,
+ 0, 0, 0, 79, 20, 21, 24, 0, 53, 25,
+ 0, 61, 63, 65, 84, 0, 89, 0, 92, 178,
+ 179, 180, 181, 117, 120, 123, 124, 129, 132, 134,
+ 137, 138, 139, 26, 0, 0, -2, -2, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
+ 39, 40, 67, 0, 184, 185, 74, -2, 78, 0,
+ 52, 55, 57, 58, 152, 153, 154, 155, 156, 157,
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175, 60, 64,
+ 85, 87, 90, 91, 0, 0, 0, 44, 45, 48,
+ 194, 49, 70, 0, -2, 77, 50, 0, 56, 62,
+ 133, 188, 135, 0, 75, 76, 51, 54, 136,
}
-var yyTok1 = [...]int{
+var yyTok1 = [...]int{
1,
}
-var yyTok2 = [...]int{
+var yyTok2 = [...]int{
2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
@@ -571,6 +571,7 @@ var yyTok2 = [...]int{
62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
72, 73, 74, 75, 76, 77, 78,
}
+
var yyTok3 = [...]int{
0,
}
@@ -1244,36 +1245,43 @@ yydefault:
yyVAL.node = yyDollar[1].node
}
case 67:
- yyDollar = yyS[yypt-3 : yypt+1]
+ yyDollar = yyS[yypt-4 : yypt+1]
//line generated_parser.y:390
{
- yylex.(*parser).unexpected("offset", "duration")
+ yylex.(*parser).addOffset(yyDollar[1].node, -yyDollar[4].duration)
yyVAL.node = yyDollar[1].node
}
case 68:
yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:397
+//line generated_parser.y:395
{
- yylex.(*parser).setTimestamp(yyDollar[1].node, yyDollar[3].float)
+ yylex.(*parser).unexpected("offset", "duration")
yyVAL.node = yyDollar[1].node
}
case 69:
- yyDollar = yyS[yypt-5 : yypt+1]
+ yyDollar = yyS[yypt-3 : yypt+1]
//line generated_parser.y:402
{
- yylex.(*parser).setAtModifierPreprocessor(yyDollar[1].node, yyDollar[3].item)
+ yylex.(*parser).setTimestamp(yyDollar[1].node, yyDollar[3].float)
yyVAL.node = yyDollar[1].node
}
case 70:
- yyDollar = yyS[yypt-3 : yypt+1]
+ yyDollar = yyS[yypt-5 : yypt+1]
//line generated_parser.y:407
+ {
+ yylex.(*parser).setAtModifierPreprocessor(yyDollar[1].node, yyDollar[3].item)
+ yyVAL.node = yyDollar[1].node
+ }
+ case 71:
+ yyDollar = yyS[yypt-3 : yypt+1]
+//line generated_parser.y:412
{
yylex.(*parser).unexpected("@", "timestamp")
yyVAL.node = yyDollar[1].node
}
- case 73:
+ case 74:
yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:417
+//line generated_parser.y:422
{
var errMsg string
vs, ok := yyDollar[1].node.(*VectorSelector)
@@ -1296,9 +1304,9 @@ yydefault:
EndPos: yylex.(*parser).lastClosing,
}
}
- case 74:
+ case 75:
yyDollar = yyS[yypt-6 : yypt+1]
-//line generated_parser.y:442
+//line generated_parser.y:447
{
yyVAL.node = &SubqueryExpr{
Expr: yyDollar[1].node.(Expr),
@@ -1308,37 +1316,37 @@ yydefault:
EndPos: yyDollar[6].item.Pos + 1,
}
}
- case 75:
+ case 76:
yyDollar = yyS[yypt-6 : yypt+1]
-//line generated_parser.y:452
+//line generated_parser.y:457
{
yylex.(*parser).unexpected("subquery selector", "\"]\"")
yyVAL.node = yyDollar[1].node
}
- case 76:
+ case 77:
yyDollar = yyS[yypt-5 : yypt+1]
-//line generated_parser.y:454
+//line generated_parser.y:459
{
yylex.(*parser).unexpected("subquery selector", "duration or \"]\"")
yyVAL.node = yyDollar[1].node
}
- case 77:
+ case 78:
yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:456
+//line generated_parser.y:461
{
yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\"")
yyVAL.node = yyDollar[1].node
}
- case 78:
+ case 79:
yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:458
+//line generated_parser.y:463
{
yylex.(*parser).unexpected("subquery selector", "duration")
yyVAL.node = yyDollar[1].node
}
- case 79:
+ case 80:
yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:468
+//line generated_parser.y:473
{
if nl, ok := yyDollar[2].node.(*NumberLiteral); ok {
if yyDollar[1].item.Typ == SUB {
@@ -1350,9 +1358,9 @@ yydefault:
yyVAL.node = &UnaryExpr{Op: yyDollar[1].item.Typ, Expr: yyDollar[2].node.(Expr), StartPos: yyDollar[1].item.Pos}
}
}
- case 80:
+ case 81:
yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:486
+//line generated_parser.y:491
{
vs := yyDollar[2].node.(*VectorSelector)
vs.PosRange = mergeRanges(&yyDollar[1].item, vs)
@@ -1360,9 +1368,9 @@ yydefault:
yylex.(*parser).assembleVectorSelector(vs)
yyVAL.node = vs
}
- case 81:
+ case 82:
yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:494
+//line generated_parser.y:499
{
vs := &VectorSelector{
Name: yyDollar[1].item.Val,
@@ -1372,44 +1380,44 @@ yydefault:
yylex.(*parser).assembleVectorSelector(vs)
yyVAL.node = vs
}
- case 82:
+ case 83:
yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:504
+//line generated_parser.y:509
{
vs := yyDollar[1].node.(*VectorSelector)
yylex.(*parser).assembleVectorSelector(vs)
yyVAL.node = vs
}
- case 83:
+ case 84:
yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:512
+//line generated_parser.y:517
{
yyVAL.node = &VectorSelector{
LabelMatchers: yyDollar[2].matchers,
PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[3].item),
}
}
- case 84:
+ case 85:
yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:519
+//line generated_parser.y:524
{
yyVAL.node = &VectorSelector{
LabelMatchers: yyDollar[2].matchers,
PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[4].item),
}
}
- case 85:
+ case 86:
yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:526
+//line generated_parser.y:531
{
yyVAL.node = &VectorSelector{
LabelMatchers: []*labels.Matcher{},
PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[2].item),
}
}
- case 86:
+ case 87:
yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:535
+//line generated_parser.y:540
{
if yyDollar[1].matchers != nil {
yyVAL.matchers = append(yyDollar[1].matchers, yyDollar[3].matcher)
@@ -1417,196 +1425,196 @@ yydefault:
yyVAL.matchers = yyDollar[1].matchers
}
}
- case 87:
+ case 88:
yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:543
+//line generated_parser.y:548
{
yyVAL.matchers = []*labels.Matcher{yyDollar[1].matcher}
}
- case 88:
+ case 89:
yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:545
+//line generated_parser.y:550
{
yylex.(*parser).unexpected("label matching", "\",\" or \"}\"")
yyVAL.matchers = yyDollar[1].matchers
}
- case 89:
+ case 90:
yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:549
+//line generated_parser.y:554
{
yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item)
}
- case 90:
+ case 91:
yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:551
+//line generated_parser.y:556
{
yylex.(*parser).unexpected("label matching", "string")
yyVAL.matcher = nil
}
- case 91:
+ case 92:
yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:553
+//line generated_parser.y:558
{
yylex.(*parser).unexpected("label matching", "label matching operator")
yyVAL.matcher = nil
}
- case 92:
+ case 93:
yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:555
+//line generated_parser.y:560
{
yylex.(*parser).unexpected("label matching", "identifier or \"}\"")
yyVAL.matcher = nil
}
- case 93:
+ case 94:
yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:563
+//line generated_parser.y:568
{
yyVAL.labels = append(yyDollar[2].labels, labels.Label{Name: labels.MetricName, Value: yyDollar[1].item.Val})
sort.Sort(yyVAL.labels)
}
- case 94:
+ case 95:
yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:565
+//line generated_parser.y:570
{
yyVAL.labels = yyDollar[1].labels
}
- case 115:
+ case 116:
yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:572
+//line generated_parser.y:577
{
yyVAL.labels = labels.New(yyDollar[2].labels...)
}
- case 116:
+ case 117:
yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:574
+//line generated_parser.y:579
{
yyVAL.labels = labels.New(yyDollar[2].labels...)
}
- case 117:
+ case 118:
yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:576
+//line generated_parser.y:581
{
yyVAL.labels = labels.New()
}
- case 118:
+ case 119:
yyDollar = yyS[yypt-0 : yypt+1]
-//line generated_parser.y:578
+//line generated_parser.y:583
{
yyVAL.labels = labels.New()
}
- case 119:
+ case 120:
yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:582
+//line generated_parser.y:587
{
yyVAL.labels = append(yyDollar[1].labels, yyDollar[3].label)
}
- case 120:
+ case 121:
yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:584
+//line generated_parser.y:589
{
yyVAL.labels = []labels.Label{yyDollar[1].label}
}
- case 121:
+ case 122:
yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:586
+//line generated_parser.y:591
{
yylex.(*parser).unexpected("label set", "\",\" or \"}\"")
yyVAL.labels = yyDollar[1].labels
}
- case 122:
+ case 123:
yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:591
+//line generated_parser.y:596
{
yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)}
}
- case 123:
+ case 124:
yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:593
+//line generated_parser.y:598
{
yylex.(*parser).unexpected("label set", "string")
yyVAL.label = labels.Label{}
}
- case 124:
+ case 125:
yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:595
+//line generated_parser.y:600
{
yylex.(*parser).unexpected("label set", "\"=\"")
yyVAL.label = labels.Label{}
}
- case 125:
+ case 126:
yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:597
+//line generated_parser.y:602
{
yylex.(*parser).unexpected("label set", "identifier or \"}\"")
yyVAL.label = labels.Label{}
}
- case 126:
+ case 127:
yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:605
+//line generated_parser.y:610
{
yylex.(*parser).generatedParserResult = &seriesDescription{
labels: yyDollar[1].labels,
values: yyDollar[2].series,
}
}
- case 127:
+ case 128:
yyDollar = yyS[yypt-0 : yypt+1]
-//line generated_parser.y:614
+//line generated_parser.y:619
{
yyVAL.series = []SequenceValue{}
}
- case 128:
+ case 129:
yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:616
+//line generated_parser.y:621
{
yyVAL.series = append(yyDollar[1].series, yyDollar[3].series...)
}
- case 129:
+ case 130:
yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:618
+//line generated_parser.y:623
{
yyVAL.series = yyDollar[1].series
}
- case 130:
+ case 131:
yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:620
+//line generated_parser.y:625
{
yylex.(*parser).unexpected("series values", "")
yyVAL.series = nil
}
- case 131:
+ case 132:
yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:624
+//line generated_parser.y:629
{
yyVAL.series = []SequenceValue{{Omitted: true}}
}
- case 132:
+ case 133:
yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:626
+//line generated_parser.y:631
{
yyVAL.series = []SequenceValue{}
for i := uint64(0); i < yyDollar[3].uint; i++ {
yyVAL.series = append(yyVAL.series, SequenceValue{Omitted: true})
}
}
- case 133:
+ case 134:
yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:633
+//line generated_parser.y:638
{
yyVAL.series = []SequenceValue{{Value: yyDollar[1].float}}
}
- case 134:
+ case 135:
yyDollar = yyS[yypt-3 : yypt+1]
-//line generated_parser.y:635
+//line generated_parser.y:640
{
yyVAL.series = []SequenceValue{}
for i := uint64(0); i <= yyDollar[3].uint; i++ {
yyVAL.series = append(yyVAL.series, SequenceValue{Value: yyDollar[1].float})
}
}
- case 135:
+ case 136:
yyDollar = yyS[yypt-4 : yypt+1]
-//line generated_parser.y:642
+//line generated_parser.y:647
{
yyVAL.series = []SequenceValue{}
for i := uint64(0); i <= yyDollar[4].uint; i++ {
@@ -1614,45 +1622,45 @@ yydefault:
yyDollar[1].float += yyDollar[2].float
}
}
- case 136:
+ case 137:
yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:652
+//line generated_parser.y:657
{
if yyDollar[1].item.Val != "stale" {
yylex.(*parser).unexpected("series values", "number or \"stale\"")
}
yyVAL.float = math.Float64frombits(value.StaleNaN)
}
- case 181:
+ case 182:
yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:683
+//line generated_parser.y:688
{
yyVAL.node = &NumberLiteral{
Val: yylex.(*parser).number(yyDollar[1].item.Val),
PosRange: yyDollar[1].item.PositionRange(),
}
}
- case 182:
+ case 183:
yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:691
+//line generated_parser.y:696
{
yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val)
}
- case 183:
+ case 184:
yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:693
+//line generated_parser.y:698
{
yyVAL.float = yyDollar[2].float
}
- case 184:
+ case 185:
yyDollar = yyS[yypt-2 : yypt+1]
-//line generated_parser.y:694
+//line generated_parser.y:699
{
yyVAL.float = -yyDollar[2].float
}
- case 187:
+ case 188:
yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:700
+//line generated_parser.y:705
{
var err error
yyVAL.uint, err = strconv.ParseUint(yyDollar[1].item.Val, 10, 64)
@@ -1660,9 +1668,9 @@ yydefault:
yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err)
}
}
- case 188:
+ case 189:
yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:710
+//line generated_parser.y:715
{
var err error
yyVAL.duration, err = parseDuration(yyDollar[1].item.Val)
@@ -1670,24 +1678,24 @@ yydefault:
yylex.(*parser).addParseErr(yyDollar[1].item.PositionRange(), err)
}
}
- case 189:
+ case 190:
yyDollar = yyS[yypt-1 : yypt+1]
-//line generated_parser.y:721
+//line generated_parser.y:726
{
yyVAL.node = &StringLiteral{
Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val),
PosRange: yyDollar[1].item.PositionRange(),
}
}
- case 190:
+ case 191:
yyDollar = yyS[yypt-0 : yypt+1]
-//line generated_parser.y:734
+//line generated_parser.y:739
{
yyVAL.duration = 0
}
- case 192:
+ case 193:
yyDollar = yyS[yypt-0 : yypt+1]
-//line generated_parser.y:738
+//line generated_parser.y:743
{
yyVAL.strings = nil
}
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/lex.go b/vendor/github.com/prometheus/prometheus/promql/parser/lex.go
index ece762dc4dab6..313bd8f88b05c 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/lex.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/lex.go
@@ -583,8 +583,12 @@ func lexEscape(l *Lexer) stateFn {
return lexString
}
x = x*base + d
- ch = l.next()
n--
+
+ // Don't seek after last rune.
+ if n > 0 {
+ ch = l.next()
+ }
}
if x > max || 0xD800 <= x && x < 0xE000 {
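
The lexer fix above is an off-by-one: the old loop consumed one rune past the final escape digit, swallowing the character that should start the next token. A simplified, self-contained sketch of the corrected loop shape (digit decoding is reduced to decimal for brevity; `next()` and friends are replaced by explicit cursor handling):

```go
package main

import "fmt"

// decodeDigits mirrors the fixed lexEscape loop: consume exactly n digits,
// advancing the cursor only while more digits are expected, so the rune
// after the escape sequence is left for the next state function.
func decodeDigits(input []rune, pos *int, n, base int) int {
	x := 0
	ch := input[*pos]
	for n > 0 {
		x = x*base + int(ch-'0')
		n--
		// Don't seek after the last rune of the sequence.
		if n > 0 {
			*pos++
			ch = input[*pos]
		}
	}
	return x
}

func main() {
	in := []rune(`123"`)
	pos := 0
	fmt.Println(decodeDigits(in, &pos, 3, 10)) // 123
	fmt.Println(string(in[pos+1]))             // the quote is still unconsumed
}
```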
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/printer.go b/vendor/github.com/prometheus/prometheus/promql/parser/printer.go
index 0b6368ed243bd..c5f80eb0b6083 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/printer.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/printer.go
@@ -116,8 +116,10 @@ func (node *MatrixSelector) String() string {
// Copy the Vector selector before changing the offset
vecSelector := *node.VectorSelector.(*VectorSelector)
offset := ""
- if vecSelector.OriginalOffset != time.Duration(0) {
+ if vecSelector.OriginalOffset > time.Duration(0) {
offset = fmt.Sprintf(" offset %s", model.Duration(vecSelector.OriginalOffset))
+ } else if vecSelector.OriginalOffset < time.Duration(0) {
+ offset = fmt.Sprintf(" offset -%s", model.Duration(-vecSelector.OriginalOffset))
}
at := ""
if vecSelector.Timestamp != nil {
@@ -147,8 +149,10 @@ func (node *SubqueryExpr) String() string {
step = model.Duration(node.Step).String()
}
offset := ""
- if node.OriginalOffset != time.Duration(0) {
+ if node.OriginalOffset > time.Duration(0) {
offset = fmt.Sprintf(" offset %s", model.Duration(node.OriginalOffset))
+ } else if node.OriginalOffset < time.Duration(0) {
+ offset = fmt.Sprintf(" offset -%s", model.Duration(-node.OriginalOffset))
}
at := ""
if node.Timestamp != nil {
@@ -187,8 +191,10 @@ func (node *VectorSelector) String() string {
labelStrings = append(labelStrings, matcher.String())
}
offset := ""
- if node.OriginalOffset != time.Duration(0) {
+ if node.OriginalOffset > time.Duration(0) {
offset = fmt.Sprintf(" offset %s", model.Duration(node.OriginalOffset))
+ } else if node.OriginalOffset < time.Duration(0) {
+ offset = fmt.Sprintf(" offset -%s", model.Duration(-node.OriginalOffset))
}
at := ""
if node.Timestamp != nil {
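
The three printer hunks apply the same rule to matrix selectors, subqueries, and vector selectors: positive offsets print as before, negative ones get an explicit minus in front of a sign-flipped duration, since `model.Duration`'s string form is defined for non-negative values. A minimal sketch of that rule, assuming `model` from `github.com/prometheus/common`:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

// offsetString mirrors the printer change: nothing for zero, " offset <d>"
// for positive offsets, and " offset -<d>" for negative ones.
func offsetString(d time.Duration) string {
	switch {
	case d > 0:
		return fmt.Sprintf(" offset %s", model.Duration(d))
	case d < 0:
		return fmt.Sprintf(" offset -%s", model.Duration(-d))
	}
	return ""
}

func main() {
	fmt.Println("up" + offsetString(5*time.Minute))  // up offset 5m
	fmt.Println("up" + offsetString(-5*time.Minute)) // up offset -5m
}
```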
diff --git a/vendor/github.com/prometheus/prometheus/promql/test.go b/vendor/github.com/prometheus/prometheus/promql/test.go
index 77e133eed4fa3..a60b408907f19 100644
--- a/vendor/github.com/prometheus/prometheus/promql/test.go
+++ b/vendor/github.com/prometheus/prometheus/promql/test.go
@@ -108,6 +108,15 @@ func (t *Test) TSDB() *tsdb.DB {
return t.storage.DB
}
+// ExemplarStorage returns the test's exemplar storage.
+func (t *Test) ExemplarStorage() storage.ExemplarStorage {
+ return t.storage
+}
+
+func (t *Test) ExemplarQueryable() storage.ExemplarQueryable {
+ return t.storage.ExemplarQueryable()
+}
+
func raise(line int, format string, v ...interface{}) error {
return &parser.ParseErr{
LineOffset: line,
@@ -307,7 +316,7 @@ func (cmd *loadCmd) append(a storage.Appender) error {
m := cmd.metrics[h]
for _, s := range smpls {
- if _, err := a.Add(m, s.T, s.V); err != nil {
+ if _, err := a.Append(0, m, s.T, s.V); err != nil {
return err
}
}
@@ -657,7 +666,8 @@ type LazyLoader struct {
loadCmd *loadCmd
- storage storage.Storage
+ storage storage.Storage
+ SubqueryInterval time.Duration
queryEngine *Engine
context context.Context
@@ -710,11 +720,12 @@ func (ll *LazyLoader) clear() {
ll.storage = teststorage.New(ll)
opts := EngineOpts{
- Logger: nil,
- Reg: nil,
- MaxSamples: 10000,
- Timeout: 100 * time.Second,
- EnableAtModifier: true,
+ Logger: nil,
+ Reg: nil,
+ MaxSamples: 10000,
+ Timeout: 100 * time.Second,
+ NoStepSubqueryIntervalFn: func(int64) int64 { return durationMilliseconds(ll.SubqueryInterval) },
+ EnableAtModifier: true,
}
ll.queryEngine = NewEngine(opts)
@@ -732,7 +743,7 @@ func (ll *LazyLoader) appendTill(ts int64) error {
ll.loadCmd.defs[h] = smpls[i:]
break
}
- if _, err := app.Add(m, s.T, s.V); err != nil {
+ if _, err := app.Append(0, m, s.T, s.V); err != nil {
return err
}
if i == len(smpls)-1 {
diff --git a/vendor/github.com/prometheus/prometheus/rules/manager.go b/vendor/github.com/prometheus/prometheus/rules/manager.go
index 129d47638f69e..04ce2f30989e8 100644
--- a/vendor/github.com/prometheus/prometheus/rules/manager.go
+++ b/vendor/github.com/prometheus/prometheus/rules/manager.go
@@ -592,6 +592,9 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
vector, err := rule.Eval(ctx, ts, g.opts.QueryFunc, g.opts.ExternalURL)
if err != nil {
+ rule.SetHealth(HealthBad)
+ rule.SetLastError(err)
+
// Canceled queries are intentional termination of queries. This normally
// happens on shutdown and thus we skip logging of any errors here.
if _, ok := err.(promql.ErrQueryCanceled); !ok {
@@ -616,13 +619,20 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
seriesReturned := make(map[string]labels.Labels, len(g.seriesInPreviousEval[i]))
defer func() {
if err := app.Commit(); err != nil {
+ rule.SetHealth(HealthBad)
+ rule.SetLastError(err)
+
level.Warn(g.logger).Log("msg", "Rule sample appending failed", "err", err)
return
}
g.seriesInPreviousEval[i] = seriesReturned
}()
+
for _, s := range vector {
- if _, err := app.Add(s.Metric, s.T, s.V); err != nil {
+ if _, err := app.Append(0, s.Metric, s.T, s.V); err != nil {
+ rule.SetHealth(HealthBad)
+ rule.SetLastError(err)
+
switch errors.Cause(err) {
case storage.ErrOutOfOrderSample:
numOutOfOrder++
@@ -647,7 +657,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
for metric, lset := range g.seriesInPreviousEval[i] {
if _, ok := seriesReturned[metric]; !ok {
// Series no longer exposed, mark it stale.
- _, err = app.Add(lset, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
+ _, err = app.Append(0, lset, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
switch errors.Cause(err) {
case nil:
case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp:
@@ -673,7 +683,7 @@ func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) {
app := g.opts.Appendable.Appender(ctx)
for _, s := range g.staleSeries {
// Rule that produced series no longer configured, mark it stale.
- _, err := app.Add(s, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
+ _, err := app.Append(0, s, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
switch errors.Cause(err) {
case nil:
case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp:
diff --git a/vendor/github.com/prometheus/prometheus/rules/recording.go b/vendor/github.com/prometheus/prometheus/rules/recording.go
index cec1a0fad488e..2665506af497b 100644
--- a/vendor/github.com/prometheus/prometheus/rules/recording.go
+++ b/vendor/github.com/prometheus/prometheus/rules/recording.go
@@ -76,8 +76,6 @@ func (rule *RecordingRule) Labels() labels.Labels {
func (rule *RecordingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, _ *url.URL) (promql.Vector, error) {
vector, err := query(ctx, rule.vector.String(), ts)
if err != nil {
- rule.SetHealth(HealthBad)
- rule.SetLastError(err)
return nil, err
}
// Override the metric name and labels.
diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go
index bd89b69f6f22a..7967c309f47d0 100644
--- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go
+++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go
@@ -38,6 +38,7 @@ import (
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery/targetgroup"
+ "github.com/prometheus/prometheus/pkg/exemplar"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/pool"
"github.com/prometheus/prometheus/pkg/relabel"
@@ -142,19 +143,19 @@ var (
targetScrapeSampleDuplicate = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "prometheus_target_scrapes_sample_duplicate_timestamp_total",
- Help: "Total number of samples rejected due to duplicate timestamps but different values",
+ Help: "Total number of samples rejected due to duplicate timestamps but different values.",
},
)
targetScrapeSampleOutOfOrder = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "prometheus_target_scrapes_sample_out_of_order_total",
- Help: "Total number of samples rejected due to not being out of the expected order",
+ Help: "Total number of samples rejected due to not being out of the expected order.",
},
)
targetScrapeSampleOutOfBounds = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "prometheus_target_scrapes_sample_out_of_bounds_total",
- Help: "Total number of samples rejected due to timestamp falling outside of the time bounds",
+ Help: "Total number of samples rejected due to timestamp falling outside of the time bounds.",
},
)
targetScrapeCacheFlushForced = prometheus.NewCounter(
@@ -163,6 +164,12 @@ var (
Help: "How many times a scrape cache was flushed due to getting big while scrapes are failing.",
},
)
+ targetScrapeExemplarOutOfOrder = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Name: "prometheus_target_scrapes_exemplar_out_of_order_total",
+			Help: "Total number of exemplars rejected due to being out of the expected order.",
+ },
+ )
)
func init() {
@@ -184,6 +191,7 @@ func init() {
targetScrapePoolTargetsAdded,
targetScrapeCacheFlushForced,
targetMetadataCache,
+ targetScrapeExemplarOutOfOrder,
)
}
@@ -1005,7 +1013,7 @@ func (sl *scrapeLoop) run(interval, timeout time.Duration, errc chan<- error) {
var last time.Time
- alignedScrapeTime := time.Now()
+ alignedScrapeTime := time.Now().Round(0)
ticker := time.NewTicker(interval)
defer ticker.Stop()
@@ -1023,7 +1031,9 @@ mainLoop:
// Temporary workaround for a jitter in go timers that causes disk space
// increase in TSDB.
// See https://github.com/prometheus/prometheus/issues/7846
- scrapeTime := time.Now()
+ // Calling Round ensures the time used is the wall clock, as otherwise .Sub
+ // and .Add on time.Time behave differently (see time package docs).
+ scrapeTime := time.Now().Round(0)
if AlignScrapeTimestamps && interval > 100*scrapeTimestampTolerance {
// For some reason, a tick might have been skipped, in which case we
// would call alignedScrapeTime.Add(interval) multiple times.
@@ -1243,9 +1253,10 @@ func (sl *scrapeLoop) getCache() *scrapeCache {
}
type appendErrors struct {
- numOutOfOrder int
- numDuplicates int
- numOutOfBounds int
+ numOutOfOrder int
+ numDuplicates int
+ numOutOfBounds int
+ numExemplarOutOfOrder int
}
func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) {
@@ -1270,6 +1281,7 @@ loop:
var (
et textparse.Entry
sampleAdded bool
+ e exemplar.Exemplar
)
if et, err = p.Next(); err != nil {
if err == io.EOF {
@@ -1306,20 +1318,19 @@ loop:
continue
}
ce, ok := sl.cache.get(yoloString(met))
+ var (
+ ref uint64
+ lset labels.Labels
+ mets string
+ hash uint64
+ )
if ok {
- err = app.AddFast(ce.ref, t, v)
- _, err = sl.checkAddError(ce, met, tp, err, &sampleLimitErr, &appErrs)
- // In theory this should never happen.
- if err == storage.ErrNotFound {
- ok = false
- }
- }
- if !ok {
- var lset labels.Labels
-
- mets := p.Metric(&lset)
- hash := lset.Hash()
+ ref = ce.ref
+ lset = ce.lset
+ } else {
+ mets = p.Metric(&lset)
+ hash = lset.Hash()
// Hash label set as it is seen local to the target. Then add target labels
// and relabeling and store the final label set.
@@ -1335,17 +1346,18 @@ loop:
err = errNameLabelMandatory
break loop
}
+ }
- var ref uint64
- ref, err = app.Add(lset, t, v)
- sampleAdded, err = sl.checkAddError(nil, met, tp, err, &sampleLimitErr, &appErrs)
- if err != nil {
- if err != storage.ErrNotFound {
- level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err)
- }
- break loop
+ ref, err = app.Append(ref, lset, t, v)
+ sampleAdded, err = sl.checkAddError(ce, met, tp, err, &sampleLimitErr, &appErrs)
+ if err != nil {
+ if err != storage.ErrNotFound {
+ level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err)
}
+ break loop
+ }
+ if !ok {
if tp == nil {
// Bypass staleness logic if there is an explicit timestamp.
sl.cache.trackStaleness(hash, lset)
@@ -1360,6 +1372,18 @@ loop:
// number of samples remaining after relabeling.
added++
+ if hasExemplar := p.Exemplar(&e); hasExemplar {
+ if !e.HasTs {
+ e.Ts = t
+ }
+ _, exemplarErr := app.AppendExemplar(ref, lset, e)
+ exemplarErr = sl.checkAddExemplarError(exemplarErr, e, &appErrs)
+ if exemplarErr != nil {
+ // Since exemplar storage is still experimental, we don't fail the scrape on ingestion errors.
+				level.Debug(sl.l).Log("msg", "Error while adding exemplar in AppendExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr)
+ }
+ }
+
}
if sampleLimitErr != nil {
if err == nil {
@@ -1377,10 +1401,13 @@ loop:
if appErrs.numOutOfBounds > 0 {
level.Warn(sl.l).Log("msg", "Error on ingesting samples that are too old or are too far into the future", "num_dropped", appErrs.numOutOfBounds)
}
+ if appErrs.numExemplarOutOfOrder > 0 {
+ level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", appErrs.numExemplarOutOfOrder)
+ }
if err == nil {
sl.cache.forEachStale(func(lset labels.Labels) bool {
// Series no longer exposed, mark it stale.
- _, err = app.Add(lset, defTime, math.Float64frombits(value.StaleNaN))
+ _, err = app.Append(0, lset, defTime, math.Float64frombits(value.StaleNaN))
switch errors.Cause(err) {
case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp:
// Do not count these in logging, as this is expected if a target
@@ -1434,6 +1461,20 @@ func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err e
}
}
+func (sl *scrapeLoop) checkAddExemplarError(err error, e exemplar.Exemplar, appErrs *appendErrors) error {
+ switch errors.Cause(err) {
+ case storage.ErrNotFound:
+ return storage.ErrNotFound
+ case storage.ErrOutOfOrderExemplar:
+ appErrs.numExemplarOutOfOrder++
+ level.Debug(sl.l).Log("msg", "Out of order exemplar", "exemplar", fmt.Sprintf("%+v", e))
+ targetScrapeExemplarOutOfOrder.Inc()
+ return nil
+ default:
+ return err
+ }
+}
+
// The constants are suffixed with the invalid \xff unicode rune to avoid collisions
// with scraped metrics in the cache.
const (
@@ -1497,37 +1538,31 @@ func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err er
func (sl *scrapeLoop) addReportSample(app storage.Appender, s string, t int64, v float64) error {
ce, ok := sl.cache.get(s)
+ var ref uint64
+ var lset labels.Labels
if ok {
- err := app.AddFast(ce.ref, t, v)
- switch errors.Cause(err) {
- case nil:
- return nil
- case storage.ErrNotFound:
- // Try an Add.
- case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp:
- // Do not log here, as this is expected if a target goes away and comes back
- // again with a new scrape loop.
- return nil
- default:
- return err
+ ref = ce.ref
+ lset = ce.lset
+ } else {
+ lset = labels.Labels{
+ // The constants are suffixed with the invalid \xff unicode rune to avoid collisions
+ // with scraped metrics in the cache.
+ // We have to drop it when building the actual metric.
+ labels.Label{Name: labels.MetricName, Value: s[:len(s)-1]},
}
+ lset = sl.reportSampleMutator(lset)
}
- lset := labels.Labels{
- // The constants are suffixed with the invalid \xff unicode rune to avoid collisions
- // with scraped metrics in the cache.
- // We have to drop it when building the actual metric.
- labels.Label{Name: labels.MetricName, Value: s[:len(s)-1]},
- }
-
- hash := lset.Hash()
- lset = sl.reportSampleMutator(lset)
- ref, err := app.Add(lset, t, v)
+ ref, err := app.Append(ref, lset, t, v)
switch errors.Cause(err) {
case nil:
- sl.cache.addRef(s, ref, lset, hash)
+ if !ok {
+ sl.cache.addRef(s, ref, lset, lset.Hash())
+ }
return nil
case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp:
+ // Do not log here, as this is expected if a target goes away and comes back
+ // again with a new scrape loop.
return nil
default:
return err
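
`addReportSample` above shows the shape of the new `storage.Appender` contract used throughout this diff: `Add`/`AddFast` collapse into a single `Append(ref, lset, t, v)`, where a zero ref means "resolve the series by labels" and a cached non-zero ref is a pure fast path. A rough compiling sketch of that calling pattern; `cacheEntry` and the map are stand-ins for the scrape cache:

```go
package scrapesketch

import (
	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/storage"
)

// cacheEntry is a stand-in for the scrape cache entry: a series ref plus
// the label set it was resolved to.
type cacheEntry struct {
	ref  uint64
	lset labels.Labels
}

// appendSample funnels both the cached and uncached cases through the
// single Append call, as the reworked scrape loop does.
func appendSample(app storage.Appender, cache map[string]cacheEntry, key string, lset labels.Labels, t int64, v float64) error {
	var ref uint64
	if ce, ok := cache[key]; ok {
		// Fast path: a non-zero ref lets the storage skip the label lookup.
		ref, lset = ce.ref, ce.lset
	}
	ref, err := app.Append(ref, lset, t, v)
	if err != nil {
		return err
	}
	cache[key] = cacheEntry{ref: ref, lset: lset}
	return nil
}
```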
diff --git a/vendor/github.com/prometheus/prometheus/scrape/target.go b/vendor/github.com/prometheus/prometheus/scrape/target.go
index 2b4c4301ca4cf..f3dd2d0c066e2 100644
--- a/vendor/github.com/prometheus/prometheus/scrape/target.go
+++ b/vendor/github.com/prometheus/prometheus/scrape/target.go
@@ -290,57 +290,38 @@ type limitAppender struct {
i int
}
-func (app *limitAppender) Add(lset labels.Labels, t int64, v float64) (uint64, error) {
+func (app *limitAppender) Append(ref uint64, lset labels.Labels, t int64, v float64) (uint64, error) {
if !value.IsStaleNaN(v) {
app.i++
if app.i > app.limit {
return 0, errSampleLimit
}
}
- ref, err := app.Appender.Add(lset, t, v)
+ ref, err := app.Appender.Append(ref, lset, t, v)
if err != nil {
return 0, err
}
return ref, nil
}
-func (app *limitAppender) AddFast(ref uint64, t int64, v float64) error {
- if !value.IsStaleNaN(v) {
- app.i++
- if app.i > app.limit {
- return errSampleLimit
- }
- }
- err := app.Appender.AddFast(ref, t, v)
- return err
-}
-
type timeLimitAppender struct {
storage.Appender
maxTime int64
}
-func (app *timeLimitAppender) Add(lset labels.Labels, t int64, v float64) (uint64, error) {
+func (app *timeLimitAppender) Append(ref uint64, lset labels.Labels, t int64, v float64) (uint64, error) {
if t > app.maxTime {
return 0, storage.ErrOutOfBounds
}
- ref, err := app.Appender.Add(lset, t, v)
+ ref, err := app.Appender.Append(ref, lset, t, v)
if err != nil {
return 0, err
}
return ref, nil
}
-func (app *timeLimitAppender) AddFast(ref uint64, t int64, v float64) error {
- if t > app.maxTime {
- return storage.ErrOutOfBounds
- }
- err := app.Appender.AddFast(ref, t, v)
- return err
-}
-
// populateLabels builds a label set from the given label set and scrape configuration.
// It returns a label set before relabeling was applied as the second return value.
// Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling.
diff --git a/vendor/github.com/prometheus/prometheus/storage/fanout.go b/vendor/github.com/prometheus/prometheus/storage/fanout.go
index 4bc3db12d9f68..b737d63008b15 100644
--- a/vendor/github.com/prometheus/prometheus/storage/fanout.go
+++ b/vendor/github.com/prometheus/prometheus/storage/fanout.go
@@ -20,6 +20,7 @@ import (
"github.com/go-kit/kit/log/level"
"github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/pkg/exemplar"
"github.com/prometheus/prometheus/pkg/labels"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
)
@@ -143,31 +144,32 @@ type fanoutAppender struct {
secondaries []Appender
}
-func (f *fanoutAppender) Add(l labels.Labels, t int64, v float64) (uint64, error) {
- ref, err := f.primary.Add(l, t, v)
+func (f *fanoutAppender) Append(ref uint64, l labels.Labels, t int64, v float64) (uint64, error) {
+ ref, err := f.primary.Append(ref, l, t, v)
if err != nil {
return ref, err
}
for _, appender := range f.secondaries {
- if _, err := appender.Add(l, t, v); err != nil {
+ if _, err := appender.Append(ref, l, t, v); err != nil {
return 0, err
}
}
return ref, nil
}
-func (f *fanoutAppender) AddFast(ref uint64, t int64, v float64) error {
- if err := f.primary.AddFast(ref, t, v); err != nil {
- return err
+func (f *fanoutAppender) AppendExemplar(ref uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) {
+ ref, err := f.primary.AppendExemplar(ref, l, e)
+ if err != nil {
+ return ref, err
}
for _, appender := range f.secondaries {
- if err := appender.AddFast(ref, t, v); err != nil {
- return err
+ if _, err := appender.AppendExemplar(ref, l, e); err != nil {
+ return 0, err
}
}
- return nil
+ return ref, nil
}
func (f *fanoutAppender) Commit() (err error) {
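For the fan-out semantics above — the primary's ref and errors win, and secondaries are written with that same ref — typical wiring might look like the sketch below. The NewFanout constructor and its signature are assumed from this vendored storage package:

```go
package example

import (
	"github.com/go-kit/kit/log"
	"github.com/prometheus/prometheus/storage"
)

// buildFanout wires one primary store (whose refs and errors win) with a
// best-effort secondary.
func buildFanout(primary, secondary storage.Storage) storage.Storage {
	return storage.NewFanout(log.NewNopLogger(), primary, secondary)
}
```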
diff --git a/vendor/github.com/prometheus/prometheus/storage/interface.go b/vendor/github.com/prometheus/prometheus/storage/interface.go
index 711c253a7656b..eb2b5975ff63c 100644
--- a/vendor/github.com/prometheus/prometheus/storage/interface.go
+++ b/vendor/github.com/prometheus/prometheus/storage/interface.go
@@ -17,6 +17,7 @@ import (
"context"
"errors"
+ "github.com/prometheus/prometheus/pkg/exemplar"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
@@ -28,6 +29,7 @@ var (
ErrOutOfOrderSample = errors.New("out of order sample")
ErrDuplicateSampleForTimestamp = errors.New("duplicate sample for timestamp")
ErrOutOfBounds = errors.New("out of bounds")
+ ErrOutOfOrderExemplar = errors.New("out of order exemplar")
)
// Appendable allows creating appenders.
@@ -45,7 +47,7 @@ type SampleAndChunkQueryable interface {
}
// Storage ingests and manages samples, along with various indexes. All methods
-// are goroutine-safe. Storage implements storage.SampleAppender.
+// are goroutine-safe. Storage implements storage.Appender.
type Storage interface {
SampleAndChunkQueryable
Appendable
@@ -57,6 +59,13 @@ type Storage interface {
Close() error
}
+// ExemplarStorage ingests and manages exemplars, along with various indexes. All methods are
+// goroutine-safe. ExemplarStorage implements storage.ExemplarAppender and storage.ExemplarQuerier.
+type ExemplarStorage interface {
+ ExemplarQueryable
+ ExemplarAppender
+}
+
// A Queryable handles queries against a storage.
// Use it when you need to have access to all samples without chunk encoding abstraction e.g promQL.
type Queryable interface {
@@ -107,6 +116,18 @@ type LabelQuerier interface {
Close() error
}
+type ExemplarQueryable interface {
+ // ExemplarQuerier returns a new ExemplarQuerier on the storage.
+ ExemplarQuerier(ctx context.Context) (ExemplarQuerier, error)
+}
+
+// ExemplarQuerier provides reading access to time series exemplar data.
+type ExemplarQuerier interface {
+ // Select all the exemplars that match the matchers.
+ // Within a single slice of matchers, it is an intersection. Between the slices, it is a union.
+ Select(start, end int64, matchers ...[]*labels.Matcher) ([]exemplar.QueryResult, error)
+}
+
// SelectHints specifies hints passed for data selections.
// This is used only as an option for implementation to use.
type SelectHints struct {
@@ -136,18 +157,15 @@ func (f QueryableFunc) Querier(ctx context.Context, mint, maxt int64) (Querier,
//
// Operations on the Appender interface are not goroutine-safe.
type Appender interface {
- // Add adds a sample pair for the given series. A reference number is
- // returned which can be used to add further samples in the same or later
- // transactions.
+ // Append adds a sample pair for the given series.
+ // An optional reference number can be provided to accelerate calls.
+ // A reference number is returned which can be used to add further
+ // samples in the same or later transactions.
// Returned reference numbers are ephemeral and may be rejected in calls
- // to AddFast() at any point. Adding the sample via Add() returns a new
+ // to Append() at any point. Adding the sample via Append() returns a new
// reference number.
// If the reference is 0 it must not be used for caching.
- Add(l labels.Labels, t int64, v float64) (uint64, error)
-
- // AddFast adds a sample pair for the referenced series. It is generally
- // faster than adding a sample by providing its full label set.
- AddFast(ref uint64, t int64, v float64) error
+ Append(ref uint64, l labels.Labels, t int64, v float64) (uint64, error)
// Commit submits the collected samples and purges the batch. If Commit
// returns a non-nil error, it also rolls back all modifications made in
@@ -158,6 +176,33 @@ type Appender interface {
// Rollback rolls back all modifications made in the appender so far.
// Appender has to be discarded after rollback.
Rollback() error
+
+ ExemplarAppender
+}
+
+// GetRef is an extra interface on Appenders used by downstream projects
+// (e.g. Cortex) to avoid maintaining a parallel set of references.
+type GetRef interface {
+	// GetRef returns a reference number that can be passed to Appender.Append().
+ // 0 means the appender does not have a reference to this series.
+ GetRef(lset labels.Labels) uint64
+}
+
+// ExemplarAppender provides an interface for adding samples to exemplar storage, which
+// within Prometheus is in-memory only.
+type ExemplarAppender interface {
+ // AppendExemplar adds an exemplar for the given series labels.
+ // An optional reference number can be provided to accelerate calls.
+ // A reference number is returned which can be used to add further
+ // exemplars in the same or later transactions.
+ // Returned reference numbers are ephemeral and may be rejected in calls
+ // to Append() at any point. Adding the sample via Append() returns a new
+ // reference number.
+ // If the reference is 0 it must not be used for caching.
+	// Note that in our current implementation of Prometheus' exemplar storage,
+	// calls to Append should generate the reference numbers; an AppendExemplar
+	// call that generates a new reference number should be treated as possibly
+	// erroneous behaviour and be logged.
+ AppendExemplar(ref uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error)
}
// SeriesSet contains a set of series.
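With the Add/AddFast pair collapsed into a single Append, callers migrate by threading the returned ref back into the next call. A minimal sketch of the post-migration calling convention; the sample data and Commit placement are illustrative:

```go
package example

import (
	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/storage"
)

// appendSeries starts with ref 0 ("no cached reference") and reuses whatever
// Append returns, which replaces the old Add-then-AddFast dance.
func appendSeries(app storage.Appender, lset labels.Labels, ts []int64, vs []float64) error {
	var ref uint64
	for i := range ts {
		r, err := app.Append(ref, lset, ts[i], vs[i])
		if err != nil {
			return err
		}
		ref = r // later iterations take the fast path
	}
	return app.Commit()
}
```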
diff --git a/vendor/github.com/prometheus/prometheus/storage/memoized_iterator.go b/vendor/github.com/prometheus/prometheus/storage/memoized_iterator.go
new file mode 100644
index 0000000000000..0c40bb9d0ee59
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/storage/memoized_iterator.go
@@ -0,0 +1,123 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "math"
+
+ "github.com/prometheus/prometheus/tsdb/chunkenc"
+)
+
+// MemoizedSeriesIterator wraps an iterator with a buffer to look back the previous element.
+type MemoizedSeriesIterator struct {
+ it chunkenc.Iterator
+ delta int64
+
+ lastTime int64
+ ok bool
+
+ // Keep track of the previously returned value.
+ prevTime int64
+ prevValue float64
+}
+
+// NewMemoizedEmptyIterator is like NewMemoizedIterator but it's initialised with an empty iterator.
+func NewMemoizedEmptyIterator(delta int64) *MemoizedSeriesIterator {
+ return NewMemoizedIterator(chunkenc.NewNopIterator(), delta)
+}
+
+// NewMemoizedIterator returns a new iterator that buffers the values within the
+// time range of the current element and the duration of delta before.
+func NewMemoizedIterator(it chunkenc.Iterator, delta int64) *MemoizedSeriesIterator {
+ bit := &MemoizedSeriesIterator{
+ delta: delta,
+ prevTime: math.MinInt64,
+ }
+ bit.Reset(it)
+
+ return bit
+}
+
+// Reset the internal state to reuse the wrapper with the provided iterator.
+func (b *MemoizedSeriesIterator) Reset(it chunkenc.Iterator) {
+ b.it = it
+ b.lastTime = math.MinInt64
+ b.ok = true
+ b.prevTime = math.MinInt64
+ it.Next()
+}
+
+// PeekPrev returns the previous element of the iterator. If there is none buffered,
+// ok is false.
+func (b *MemoizedSeriesIterator) PeekPrev() (t int64, v float64, ok bool) {
+ if b.prevTime == math.MinInt64 {
+ return 0, 0, false
+ }
+ return b.prevTime, b.prevValue, true
+}
+
+// Seek advances the iterator to the element at time t or greater.
+func (b *MemoizedSeriesIterator) Seek(t int64) bool {
+ t0 := t - b.delta
+
+ if t0 > b.lastTime {
+ // Reset the previously stored element because the seek advanced
+ // more than the delta.
+ b.prevTime = math.MinInt64
+
+ b.ok = b.it.Seek(t0)
+ if !b.ok {
+ return false
+ }
+ b.lastTime, _ = b.it.At()
+ }
+
+ if b.lastTime >= t {
+ return true
+ }
+ for b.Next() {
+ if b.lastTime >= t {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Next advances the iterator to the next element.
+func (b *MemoizedSeriesIterator) Next() bool {
+ if !b.ok {
+ return false
+ }
+
+ // Keep track of the previous element.
+ b.prevTime, b.prevValue = b.it.At()
+
+ b.ok = b.it.Next()
+ if b.ok {
+ b.lastTime, _ = b.it.At()
+ }
+
+ return b.ok
+}
+
+// Values returns the current element of the iterator.
+func (b *MemoizedSeriesIterator) Values() (int64, float64) {
+ return b.it.At()
+}
+
+// Err returns the last encountered error.
+func (b *MemoizedSeriesIterator) Err() error {
+ return b.it.Err()
+}
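A short usage sketch for the new MemoizedSeriesIterator above: walk a series and compare each sample against the memoized previous one, which is the access pattern rate-style math needs. The 5-minute delta is an illustrative choice:

```go
package example

import (
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// deltas emits the difference between each sample and the memoized previous
// one; PeekPrev reports ok=false until a previous sample has been buffered.
func deltas(it chunkenc.Iterator) []float64 {
	mit := storage.NewMemoizedIterator(it, 5*60*1000) // look-back delta in ms
	var out []float64
	for mit.Next() {
		_, v := mit.Values()
		if _, pv, ok := mit.PeekPrev(); ok {
			out = append(out, v-pv)
		}
	}
	return out
}
```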
diff --git a/vendor/github.com/prometheus/prometheus/storage/merge.go b/vendor/github.com/prometheus/prometheus/storage/merge.go
index c57a24c76254b..fca052ca1e12a 100644
--- a/vendor/github.com/prometheus/prometheus/storage/merge.go
+++ b/vendor/github.com/prometheus/prometheus/storage/merge.go
@@ -467,6 +467,7 @@ func (c *chainSampleIterator) Seek(t int64) bool {
}
if len(c.h) > 0 {
c.curr = heap.Pop(&c.h).(chunkenc.Iterator)
+ c.lastt, _ = c.curr.At()
return true
}
c.curr = nil
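The one-line Seek fix above refreshes lastt when a new current iterator is popped off the heap, preserving the deduplication invariant across overlapping series. A toy, runnable illustration of that invariant; the merged slice stands in for what the heap would yield:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// Timestamps a heap-merge of two overlapping series might yield.
	merged := []int64{1, 2, 2, 3, 4, 4}
	lastt := int64(math.MinInt64)
	for _, t := range merged {
		if t == lastt {
			continue // duplicate from the overlapping iterator: drop it
		}
		lastt = t // the fix keeps this fresh across Seek as well
		fmt.Println(t)
	}
}
```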
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/client.go b/vendor/github.com/prometheus/prometheus/storage/remote/client.go
index e122dcbf4ee13..14bb8f7d7da2c 100644
--- a/vendor/github.com/prometheus/prometheus/storage/remote/client.go
+++ b/vendor/github.com/prometheus/prometheus/storage/remote/client.go
@@ -35,10 +35,11 @@ import (
"github.com/prometheus/common/model"
"github.com/prometheus/common/version"
+ "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/prompb"
)
-const maxErrMsgLen = 512
+const maxErrMsgLen = 1024
var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)
@@ -83,7 +84,8 @@ type Client struct {
url *config_util.URL
Client *http.Client
timeout time.Duration
- headers map[string]string
+
+ retryOnRateLimit bool
readQueries prometheus.Gauge
readQueriesTotal *prometheus.CounterVec
@@ -95,7 +97,9 @@ type ClientConfig struct {
URL *config_util.URL
Timeout model.Duration
HTTPClientConfig config_util.HTTPClientConfig
+ SigV4Config *config.SigV4Config
Headers map[string]string
+ RetryOnRateLimit bool
}
// ReadClient uses the SAMPLES method of remote read to read series samples from remote server.
@@ -112,6 +116,9 @@ func NewReadClient(name string, conf *ClientConfig) (ReadClient, error) {
}
t := httpClient.Transport
+ if len(conf.Headers) > 0 {
+ t = newInjectHeadersRoundTripper(conf.Headers, t)
+ }
httpClient.Transport = &nethttp.Transport{
RoundTripper: t,
}
@@ -133,21 +140,48 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
if err != nil {
return nil, err
}
-
t := httpClient.Transport
+
+ if conf.SigV4Config != nil {
+ t, err = newSigV4RoundTripper(conf.SigV4Config, httpClient.Transport)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if len(conf.Headers) > 0 {
+ t = newInjectHeadersRoundTripper(conf.Headers, t)
+ }
+
httpClient.Transport = &nethttp.Transport{
RoundTripper: t,
}
return &Client{
- remoteName: name,
- url: conf.URL,
- Client: httpClient,
- timeout: time.Duration(conf.Timeout),
- headers: conf.Headers,
+ remoteName: name,
+ url: conf.URL,
+ Client: httpClient,
+ retryOnRateLimit: conf.RetryOnRateLimit,
+ timeout: time.Duration(conf.Timeout),
}, nil
}
+func newInjectHeadersRoundTripper(h map[string]string, underlyingRT http.RoundTripper) *injectHeadersRoundTripper {
+ return &injectHeadersRoundTripper{headers: h, RoundTripper: underlyingRT}
+}
+
+type injectHeadersRoundTripper struct {
+ headers map[string]string
+ http.RoundTripper
+}
+
+func (t *injectHeadersRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ for key, value := range t.headers {
+ req.Header.Set(key, value)
+ }
+ return t.RoundTripper.RoundTrip(req)
+}
+
const defaultBackoff = 0
type RecoverableError struct {
@@ -164,9 +198,7 @@ func (c *Client) Store(ctx context.Context, req []byte) error {
// recoverable.
return err
}
- for k, v := range c.headers {
- httpReq.Header.Set(k, v)
- }
+
httpReq.Header.Add("Content-Encoding", "snappy")
httpReq.Header.Set("Content-Type", "application/x-protobuf")
httpReq.Header.Set("User-Agent", UserAgent)
@@ -209,7 +241,7 @@ func (c *Client) Store(ctx context.Context, req []byte) error {
if httpResp.StatusCode/100 == 5 {
return RecoverableError{err, defaultBackoff}
}
- if httpResp.StatusCode == http.StatusTooManyRequests {
+ if c.retryOnRateLimit && httpResp.StatusCode == http.StatusTooManyRequests {
return RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))}
}
return err
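The header-injecting round tripper above replaces the per-request header copying that Store used to do. A hedged local sketch of the same pattern for a standalone client; the type and header name below are illustrative, not the remote package's own:

```go
package example

import "net/http"

// headerRT sets extra headers, then delegates; like the vendored type it
// mutates the request in place rather than cloning it.
type headerRT struct {
	headers map[string]string
	next    http.RoundTripper
}

func (t headerRT) RoundTrip(req *http.Request) (*http.Response, error) {
	for k, v := range t.headers {
		req.Header.Set(k, v)
	}
	return t.next.RoundTrip(req)
}

// newClient returns a client that stamps every outgoing request; the header
// name is only an example.
func newClient() *http.Client {
	return &http.Client{Transport: headerRT{
		headers: map[string]string{"X-Scope-OrgID": "tenant-1"},
		next:    http.DefaultTransport,
	}}
}
```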
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go
new file mode 100644
index 0000000000000..e5c8ea9ee35be
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go
@@ -0,0 +1,272 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "context"
+ "net/http"
+ "sort"
+
+ "github.com/go-kit/kit/log"
+ "github.com/go-kit/kit/log/level"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/prometheus/config"
+ "github.com/prometheus/prometheus/pkg/gate"
+ "github.com/prometheus/prometheus/pkg/labels"
+ "github.com/prometheus/prometheus/prompb"
+ "github.com/prometheus/prometheus/storage"
+)
+
+type readHandler struct {
+ logger log.Logger
+ queryable storage.SampleAndChunkQueryable
+ config func() config.Config
+ remoteReadSampleLimit int
+ remoteReadMaxBytesInFrame int
+ remoteReadGate *gate.Gate
+ queries prometheus.Gauge
+}
+
+// NewReadHandler creates a http.Handler that accepts remote read requests and
+// writes them to the provided queryable.
+func NewReadHandler(logger log.Logger, r prometheus.Registerer, queryable storage.SampleAndChunkQueryable, config func() config.Config, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame int) http.Handler {
+ h := &readHandler{
+ logger: logger,
+ queryable: queryable,
+ config: config,
+ remoteReadSampleLimit: remoteReadSampleLimit,
+ remoteReadGate: gate.New(remoteReadConcurrencyLimit),
+ remoteReadMaxBytesInFrame: remoteReadMaxBytesInFrame,
+
+ queries: prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "prometheus",
+			Subsystem: "api", // TODO: change to "storage" in Prometheus 3.0.
+ Name: "remote_read_queries",
+ Help: "The current number of remote read queries being executed or waiting.",
+ }),
+ }
+ if r != nil {
+ r.MustRegister(h.queries)
+ }
+ return h
+}
+
+func (h *readHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ if err := h.remoteReadGate.Start(ctx); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ h.queries.Inc()
+
+ defer h.remoteReadGate.Done()
+ defer h.queries.Dec()
+
+ req, err := DecodeReadRequest(r)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ externalLabels := h.config().GlobalConfig.ExternalLabels.Map()
+
+ sortedExternalLabels := make([]prompb.Label, 0, len(externalLabels))
+ for name, value := range externalLabels {
+ sortedExternalLabels = append(sortedExternalLabels, prompb.Label{
+ Name: name,
+ Value: value,
+ })
+ }
+ sort.Slice(sortedExternalLabels, func(i, j int) bool {
+ return sortedExternalLabels[i].Name < sortedExternalLabels[j].Name
+ })
+
+ responseType, err := NegotiateResponseType(req.AcceptedResponseTypes)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ switch responseType {
+ case prompb.ReadRequest_STREAMED_XOR_CHUNKS:
+ h.remoteReadStreamedXORChunks(ctx, w, req, externalLabels, sortedExternalLabels)
+ default:
+ // On empty or unknown types in req.AcceptedResponseTypes we default to non streamed, raw samples response.
+ h.remoteReadSamples(ctx, w, req, externalLabels, sortedExternalLabels)
+ }
+}
+
+func (h *readHandler) remoteReadSamples(
+ ctx context.Context,
+ w http.ResponseWriter,
+ req *prompb.ReadRequest,
+ externalLabels map[string]string,
+ sortedExternalLabels []prompb.Label,
+) {
+ w.Header().Set("Content-Type", "application/x-protobuf")
+ w.Header().Set("Content-Encoding", "snappy")
+
+ resp := prompb.ReadResponse{
+ Results: make([]*prompb.QueryResult, len(req.Queries)),
+ }
+ for i, query := range req.Queries {
+ if err := func() error {
+ filteredMatchers, err := filterExtLabelsFromMatchers(query.Matchers, externalLabels)
+ if err != nil {
+ return err
+ }
+
+ querier, err := h.queryable.Querier(ctx, query.StartTimestampMs, query.EndTimestampMs)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err := querier.Close(); err != nil {
+ level.Warn(h.logger).Log("msg", "Error on querier close", "err", err.Error())
+ }
+ }()
+
+ var hints *storage.SelectHints
+ if query.Hints != nil {
+ hints = &storage.SelectHints{
+ Start: query.Hints.StartMs,
+ End: query.Hints.EndMs,
+ Step: query.Hints.StepMs,
+ Func: query.Hints.Func,
+ Grouping: query.Hints.Grouping,
+ Range: query.Hints.RangeMs,
+ By: query.Hints.By,
+ }
+ }
+
+ var ws storage.Warnings
+ resp.Results[i], ws, err = ToQueryResult(querier.Select(false, hints, filteredMatchers...), h.remoteReadSampleLimit)
+ if err != nil {
+ return err
+ }
+ for _, w := range ws {
+ level.Warn(h.logger).Log("msg", "Warnings on remote read query", "err", w.Error())
+ }
+ for _, ts := range resp.Results[i].Timeseries {
+ ts.Labels = MergeLabels(ts.Labels, sortedExternalLabels)
+ }
+ return nil
+ }(); err != nil {
+ if httpErr, ok := err.(HTTPError); ok {
+ http.Error(w, httpErr.Error(), httpErr.Status())
+ return
+ }
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+
+ if err := EncodeReadResponse(&resp, w); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+}
+
+func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.ResponseWriter, req *prompb.ReadRequest, externalLabels map[string]string, sortedExternalLabels []prompb.Label) {
+ w.Header().Set("Content-Type", "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse")
+
+ f, ok := w.(http.Flusher)
+ if !ok {
+ http.Error(w, "internal http.ResponseWriter does not implement http.Flusher interface", http.StatusInternalServerError)
+ return
+ }
+
+ for i, query := range req.Queries {
+ if err := func() error {
+ filteredMatchers, err := filterExtLabelsFromMatchers(query.Matchers, externalLabels)
+ if err != nil {
+ return err
+ }
+
+ querier, err := h.queryable.ChunkQuerier(ctx, query.StartTimestampMs, query.EndTimestampMs)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err := querier.Close(); err != nil {
+ level.Warn(h.logger).Log("msg", "Error on chunk querier close", "err", err.Error())
+ }
+ }()
+
+ var hints *storage.SelectHints
+ if query.Hints != nil {
+ hints = &storage.SelectHints{
+ Start: query.Hints.StartMs,
+ End: query.Hints.EndMs,
+ Step: query.Hints.StepMs,
+ Func: query.Hints.Func,
+ Grouping: query.Hints.Grouping,
+ Range: query.Hints.RangeMs,
+ By: query.Hints.By,
+ }
+ }
+
+ ws, err := StreamChunkedReadResponses(
+ NewChunkedWriter(w, f),
+ int64(i),
+ // The streaming API has to provide the series sorted.
+ querier.Select(true, hints, filteredMatchers...),
+ sortedExternalLabels,
+ h.remoteReadMaxBytesInFrame,
+ )
+ if err != nil {
+ return err
+ }
+
+ for _, w := range ws {
+ level.Warn(h.logger).Log("msg", "Warnings on chunked remote read query", "warnings", w.Error())
+ }
+ return nil
+ }(); err != nil {
+ if httpErr, ok := err.(HTTPError); ok {
+ http.Error(w, httpErr.Error(), httpErr.Status())
+ return
+ }
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+}
+
+// filterExtLabelsFromMatchers changes equality matchers that match external labels
+// to matchers that look for an empty label,
+// as that label should not be present in the storage.
+func filterExtLabelsFromMatchers(pbMatchers []*prompb.LabelMatcher, externalLabels map[string]string) ([]*labels.Matcher, error) {
+ matchers, err := FromLabelMatchers(pbMatchers)
+ if err != nil {
+ return nil, err
+ }
+
+ filteredMatchers := make([]*labels.Matcher, 0, len(matchers))
+ for _, m := range matchers {
+ value := externalLabels[m.Name]
+ if m.Type == labels.MatchEqual && value == m.Value {
+ matcher, err := labels.NewMatcher(labels.MatchEqual, m.Name, "")
+ if err != nil {
+ return nil, err
+ }
+ filteredMatchers = append(filteredMatchers, matcher)
+ } else {
+ filteredMatchers = append(filteredMatchers, m)
+ }
+ }
+
+ return filteredMatchers, nil
+}
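Mounting the new read handler is a one-liner once a queryable and a config source exist. A sketch under the assumption that the limits shown (which Prometheus itself derives from command-line flags) are acceptable stand-ins:

```go
package example

import (
	"net/http"

	"github.com/go-kit/kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/storage/remote"
)

// mountReadAPI registers the handler; the sample, concurrency and frame
// limits below are illustrative values only.
func mountReadAPI(mux *http.ServeMux, q storage.SampleAndChunkQueryable, cfg func() config.Config) {
	h := remote.NewReadHandler(log.NewNopLogger(), prometheus.DefaultRegisterer, q, cfg,
		50000000, // remoteReadSampleLimit
		10,       // remoteReadConcurrencyLimit
		1048576,  // remoteReadMaxBytesInFrame
	)
	mux.Handle("/api/v1/read", h)
}
```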
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/sigv4.go b/vendor/github.com/prometheus/prometheus/storage/remote/sigv4.go
new file mode 100644
index 0000000000000..4a8974f711915
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/storage/remote/sigv4.go
@@ -0,0 +1,138 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/textproto"
+ "sync"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
+ "github.com/aws/aws-sdk-go/aws/session"
+ signer "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/prometheus/prometheus/config"
+)
+
+var sigv4HeaderDenylist = []string{
+ "uber-trace-id",
+}
+
+type sigV4RoundTripper struct {
+ region string
+ next http.RoundTripper
+ pool sync.Pool
+
+ signer *signer.Signer
+}
+
+// newSigV4RoundTripper returns a new http.RoundTripper that will sign requests
+// using Amazon's Signature Verification V4 signing procedure. The request will
+// then be handed off to the next RoundTripper provided by next. If next is nil,
+// http.DefaultTransport will be used.
+//
+// Credentials for signing are retrieved using the default AWS credential
+// chain. If credentials cannot be found, an error will be returned.
+func newSigV4RoundTripper(cfg *config.SigV4Config, next http.RoundTripper) (http.RoundTripper, error) {
+ if next == nil {
+ next = http.DefaultTransport
+ }
+
+ creds := credentials.NewStaticCredentials(cfg.AccessKey, string(cfg.SecretKey), "")
+ if cfg.AccessKey == "" && cfg.SecretKey == "" {
+ creds = nil
+ }
+
+ sess, err := session.NewSessionWithOptions(session.Options{
+ Config: aws.Config{
+ Region: aws.String(cfg.Region),
+ Credentials: creds,
+ },
+ Profile: cfg.Profile,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("could not create new AWS session: %w", err)
+ }
+ if _, err := sess.Config.Credentials.Get(); err != nil {
+ return nil, fmt.Errorf("could not get SigV4 credentials: %w", err)
+ }
+ if aws.StringValue(sess.Config.Region) == "" {
+ return nil, fmt.Errorf("region not configured in sigv4 or in default credentials chain")
+ }
+
+ signerCreds := sess.Config.Credentials
+ if cfg.RoleARN != "" {
+ signerCreds = stscreds.NewCredentials(sess, cfg.RoleARN)
+ }
+
+ rt := &sigV4RoundTripper{
+ region: cfg.Region,
+ next: next,
+ signer: signer.NewSigner(signerCreds),
+ }
+ rt.pool.New = rt.newBuf
+ return rt, nil
+}
+
+func (rt *sigV4RoundTripper) newBuf() interface{} {
+ return bytes.NewBuffer(make([]byte, 0, 1024))
+}
+
+func (rt *sigV4RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ // rt.signer.Sign needs a seekable body, so we replace the body with a
+	// buffered reader filled with the contents of the original body.
+ buf := rt.pool.Get().(*bytes.Buffer)
+ defer func() {
+ buf.Reset()
+ rt.pool.Put(buf)
+ }()
+ if _, err := io.Copy(buf, req.Body); err != nil {
+ return nil, err
+ }
+ // Close the original body since we don't need it anymore.
+ _ = req.Body.Close()
+
+ // Ensure our seeker is back at the start of the buffer once we return.
+ var seeker io.ReadSeeker = bytes.NewReader(buf.Bytes())
+ defer func() {
+ _, _ = seeker.Seek(0, io.SeekStart)
+ }()
+ req.Body = ioutil.NopCloser(seeker)
+
+ // Clone the request and trim out headers that we don't want to sign.
+ signReq := req.Clone(req.Context())
+ for _, header := range sigv4HeaderDenylist {
+ signReq.Header.Del(header)
+ }
+
+ headers, err := rt.signer.Sign(signReq, seeker, "aps", rt.region, time.Now().UTC())
+ if err != nil {
+ return nil, fmt.Errorf("failed to sign request: %w", err)
+ }
+
+ // Copy over signed headers. Authorization header is not returned by
+ // rt.signer.Sign and needs to be copied separately.
+ for k, v := range headers {
+ req.Header[textproto.CanonicalMIMEHeaderKey(k)] = v
+ }
+ req.Header.Set("Authorization", signReq.Header.Get("Authorization"))
+
+ return rt.next.RoundTrip(req)
+}
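The RoundTrip above has to buffer the one-shot request body so the signer can re-read it before the request is sent. A minimal standalone sketch of that buffering step, assuming a non-nil body:

```go
package example

import (
	"bytes"
	"io"
	"io/ioutil"
	"net/http"
)

// bufferBody drains the one-shot body into memory and installs a seekable
// reader so the payload can be read once for signing and again for sending.
func bufferBody(req *http.Request) (io.ReadSeeker, error) {
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, req.Body); err != nil {
		return nil, err
	}
	_ = req.Body.Close()
	seeker := bytes.NewReader(buf.Bytes())
	req.Body = ioutil.NopCloser(seeker)
	return seeker, nil
}
```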
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/storage.go b/vendor/github.com/prometheus/prometheus/storage/remote/storage.go
index 2ca540ed3ea4a..131ab73b7feca 100644
--- a/vendor/github.com/prometheus/prometheus/storage/remote/storage.go
+++ b/vendor/github.com/prometheus/prometheus/storage/remote/storage.go
@@ -111,6 +111,7 @@ func (s *Storage) ApplyConfig(conf *config.Config) error {
URL: rrConf.URL,
Timeout: rrConf.RemoteTimeout,
HTTPClientConfig: rrConf.HTTPClientConfig,
+ Headers: rrConf.Headers,
})
if err != nil {
return err
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write.go b/vendor/github.com/prometheus/prometheus/storage/remote/write.go
index 4929b439cac3a..a9270630fbd12 100644
--- a/vendor/github.com/prometheus/prometheus/storage/remote/write.go
+++ b/vendor/github.com/prometheus/prometheus/storage/remote/write.go
@@ -24,6 +24,7 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/prometheus/config"
+ "github.com/prometheus/prometheus/pkg/exemplar"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/wal"
@@ -134,7 +135,9 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
URL: rwConf.URL,
Timeout: rwConf.RemoteTimeout,
HTTPClientConfig: rwConf.HTTPClientConfig,
+ SigV4Config: rwConf.SigV4Config,
Headers: rwConf.Headers,
+ RetryOnRateLimit: rwConf.QueueConfig.RetryOnRateLimit,
})
if err != nil {
return err
@@ -211,8 +214,8 @@ type timestampTracker struct {
highestRecvTimestamp *maxTimestamp
}
-// Add implements storage.Appender.
-func (t *timestampTracker) Add(_ labels.Labels, ts int64, _ float64) (uint64, error) {
+// Append implements storage.Appender.
+func (t *timestampTracker) Append(_ uint64, _ labels.Labels, ts int64, _ float64) (uint64, error) {
t.samples++
if ts > t.highestTimestamp {
t.highestTimestamp = ts
@@ -220,10 +223,8 @@ func (t *timestampTracker) Add(_ labels.Labels, ts int64, _ float64) (uint64, er
return 0, nil
}
-// AddFast implements storage.Appender.
-func (t *timestampTracker) AddFast(_ uint64, ts int64, v float64) error {
- _, err := t.Add(nil, ts, v)
- return err
+func (t *timestampTracker) AppendExemplar(_ uint64, _ labels.Labels, _ exemplar.Exemplar) (uint64, error) {
+ return 0, nil
}
// Commit implements storage.Appender.
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go
index c0538e2b2f233..20e2cf4512022 100644
--- a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go
+++ b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go
@@ -23,7 +23,7 @@ import (
"github.com/prometheus/prometheus/storage"
)
-type handler struct {
+type writeHandler struct {
logger log.Logger
appendable storage.Appendable
}
@@ -31,13 +31,13 @@ type handler struct {
// NewWriteHandler creates a http.Handler that accepts remote write requests and
// writes them to the provided appendable.
func NewWriteHandler(logger log.Logger, appendable storage.Appendable) http.Handler {
- return &handler{
+ return &writeHandler{
logger: logger,
appendable: appendable,
}
}
-func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
req, err := DecodeWriteRequest(r.Body)
if err != nil {
level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err.Error())
@@ -62,7 +62,7 @@ func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusNoContent)
}
-func (h *handler) write(ctx context.Context, req *prompb.WriteRequest) (err error) {
+func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err error) {
app := h.appendable.Appender(ctx)
defer func() {
if err != nil {
@@ -75,7 +75,7 @@ func (h *handler) write(ctx context.Context, req *prompb.WriteRequest) (err erro
for _, ts := range req.Timeseries {
labels := labelProtosToLabels(ts.Labels)
for _, s := range ts.Samples {
- _, err = app.Add(labels, s.Timestamp, s.Value)
+ _, err = app.Append(0, labels, s.Timestamp, s.Value)
if err != nil {
return err
}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/CHANGELOG.md b/vendor/github.com/prometheus/prometheus/tsdb/CHANGELOG.md
index 66d07bf3cc7af..71a67d3b18748 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/CHANGELOG.md
+++ b/vendor/github.com/prometheus/prometheus/tsdb/CHANGELOG.md
@@ -1,5 +1,3 @@
-## master / unreleased
-
## 0.10.0
- [FEATURE] Added `DBReadOnly` to allow opening a database in read only mode.
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/block.go b/vendor/github.com/prometheus/prometheus/tsdb/block.go
index 7ae8d5bbf12a9..47c842319f423 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/block.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/block.go
@@ -569,7 +569,9 @@ Outer:
// CleanTombstones will remove the tombstones and rewrite the block (only if there are any tombstones).
// If there was a rewrite, then it returns the ULID of the new block written, else nil.
-func (pb *Block) CleanTombstones(dest string, c Compactor) (*ulid.ULID, error) {
+// If the resultant block is empty (tombstones covered the whole block), then it deletes the new block and returns a nil ULID.
+// It also returns a boolean indicating whether the parent block can be deleted safely or not.
+func (pb *Block) CleanTombstones(dest string, c Compactor) (*ulid.ULID, bool, error) {
numStones := 0
if err := pb.tombstones.Iter(func(id uint64, ivs tombstones.Intervals) error {
@@ -580,15 +582,16 @@ func (pb *Block) CleanTombstones(dest string, c Compactor) (*ulid.ULID, error) {
panic(err)
}
if numStones == 0 {
- return nil, nil
+ return nil, false, nil
}
meta := pb.Meta()
uid, err := c.Write(dest, pb, pb.meta.MinTime, pb.meta.MaxTime, &meta)
if err != nil {
- return nil, err
+ return nil, false, err
}
- return &uid, nil
+
+ return &uid, true, nil
}
// Snapshot creates snapshot of the block into dir.
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go
index 051b9b1a89fb4..d5386f7ea1365 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go
@@ -605,12 +605,14 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef, chunkRef uint64,
}
}
if allZeros {
+ // End of segment chunk file content.
break
}
return &CorruptionErr{
Dir: cdm.dir.Name(),
FileIndex: segID,
- Err: errors.Errorf("head chunk file doesn't include enough bytes to read the chunk header - required:%v, available:%v, file:%d", idx+MaxHeadChunkMetaSize, fileEnd, segID),
+ Err: errors.Errorf("head chunk file has some unread data, but doesn't include enough bytes to read the chunk header"+
+ " - required:%v, available:%v, file:%d", idx+MaxHeadChunkMetaSize, fileEnd, segID),
}
}
chkCRC32.Reset()
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go
index 5c95e02c020bf..b8b3c9970fa8b 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/db.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go
@@ -136,6 +136,10 @@ type Options struct {
// It is always the default time and size based retention in Prometheus and
// mainly meant for external users who import TSDB.
BlocksToDelete BlocksToDeleteFunc
+
+ // MaxExemplars sets the size, in # of exemplars stored, of the single circular buffer used to store exemplars in memory.
+	// See tsdb/exemplar.go, specifically the CircularExemplarStorage struct and its constructor NewCircularExemplarStorage.
+ MaxExemplars int
}
type BlocksToDeleteFunc func(blocks []*Block) map[ulid.ULID]struct{}
@@ -663,6 +667,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
headOpts.ChunkWriteBufferSize = opts.HeadChunksWriteBufferSize
headOpts.StripeSize = opts.StripeSize
headOpts.SeriesCallback = opts.SeriesLifecycleCallback
+ headOpts.NumExemplars = opts.MaxExemplars
db.head, err = NewHead(r, l, wlog, headOpts)
if err != nil {
return nil, err
@@ -790,6 +795,15 @@ type dbAppender struct {
db *DB
}
+var _ storage.GetRef = dbAppender{}
+
+func (a dbAppender) GetRef(lset labels.Labels) uint64 {
+ if g, ok := a.Appender.(storage.GetRef); ok {
+ return g.GetRef(lset)
+ }
+ return 0
+}
+
func (a dbAppender) Commit() error {
err := a.Appender.Commit()
@@ -979,6 +993,12 @@ func (db *DB) reloadBlocks() (err error) {
db.metrics.reloads.Inc()
}()
+	// Now that we reload TSDB every minute, there is a high chance of a race condition with a reload
+ // triggered by CleanTombstones(). We need to lock the reload to avoid the situation where
+ // a normal reload and CleanTombstones try to delete the same block.
+ db.mtx.Lock()
+ defer db.mtx.Unlock()
+
loadable, corrupted, err := openBlocks(db.logger, db.dir, db.blocks, db.chunkPool)
if err != nil {
return err
@@ -1044,10 +1064,8 @@ func (db *DB) reloadBlocks() (err error) {
}
// Swap new blocks first for subsequently created readers to be seen.
- db.mtx.Lock()
oldBlocks := db.blocks
db.blocks = toLoad
- db.mtx.Unlock()
blockMetas := make([]BlockMeta, 0, len(toLoad))
for _, b := range toLoad {
@@ -1502,6 +1520,10 @@ func (db *DB) ChunkQuerier(_ context.Context, mint, maxt int64) (storage.ChunkQu
return storage.NewMergeChunkQuerier(blockQueriers, nil, storage.NewCompactingChunkSeriesMerger(storage.ChainedSeriesMerge)), nil
}
+func (db *DB) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) {
+ return db.head.exemplars.ExemplarQuerier(ctx)
+}
+
func rangeForTimestamp(t int64, width int64) (maxt int64) {
return (t/width)*width + width
}
@@ -1537,34 +1559,44 @@ func (db *DB) CleanTombstones() (err error) {
start := time.Now()
defer db.metrics.tombCleanTimer.Observe(time.Since(start).Seconds())
- newUIDs := []ulid.ULID{}
- defer func() {
- // If any error is caused, we need to delete all the new directory created.
- if err != nil {
- for _, uid := range newUIDs {
+ cleanUpCompleted := false
+	// Repeat the cleanup until there are no tombstones left.
+ for !cleanUpCompleted {
+ cleanUpCompleted = true
+
+ for _, pb := range db.Blocks() {
+ uid, safeToDelete, cleanErr := pb.CleanTombstones(db.Dir(), db.compactor)
+ if cleanErr != nil {
+ return errors.Wrapf(cleanErr, "clean tombstones: %s", pb.Dir())
+ }
+ if !safeToDelete {
+ // There was nothing to clean.
+ continue
+ }
+
+			// In case the tombstones of the old block cover the whole block,
+			// there would be no resultant block to tell the parent.
+ // The lock protects against race conditions when deleting blocks
+ // during an already running reload.
+ db.mtx.Lock()
+ pb.meta.Compaction.Deletable = safeToDelete
+ db.mtx.Unlock()
+ cleanUpCompleted = false
+ if err = db.reloadBlocks(); err == nil { // Will try to delete old block.
+ // Successful reload will change the existing blocks.
+ // We need to loop over the new set of blocks.
+ break
+ }
+
+ // Delete new block if it was created.
+ if uid != nil && *uid != (ulid.ULID{}) {
dir := filepath.Join(db.Dir(), uid.String())
if err := os.RemoveAll(dir); err != nil {
level.Error(db.logger).Log("msg", "failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err)
}
}
+ return errors.Wrap(err, "reload blocks")
}
- }()
-
- db.mtx.RLock()
- blocks := db.blocks[:]
- db.mtx.RUnlock()
-
- for _, b := range blocks {
- if uid, er := b.CleanTombstones(db.Dir(), db.compactor); er != nil {
- err = errors.Wrapf(er, "clean tombstones: %s", b.Dir())
- return err
- } else if uid != nil { // New block was created.
- newUIDs = append(newUIDs, *uid)
- }
- }
-
- if err := db.reloadBlocks(); err != nil {
- return errors.Wrap(err, "reload blocks")
}
return nil
}
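To actually enable the exemplar path added above, Options.MaxExemplars must be set before opening the DB. A hedged sketch; the Open signature and the buffer size are assumptions for this vendored version, and a size below 1 leaves exemplar storage disabled:

```go
package example

import (
	"github.com/go-kit/kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/tsdb"
)

// openWithExemplars sizes the in-memory exemplar ring before opening the DB.
func openWithExemplars(dir string) (*tsdb.DB, error) {
	opts := tsdb.DefaultOptions()
	opts.MaxExemplars = 100000
	return tsdb.Open(dir, log.NewNopLogger(), prometheus.NewRegistry(), opts)
}
```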
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go b/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go
new file mode 100644
index 0000000000000..5a42f30a88ac4
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go
@@ -0,0 +1,209 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tsdb
+
+import (
+ "context"
+ "sort"
+ "sync"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/prometheus/pkg/exemplar"
+ "github.com/prometheus/prometheus/pkg/labels"
+ "github.com/prometheus/prometheus/storage"
+)
+
+type CircularExemplarStorage struct {
+ outOfOrderExemplars prometheus.Counter
+
+ lock sync.RWMutex
+ exemplars []*circularBufferEntry
+ nextIndex int
+
+ // Map of series labels as a string to index entry, which points to the first
+ // and last exemplar for the series in the exemplars circular buffer.
+ index map[string]*indexEntry
+}
+
+type indexEntry struct {
+ first int
+ last int
+}
+
+type circularBufferEntry struct {
+ exemplar exemplar.Exemplar
+ seriesLabels labels.Labels
+ next int
+}
+
+// If we assume an average case of 95 bytes per exemplar we can fit 5651272 exemplars in
+// 1GB of extra memory, accounting for the fact that this is heap allocated space.
+// If len < 1, then the exemplar storage is disabled.
+func NewCircularExemplarStorage(len int, reg prometheus.Registerer) (ExemplarStorage, error) {
+ if len < 1 {
+ return &noopExemplarStorage{}, nil
+ }
+ c := &CircularExemplarStorage{
+ exemplars: make([]*circularBufferEntry, len),
+ index: make(map[string]*indexEntry),
+ outOfOrderExemplars: prometheus.NewCounter(prometheus.CounterOpts{
+ Name: "prometheus_tsdb_exemplar_out_of_order_exemplars_total",
+ Help: "Total number of out of order exemplar ingestion failed attempts",
+ }),
+ }
+
+ if reg != nil {
+ reg.MustRegister(c.outOfOrderExemplars)
+ }
+
+ return c, nil
+}
+
+func (ce *CircularExemplarStorage) Appender() *CircularExemplarStorage {
+ return ce
+}
+
+func (ce *CircularExemplarStorage) ExemplarQuerier(_ context.Context) (storage.ExemplarQuerier, error) {
+ return ce, nil
+}
+
+func (ce *CircularExemplarStorage) Querier(ctx context.Context) (storage.ExemplarQuerier, error) {
+ return ce, nil
+}
+
+// Select returns exemplars for a given set of label matchers.
+func (ce *CircularExemplarStorage) Select(start, end int64, matchers ...[]*labels.Matcher) ([]exemplar.QueryResult, error) {
+ ret := make([]exemplar.QueryResult, 0)
+
+ ce.lock.RLock()
+ defer ce.lock.RUnlock()
+
+ // Loop through each index entry, which will point us to first/last exemplar for each series.
+ for _, idx := range ce.index {
+ var se exemplar.QueryResult
+ e := ce.exemplars[idx.first]
+ if !matchesSomeMatcherSet(e.seriesLabels, matchers) {
+ continue
+ }
+ se.SeriesLabels = e.seriesLabels
+
+ // Loop through all exemplars in the circular buffer for the current series.
+ for e.exemplar.Ts <= end {
+ if e.exemplar.Ts >= start {
+ se.Exemplars = append(se.Exemplars, e.exemplar)
+ }
+ if e.next == -1 {
+ break
+ }
+ e = ce.exemplars[e.next]
+ }
+ if len(se.Exemplars) > 0 {
+ ret = append(ret, se)
+ }
+ }
+
+ sort.Slice(ret, func(i, j int) bool {
+ return labels.Compare(ret[i].SeriesLabels, ret[j].SeriesLabels) < 0
+ })
+
+ return ret, nil
+}
+
+func matchesSomeMatcherSet(lbls labels.Labels, matchers [][]*labels.Matcher) bool {
+Outer:
+ for _, ms := range matchers {
+ for _, m := range ms {
+ if !m.Matches(lbls.Get(m.Name)) {
+ continue Outer
+ }
+ }
+ return true
+ }
+ return false
+}
+
+// indexGc takes the circularBufferEntry that will be overwritten and updates the
+// storage's index for that entry's labelset if necessary.
+func (ce *CircularExemplarStorage) indexGc(cbe *circularBufferEntry) {
+ if cbe == nil {
+ return
+ }
+
+ l := cbe.seriesLabels.String()
+ i := cbe.next
+ if i == -1 {
+ delete(ce.index, l)
+ return
+ }
+
+ ce.index[l] = &indexEntry{i, ce.index[l].last}
+}
+
+func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemplar) error {
+ seriesLabels := l.String()
+ ce.lock.Lock()
+ defer ce.lock.Unlock()
+
+ idx, ok := ce.index[seriesLabels]
+ if !ok {
+ ce.indexGc(ce.exemplars[ce.nextIndex])
+ // Default the next value to -1 (which we use to detect that we've iterated through all exemplars for a series in Select)
+ // since this is the first exemplar stored for this series.
+ ce.exemplars[ce.nextIndex] = &circularBufferEntry{
+ exemplar: e,
+ seriesLabels: l,
+ next: -1}
+ ce.index[seriesLabels] = &indexEntry{ce.nextIndex, ce.nextIndex}
+ ce.nextIndex = (ce.nextIndex + 1) % len(ce.exemplars)
+ return nil
+ }
+
+ // Check for duplicate vs last stored exemplar for this series.
+	// NB these are expected, and appending them is a no-op.
+ if ce.exemplars[idx.last].exemplar.Equals(e) {
+ return nil
+ }
+
+ if e.Ts <= ce.exemplars[idx.last].exemplar.Ts {
+ ce.outOfOrderExemplars.Inc()
+ return storage.ErrOutOfOrderExemplar
+ }
+ ce.indexGc(ce.exemplars[ce.nextIndex])
+ ce.exemplars[ce.nextIndex] = &circularBufferEntry{
+ exemplar: e,
+ seriesLabels: l,
+ next: -1,
+ }
+
+ ce.exemplars[ce.index[seriesLabels].last].next = ce.nextIndex
+ ce.index[seriesLabels].last = ce.nextIndex
+ ce.nextIndex = (ce.nextIndex + 1) % len(ce.exemplars)
+ return nil
+}
+
+type noopExemplarStorage struct{}
+
+func (noopExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemplar) error {
+ return nil
+}
+
+func (noopExemplarStorage) ExemplarQuerier(context.Context) (storage.ExemplarQuerier, error) {
+ return &noopExemplarQuerier{}, nil
+}
+
+type noopExemplarQuerier struct{}
+
+func (noopExemplarQuerier) Select(_, _ int64, _ ...[]*labels.Matcher) ([]exemplar.QueryResult, error) {
+ return nil, nil
+}
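An end-to-end usage sketch for the circular buffer above: store one exemplar, then query it back through the ExemplarQuerier. The exemplar field names (Labels, Value, Ts) match pkg/exemplar in this tree; the buffer size of 10 and the series data are illustrative:

```go
package example

import (
	"context"

	"github.com/prometheus/prometheus/pkg/exemplar"
	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/tsdb"
)

// storeAndQuery adds one exemplar for a series, then selects it back.
func storeAndQuery() ([]exemplar.QueryResult, error) {
	es, err := tsdb.NewCircularExemplarStorage(10, nil) // nil registerer: no metrics
	if err != nil {
		return nil, err
	}
	series := labels.FromStrings("__name__", "http_requests_total", "job", "api")
	e := exemplar.Exemplar{
		Labels: labels.FromStrings("trace_id", "abc123"),
		Value:  1,
		Ts:     1000,
	}
	if err := es.AddExemplar(series, e); err != nil {
		return nil, err
	}
	q, err := es.ExemplarQuerier(context.TODO())
	if err != nil {
		return nil, err
	}
	set := []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "job", "api")}
	return q.Select(0, 2000, set)
}
```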
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go
index 4352d86d9e5c5..4f4e8d51dd350 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/head.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go
@@ -30,6 +30,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/atomic"
+ "github.com/prometheus/prometheus/pkg/exemplar"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
@@ -46,11 +47,19 @@ var (
// ErrInvalidSample is returned if an appended sample is not valid and can't
// be ingested.
ErrInvalidSample = errors.New("invalid sample")
+ // ErrInvalidExemplar is returned if an appended exemplar is not valid and can't
+ // be ingested.
+ ErrInvalidExemplar = errors.New("invalid exemplar")
// ErrAppenderClosed is returned if an appender has already be successfully
// rolled back or committed.
ErrAppenderClosed = errors.New("appender closed")
)
+type ExemplarStorage interface {
+ storage.ExemplarQueryable
+ AddExemplar(labels.Labels, exemplar.Exemplar) error
+}
+
// Head handles reads and writes of time series data within a time window.
type Head struct {
chunkRange atomic.Int64
@@ -60,14 +69,16 @@ type Head struct {
lastWALTruncationTime atomic.Int64
lastSeriesID atomic.Uint64
- metrics *headMetrics
- opts *HeadOptions
- wal *wal.WAL
- logger log.Logger
- appendPool sync.Pool
- seriesPool sync.Pool
- bytesPool sync.Pool
- memChunkPool sync.Pool
+ metrics *headMetrics
+ opts *HeadOptions
+ wal *wal.WAL
+ exemplars ExemplarStorage
+ logger log.Logger
+ appendPool sync.Pool
+ exemplarsPool sync.Pool
+ seriesPool sync.Pool
+ bytesPool sync.Pool
+ memChunkPool sync.Pool
// All series addressable by their ID or hash.
series *stripeSeries
@@ -107,6 +118,7 @@ type HeadOptions struct {
// A smaller StripeSize reduces the memory allocated, but can decrease performance with large number of series.
StripeSize int
SeriesCallback SeriesLifecycleCallback
+ NumExemplars int
}
func DefaultHeadOptions() *HeadOptions {
@@ -133,6 +145,7 @@ type headMetrics struct {
samplesAppended prometheus.Counter
outOfBoundSamples prometheus.Counter
outOfOrderSamples prometheus.Counter
+ outOfOrderExemplars prometheus.Counter
walTruncateDuration prometheus.Summary
walCorruptionsTotal prometheus.Counter
walTotalReplayDuration prometheus.Gauge
@@ -209,6 +222,10 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics {
Name: "prometheus_tsdb_out_of_order_samples_total",
Help: "Total number of out of order samples ingestion failed attempts.",
}),
+ outOfOrderExemplars: prometheus.NewCounter(prometheus.CounterOpts{
+ Name: "prometheus_tsdb_out_of_order_exemplars_total",
+ Help: "Total number of out of order exemplars ingestion failed attempts.",
+ }),
headTruncateFail: prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_head_truncations_failed_total",
Help: "Total number of head truncations that failed.",
@@ -256,6 +273,7 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics {
m.samplesAppended,
m.outOfBoundSamples,
m.outOfOrderSamples,
+ m.outOfOrderExemplars,
m.headTruncateFail,
m.headTruncateTotal,
m.checkpointDeleteFail,
@@ -325,10 +343,17 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal *wal.WAL, opts *HeadOpti
if opts.SeriesCallback == nil {
opts.SeriesCallback = &noopSeriesLifecycleCallback{}
}
+
+ es, err := NewCircularExemplarStorage(opts.NumExemplars, r)
+ if err != nil {
+ return nil, err
+ }
+
h := &Head{
wal: wal,
logger: l,
opts: opts,
+ exemplars: es,
series: newStripeSeries(opts.StripeSize, opts.SeriesCallback),
symbols: map[string]struct{}{},
postings: index.NewUnorderedMemPostings(),
@@ -351,7 +376,6 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal *wal.WAL, opts *HeadOpti
opts.ChunkPool = chunkenc.NewPool()
}
- var err error
h.chunkDiskMapper, err = chunks.NewChunkDiskMapper(
mmappedChunksDir(opts.ChunkDirRoot),
opts.ChunkPool,
@@ -366,6 +390,10 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal *wal.WAL, opts *HeadOpti
func mmappedChunksDir(dir string) string { return filepath.Join(dir, "chunks_head") }
+func (h *Head) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) {
+ return h.exemplars.ExemplarQuerier(ctx)
+}
+
// processWALSamples adds a partition of samples it receives to the head and passes
// them on to other workers.
// Samples before the mint timestamp are discarded.
@@ -752,6 +780,11 @@ func (h *Head) Init(minValidTime int64) error {
return nil
}
+// SetMinValidTime sets the minimum timestamp the head can ingest.
+func (h *Head) SetMinValidTime(minValidTime int64) {
+ h.minValidTime.Store(minValidTime)
+}
+
func (h *Head) loadMmappedChunks() (map[uint64][]*mmappedChunk, error) {
mmappedChunks := map[uint64][]*mmappedChunk{}
if err := h.chunkDiskMapper.IterateAllChunks(func(seriesRef, chunkRef uint64, mint, maxt int64, numSamples uint16) error {
@@ -1047,21 +1080,40 @@ type initAppender struct {
head *Head
}
-func (a *initAppender) Add(lset labels.Labels, t int64, v float64) (uint64, error) {
+func (a *initAppender) Append(ref uint64, lset labels.Labels, t int64, v float64) (uint64, error) {
if a.app != nil {
- return a.app.Add(lset, t, v)
+ return a.app.Append(ref, lset, t, v)
}
+
a.head.initTime(t)
a.app = a.head.appender()
+ return a.app.Append(ref, lset, t, v)
+}
+
+func (a *initAppender) AppendExemplar(ref uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) {
+ // Check if exemplar storage is enabled.
+ if a.head.opts.NumExemplars == 0 {
+ return 0, nil
+ }
+
+ if a.app != nil {
+ return a.app.AppendExemplar(ref, l, e)
+ }
+ // We should never reach here given we would call Append before AppendExemplar
+ // and we probably want to always base head/WAL min time on sample times.
+ a.head.initTime(e.Ts)
+ a.app = a.head.appender()
- return a.app.Add(lset, t, v)
+ return a.app.AppendExemplar(ref, l, e)
}
-func (a *initAppender) AddFast(ref uint64, t int64, v float64) error {
- if a.app == nil {
- return storage.ErrNotFound
+var _ storage.GetRef = &initAppender{}
+
+func (a *initAppender) GetRef(lset labels.Labels) uint64 {
+ if g, ok := a.app.(storage.GetRef); ok {
+ return g.GetRef(lset)
}
- return a.app.AddFast(ref, t, v)
+ return 0
}
func (a *initAppender) Commit() error {
@@ -1103,8 +1155,10 @@ func (h *Head) appender() *headAppender {
maxt: math.MinInt64,
samples: h.getAppendBuffer(),
sampleSeries: h.getSeriesBuffer(),
+ exemplars: h.getExemplarBuffer(),
appendID: appendID,
cleanupAppendIDsBelow: cleanupAppendIDsBelow,
+ exemplarAppender: h.exemplars,
}
}
@@ -1121,6 +1175,19 @@ func max(a, b int64) int64 {
return b
}
+func (h *Head) ExemplarAppender() storage.ExemplarAppender {
+ h.metrics.activeAppenders.Inc()
+
+ // The head cache might not have a starting point yet. The init appender
+ // picks up the first appended timestamp as the base.
+ if h.MinTime() == math.MaxInt64 {
+ return &initAppender{
+ head: h,
+ }
+ }
+ return h.appender()
+}
+
func (h *Head) getAppendBuffer() []record.RefSample {
b := h.appendPool.Get()
if b == nil {
@@ -1134,6 +1201,19 @@ func (h *Head) putAppendBuffer(b []record.RefSample) {
h.appendPool.Put(b[:0])
}
+func (h *Head) getExemplarBuffer() []exemplarWithSeriesRef {
+ b := h.exemplarsPool.Get()
+ if b == nil {
+ return make([]exemplarWithSeriesRef, 0, 512)
+ }
+ return b.([]exemplarWithSeriesRef)
+}
+
+func (h *Head) putExemplarBuffer(b []exemplarWithSeriesRef) {
+	//nolint:staticcheck // SA6002 is safe to ignore here, and actually fixing it has some performance penalty.
+ h.exemplarsPool.Put(b[:0])
+}
+
func (h *Head) getSeriesBuffer() []*memSeries {
b := h.seriesPool.Get()
if b == nil {
@@ -1160,67 +1240,65 @@ func (h *Head) putBytesBuffer(b []byte) {
h.bytesPool.Put(b[:0])
}
+type exemplarWithSeriesRef struct {
+ ref uint64
+ exemplar exemplar.Exemplar
+}
+
type headAppender struct {
- head *Head
- minValidTime int64 // No samples below this timestamp are allowed.
- mint, maxt int64
+ head *Head
+ minValidTime int64 // No samples below this timestamp are allowed.
+ mint, maxt int64
+ exemplarAppender ExemplarStorage
series []record.RefSeries
samples []record.RefSample
+ exemplars []exemplarWithSeriesRef
sampleSeries []*memSeries
appendID, cleanupAppendIDsBelow uint64
closed bool
}
-func (a *headAppender) Add(lset labels.Labels, t int64, v float64) (uint64, error) {
+func (a *headAppender) Append(ref uint64, lset labels.Labels, t int64, v float64) (uint64, error) {
if t < a.minValidTime {
a.head.metrics.outOfBoundSamples.Inc()
return 0, storage.ErrOutOfBounds
}
- // Ensure no empty labels have gotten through.
- lset = lset.WithoutEmpty()
-
- if len(lset) == 0 {
- return 0, errors.Wrap(ErrInvalidSample, "empty labelset")
- }
-
- if l, dup := lset.HasDuplicateLabelNames(); dup {
- return 0, errors.Wrap(ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, l))
- }
-
- s, created, err := a.head.getOrCreate(lset.Hash(), lset)
- if err != nil {
- return 0, err
- }
+ s := a.head.series.getByID(ref)
+ if s == nil {
+ // Ensure no empty labels have gotten through.
+ lset = lset.WithoutEmpty()
+ if len(lset) == 0 {
+ return 0, errors.Wrap(ErrInvalidSample, "empty labelset")
+ }
- if created {
- a.series = append(a.series, record.RefSeries{
- Ref: s.ref,
- Labels: lset,
- })
- }
- return s.ref, a.AddFast(s.ref, t, v)
-}
+ if l, dup := lset.HasDuplicateLabelNames(); dup {
+ return 0, errors.Wrap(ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, l))
+ }
-func (a *headAppender) AddFast(ref uint64, t int64, v float64) error {
- if t < a.minValidTime {
- a.head.metrics.outOfBoundSamples.Inc()
- return storage.ErrOutOfBounds
+ var created bool
+ var err error
+ s, created, err = a.head.getOrCreate(lset.Hash(), lset)
+ if err != nil {
+ return 0, err
+ }
+ if created {
+ a.series = append(a.series, record.RefSeries{
+ Ref: s.ref,
+ Labels: lset,
+ })
+ }
}
- s := a.head.series.getByID(ref)
- if s == nil {
- return errors.Wrap(storage.ErrNotFound, "unknown series")
- }
s.Lock()
if err := s.appendable(t, v); err != nil {
s.Unlock()
if err == storage.ErrOutOfOrderSample {
a.head.metrics.outOfOrderSamples.Inc()
}
- return err
+ return 0, err
}
s.pendingCommit = true
s.Unlock()
@@ -1233,12 +1311,43 @@ func (a *headAppender) AddFast(ref uint64, t int64, v float64) error {
}
a.samples = append(a.samples, record.RefSample{
- Ref: ref,
+ Ref: s.ref,
T: t,
V: v,
})
a.sampleSeries = append(a.sampleSeries, s)
- return nil
+ return s.ref, nil
+}
+
+// AppendExemplar for headAppender assumes the series ref already exists, and so it doesn't
+// use getOrCreate or make any of the lset sanity checks that Append does.
+func (a *headAppender) AppendExemplar(ref uint64, _ labels.Labels, e exemplar.Exemplar) (uint64, error) {
+ // Check if exemplar storage is enabled.
+ if a.head.opts.NumExemplars == 0 {
+ return 0, nil
+ }
+
+ s := a.head.series.getByID(ref)
+ if s == nil {
+		return 0, fmt.Errorf("unknown series ref when trying to add exemplar: %d", ref)
+ }
+
+ // Ensure no empty labels have gotten through.
+ e.Labels = e.Labels.WithoutEmpty()
+
+ a.exemplars = append(a.exemplars, exemplarWithSeriesRef{ref, e})
+
+ return s.ref, nil
+}
+
+var _ storage.GetRef = &headAppender{}
+
+func (a *headAppender) GetRef(lset labels.Labels) uint64 {
+ s := a.head.series.getByHash(lset.Hash(), lset)
+ if s == nil {
+ return 0
+ }
+ return s.ref
}
func (a *headAppender) log() error {
@@ -1282,9 +1391,21 @@ func (a *headAppender) Commit() (err error) {
return errors.Wrap(err, "write to WAL")
}
+	// No errors logging to the WAL, so pass the exemplars along to the in-memory storage.
+ for _, e := range a.exemplars {
+ s := a.head.series.getByID(e.ref)
+ err := a.exemplarAppender.AddExemplar(s.lset, e.exemplar)
+ if err == storage.ErrOutOfOrderExemplar {
+ a.head.metrics.outOfOrderExemplars.Inc()
+ } else if err != nil {
+ level.Debug(a.head.logger).Log("msg", "Unknown error while adding exemplar", "err", err)
+ }
+ }
+
defer a.head.metrics.activeAppenders.Dec()
defer a.head.putAppendBuffer(a.samples)
defer a.head.putSeriesBuffer(a.sampleSeries)
+ defer a.head.putExemplarBuffer(a.exemplars)
defer a.head.iso.closeAppend(a.appendID)
total := len(a.samples)
@@ -1769,7 +1890,7 @@ func (h *headIndexReader) LabelValueFor(id uint64, label string) (string, error)
}
func (h *Head) getOrCreate(hash uint64, lset labels.Labels) (*memSeries, bool, error) {
- // Just using `getOrSet` below would be semantically sufficient, but we'd create
+ // Just using `getOrCreateWithID` below would be semantically sufficient, but we'd create
// a new series on every sample inserted via Add(), which causes allocations
// and makes our series IDs rather random and harder to compress in postings.
s := h.series.getByHash(hash, lset)
@@ -1784,9 +1905,9 @@ func (h *Head) getOrCreate(hash uint64, lset labels.Labels) (*memSeries, bool, e
}
func (h *Head) getOrCreateWithID(id, hash uint64, lset labels.Labels) (*memSeries, bool, error) {
- s := newMemSeries(lset, id, h.chunkRange.Load(), &h.memChunkPool)
-
- s, created, err := h.series.getOrSet(hash, s)
+ s, created, err := h.series.getOrSet(hash, lset, func() *memSeries {
+ return newMemSeries(lset, id, h.chunkRange.Load(), &h.memChunkPool)
+ })
if err != nil {
return nil, false, err
}
@@ -1975,27 +2096,34 @@ func (s *stripeSeries) getByHash(hash uint64, lset labels.Labels) *memSeries {
return series
}
-func (s *stripeSeries) getOrSet(hash uint64, series *memSeries) (*memSeries, bool, error) {
+func (s *stripeSeries) getOrSet(hash uint64, lset labels.Labels, createSeries func() *memSeries) (*memSeries, bool, error) {
// PreCreation is called here to avoid calling it inside the lock.
// It is not necessary to call it just before creating a series,
// rather it gives a 'hint' whether to create a series or not.
- createSeriesErr := s.seriesLifecycleCallback.PreCreation(series.lset)
+ preCreationErr := s.seriesLifecycleCallback.PreCreation(lset)
+
+	// Create the series, unless the PreCreation() callback has failed.
+	// If it failed, we won't allow creating a new series anyway.
+ var series *memSeries
+ if preCreationErr == nil {
+ series = createSeries()
+ }
i := hash & uint64(s.size-1)
s.locks[i].Lock()
- if prev := s.hashes[i].get(hash, series.lset); prev != nil {
+ if prev := s.hashes[i].get(hash, lset); prev != nil {
s.locks[i].Unlock()
return prev, false, nil
}
- if createSeriesErr == nil {
+ if preCreationErr == nil {
s.hashes[i].set(hash, series)
}
s.locks[i].Unlock()
- if createSeriesErr != nil {
+ if preCreationErr != nil {
// The callback prevented creation of series.
- return nil, false, createSeriesErr
+ return nil, false, preCreationErr
}
// Setting the series in the s.hashes marks the creation of series
	// as any further calls to this method would return that series.
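
For readers skimming the getOrSet change above: the series is now built by a closure outside the stripe lock and discarded if another goroutine won the race. A minimal, self-contained Go sketch of that optimistic create-then-check-under-lock pattern (the names here are illustrative stand-ins, not the real TSDB types, and the PreCreation callback handling is omitted):

package main

import (
	"fmt"
	"sync"
)

// stripe and item are illustrative stand-ins for stripeSeries and memSeries.
type item struct{ id uint64 }

type stripe struct {
	mtx sync.Mutex
	m   map[uint64]*item
}

// getOrSet builds the candidate outside the lock (cheap to throw away), then
// re-checks under the lock so exactly one winner is ever published.
func (s *stripe) getOrSet(hash uint64, create func() *item) (*item, bool) {
	candidate := create() // allocated optimistically; may be discarded

	s.mtx.Lock()
	defer s.mtx.Unlock()
	if prev, ok := s.m[hash]; ok {
		return prev, false // another goroutine won the race
	}
	s.m[hash] = candidate
	return candidate, true
}

func main() {
	s := &stripe{m: map[uint64]*item{}}
	it, created := s.getOrSet(42, func() *item { return &item{id: 42} })
	fmt.Println(it.id, created) // 42 true
	_, created = s.getOrSet(42, func() *item { return &item{id: 99} })
	fmt.Println(created) // false: the first entry is kept
}
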
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go
index be2c63f9f4329..8cc0dd195276c 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go
@@ -50,14 +50,10 @@ func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger l
for _, s := range series {
ref := uint64(0)
it := s.Iterator()
+ lset := s.Labels()
for it.Next() {
t, v := it.At()
- if ref != 0 {
- if err := app.AddFast(ref, t, v); err == nil {
- continue
- }
- }
- ref, err = app.Add(s.Labels(), t, v)
+ ref, err = app.Append(ref, lset, t, v)
if err != nil {
return "", err
}
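
The CreateBlock loop above shows the caller contract of the unified Append: pass ref==0 for the first sample of a series, then feed the returned ref back so later samples skip the label-hash lookup. A small mock sketch of that contract (the appender type here is hypothetical, not the real storage.Appender):

package main

import "fmt"

// appender mimics the shape of the unified Append: ref==0 means "resolve the
// series by labels", and the returned ref is fed back on subsequent calls.
type appender struct {
	nextRef uint64
	lookups int
}

func (a *appender) Append(ref uint64, lset string, t int64, v float64) (uint64, error) {
	if ref == 0 { // only the first sample of a series pays for the lookup
		a.lookups++
		a.nextRef++
		ref = a.nextRef
	}
	_, _, _ = lset, t, v
	return ref, nil
}

func main() {
	app := &appender{}
	ref := uint64(0)
	for t := int64(0); t < 5; t++ {
		ref, _ = app.Append(ref, `{job="demo"}`, t, float64(t))
	}
	fmt.Println("series lookups:", app.lookups) // 1, not 5
}
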
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go b/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go
index 3aa87b2c07068..7b45e0e9124cf 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go
@@ -613,18 +613,8 @@ func (w *WAL) log(rec []byte, final bool) error {
return err
}
}
- // If the record is too big to fit within the active page in the current
- // segment, terminate the active segment and advance to the next one.
- // This ensures that records do not cross segment boundaries.
- left := w.page.remaining() - recordHeaderSize // Free space in the active page.
- left += (pageSize - recordHeaderSize) * (w.pagesPerSegment() - w.donePages - 1) // Free pages in the active segment.
-
- if len(rec) > left {
- if err := w.nextSegment(); err != nil {
- return err
- }
- }
+ // Compress the record before calculating if a new segment is needed.
compressed := false
if w.compress && len(rec) > 0 {
// The snappy library uses `len` to calculate if we need a new buffer.
@@ -638,6 +628,18 @@ func (w *WAL) log(rec []byte, final bool) error {
}
}
+ // If the record is too big to fit within the active page in the current
+ // segment, terminate the active segment and advance to the next one.
+ // This ensures that records do not cross segment boundaries.
+ left := w.page.remaining() - recordHeaderSize // Free space in the active page.
+ left += (pageSize - recordHeaderSize) * (w.pagesPerSegment() - w.donePages - 1) // Free pages in the active segment.
+
+ if len(rec) > left {
+ if err := w.nextSegment(); err != nil {
+ return err
+ }
+ }
+
// Populate as many pages as necessary to fit the record.
// Be careful to always do one pass to ensure we write zero-length records.
for i := 0; i == 0 || len(rec) > 0; i++ {
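
The wal.go hunk reorders compression ahead of the segment-fit check, so len(rec) is the compressed length by the time free space is computed. A self-contained sketch of that fit check with assumed sizes (the real pageSize, recordHeaderSize, and pages-per-segment values live elsewhere in the package):

package main

import "fmt"

// Assumed sizes for illustration; the real constants are defined in wal.go.
const (
	pageSize         = 32 * 1024
	recordHeaderSize = 7
)

// fits reports whether a (compressed) record can still be written into the
// active segment without crossing a segment boundary.
func fits(rec []byte, pageRemaining, donePages, pagesPerSegment int) bool {
	left := pageRemaining - recordHeaderSize                                  // free space in the active page
	left += (pageSize - recordHeaderSize) * (pagesPerSegment - donePages - 1) // free pages in the segment
	return len(rec) <= left
}

func main() {
	rec := make([]byte, 40*1024)
	// One partly used page plus two untouched pages: the record fits.
	fmt.Println(fits(rec, 1024, 253, 256)) // true
	// Only one untouched page left: it no longer fits, so a new segment starts.
	fmt.Println(fits(rec, 1024, 254, 256)) // false
}
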
diff --git a/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go b/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go
index 6fa90781e1b16..16b3c3c7c69fc 100644
--- a/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go
+++ b/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go
@@ -18,6 +18,9 @@ import (
"os"
"time"
+ "github.com/prometheus/prometheus/pkg/exemplar"
+ "github.com/prometheus/prometheus/pkg/labels"
+ "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/util/testutil"
)
@@ -35,16 +38,22 @@ func New(t testutil.T) *TestStorage {
opts := tsdb.DefaultOptions()
opts.MinBlockDuration = int64(24 * time.Hour / time.Millisecond)
opts.MaxBlockDuration = int64(24 * time.Hour / time.Millisecond)
+ opts.MaxExemplars = 10
db, err := tsdb.Open(dir, nil, nil, opts)
if err != nil {
t.Fatalf("Opening test storage failed: %s", err)
}
- return &TestStorage{DB: db, dir: dir}
+ es, err := tsdb.NewCircularExemplarStorage(10, nil)
+ if err != nil {
+ t.Fatalf("Opening test exemplar storage failed: %s", err)
+ }
+ return &TestStorage{DB: db, exemplarStorage: es, dir: dir}
}
type TestStorage struct {
*tsdb.DB
- dir string
+ exemplarStorage tsdb.ExemplarStorage
+ dir string
}
func (s TestStorage) Close() error {
@@ -53,3 +62,15 @@ func (s TestStorage) Close() error {
}
return os.RemoveAll(s.dir)
}
+
+func (s TestStorage) ExemplarAppender() storage.ExemplarAppender {
+ return s
+}
+
+func (s TestStorage) ExemplarQueryable() storage.ExemplarQueryable {
+ return s.exemplarStorage
+}
+
+func (s TestStorage) AppendExemplar(ref uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) {
+ return ref, s.exemplarStorage.AddExemplar(l, e)
+	return ref, s.exemplarStorage.AddExemplar(l, e)
+}
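
A sketch of how a test might exercise the exemplar plumbing added to TestStorage above. It leans on the Prometheus module's own packages (labels.FromStrings, exemplar.Exemplar), so it is illustrative rather than guaranteed to compile against every revision:

package teststorage_test

import (
	"testing"

	"github.com/prometheus/prometheus/pkg/exemplar"
	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/util/teststorage"
)

// Sketch: append one exemplar through the new TestStorage plumbing.
func TestExemplarAppend(t *testing.T) {
	s := teststorage.New(t)
	defer func() { require.NoError(t, s.Close()) }()

	lbls := labels.FromStrings("traceID", "abc123")
	_, err := s.ExemplarAppender().AppendExemplar(0, lbls, exemplar.Exemplar{
		Labels: lbls,
		Value:  1,
		Ts:     1000,
	})
	require.NoError(t, err)
}
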
diff --git a/vendor/github.com/prometheus/prometheus/util/treecache/treecache.go b/vendor/github.com/prometheus/prometheus/util/treecache/treecache.go
index 19111e526b1a3..ec48f9e9cb008 100644
--- a/vendor/github.com/prometheus/prometheus/util/treecache/treecache.go
+++ b/vendor/github.com/prometheus/prometheus/util/treecache/treecache.go
@@ -22,9 +22,9 @@ import (
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
+ "github.com/go-zookeeper/zk"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
- "github.com/samuel/go-zookeeper/zk"
)
var (
diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go
index 81980ae47a823..3483c36456cb8 100644
--- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go
+++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go
@@ -39,11 +39,10 @@ import (
"github.com/prometheus/common/route"
"github.com/prometheus/prometheus/config"
- "github.com/prometheus/prometheus/pkg/gate"
+ "github.com/prometheus/prometheus/pkg/exemplar"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/textparse"
"github.com/prometheus/prometheus/pkg/timestamp"
- "github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/rules"
@@ -56,11 +55,6 @@ import (
"github.com/prometheus/prometheus/util/stats"
)
-const (
- namespace = "prometheus"
- subsystem = "api"
-)
-
type status string
const (
@@ -83,12 +77,6 @@ const (
var (
LocalhostRepresentations = []string{"127.0.0.1", "localhost", "::1"}
- remoteReadQueries = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: namespace,
- Subsystem: subsystem,
- Name: "remote_read_queries",
- Help: "The current number of remote read queries being executed or waiting.",
- })
)
type apiError struct {
@@ -171,8 +159,9 @@ type TSDBAdminStats interface {
// API can register a set of endpoints in a router and handle
// them using the provided storage and query engine.
type API struct {
- Queryable storage.SampleAndChunkQueryable
- QueryEngine *promql.Engine
+ Queryable storage.SampleAndChunkQueryable
+ QueryEngine *promql.Engine
+ ExemplarQueryable storage.ExemplarQueryable
targetRetriever func(context.Context) TargetRetriever
alertmanagerRetriever func(context.Context) AlertmanagerRetriever
@@ -183,23 +172,22 @@ type API struct {
ready func(http.HandlerFunc) http.HandlerFunc
globalURLOptions GlobalURLOptions
- db TSDBAdminStats
- dbDir string
- enableAdmin bool
- logger log.Logger
- remoteReadSampleLimit int
- remoteReadMaxBytesInFrame int
- remoteReadGate *gate.Gate
- CORSOrigin *regexp.Regexp
- buildInfo *PrometheusVersion
- runtimeInfo func() (RuntimeInfo, error)
- gatherer prometheus.Gatherer
- remoteWriteHandler http.Handler
+ db TSDBAdminStats
+ dbDir string
+ enableAdmin bool
+ logger log.Logger
+ CORSOrigin *regexp.Regexp
+ buildInfo *PrometheusVersion
+ runtimeInfo func() (RuntimeInfo, error)
+ gatherer prometheus.Gatherer
+
+ remoteWriteHandler http.Handler
+ remoteReadHandler http.Handler
}
func init() {
jsoniter.RegisterTypeEncoderFunc("promql.Point", marshalPointJSON, marshalPointJSONIsEmpty)
- prometheus.MustRegister(remoteReadQueries)
+ jsoniter.RegisterTypeEncoderFunc("exemplar.Exemplar", marshalExemplarJSON, marshalExemplarJSONEmpty)
}
// NewAPI returns an initialized API type.
@@ -207,6 +195,7 @@ func NewAPI(
qe *promql.Engine,
q storage.SampleAndChunkQueryable,
ap storage.Appendable,
+ eq storage.ExemplarQueryable,
tr func(context.Context) TargetRetriever,
ar func(context.Context) AlertmanagerRetriever,
configFunc func() config.Config,
@@ -225,31 +214,32 @@ func NewAPI(
runtimeInfo func() (RuntimeInfo, error),
buildInfo *PrometheusVersion,
gatherer prometheus.Gatherer,
+ registerer prometheus.Registerer,
) *API {
a := &API{
- QueryEngine: qe,
- Queryable: q,
+ QueryEngine: qe,
+ Queryable: q,
+ ExemplarQueryable: eq,
targetRetriever: tr,
alertmanagerRetriever: ar,
- now: time.Now,
- config: configFunc,
- flagsMap: flagsMap,
- ready: readyFunc,
- globalURLOptions: globalURLOptions,
- db: db,
- dbDir: dbDir,
- enableAdmin: enableAdmin,
- rulesRetriever: rr,
- remoteReadSampleLimit: remoteReadSampleLimit,
- remoteReadGate: gate.New(remoteReadConcurrencyLimit),
- remoteReadMaxBytesInFrame: remoteReadMaxBytesInFrame,
- logger: logger,
- CORSOrigin: CORSOrigin,
- runtimeInfo: runtimeInfo,
- buildInfo: buildInfo,
- gatherer: gatherer,
+ now: time.Now,
+ config: configFunc,
+ flagsMap: flagsMap,
+ ready: readyFunc,
+ globalURLOptions: globalURLOptions,
+ db: db,
+ dbDir: dbDir,
+ enableAdmin: enableAdmin,
+ rulesRetriever: rr,
+ logger: logger,
+ CORSOrigin: CORSOrigin,
+ runtimeInfo: runtimeInfo,
+ buildInfo: buildInfo,
+ gatherer: gatherer,
+
+ remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame),
}
if ap != nil {
@@ -297,6 +287,8 @@ func (api *API) Register(r *route.Router) {
r.Post("/query", wrap(api.query))
r.Get("/query_range", wrap(api.queryRange))
r.Post("/query_range", wrap(api.queryRange))
+ r.Get("/query_exemplars", wrap(api.queryExemplars))
+ r.Post("/query_exemplars", wrap(api.queryExemplars))
r.Get("/labels", wrap(api.labelNames))
r.Post("/labels", wrap(api.labelNames))
@@ -331,7 +323,6 @@ func (api *API) Register(r *route.Router) {
r.Put("/admin/tsdb/delete_series", wrap(api.deleteSeries))
r.Put("/admin/tsdb/clean_tombstones", wrap(api.cleanTombstones))
r.Put("/admin/tsdb/snapshot", wrap(api.snapshot))
-
}
type queryData struct {
@@ -370,6 +361,8 @@ func (api *API) query(r *http.Request) (result apiFuncResult) {
qry, err := api.QueryEngine.NewInstantQuery(api.Queryable, r.FormValue("query"), ts)
if err == promql.ErrValidationAtModifierDisabled {
err = errors.New("@ modifier is disabled, use --enable-feature=promql-at-modifier to enable it")
+ } else if err == promql.ErrValidationNegativeOffsetDisabled {
+ err = errors.New("negative offset is disabled, use --enable-feature=promql-negative-offset to enable it")
}
if err != nil {
return invalidParamError(err, "query")
@@ -448,6 +441,8 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) {
qry, err := api.QueryEngine.NewRangeQuery(api.Queryable, r.FormValue("query"), start, end, step)
if err == promql.ErrValidationAtModifierDisabled {
err = errors.New("@ modifier is disabled, use --enable-feature=promql-at-modifier to enable it")
+ } else if err == promql.ErrValidationNegativeOffsetDisabled {
+ err = errors.New("negative offset is disabled, use --enable-feature=promql-negative-offset to enable it")
}
if err != nil {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
@@ -481,6 +476,44 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) {
}, nil, res.Warnings, qry.Close}
}
+func (api *API) queryExemplars(r *http.Request) apiFuncResult {
+ start, err := parseTimeParam(r, "start", minTime)
+ if err != nil {
+ return invalidParamError(err, "start")
+ }
+ end, err := parseTimeParam(r, "end", maxTime)
+ if err != nil {
+ return invalidParamError(err, "end")
+ }
+ if end.Before(start) {
+ err := errors.New("end timestamp must not be before start timestamp")
+ return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
+ }
+
+ expr, err := parser.ParseExpr(r.FormValue("query"))
+ if err != nil {
+ return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
+ }
+
+ selectors := parser.ExtractSelectors(expr)
+ if len(selectors) < 1 {
+ return apiFuncResult{nil, nil, nil, nil}
+ }
+
+ ctx := r.Context()
+ eq, err := api.ExemplarQueryable.ExemplarQuerier(ctx)
+ if err != nil {
+ return apiFuncResult{nil, &apiError{errorExec, err}, nil, nil}
+ }
+
+ res, err := eq.Select(timestamp.FromTime(start), timestamp.FromTime(end), selectors...)
+ if err != nil {
+ return apiFuncResult{nil, &apiError{errorExec, err}, nil, nil}
+ }
+
+ return apiFuncResult{res, nil, nil, nil}
+}
+
func returnAPIError(err error) *apiError {
if err == nil {
return nil
@@ -1319,211 +1352,12 @@ func (api *API) serveTSDBStatus(*http.Request) apiFuncResult {
}
func (api *API) remoteRead(w http.ResponseWriter, r *http.Request) {
- ctx := r.Context()
- if err := api.remoteReadGate.Start(ctx); err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
- remoteReadQueries.Inc()
-
- defer api.remoteReadGate.Done()
- defer remoteReadQueries.Dec()
-
- req, err := remote.DecodeReadRequest(r)
- if err != nil {
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
- }
-
- externalLabels := api.config().GlobalConfig.ExternalLabels.Map()
-
- sortedExternalLabels := make([]prompb.Label, 0, len(externalLabels))
- for name, value := range externalLabels {
- sortedExternalLabels = append(sortedExternalLabels, prompb.Label{
- Name: name,
- Value: value,
- })
- }
- sort.Slice(sortedExternalLabels, func(i, j int) bool {
- return sortedExternalLabels[i].Name < sortedExternalLabels[j].Name
- })
-
- responseType, err := remote.NegotiateResponseType(req.AcceptedResponseTypes)
- if err != nil {
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
- }
-
- switch responseType {
- case prompb.ReadRequest_STREAMED_XOR_CHUNKS:
- api.remoteReadStreamedXORChunks(ctx, w, req, externalLabels, sortedExternalLabels)
- default:
- // On empty or unknown types in req.AcceptedResponseTypes we default to non streamed, raw samples response.
- api.remoteReadSamples(ctx, w, req, externalLabels, sortedExternalLabels)
- }
-}
-
-func (api *API) remoteReadSamples(
- ctx context.Context,
- w http.ResponseWriter,
- req *prompb.ReadRequest,
- externalLabels map[string]string,
- sortedExternalLabels []prompb.Label,
-) {
- w.Header().Set("Content-Type", "application/x-protobuf")
- w.Header().Set("Content-Encoding", "snappy")
-
- resp := prompb.ReadResponse{
- Results: make([]*prompb.QueryResult, len(req.Queries)),
- }
- for i, query := range req.Queries {
- if err := func() error {
- filteredMatchers, err := filterExtLabelsFromMatchers(query.Matchers, externalLabels)
- if err != nil {
- return err
- }
-
- querier, err := api.Queryable.Querier(ctx, query.StartTimestampMs, query.EndTimestampMs)
- if err != nil {
- return err
- }
- defer func() {
- if err := querier.Close(); err != nil {
- level.Warn(api.logger).Log("msg", "Error on querier close", "err", err.Error())
- }
- }()
-
- var hints *storage.SelectHints
- if query.Hints != nil {
- hints = &storage.SelectHints{
- Start: query.Hints.StartMs,
- End: query.Hints.EndMs,
- Step: query.Hints.StepMs,
- Func: query.Hints.Func,
- Grouping: query.Hints.Grouping,
- Range: query.Hints.RangeMs,
- By: query.Hints.By,
- }
- }
-
- var ws storage.Warnings
- resp.Results[i], ws, err = remote.ToQueryResult(querier.Select(false, hints, filteredMatchers...), api.remoteReadSampleLimit)
- if err != nil {
- return err
- }
- for _, w := range ws {
- level.Warn(api.logger).Log("msg", "Warnings on remote read query", "err", w.Error())
- }
- for _, ts := range resp.Results[i].Timeseries {
- ts.Labels = remote.MergeLabels(ts.Labels, sortedExternalLabels)
- }
- return nil
- }(); err != nil {
- if httpErr, ok := err.(remote.HTTPError); ok {
- http.Error(w, httpErr.Error(), httpErr.Status())
- return
- }
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
- }
-
- if err := remote.EncodeReadResponse(&resp, w); err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
-}
-
-func (api *API) remoteReadStreamedXORChunks(ctx context.Context, w http.ResponseWriter, req *prompb.ReadRequest, externalLabels map[string]string, sortedExternalLabels []prompb.Label) {
- w.Header().Set("Content-Type", "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse")
-
- f, ok := w.(http.Flusher)
- if !ok {
- http.Error(w, "internal http.ResponseWriter does not implement http.Flusher interface", http.StatusInternalServerError)
- return
- }
-
- for i, query := range req.Queries {
- if err := func() error {
- filteredMatchers, err := filterExtLabelsFromMatchers(query.Matchers, externalLabels)
- if err != nil {
- return err
- }
-
- querier, err := api.Queryable.ChunkQuerier(ctx, query.StartTimestampMs, query.EndTimestampMs)
- if err != nil {
- return err
- }
- defer func() {
- if err := querier.Close(); err != nil {
- level.Warn(api.logger).Log("msg", "Error on chunk querier close", "err", err.Error())
- }
- }()
-
- var hints *storage.SelectHints
- if query.Hints != nil {
- hints = &storage.SelectHints{
- Start: query.Hints.StartMs,
- End: query.Hints.EndMs,
- Step: query.Hints.StepMs,
- Func: query.Hints.Func,
- Grouping: query.Hints.Grouping,
- Range: query.Hints.RangeMs,
- By: query.Hints.By,
- }
- }
-
- ws, err := remote.StreamChunkedReadResponses(
- remote.NewChunkedWriter(w, f),
- int64(i),
- // The streaming API has to provide the series sorted.
- querier.Select(true, hints, filteredMatchers...),
- sortedExternalLabels,
- api.remoteReadMaxBytesInFrame,
- )
- if err != nil {
- return err
- }
-
- for _, w := range ws {
- level.Warn(api.logger).Log("msg", "Warnings on chunked remote read query", "warnings", w.Error())
- }
- return nil
- }(); err != nil {
- if httpErr, ok := err.(remote.HTTPError); ok {
- http.Error(w, httpErr.Error(), httpErr.Status())
- return
- }
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
- }
-}
-
-// filterExtLabelsFromMatchers change equality matchers which match external labels
-// to a matcher that looks for an empty label,
-// as that label should not be present in the storage.
-func filterExtLabelsFromMatchers(pbMatchers []*prompb.LabelMatcher, externalLabels map[string]string) ([]*labels.Matcher, error) {
- matchers, err := remote.FromLabelMatchers(pbMatchers)
- if err != nil {
- return nil, err
- }
-
- filteredMatchers := make([]*labels.Matcher, 0, len(matchers))
- for _, m := range matchers {
- value := externalLabels[m.Name]
- if m.Type == labels.MatchEqual && value == m.Value {
- matcher, err := labels.NewMatcher(labels.MatchEqual, m.Name, "")
- if err != nil {
- return nil, err
- }
- filteredMatchers = append(filteredMatchers, matcher)
- } else {
- filteredMatchers = append(filteredMatchers, m)
- }
+	// This is only really for tests - this will never be nil in practice.
+ if api.remoteReadHandler != nil {
+ api.remoteReadHandler.ServeHTTP(w, r)
+ } else {
+ http.Error(w, "not found", http.StatusNotFound)
}
-
- return filteredMatchers, nil
}
func (api *API) remoteWrite(w http.ResponseWriter, r *http.Request) {
@@ -1746,12 +1580,60 @@ OUTER:
return matcherSets, nil
}
+// marshalPointJSON writes `[ts, "val"]`.
func marshalPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
p := *((*promql.Point)(ptr))
stream.WriteArrayStart()
+ marshalTimestamp(p.T, stream)
+ stream.WriteMore()
+ marshalValue(p.V, stream)
+ stream.WriteArrayEnd()
+}
+
+func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool {
+ return false
+}
+
+// marshalExemplarJSON writes:
+// {
+// labels: <labels>,
+// value: "<string>",
+// timestamp: <float>
+// }
+func marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
+ p := *((*exemplar.Exemplar)(ptr))
+ stream.WriteObjectStart()
+
+ // "labels" key.
+ stream.WriteObjectField(`labels`)
+ lbls, err := p.Labels.MarshalJSON()
+ if err != nil {
+ stream.Error = err
+ return
+ }
+ stream.SetBuffer(append(stream.Buffer(), lbls...))
+
+ // "value" key.
+ stream.WriteMore()
+ stream.WriteObjectField(`value`)
+ marshalValue(p.Value, stream)
+
+ // "timestamp" key.
+ stream.WriteMore()
+ stream.WriteObjectField(`timestamp`)
+ marshalTimestamp(p.Ts, stream)
+
+ stream.WriteObjectEnd()
+}
+
+func marshalExemplarJSONEmpty(ptr unsafe.Pointer) bool {
+ return false
+}
+
+func marshalTimestamp(t int64, stream *jsoniter.Stream) {
// Write out the timestamp as a float divided by 1000.
// This is ~3x faster than converting to a float.
- t := p.T
if t < 0 {
stream.WriteRaw(`-`)
t = -t
@@ -1768,13 +1650,14 @@ func marshalPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
}
stream.WriteInt64(fraction)
}
- stream.WriteMore()
- stream.WriteRaw(`"`)
+}
+func marshalValue(v float64, stream *jsoniter.Stream) {
+ stream.WriteRaw(`"`)
// Taken from https://github.com/json-iterator/go/blob/master/stream_float.go#L71 as a workaround
// to https://github.com/json-iterator/go/issues/365 (jsoniter, to follow json standard, doesn't allow inf/nan).
buf := stream.Buffer()
- abs := math.Abs(p.V)
+ abs := math.Abs(v)
fmt := byte('f')
// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
if abs != 0 {
@@ -1782,13 +1665,7 @@ func marshalPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
fmt = 'e'
}
}
- buf = strconv.AppendFloat(buf, p.V, fmt, -1, 64)
+ buf = strconv.AppendFloat(buf, v, fmt, -1, 64)
stream.SetBuffer(buf)
-
stream.WriteRaw(`"`)
- stream.WriteArrayEnd()
-}
-
-func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool {
- return false
}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index cacbd15e5af23..752a9d7aec282 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -174,7 +174,7 @@ github.com/coreos/go-systemd/internal/dlopen
github.com/coreos/go-systemd/sdjournal
# github.com/coreos/go-systemd/v22 v22.1.0
github.com/coreos/go-systemd/v22/journal
-# github.com/cortexproject/cortex v1.7.1-0.20210310133228-161f103ed5ba
+# github.com/cortexproject/cortex v1.7.1-0.20210323110114-8a2e2c1eeb65
## explicit
github.com/cortexproject/cortex/pkg/alertmanager
github.com/cortexproject/cortex/pkg/alertmanager/alertmanagerpb
@@ -429,6 +429,8 @@ github.com/go-redis/redis/v8/internal/rand
github.com/go-redis/redis/v8/internal/util
# github.com/go-stack/stack v1.8.0
github.com/go-stack/stack
+# github.com/go-zookeeper/zk v1.0.2
+github.com/go-zookeeper/zk
# github.com/gocql/gocql v0.0.0-20200526081602-cd04bd7f22a7 => github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85
github.com/gocql/gocql
github.com/gocql/gocql/internal/lru
@@ -473,7 +475,7 @@ github.com/golang/protobuf/ptypes/duration
github.com/golang/protobuf/ptypes/empty
github.com/golang/protobuf/ptypes/timestamp
github.com/golang/protobuf/ptypes/wrappers
-# github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3
+# github.com/golang/snappy v0.0.3
## explicit
github.com/golang/snappy
# github.com/google/btree v1.0.0
@@ -713,7 +715,7 @@ github.com/pierrec/lz4/v4/internal/xxh32
github.com/pkg/errors
# github.com/pmezard/go-difflib v1.0.0
github.com/pmezard/go-difflib/difflib
-# github.com/prometheus/alertmanager v0.21.1-0.20210303154452-7866b9bb0927
+# github.com/prometheus/alertmanager v0.21.1-0.20210310093010-0f9cab6991e6
github.com/prometheus/alertmanager/api
github.com/prometheus/alertmanager/api/metrics
github.com/prometheus/alertmanager/api/v1
@@ -781,7 +783,7 @@ github.com/prometheus/node_exporter/https
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
-# github.com/prometheus/prometheus v1.8.2-0.20210215121130-6f488061dfb4
+# github.com/prometheus/prometheus v1.8.2-0.20210321183757-31a518faab18
## explicit
github.com/prometheus/prometheus/config
github.com/prometheus/prometheus/discovery
@@ -843,8 +845,6 @@ github.com/prometheus/prometheus/web/api/v1
github.com/rs/cors
# github.com/rs/xid v1.2.1
github.com/rs/xid
-# github.com/samuel/go-zookeeper v0.0.0-20201211165307-7117e9ea2414
-github.com/samuel/go-zookeeper/zk
# github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b => github.com/satori/go.uuid v1.2.0
## explicit
github.com/satori/go.uuid
|
loki
|
Update cortex version and fix resulting changes (#3532)
|
8c18463285f214ba5b0b9a127bbe0071a2ec7d69
|
2024-04-19 20:32:17
|
Alan Edwardes
|
fix: Remove Hardcoded Bucket Name from EventBridge Example CloudFormation Template (#12609)
| false
|
diff --git a/tools/lambda-promtail/template-eventbridge.yaml b/tools/lambda-promtail/template-eventbridge.yaml
index a6c2789d2c3ab..4b08ba037b0ec 100644
--- a/tools/lambda-promtail/template-eventbridge.yaml
+++ b/tools/lambda-promtail/template-eventbridge.yaml
@@ -88,7 +88,7 @@ Resources:
- Effect: Allow
Action:
- s3:GetObject
- Resource: arn:aws:s3:::thepalbi-lambda-lb-access-logs/*
+ Resource: !Sub 'arn:aws:s3:::${EventSourceS3Bucket}/*'
RoleName: iam_for_lambda
LambdaPromtailFunction:
Type: AWS::Lambda::Function
|
fix
|
Remove Hardcoded Bucket Name from EventBridge Example CloudFormation Template (#12609)
|
0084262269f4e2cb94d04e0cc0d40e9666177f06
|
2024-04-10 14:21:11
|
Bayan Taani
|
fix(operator): Configure Loki to use virtual-host-style URLs for S3 AWS endpoints (#12469)
| false
|
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index ef3d8ccc0cdb8..a45aca6e2c12d 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,5 +1,6 @@
## Main
+- [12469](https://github.com/grafana/loki/pull/12469) **btaani**: Configure Loki to use virtual-host-style URLs for S3 AWS endpoints
- [12181](https://github.com/grafana/loki/pull/12181) **btaani**: Improve validation of provided S3 storage configuration
- [12370](https://github.com/grafana/loki/pull/12370) **periklis**: Update Loki operand to v2.9.6
- [12333](https://github.com/grafana/loki/pull/12333) **periklis**: Bump max OpenShift version to next release
diff --git a/operator/internal/handlers/internal/storage/secrets.go b/operator/internal/handlers/internal/storage/secrets.go
index 7188216311dff..2b591ba34f3f1 100644
--- a/operator/internal/handlers/internal/storage/secrets.go
+++ b/operator/internal/handlers/internal/storage/secrets.go
@@ -404,7 +404,8 @@ func extractS3ConfigSecret(s *corev1.Secret, credentialMode lokiv1.CredentialMod
roleArn = s.Data[storage.KeyAWSRoleArn]
audience = s.Data[storage.KeyAWSAudience]
// Optional fields
- region = s.Data[storage.KeyAWSRegion]
+ region = s.Data[storage.KeyAWSRegion]
+ forcePathStyle = !strings.HasSuffix(string(endpoint), awsEndpointSuffix)
)
sseCfg, err := extractS3SSEConfig(s.Data)
@@ -413,9 +414,10 @@ func extractS3ConfigSecret(s *corev1.Secret, credentialMode lokiv1.CredentialMod
}
cfg := &storage.S3StorageConfig{
- Buckets: string(buckets),
- Region: string(region),
- SSE: sseCfg,
+ Buckets: string(buckets),
+ Region: string(region),
+ SSE: sseCfg,
+ ForcePathStyle: forcePathStyle,
}
switch credentialMode {
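
The forcePathStyle heuristic introduced in secrets.go boils down to a suffix check on the endpoint. A minimal sketch, assuming awsEndpointSuffix is ".amazonaws.com" (its exact value is defined elsewhere in the operator):

package main

import (
	"fmt"
	"strings"
)

// Assumed value; the operator defines awsEndpointSuffix elsewhere.
const awsEndpointSuffix = ".amazonaws.com"

// forcePathStyle mirrors the heuristic above: AWS-hosted endpoints get
// virtual-host-style URLs, anything else (e.g. in-cluster MinIO) keeps
// path-style addressing.
func forcePathStyle(endpoint string) bool {
	return !strings.HasSuffix(endpoint, awsEndpointSuffix)
}

func main() {
	fmt.Println(forcePathStyle("https://s3.region.amazonaws.com"))             // false
	fmt.Println(forcePathStyle("https://test.default.svc.cluster.local:9000")) // true
}
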
diff --git a/operator/internal/handlers/internal/storage/secrets_test.go b/operator/internal/handlers/internal/storage/secrets_test.go
index a85e2f6911d6e..465ffb31d8aef 100644
--- a/operator/internal/handlers/internal/storage/secrets_test.go
+++ b/operator/internal/handlers/internal/storage/secrets_test.go
@@ -9,6 +9,7 @@ import (
configv1 "github.com/grafana/loki/operator/apis/config/v1"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/manifests/storage"
)
func TestHashSecretData(t *testing.T) {
@@ -617,6 +618,62 @@ func TestS3Extract(t *testing.T) {
}
}
+func TestS3Extract_S3ForcePathStyle(t *testing.T) {
+ tt := []struct {
+ desc string
+ secret *corev1.Secret
+ wantOptions *storage.S3StorageConfig
+ }{
+ {
+ desc: "aws s3 endpoint",
+ secret: &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
+ Data: map[string][]byte{
+ "endpoint": []byte("https://s3.region.amazonaws.com"),
+ "region": []byte("region"),
+ "bucketnames": []byte("this,that"),
+ "access_key_id": []byte("id"),
+ "access_key_secret": []byte("secret"),
+ },
+ },
+ wantOptions: &storage.S3StorageConfig{
+ Endpoint: "https://s3.region.amazonaws.com",
+ Region: "region",
+ Buckets: "this,that",
+ },
+ },
+ {
+ desc: "non-aws s3 endpoint",
+ secret: &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
+ Data: map[string][]byte{
+ "endpoint": []byte("https://test.default.svc.cluster.local:9000"),
+ "region": []byte("region"),
+ "bucketnames": []byte("this,that"),
+ "access_key_id": []byte("id"),
+ "access_key_secret": []byte("secret"),
+ },
+ },
+ wantOptions: &storage.S3StorageConfig{
+ Endpoint: "https://test.default.svc.cluster.local:9000",
+ Region: "region",
+ Buckets: "this,that",
+ ForcePathStyle: true,
+ },
+ },
+ }
+
+ for _, tc := range tt {
+ tc := tc
+ t.Run(tc.desc, func(t *testing.T) {
+ t.Parallel()
+ options, err := extractS3ConfigSecret(tc.secret, lokiv1.CredentialModeStatic)
+ require.NoError(t, err)
+ require.Equal(t, tc.wantOptions, options)
+ })
+ }
+}
+
func TestS3Extract_WithOpenShiftTokenCCOAuth(t *testing.T) {
fg := configv1.FeatureGates{
OpenShift: configv1.OpenShiftFeatureGates{
diff --git a/operator/internal/manifests/internal/config/build_test.go b/operator/internal/manifests/internal/config/build_test.go
index 53650429196e0..fa20c45a0226b 100644
--- a/operator/internal/manifests/internal/config/build_test.go
+++ b/operator/internal/manifests/internal/config/build_test.go
@@ -25,7 +25,7 @@ chunk_store_config:
common:
storage:
s3:
- s3: http://test.default.svc.cluster.local.:9000
+ endpoint: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
access_key_id: ${AWS_ACCESS_KEY_ID}
@@ -245,9 +245,10 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
+ ForcePathStyle: true,
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -282,7 +283,7 @@ chunk_store_config:
common:
storage:
s3:
- s3: http://test.default.svc.cluster.local.:9000
+ endpoint: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
access_key_id: ${AWS_ACCESS_KEY_ID}
@@ -592,9 +593,10 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
+ ForcePathStyle: true,
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -676,9 +678,10 @@ func TestBuild_ConfigAndRuntimeConfig_CreateLokiConfigFailed(t *testing.T) {
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
+ ForcePathStyle: true,
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -707,7 +710,7 @@ chunk_store_config:
common:
storage:
s3:
- s3: http://test.default.svc.cluster.local.:9000
+ endpoint: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
access_key_id: ${AWS_ACCESS_KEY_ID}
@@ -1028,9 +1031,10 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
+ ForcePathStyle: true,
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -1065,7 +1069,7 @@ chunk_store_config:
common:
storage:
s3:
- s3: http://test.default.svc.cluster.local.:9000
+ endpoint: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
access_key_id: ${AWS_ACCESS_KEY_ID}
@@ -1387,9 +1391,10 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
+ ForcePathStyle: true,
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -1424,7 +1429,7 @@ chunk_store_config:
common:
storage:
s3:
- s3: http://test.default.svc.cluster.local.:9000
+ endpoint: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
access_key_id: ${AWS_ACCESS_KEY_ID}
@@ -1776,9 +1781,10 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
+ ForcePathStyle: true,
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -1813,7 +1819,7 @@ chunk_store_config:
common:
storage:
s3:
- s3: http://test.default.svc.cluster.local.:9000
+ endpoint: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
access_key_id: ${AWS_ACCESS_KEY_ID}
@@ -2111,9 +2117,10 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
+ ForcePathStyle: true,
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -2151,7 +2158,7 @@ chunk_store_config:
common:
storage:
s3:
- s3: http://test.default.svc.cluster.local.:9000
+ endpoint: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
access_key_id: ${AWS_ACCESS_KEY_ID}
@@ -2533,9 +2540,10 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
+ ForcePathStyle: true,
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -2570,12 +2578,11 @@ chunk_store_config:
common:
storage:
s3:
- s3: http://test.default.svc.cluster.local.:9000
+ endpoint: http://s3.us-east.amazonaws.com
bucketnames: loki
region: us-east
access_key_id: ${AWS_ACCESS_KEY_ID}
secret_access_key: ${AWS_ACCESS_KEY_SECRET}
- s3forcepathstyle: true
compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
ring:
kvstore:
@@ -2879,7 +2886,7 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Endpoint: "http://s3.us-east.amazonaws.com",
Region: "us-east",
Buckets: "loki",
},
@@ -2916,7 +2923,7 @@ chunk_store_config:
common:
storage:
s3:
- s3: http://test.default.svc.cluster.local.:9000
+ endpoint: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
access_key_id: ${AWS_ACCESS_KEY_ID}
@@ -3375,9 +3382,10 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
+ ForcePathStyle: true,
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -3412,7 +3420,7 @@ chunk_store_config:
common:
storage:
s3:
- s3: http://test.default.svc.cluster.local.:9000
+ endpoint: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
access_key_id: ${AWS_ACCESS_KEY_ID}
@@ -3635,9 +3643,10 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
+ ForcePathStyle: true,
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -3672,7 +3681,7 @@ chunk_store_config:
common:
storage:
s3:
- s3: http://test.default.svc.cluster.local.:9000
+ endpoint: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
access_key_id: ${AWS_ACCESS_KEY_ID}
@@ -3897,9 +3906,10 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
+ ForcePathStyle: true,
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -3934,7 +3944,7 @@ chunk_store_config:
common:
storage:
s3:
- s3: http://test.default.svc.cluster.local.:9000
+ endpoint: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
access_key_id: ${AWS_ACCESS_KEY_ID}
@@ -4157,9 +4167,10 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
+ ForcePathStyle: true,
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -4194,7 +4205,7 @@ chunk_store_config:
common:
storage:
s3:
- s3: http://test.default.svc.cluster.local.:9000
+ endpoint: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
access_key_id: ${AWS_ACCESS_KEY_ID}
@@ -4454,9 +4465,10 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
+ ForcePathStyle: true,
SSE: storage.S3SSEConfig{
Type: storage.SSEKMSType,
@@ -4496,7 +4508,7 @@ chunk_store_config:
common:
storage:
s3:
- s3: http://test.default.svc.cluster.local.:9000
+ endpoint: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
access_key_id: ${AWS_ACCESS_KEY_ID}
@@ -4753,9 +4765,10 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
+ ForcePathStyle: true,
SSE: storage.S3SSEConfig{
Type: storage.SSES3Type,
@@ -4795,7 +4808,7 @@ chunk_store_config:
common:
storage:
s3:
- s3: http://test.default.svc.cluster.local.:9000
+ endpoint: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
access_key_id: ${AWS_ACCESS_KEY_ID}
@@ -5011,9 +5024,10 @@ overrides:
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
+ ForcePathStyle: true,
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -5100,9 +5114,10 @@ func defaultOptions() Options {
ObjectStorage: storage.Options{
SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
- Endpoint: "http://test.default.svc.cluster.local.:9000",
- Region: "us-east",
- Buckets: "loki",
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
+ ForcePathStyle: true,
},
Schemas: []lokiv1.ObjectStorageSchema{
{
@@ -5284,7 +5299,7 @@ chunk_store_config:
common:
storage:
s3:
- s3: http://test.default.svc.cluster.local.:9000
+ endpoint: http://test.default.svc.cluster.local.:9000
bucketnames: loki
region: us-east
access_key_id: ${AWS_ACCESS_KEY_ID}
diff --git a/operator/internal/manifests/internal/config/loki-config.yaml b/operator/internal/manifests/internal/config/loki-config.yaml
index 2b29c51806cf7..3df0ac7463881 100644
--- a/operator/internal/manifests/internal/config/loki-config.yaml
+++ b/operator/internal/manifests/internal/config/loki-config.yaml
@@ -33,12 +33,14 @@ common:
region: {{.Region}}
s3forcepathstyle: false
{{- else }}
- s3: {{ .Endpoint }}
+ endpoint: {{ .Endpoint }}
bucketnames: {{ .Buckets }}
region: {{ .Region }}
access_key_id: ${AWS_ACCESS_KEY_ID}
secret_access_key: ${AWS_ACCESS_KEY_SECRET}
+ {{- if .ForcePathStyle }}
s3forcepathstyle: true
+ {{- end}}
{{- end }}
{{- with .SSE }}
{{- if .Type }}
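
The template change above emits s3forcepathstyle only when ForcePathStyle is set. A trimmed-down text/template sketch (stand-in template and struct, not the operator's real ones) showing how the two cases render:

package main

import (
	"os"
	"text/template"
)

// Stand-in for the operator's template and options; only the conditional
// s3forcepathstyle line is reproduced here.
const tmpl = `s3:
  endpoint: {{ .Endpoint }}
{{- if .ForcePathStyle }}
  s3forcepathstyle: true
{{- end }}
`

type s3Opts struct {
	Endpoint       string
	ForcePathStyle bool
}

func main() {
	t := template.Must(template.New("s3").Parse(tmpl))
	// Cluster-local endpoint: the flag is rendered.
	_ = t.Execute(os.Stdout, s3Opts{"https://minio.svc:9000", true})
	// AWS endpoint: the line is omitted, so virtual-host style applies.
	_ = t.Execute(os.Stdout, s3Opts{"https://s3.us-east.amazonaws.com", false})
}
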
diff --git a/operator/internal/manifests/storage/options.go b/operator/internal/manifests/storage/options.go
index 47779ce554793..1f8d3d904b71a 100644
--- a/operator/internal/manifests/storage/options.go
+++ b/operator/internal/manifests/storage/options.go
@@ -42,12 +42,13 @@ type GCSStorageConfig struct {
// S3StorageConfig for S3 storage config
type S3StorageConfig struct {
- Endpoint string
- Region string
- Buckets string
- Audience string
- STS bool
- SSE S3SSEConfig
+ Endpoint string
+ Region string
+ Buckets string
+ Audience string
+ STS bool
+ SSE S3SSEConfig
+ ForcePathStyle bool
}
type S3SSEType string
|
fix
|
Configure Loki to use virtual-host-style URLs for S3 AWS endpoints (#12469)
|
b7bccfcec3275b1d6d76c7450415ac8744e4d7b0
|
2024-10-03 23:44:23
|
renovate[bot]
|
fix(deps): update module go.etcd.io/bbolt to v1.3.11 (#14358)
| false
|
diff --git a/go.mod b/go.mod
index 8873bcba24e61..32cae2cba59dc 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,8 @@
module github.com/grafana/loki/v3
-go 1.21.8
+go 1.22
+
+toolchain go1.23.2
require (
cloud.google.com/go/bigtable v1.29.0
@@ -93,7 +95,7 @@ require (
github.com/stretchr/testify v1.9.0
github.com/uber/jaeger-client-go v2.30.0+incompatible
github.com/xdg-go/scram v1.1.2
- go.etcd.io/bbolt v1.3.10
+ go.etcd.io/bbolt v1.3.11
go.uber.org/atomic v1.11.0
go.uber.org/goleak v1.3.0
golang.org/x/crypto v0.27.0
diff --git a/go.sum b/go.sum
index bca64f1bbbcce..00afdeb6e1231 100644
--- a/go.sum
+++ b/go.sum
@@ -1889,8 +1889,8 @@ go.einride.tech/aip v0.67.1/go.mod h1:ZGX4/zKw8dcgzdLsrvpOOGxfxI2QSk12SlP7d6c0/X
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
-go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
-go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
+go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
+go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.etcd.io/etcd v0.5.0-alpha.5.0.20190917205325-a14579fbfb1a/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.etcd.io/etcd/api/v3 v3.5.4 h1:OHVyt3TopwtUQ2GKdd5wu3PmmipR4FTwCqoEjSyRdIc=
diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der
deleted file mode 100644
index 958f3cfaddf36..0000000000000
Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der and /dev/null differ
diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der
deleted file mode 100644
index d2817641bafb0..0000000000000
Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der and /dev/null differ
diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der
deleted file mode 100644
index d8c3710c85f9f..0000000000000
Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der and /dev/null differ
diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der
deleted file mode 100644
index dae619c097512..0000000000000
Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der and /dev/null differ
diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der
deleted file mode 100644
index ce7f8d31d6802..0000000000000
Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der and /dev/null differ
diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der
deleted file mode 100644
index 04b0d73600b72..0000000000000
Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der and /dev/null differ
diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der
deleted file mode 100644
index d8c3710c85f9f..0000000000000
Binary files a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der and /dev/null differ
diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem
deleted file mode 100644
index 493a5a2648101..0000000000000
--- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL
-BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2
-YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE
-AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN
-MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ
-BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx
-ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ
-KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9
-a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0
-OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3
-RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK
-P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316
-HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu
-0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl
-MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6
-EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9
-/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA
-QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ
-nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD
-X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco
-pKklVz0=
------END CERTIFICATE-----
diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem
deleted file mode 100644
index 55a7f10c742db..0000000000000
--- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF
-l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj
-+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G
-4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA
-xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh
-68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ
-/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL
-Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA
-VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9
-9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH
-MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt
-aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq
-xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx
-2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv
-EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z
-aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq
-udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs
-VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm
-56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT
-GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V
-Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm
-HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q
-BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH
-qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh
-GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w=
------END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der
deleted file mode 100644
index 04b0d73600b72..0000000000000
Binary files a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der and /dev/null differ
diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem
deleted file mode 100644
index 0f98322c72446..0000000000000
--- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL
-BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2
-YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE
-AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN
-MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ
-BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx
-ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ
-KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT
-fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ
-qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE
-xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es
-Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2
-Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM
-ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX
-MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR
-e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X
-POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl
-AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg
-odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+
-PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN
-Dhm6uZM=
------END CERTIFICATE-----
diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem
deleted file mode 100644
index 81afea783df9f..0000000000000
--- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs
-8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO
-QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk
-XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA
-Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc
-gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf
-LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl
-jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0
-4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q
-Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P
-nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1
-drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE
-duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50
-L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG
-06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm
-eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD
-uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7
-lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL
-a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb
-hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ
-7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j
-r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7
-eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD
-B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz
-7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g==
------END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem
deleted file mode 100644
index 493a5a2648101..0000000000000
--- a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL
-BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2
-YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE
-AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN
-MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ
-BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx
-ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ
-KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9
-a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0
-OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3
-RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK
-P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316
-HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu
-0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl
-MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6
-EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9
-/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA
-QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ
-nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD
-X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco
-pKklVz0=
------END CERTIFICATE-----
diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem
deleted file mode 100644
index 55a7f10c742db..0000000000000
--- a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF
-l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj
-+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G
-4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA
-xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh
-68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ
-/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL
-Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA
-VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9
-9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH
-MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt
-aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq
-xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx
-2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv
-EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z
-aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq
-udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs
-VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm
-56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT
-GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V
-Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm
-HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q
-BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH
-qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh
-GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w=
------END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem
deleted file mode 100644
index 0f98322c72446..0000000000000
--- a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL
-BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2
-YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE
-AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN
-MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ
-BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx
-ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ
-KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT
-fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ
-qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE
-xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es
-Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2
-Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM
-ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX
-MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR
-e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X
-POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl
-AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg
-odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+
-PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN
-Dhm6uZM=
------END CERTIFICATE-----
diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem
deleted file mode 100644
index 81afea783df9f..0000000000000
--- a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs
-8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO
-QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk
-XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA
-Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc
-gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf
-LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl
-jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0
-4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q
-Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P
-nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1
-drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE
-duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50
-L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG
-06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm
-eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD
-uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7
-lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL
-a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb
-hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ
-7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j
-r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7
-eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD
-B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz
-7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g==
------END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem
deleted file mode 100644
index 493a5a2648101..0000000000000
--- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL
-BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2
-YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE
-AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN
-MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ
-BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx
-ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ
-KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9
-a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0
-OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3
-RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK
-P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316
-HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu
-0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl
-MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6
-EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9
-/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA
-QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ
-nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD
-X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco
-pKklVz0=
------END CERTIFICATE-----
diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem
deleted file mode 100644
index 55a7f10c742db..0000000000000
--- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF
-l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj
-+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G
-4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA
-xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh
-68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ
-/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL
-Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA
-VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9
-9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH
-MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt
-aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq
-xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx
-2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv
-EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z
-aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq
-udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs
-VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm
-56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT
-GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V
-Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm
-HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q
-BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH
-qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh
-GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w=
------END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem
deleted file mode 100644
index 0f98322c72446..0000000000000
--- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL
-BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2
-YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE
-AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN
-MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ
-BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx
-ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ
-KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT
-fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ
-qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE
-xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es
-Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2
-Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM
-ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX
-MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR
-e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X
-POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl
-AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg
-odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+
-PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN
-Dhm6uZM=
------END CERTIFICATE-----
diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem
deleted file mode 100644
index 81afea783df9f..0000000000000
--- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs
-8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO
-QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk
-XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA
-Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc
-gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf
-LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl
-jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0
-4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q
-Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P
-nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1
-drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE
-duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50
-L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG
-06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm
-eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD
-uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7
-lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL
-a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb
-hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ
-7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j
-r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7
-eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD
-B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz
-7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g==
------END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/google/s2a-go/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/testdata/client_cert.pem
deleted file mode 100644
index 493a5a2648101..0000000000000
--- a/vendor/github.com/google/s2a-go/testdata/client_cert.pem
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL
-BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2
-YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE
-AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN
-MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ
-BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx
-ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ
-KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9
-a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0
-OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3
-RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK
-P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316
-HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu
-0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl
-MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6
-EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9
-/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA
-QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ
-nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD
-X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco
-pKklVz0=
------END CERTIFICATE-----
diff --git a/vendor/github.com/google/s2a-go/testdata/client_key.pem b/vendor/github.com/google/s2a-go/testdata/client_key.pem
deleted file mode 100644
index 55a7f10c742db..0000000000000
--- a/vendor/github.com/google/s2a-go/testdata/client_key.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF
-l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj
-+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G
-4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA
-xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh
-68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ
-/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL
-Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA
-VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9
-9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH
-MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt
-aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq
-xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx
-2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv
-EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z
-aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq
-udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs
-VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm
-56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT
-GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V
-Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm
-HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q
-BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH
-qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh
-GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w=
------END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem b/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem
deleted file mode 100644
index 60c4cf0691574..0000000000000
--- a/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem
+++ /dev/null
@@ -1,19 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDCDCCAfACFFlYsYCFit01ZpYmfjxpo7/6wMEbMA0GCSqGSIb3DQEBCwUAMEgx
-CzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEPMA0GA1UECgwGR29vZ2xlMRswGQYD
-VQQDDBJ0ZXN0LXMyYS1tdGxzLXJvb3QwHhcNMjMwODIyMTY0NTE4WhcNNDMwODIy
-MTY0NTE4WjA5MQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExHTAbBgNVBAMMFHRl
-c3QtczJhLW10bHMtY2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
-AQEAqrQQMyxNtmdCB+uY3szgRsfPrKC+TV9Fusnd8PfaCVuGTGcSBKM018nV2TDn
-3IYFQ1HgLpGwGwOFDBb3y0o9i2/l2VJySriX1GSNX6nDmVasQlO1wuOLCP7/LRmO
-7b6Kise5W0IFhYaptKyWnekn2pS0tAjimqpfn2w0U6FDGtQUqg/trQQmGtTSJHjb
-A+OFd0EFC18KGP8Q+jOMaMkJRmpeEiAPyHPDoMhqQNT26RApv9j2Uzo4SuXzHH6T
-cAdm1+zG+EXY/UZKX9oDkSbwIJvN+gCmNyORLalJ12gsGYOCjMd8K0mlXBqrmmbO
-VHVbUm9062lhE7x59AA8DK4DoQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCPOvtL
-dq2hxFHlIy0YUK8jp/DtwJZPwzx1id5FtWwd0CxBS1StIgmkHMxtkJGz1iyQLplI
-je+Msd4sTsb5zZi/8kGKehi8Wj4lghp4oP30cpob41OvM68M9RC/wSOVk9igSww+
-l3zof6wKRIswsi5VHrL16ruIVVoDlyFbKr8yk+cp9OPOV8hNNN7ewY9xC8OgnTt8
-YtdaLe6uTplKBLW+j3GtshigRhyfkGJyPFYL4LAeDJCHlC1qmBnkyP0ijMp6vneM
-E8TLavnMTMcpihWTWpyKeRkO6HDRsP4AofQAp7VAiAdSOplga+w2qgrVICV+m8MK
-BTq2PBvc59T6OFLq
------END CERTIFICATE-----
diff --git a/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem b/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem
deleted file mode 100644
index 9d112d1e9ff9a..0000000000000
--- a/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem
+++ /dev/null
@@ -1,28 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCqtBAzLE22Z0IH
-65jezOBGx8+soL5NX0W6yd3w99oJW4ZMZxIEozTXydXZMOfchgVDUeAukbAbA4UM
-FvfLSj2Lb+XZUnJKuJfUZI1fqcOZVqxCU7XC44sI/v8tGY7tvoqKx7lbQgWFhqm0
-rJad6SfalLS0COKaql+fbDRToUMa1BSqD+2tBCYa1NIkeNsD44V3QQULXwoY/xD6
-M4xoyQlGal4SIA/Ic8OgyGpA1PbpECm/2PZTOjhK5fMcfpNwB2bX7Mb4Rdj9Rkpf
-2gORJvAgm836AKY3I5EtqUnXaCwZg4KMx3wrSaVcGquaZs5UdVtSb3TraWETvHn0
-ADwMrgOhAgMBAAECggEAUccupZ1ZY4OHTi0PkNk8rpwFwTFGyeFVEf2ofkr24RnA
-NnUAXEllxOUUNlcoFOz9s3kTeavg3qgqgpa0QmdAIb9LMXg+ec6CKkW7trMpGho8
-LxBUWNfSoU4sKEqAvyPT0lWJVo9D/up6/avbAi6TIbOw+Djzel4ZrlHTpabxc3WT
-EilXzn4q54b3MzxCQeQjcnzTieW4Q5semG2kLiXFToHIY2di01P/O8awUjgrD+uW
-/Cb6H49MnHm9VPkqea1iwZeMQd6Gh5FrC7RezsBjdB1JBcfsv6PFt2ySInjB8SF+
-XR5Gr3Cc5sh9s0LfprZ9Dq0rlSWmwasPMI1COK6SswKBgQDczgeWd3erQ1JX9LEI
-wollawqC9y7uJhEsw1hrPqA3uqZYiLUc7Nmi4laZ12mcGoXNDS3R3XmD58qGmGaU
-lxEVTb8KDVWBgw450VoBKzSMQnCP6zn4nZxTYxeqMKjDGf6TRB6TZc843qsG3eRC
-k91yxrCQ/0HV6PT48C+lieDzLwKBgQDF6aNKiyrswr457undBnM1H8q/Y6xC5ZlK
-UtiQdhuyBnicvz0U8WPxBY/8gha0OXWuSnBqq/z77iFVNv/zT6p9K7kM7nBGd8cB
-8KO6FNbyaHWFrhCI5zNzRTH4oha0hfvUOoti09vqavCtWD4L+D/63ba1wNLKPO9o
-4gWbCnUCLwKBgQC/vus372csgrnvR761LLrEJ8BpGt7WUJh5luoht7DKtHvgRleB
-Vu1oVcV+s2Iy/ZVUDC3OIdZ0hcWKPK5YOxfKuEk+IXYvke+4peTTPwHTC59UW6Fs
-FPK8N0FFuhvT0a8RlAY5WiAp8rPysp6WcnHMSl7qi8BQUozp4Sp/RsziYQKBgBXv
-r4mzoy5a53rEYGd/L4XT4EUWZyGDEVqLlDVu4eL5lKTLDZokp08vrqXuRVX0iHap
-CYzJQ2EpI8iuL/BoBB2bmwcz5n3pCMXORld5t9lmeqA2it6hwbIlGUTVsm6P6zm6
-w3hQwy9YaxTLkxUAjxbfPEEo/jQsTNzzMGve3NlBAoGAbgJExpDyMDnaD2Vi5eyr
-63b54BsqeLHqxJmADifyRCj7G1SJMm3zMKkNNOS0vsXgoiId973STFf1XQiojiv8
-Slbxyv5rczcY0n3LOuQYcM5OzsjzpNFZsT2dDnMfNRUF3rx3Geu/FuJ9scF1b00r
-fVMrcL3jSf/W1Xh4TgtyoU8=
------END PRIVATE KEY-----
diff --git a/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem b/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem
deleted file mode 100644
index 44e436f6ec7cb..0000000000000
--- a/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem
+++ /dev/null
@@ -1,21 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDcTCCAlmgAwIBAgIUDUkgI+2FZtuUHyUUi0ZBH7JvN00wDQYJKoZIhvcNAQEL
-BQAwSDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQ8wDQYDVQQKDAZHb29nbGUx
-GzAZBgNVBAMMEnRlc3QtczJhLW10bHMtcm9vdDAeFw0yMzA4MjEyMTI5MTVaFw00
-MzA4MjEyMTI5MTVaMEgxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEPMA0GA1UE
-CgwGR29vZ2xlMRswGQYDVQQDDBJ0ZXN0LXMyYS1tdGxzLXJvb3QwggEiMA0GCSqG
-SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCbFEQfpvla27bATedrN4BAWsI9GSwSnJLW
-QWzXcnAk6cKxQBAhnaKHRxHY8ttLhNTtxQeub894CLzJvHE/0xDhuMzjtCCCZ7i2
-r08tKZ1KcEzPJCPNlxlzAXPA45XU3LRlbGvju/PBPhm6n1hCEKTNI/KETJ5DEaYg
-Cf2LcXVsl/zW20MwDZ+e2w/9a2a6n6DdpW1ekOR550hXAUOIxvmXRBeYeGLFvp1n
-rQgZBhRaxP03UB+PQD2oMi/4mfsS96uGCXdzzX8qV46O8m132HUbnA/wagIwboEe
-d7Bx237dERDyHw5GFnll7orgA0FOtoEufXdeQxWVvTjO0+PVPgsvAgMBAAGjUzBR
-MB0GA1UdDgQWBBRyMtg/yutV8hw8vOq0i8x0eBQi7DAfBgNVHSMEGDAWgBRyMtg/
-yutV8hw8vOq0i8x0eBQi7DAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUA
-A4IBAQArN/gdqWMxd5Rvq2eJMTp6I4RepJOT7Go4sMsRsy1caJqqcoS2EvREDZMN
-XNEBcyQBB5kYd6TCcZGoLnEtWYXQ4jjEiXG1g7/+rWxyqw0ZYuP7FWzuHg3Uor/x
-fApbEKwptP5ywVc+33h4qreGcqXkVCCn+sAcstGgrqubdGZW2T5gazUMyammOOuN
-9IWL1PbvXmgEKD+80NUIrk09zanYyrElGdU/zw/kUbZ3Jf6WUBtJGhTzRQ1qZeKa
-VnpCbLoG3vObEB8mxDUAlIzwAtfvw4U32BVIZA8xrocz6OOoAnSW1bTlo3EOIo/G
-MTV7jmY9TBPtfhRuO/cG650+F+cw
------END CERTIFICATE-----
diff --git a/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem b/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem
deleted file mode 100644
index 68c60613458ab..0000000000000
--- a/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem
+++ /dev/null
@@ -1,21 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDbjCCAlagAwIBAgIUbexZ5sZl86Al9dsI2PkOgtqKnkgwDQYJKoZIhvcNAQEL
-BQAwSDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQ8wDQYDVQQKDAZHb29nbGUx
-GzAZBgNVBAMMEnRlc3QtczJhLW10bHMtcm9vdDAeFw0yMzA4MjIwMDMyMDRaFw00
-MzA4MjIwMDMyMDRaMDkxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEdMBsGA1UE
-AwwUdGVzdC1zMmEtbXRscy1zZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
-ggEKAoIBAQCMEzybsGPqfh92GLwy43mt8kQDF3ztr8y06RwU1hVnY7QqYK4obpvh
-HkJVnTz9gwNBF3n5nUalqRzactlf2PCydN9oSYNCO8svVmo7vw1CleKAKFAiV5Qn
-H76QlqD15oJreh7nSM8R4qj5KukIHvt0cN0gD6CJQzIURDtsKJwkW3yQjYyT/FAK
-GYtFrB6buDn3Eg3Hsw6z7uj7CzLBsSl7BIGrQILbpbI9nFNT3rUTUhXZKY/3UtJA
-Ob66AjTmMbD16RGYZR4JsPx6CstheifJ6YSI79r5KgD37zX0jMXFWimvb2SmZmFe
-LoohtC8K7uTyjm/dROx6nHXdDt5TQYXHAgMBAAGjXzBdMBsGA1UdEQQUMBKHEAAA
-AAAAAAAAAAAAAAAAAAAwHQYDVR0OBBYEFI3i2+tIk6YYn0MIxC0q93jk1VsUMB8G
-A1UdIwQYMBaAFHIy2D/K61XyHDy86rSLzHR4FCLsMA0GCSqGSIb3DQEBCwUAA4IB
-AQAUhk+s/lrIAULBbU7E22C8f93AzTxE1mhyHGNlfPPJP3t1Dl+h4X4WkFpkz5gT
-EcNXB//Vvoq99HbEK5/92sxsIPexKdJBdcggeHXIgLDkOrEZEb0Nnh9eaAuU2QDn
-JW44hMB+aF6mEaJvOHE6DRkQw3hwFYFisFKKHtlQ3TyOhw5CHGzSExPZusdSFNIe
-2E7V/0QzGPJEFnEFUNe9N8nTH2P385Paoi+5+Iizlp/nztVXfzv0Cj/i+qGgtDUs
-HB+gBU2wxMw8eYyuNzACH70wqGR1Parj8/JoyYhx0S4+Gjzy3JH3CcAMaxyfH/dI
-4Wcvfz/isxgmH1UqIt3oc6ad
------END CERTIFICATE-----
diff --git a/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem b/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem
deleted file mode 100644
index b14ad0f724eef..0000000000000
--- a/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem
+++ /dev/null
@@ -1,28 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCMEzybsGPqfh92
-GLwy43mt8kQDF3ztr8y06RwU1hVnY7QqYK4obpvhHkJVnTz9gwNBF3n5nUalqRza
-ctlf2PCydN9oSYNCO8svVmo7vw1CleKAKFAiV5QnH76QlqD15oJreh7nSM8R4qj5
-KukIHvt0cN0gD6CJQzIURDtsKJwkW3yQjYyT/FAKGYtFrB6buDn3Eg3Hsw6z7uj7
-CzLBsSl7BIGrQILbpbI9nFNT3rUTUhXZKY/3UtJAOb66AjTmMbD16RGYZR4JsPx6
-CstheifJ6YSI79r5KgD37zX0jMXFWimvb2SmZmFeLoohtC8K7uTyjm/dROx6nHXd
-Dt5TQYXHAgMBAAECggEAIB5zGdIG/yh/Z1GBqfuOFaxFGx5iJ5BVlLAVH9P9IXFz
-yPnVRXEjbinFlSMSbqEBeIX9EpcVMXxHIPIP1RIGEy2IYr3kiqXyT771ahDDZh6/
-Spqz0UQatSPqyvW3H9uE0Uc12dvQm23JSCUmPRX5m7gbhDQBIChXzdzdcU4Yi59V
-4xmJUvbsAcLw5CBM6kwV+1NGVH9+3mUdhrr9M6B6+sVB/xnaqMGEDfQGiwL8U7EY
-QOuc46KXu3Pd/qCdVLn60IrdjSzDJKeC5UZZ+ejNAo+DfbtOovBj3qu3OCUg4XVy
-0CDBJ1sTdLvUfF4Gb+crjPsd+qBbXcjVfqdadwhsoQKBgQDBF1Pys/NitW8okJwp
-2fiDIASP3TiI+MthWHGyuoZGPvmXQ3H6iuLSm8c/iYI2WPTf53Xff1VcFm1GmQms
-GCsYM8Ax94zCeO6Ei1sYYxwcBloEZfOeV37MPA4pjJF4Lt+n5nveNxP+lrsjksJz
-wToSEgWPDT1b/xcdt4/5j9J85wKBgQC5tiLx+33mwH4DoaFRmSl0+VuSNYFw6DTQ
-SQ+kWqWGH4NENc9wf4Dj2VUZQhpXNhXVSxj+aP2d/ck1NrTJAWqYEXCDtFQOGSa2
-cGPRr+Fhy5NIEaEvR7IXcMBZzx3koYmWVBHricyrXs5FvHrT3N14mGDUG8n24U3f
-R799bau0IQKBgQC97UM+lHCPJCWNggiJRgSifcje9VtZp1btjoBvq/bNe74nYkjn
-htsrC91Fiu1Qpdlfr50K1IXSyaB886VG6JLjAGxI+dUzqJ38M9LLvxj0G+9JKjsi
-AbAQFfZcOg8QZxLJZPVsE0MQhZTXndC06VhEVAOxvPUg214Sde8hK61/+wKBgCRw
-O10VhnePT2pw/VEgZ0T/ZFtEylgYB7zSiRIrgwzVBBGPKVueePC8BPmGwdpYz2Hh
-cU8B1Ll6QU+Co2hJMdwSl+wPpup5PuJPHRbYlrV0lzpt0x2OyL/WrLcyb2Ab3f40
-EqwPhqwdVwXR3JvTW1U9OMqFhVQ+kuP7lPQMX8NhAoGBAJOgZ7Tokipc4Mi68Olw
-SCaOPvjjy4sW2rTRuKyjc1wTAzy7SJ3vXHfGkkN99nTLJFwAyJhWUpnRdwAXGi+x
-gyOa95ImsEfRSwEjbluWfF8/P0IU8GR+ZTqT4NnNCOsi8T/xst4Szd1ECJNnnZDe
-1ChfPP1AH+/75MJCvu6wQBQv
------END PRIVATE KEY-----
diff --git a/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem b/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem
deleted file mode 100644
index ad1bad5984598..0000000000000
--- a/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem
+++ /dev/null
@@ -1,19 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDITCCAgkCFBS8mLoytMpMWBwpAtnRaq3eIKnsMA0GCSqGSIb3DQEBCwUAME0x
-CzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTENMAsGA1UECgwEVGVzdDEiMCAGA1UE
-AwwZdGVzdC1zMmEtbXRscy1zZWxmLXNpZ25lZDAeFw0yMzA4MjIyMTE2MDFaFw00
-MzA4MjIyMTE2MDFaME0xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTENMAsGA1UE
-CgwEVGVzdDEiMCAGA1UEAwwZdGVzdC1zMmEtbXRscy1zZWxmLXNpZ25lZDCCASIw
-DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKFFPsYasKZeCFLEXl3RpE/ZOXFe
-2lhutIalSpZvCmso+mQGoZ4cHK7At+kDjBi5CrnXkYcw7quQAhHgU0frhWdj7tsW
-HUUtq7T8eaGWKBnVD9fl+MjtAl1BmhXwV9qRBbj4EesSKGDSGpKf66dOtzw83JbB
-cU7XlPAH1c1zo2GXC1himcZ+SVGHVrOjn4NmeFs8g94/Dke8dWkHwv5YTMVugFK4
-5KxKgSOKkr4ka7PCBzgxCnW4wYSZNRHcxrqkiArO2HAQq0ACr7u+fVDYH//9mP2Z
-ADo/zch7O5yhkiNbjXJIRrptDWEuVYMRloYDhT773h7bV/Q0Wo0NQGtasJ8CAwEA
-ATANBgkqhkiG9w0BAQsFAAOCAQEAPjbH0TMyegF/MDvglkc0sXr6DqlmTxDCZZmG
-lYPZ5Xy062+rxIHghMARbvO4BxepiG37KsP2agvOldm4TtU8nQ8LyswmSIFm4BQ+
-XQWwdsWyYyd8l0d5sXAdaN6AXwy50fvqCepmEqyreMY6dtLzlwo9gVCBFB7QuAPt
-Nc14phpEUZt/KPNuY6cUlB7bz3tmnFbwxUrWj1p0KBEYsr7+KEVZxR+z0wtlU7S9
-ZBrmUvx0fq5Ef7JWtHW0w4ofg1op742sdYl+53C26GZ76ts4MmqVz2/94DScgRaU
-gT0GLVuuCZXRDVeTXqTb4mditRCfzFPe9cCegYhGhSqBs8yh5A==
------END CERTIFICATE-----
diff --git a/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem b/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem
deleted file mode 100644
index bcf08e4f12f4b..0000000000000
--- a/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem
+++ /dev/null
@@ -1,28 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQChRT7GGrCmXghS
-xF5d0aRP2TlxXtpYbrSGpUqWbwprKPpkBqGeHByuwLfpA4wYuQq515GHMO6rkAIR
-4FNH64VnY+7bFh1FLau0/HmhligZ1Q/X5fjI7QJdQZoV8FfakQW4+BHrEihg0hqS
-n+unTrc8PNyWwXFO15TwB9XNc6NhlwtYYpnGfklRh1azo5+DZnhbPIPePw5HvHVp
-B8L+WEzFboBSuOSsSoEjipK+JGuzwgc4MQp1uMGEmTUR3Ma6pIgKzthwEKtAAq+7
-vn1Q2B///Zj9mQA6P83IezucoZIjW41ySEa6bQ1hLlWDEZaGA4U++94e21f0NFqN
-DUBrWrCfAgMBAAECggEAR8e8YwyqJ8KezcgdgIC5M9kp2i4v3UCZFX0or8CI0J2S
-pUbWVLuKgLXCpfIwPyjNf15Vpei/spkMcsx4BQDthdFTFSzIpmvni0z9DlD5VFYj
-ESOJElV7wepbHPy2/c+izmuL/ic81aturGiFyRgeMq+cN3WuaztFTXkPTrzzsZGF
-p/Mx3gqm7Hoc3d2xlv+8L5GjCtEJPlQgZJV+s3ennBjOAd8CC7d9qJetE3Er46pn
-r5jedV3bQRZYBzmooYNHjbAs26++wYac/jTE0/U6nKS17eWq4BQZUtlMXUw5N81B
-7LKn7C03rj2KCn+Nf5uin9ALmoy888LXCDdvL/NZkQKBgQDduv1Heu+tOZuNYUdQ
-Hswmd8sVNAAWGZxdxixHMv58zrgbLFXSX6K89X2l5Sj9XON8TH46MuSFdjSwwWw5
-fBrhVEhA5srcqpvVWIBE05yqPpt0s1NQktMWJKELWlG8jOhVKwM5OYDpdxtwehpz
-1g70XJz+nF/LTV8RdTK+OWDDpQKBgQC6MhdbGHUz/56dY3gZpE5TXnN2hkNbZCgk
-emr6z85VHhQflZbedhCzB9PUnZnCKWOGQHQdxRTtRfd46LVboZqCdYO1ZNQv6toP
-ysS7dTpZZFy7CpQaW0Y6/jS65jW6xIDKR1W40vgltZ3sfpG37JaowpzWdw2WuOnw
-Bg0rcJAf8wKBgQCqE+p/z97UwuF8eufWnyj9QNo382E1koOMspv4KTdnyLETtthF
-vDH6O1wbykG8xmmASLRyM+NyNA+KnXNETNvZh2q8zctBpGRQK8iIAsGjHM7ln0AD
-B/x+ea5GJQuZU4RK/+lDFca6TjBwAFkWDVX/PqL18kDQkxKfM4SuwRhmOQKBgDGh
-eoJIsa0LnP787Z2AI3Srf4F/ZmLs/ppCm1OBotEjdF+64v0nYWonUvqgi8SqfaHi
-elEZIGvis4ViGj1zhRjzNAlc+AZRxpBhDzGcnNIJI4Kj3jhsTfsZmXqcNIQ1LtM8
-Uogyi/yZPaA1WKg7Aym2vlGYaGHdplXZdxc2KOSrAoGABRkD9l2OVcwK7RyNgFxo
-mjxx0tfUdDBhHIi2igih1FiHpeP9E+4/kE/K7PnU9DoDrL1jW1MTpXaYV4seOylk
-k9z/9QfcRa9ePD2N4FqbHWSYp5n3aLoIcGq/9jyjTwayZbbIhWO+vNuHE9wIvecZ
-8x3gNkxJRb4NaLIoNzAhCoo=
------END PRIVATE KEY-----
diff --git a/vendor/github.com/google/s2a-go/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/testdata/server_cert.pem
deleted file mode 100644
index 0f98322c72446..0000000000000
--- a/vendor/github.com/google/s2a-go/testdata/server_cert.pem
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL
-BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2
-YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE
-AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN
-MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ
-BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx
-ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ
-KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT
-fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ
-qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE
-xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es
-Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2
-Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM
-ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX
-MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR
-e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X
-POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl
-AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg
-odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+
-PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN
-Dhm6uZM=
------END CERTIFICATE-----
diff --git a/vendor/github.com/google/s2a-go/testdata/server_key.pem b/vendor/github.com/google/s2a-go/testdata/server_key.pem
deleted file mode 100644
index 81afea783df9f..0000000000000
--- a/vendor/github.com/google/s2a-go/testdata/server_key.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs
-8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO
-QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk
-XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA
-Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc
-gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf
-LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl
-jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0
-4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q
-Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P
-nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1
-drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE
-duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50
-L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG
-06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm
-eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD
-uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7
-lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL
-a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb
-hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ
-7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j
-r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7
-eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD
-B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz
-7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g==
------END RSA PRIVATE KEY-----
diff --git a/vendor/go.etcd.io/bbolt/.go-version b/vendor/go.etcd.io/bbolt/.go-version
index f124bfa155441..013173af5e9bc 100644
--- a/vendor/go.etcd.io/bbolt/.go-version
+++ b/vendor/go.etcd.io/bbolt/.go-version
@@ -1 +1 @@
-1.21.9
+1.22.6
diff --git a/vendor/go.etcd.io/bbolt/Makefile b/vendor/go.etcd.io/bbolt/Makefile
index 18154c6388233..21407797416eb 100644
--- a/vendor/go.etcd.io/bbolt/Makefile
+++ b/vendor/go.etcd.io/bbolt/Makefile
@@ -41,6 +41,15 @@ coverage:
TEST_FREELIST_TYPE=array go test -v -timeout 30m \
-coverprofile cover-freelist-array.out -covermode atomic
+BOLT_CMD=bbolt
+
+build:
+ go build -o bin/${BOLT_CMD} ./cmd/${BOLT_CMD}
+
+.PHONY: clean
+clean: # Clean binaries
+ rm -f ./bin/${BOLT_CMD}
+
.PHONY: gofail-enable
gofail-enable: install-gofail
gofail enable .
@@ -61,3 +70,7 @@ test-failpoint:
@echo "[failpoint] array freelist test"
TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint
+.PHONY: test-robustness # Running robustness tests requires root permission
+test-robustness:
+ go test -v ${TESTFLAGS} ./tests/dmflakey -test.root
+ go test -v ${TESTFLAGS} ./tests/robustness -test.root
diff --git a/vendor/go.etcd.io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go
index 4175bdf3dde9f..822798e41a5bf 100644
--- a/vendor/go.etcd.io/bbolt/db.go
+++ b/vendor/go.etcd.io/bbolt/db.go
@@ -524,7 +524,7 @@ func (db *DB) munmap() error {
// gofail: var unmapError string
// return errors.New(unmapError)
if err := munmap(db); err != nil {
- return fmt.Errorf("unmap error: " + err.Error())
+ return fmt.Errorf("unmap error: %v", err.Error())
}
return nil
@@ -571,7 +571,7 @@ func (db *DB) munlock(fileSize int) error {
// gofail: var munlockError string
// return errors.New(munlockError)
if err := munlock(db, fileSize); err != nil {
- return fmt.Errorf("munlock error: " + err.Error())
+ return fmt.Errorf("munlock error: %v", err.Error())
}
return nil
}
@@ -580,7 +580,7 @@ func (db *DB) mlock(fileSize int) error {
// gofail: var mlockError string
// return errors.New(mlockError)
if err := mlock(db, fileSize); err != nil {
- return fmt.Errorf("mlock error: " + err.Error())
+ return fmt.Errorf("mlock error: %v", err.Error())
}
return nil
}
@@ -1159,6 +1159,8 @@ func (db *DB) grow(sz int) error {
// https://github.com/boltdb/bolt/issues/284
if !db.NoGrowSync && !db.readOnly {
if runtime.GOOS != "windows" {
+ // gofail: var resizeFileError string
+ // return errors.New(resizeFileError)
if err := db.file.Truncate(int64(sz)); err != nil {
return fmt.Errorf("file resize error: %s", err)
}
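The three fmt.Errorf changes above swap string concatenation for a constant format string. The distinction matters because an error message can itself contain '%', which the concatenated form re-interprets as formatting verbs; it also keeps go vet's printf check quiet. A minimal standalone illustration, not bbolt code (the error message here is invented):

package main

import "fmt"

func main() {
	err := fmt.Errorf("disk 100%% full") // underlying error text: "disk 100% full"

	// Dynamic format string: the '%' inside the message is parsed as a verb
	// with a missing operand, so the output is mangled with %!f(MISSING).
	bad := fmt.Errorf("unmap error: " + err.Error())

	// Constant format string: the message is passed as data and printed as-is.
	good := fmt.Errorf("unmap error: %v", err)

	fmt.Println(bad)
	fmt.Println(good)
}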
diff --git a/vendor/go.etcd.io/bbolt/freelist.go b/vendor/go.etcd.io/bbolt/freelist.go
index 61d43f81b46db..dffc7bc749b52 100644
--- a/vendor/go.etcd.io/bbolt/freelist.go
+++ b/vendor/go.etcd.io/bbolt/freelist.go
@@ -252,6 +252,14 @@ func (f *freelist) rollback(txid txid) {
}
// Remove pages from pending list and mark as free if allocated by txid.
delete(f.pending, txid)
+
+ // Remove pgids which are allocated by this txid
+ for pgid, tid := range f.allocs {
+ if tid == txid {
+ delete(f.allocs, pgid)
+ }
+ }
+
f.mergeSpans(m)
}
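The added loop above walks the freelist's allocation map and drops every page allocated by the transaction being rolled back. A stripped-down sketch of that bookkeeping with toy types rather than bbolt's own; note that deleting from a map while ranging over it is well-defined in Go, which is what the fix relies on:

package main

import "fmt"

// rollback removes every page id whose owning transaction is txid,
// mirroring the shape of the freelist fix above.
func rollback(allocs map[uint64]uint64, txid uint64) {
	for pgid, tid := range allocs {
		if tid == txid {
			delete(allocs, pgid) // safe during range iteration
		}
	}
}

func main() {
	allocs := map[uint64]uint64{10: 7, 11: 8, 12: 7} // pgid -> owning txid
	rollback(allocs, 7)
	fmt.Println(allocs) // map[11:8]
}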
diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go
index 2fac8c0a78205..766395de3be7d 100644
--- a/vendor/go.etcd.io/bbolt/tx.go
+++ b/vendor/go.etcd.io/bbolt/tx.go
@@ -1,6 +1,7 @@
package bbolt
import (
+ "errors"
"fmt"
"io"
"os"
@@ -185,6 +186,10 @@ func (tx *Tx) Commit() error {
// If the high water mark has moved up then attempt to grow the database.
if tx.meta.pgid > opgid {
+ _ = errors.New("")
+ // gofail: var lackOfDiskSpace string
+ // tx.rollback()
+ // return errors.New(lackOfDiskSpace)
if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
tx.rollback()
return err
@@ -470,6 +475,7 @@ func (tx *Tx) write() error {
// Ignore file sync if flag is set on DB.
if !tx.db.NoSync || IgnoreNoSync {
+ // gofail: var beforeSyncDataPages struct{}
if err := fdatasync(tx.db); err != nil {
return err
}
@@ -507,6 +513,7 @@ func (tx *Tx) writeMeta() error {
return err
}
if !tx.db.NoSync || IgnoreNoSync {
+ // gofail: var beforeSyncMetaPage struct{}
if err := fdatasync(tx.db); err != nil {
return err
}
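The `_ = errors.New("")` line in the Commit hunk above looks odd on its own; it appears to exist so the newly added errors import stays referenced while the only other uses of the package live inside gofail comments, which only become real code once failpoints are enabled. That reading is an inference, not stated in the commit; the same keep-an-import-alive trick in isolation:

package main

import (
	"errors"
	"fmt"
)

// A blank-identifier reference keeps the errors import compiling even though
// no ordinary code path calls it yet; without it, Go rejects the file with
// "imported and not used".
var _ = errors.New("")

func main() {
	fmt.Println("errors import kept alive for generated/failpoint code")
}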
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 5cac5030511cf..4e50c0c98bded 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1672,8 +1672,8 @@ github.com/yuin/gopher-lua/pm
# github.com/yusufpapurcu/wmi v1.2.4
## explicit; go 1.16
github.com/yusufpapurcu/wmi
-# go.etcd.io/bbolt v1.3.10
-## explicit; go 1.21
+# go.etcd.io/bbolt v1.3.11
+## explicit; go 1.22
go.etcd.io/bbolt
# go.etcd.io/etcd/api/v3 v3.5.4
## explicit; go 1.16
|
fix
|
update module go.etcd.io/bbolt to v1.3.11 (#14358)
|
02fae4e1e97e507b5e30666108e7e7e8bc3dbf00
|
2024-08-29 18:01:34
|
Jess Males
|
chore: Update nginx-unprivileged (#13978)
| false
|
diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md
index ff9eca098eb6b..b3c2bba85351e 100644
--- a/docs/sources/setup/install/helm/reference.md
+++ b/docs/sources/setup/install/helm/reference.md
@@ -3637,7 +3637,7 @@ null
<td>string</td>
<td>The gateway image tag</td>
<td><pre lang="json">
-"1.24-alpine"
+"1.27-alpine"
</pre>
</td>
</tr>
diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md
index a9cead21ece46..ba47de6f8442f 100644
--- a/production/helm/loki/CHANGELOG.md
+++ b/production/helm/loki/CHANGELOG.md
@@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang
[//]: # (<AUTOMATED_UPDATES_LOCATOR> : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries below this line.)
+## 6.10.2
+
+- [CHANGE] Bumped version of `nginxinc/nginx-unprivileged` to 1.27-alpine; this remediates several CVEs
+
## 6.10.1
- [CHANGE] Bumped version of `kiwigrid/k8s-sidecar` to 1.27.5; this remediates several CVEs
diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml
index 52b52d5af56a6..9c6acfe2bdd28 100644
--- a/production/helm/loki/Chart.yaml
+++ b/production/helm/loki/Chart.yaml
@@ -3,7 +3,7 @@ name: loki
description: Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes.
type: application
appVersion: 3.1.1
-version: 6.10.1
+version: 6.10.2
home: https://grafana.github.io/helm-charts
sources:
- https://github.com/grafana/loki
diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md
index d0cb010b446b5..59208a9c6413c 100644
--- a/production/helm/loki/README.md
+++ b/production/helm/loki/README.md
@@ -1,6 +1,6 @@
# loki
-  
+  
Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes.
diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml
index 9a5b451b9a638..ef0c506f585a4 100644
--- a/production/helm/loki/values.yaml
+++ b/production/helm/loki/values.yaml
@@ -898,7 +898,7 @@ gateway:
# -- The gateway image repository
repository: nginxinc/nginx-unprivileged
# -- The gateway image tag
- tag: 1.24-alpine
+ tag: 1.27-alpine
# -- Overrides the gateway image tag with an image digest
digest: null
# -- The gateway image pull policy
|
chore
|
Update nginx-unprivileged (#13978)
|
bc5687e772c1302310c4078b89962e8432e2a600
|
2024-12-02 20:24:57
|
renovate[bot]
|
fix(deps): update module github.com/aws/aws-sdk-go-v2/config to v1.28.6 (#15202)
| false
|
diff --git a/nix/packages/loki.nix b/nix/packages/loki.nix
index 03c17fc74f8c0..c2f0fda208376 100644
--- a/nix/packages/loki.nix
+++ b/nix/packages/loki.nix
@@ -5,7 +5,7 @@ let
pname = "lambda-promtail";
src = ./../../tools/lambda-promtail;
- vendorHash = "sha256-90pY7nXU92K3HC2tr1oHT2AYpiaimbcdnIhyP9awkTk=";
+ vendorHash = "sha256-HfIjAMaS9L3hRuKQo99964e9GH4fsdZQ27Awk9Vzwpo=";
doCheck = false;
diff --git a/tools/lambda-promtail/go.mod b/tools/lambda-promtail/go.mod
index b73cd9736837a..5564da4268043 100644
--- a/tools/lambda-promtail/go.mod
+++ b/tools/lambda-promtail/go.mod
@@ -5,7 +5,7 @@ go 1.22
require (
github.com/aws/aws-lambda-go v1.47.0
github.com/aws/aws-sdk-go-v2 v1.32.6
- github.com/aws/aws-sdk-go-v2/config v1.28.5
+ github.com/aws/aws-sdk-go-v2/config v1.28.6
github.com/aws/aws-sdk-go-v2/service/s3 v1.69.0
github.com/go-kit/log v0.2.1
github.com/gogo/protobuf v1.3.2
@@ -24,19 +24,19 @@ require (
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect
- github.com/aws/aws-sdk-go-v2/credentials v1.17.46 // indirect
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.20 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.24 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.24 // indirect
+ github.com/aws/aws-sdk-go-v2/credentials v1.17.47 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.24 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.5 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.5 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.5 // indirect
- github.com/aws/aws-sdk-go-v2/service/sso v1.24.6 // indirect
- github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.5 // indirect
- github.com/aws/aws-sdk-go-v2/service/sts v1.33.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 // indirect
github.com/aws/smithy-go v1.22.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500 // indirect
diff --git a/tools/lambda-promtail/go.sum b/tools/lambda-promtail/go.sum
index 34e76123cdaac..1e9cb2e9c3eda 100644
--- a/tools/lambda-promtail/go.sum
+++ b/tools/lambda-promtail/go.sum
@@ -52,16 +52,16 @@ github.com/aws/aws-sdk-go-v2 v1.32.6 h1:7BokKRgRPuGmKkFMhEg/jSul+tB9VvXhcViILtfG
github.com/aws/aws-sdk-go-v2 v1.32.6/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 h1:lL7IfaFzngfx0ZwUGOZdsFFnQ5uLvR0hWqqhyE7Q9M8=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7/go.mod h1:QraP0UcVlQJsmHfioCrveWOC1nbiWUl3ej08h4mXWoc=
-github.com/aws/aws-sdk-go-v2/config v1.28.5 h1:Za41twdCXbuyyWv9LndXxZZv3QhTG1DinqlFsSuvtI0=
-github.com/aws/aws-sdk-go-v2/config v1.28.5/go.mod h1:4VsPbHP8JdcdUDmbTVgNL/8w9SqOkM5jyY8ljIxLO3o=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.46 h1:AU7RcriIo2lXjUfHFnFKYsLCwgbz1E7Mm95ieIRDNUg=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.46/go.mod h1:1FmYyLGL08KQXQ6mcTlifyFXfJVCNJTVGuQP4m0d/UA=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.20 h1:sDSXIrlsFSFJtWKLQS4PUWRvrT580rrnuLydJrCQ/yA=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.20/go.mod h1:WZ/c+w0ofps+/OUqMwWgnfrgzZH1DZO1RIkktICsqnY=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.24 h1:4usbeaes3yJnCFC7kfeyhkdkPtoRYPa/hTmCqMpKpLI=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.24/go.mod h1:5CI1JemjVwde8m2WG3cz23qHKPOxbpkq0HaoreEgLIY=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.24 h1:N1zsICrQglfzaBnrfM0Ys00860C+QFwu6u/5+LomP+o=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.24/go.mod h1:dCn9HbJ8+K31i8IQ8EWmWj0EiIk0+vKiHNMxTTYveAg=
+github.com/aws/aws-sdk-go-v2/config v1.28.6 h1:D89IKtGrs/I3QXOLNTH93NJYtDhm8SYa9Q5CsPShmyo=
+github.com/aws/aws-sdk-go-v2/config v1.28.6/go.mod h1:GDzxJ5wyyFSCoLkS+UhGB0dArhb9mI+Co4dHtoTxbko=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.47 h1:48bA+3/fCdi2yAwVt+3COvmatZ6jUDNkDTIsqDiMUdw=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.47/go.mod h1:+KdckOejLW3Ks3b0E3b5rHsr2f9yuORBum0WPnE5o5w=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 h1:AmoU1pziydclFT/xRV+xXE/Vb8fttJCLRPv8oAkprc0=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21/go.mod h1:AjUdLYe4Tgs6kpH4Bv7uMZo7pottoyHMn4eTcIcneaY=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 h1:s/fF4+yDQDoElYhfIVvSNyeCydfbuTKzhxSXDXCPasU=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25/go.mod h1:IgPfDv5jqFIzQSNbUEMoitNooSMXjRSDkhXv8jiROvU=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 h1:ZntTCl5EsYnhN/IygQEUugpdwbhdkom9uHcbCftiGgA=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25/go.mod h1:DBdPrgeocww+CSl1C8cEV8PN1mHMBhuCDLpXezyvWkE=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.24 h1:JX70yGKLj25+lMC5Yyh8wBtvB01GDilyRuJvXJ4piD0=
@@ -70,18 +70,18 @@ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhv
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.5 h1:gvZOjQKPxFXy1ft3QnEyXmT+IqneM9QAUWlM3r0mfqw=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.5/go.mod h1:DLWnfvIcm9IET/mmjdxeXbBKmTCm0ZB8p1za9BVteM8=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.5 h1:wtpJ4zcwrSbwhECWQoI/g6WM9zqCcSpHDJIWSbMLOu4=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.5/go.mod h1:qu/W9HXQbbQ4+1+JcZp0ZNPV31ym537ZJN+fiS7Ti8E=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 h1:50+XsN70RS7dwJ2CkVNXzj7U2L1HKP8nqTd3XWEXBN4=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6/go.mod h1:WqgLmwY7so32kG01zD8CPTJWVWM+TzJoOVHwTg4aPug=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.5 h1:P1doBzv5VEg1ONxnJss1Kh5ZG/ewoIE4MQtKKc6Crgg=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.5/go.mod h1:NOP+euMW7W3Ukt28tAxPuoWao4rhhqJD3QEBk7oCg7w=
github.com/aws/aws-sdk-go-v2/service/s3 v1.69.0 h1:Q2ax8S21clKOnHhhr933xm3JxdJebql+R7aNo7p7GBQ=
github.com/aws/aws-sdk-go-v2/service/s3 v1.69.0/go.mod h1:ralv4XawHjEMaHOWnTFushl0WRqim/gQWesAMF6hTow=
-github.com/aws/aws-sdk-go-v2/service/sso v1.24.6 h1:3zu537oLmsPfDMyjnUS2g+F2vITgy5pB74tHI+JBNoM=
-github.com/aws/aws-sdk-go-v2/service/sso v1.24.6/go.mod h1:WJSZH2ZvepM6t6jwu4w/Z45Eoi75lPN7DcydSRtJg6Y=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.5 h1:K0OQAsDywb0ltlFrZm0JHPY3yZp/S9OaoLU33S7vPS8=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.5/go.mod h1:ORITg+fyuMoeiQFiVGoqB3OydVTLkClw/ljbblMq6Cc=
-github.com/aws/aws-sdk-go-v2/service/sts v1.33.1 h1:6SZUVRQNvExYlMLbHdlKB48x0fLbc2iVROyaNEwBHbU=
-github.com/aws/aws-sdk-go-v2/service/sts v1.33.1/go.mod h1:GqWyYCwLXnlUB1lOAXQyNSPqPLQJvmo8J0DWBzp9mtg=
+github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 h1:rLnYAfXQ3YAccocshIH5mzNNwZBkBo+bP6EhIxak6Hw=
+github.com/aws/aws-sdk-go-v2/service/sso v1.24.7/go.mod h1:ZHtuQJ6t9A/+YDuxOLnbryAmITtr8UysSny3qcyvJTc=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 h1:JnhTZR3PiYDNKlXy50/pNeix9aGMo6lLpXwJ1mw8MD4=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6/go.mod h1:URronUEGfXZN1VpdktPSD1EkAL9mfrV+2F4sjH38qOY=
+github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 h1:s4074ZO1Hk8qv65GqNXqDjmkf4HSQqJukaLuuW0TpDA=
+github.com/aws/aws-sdk-go-v2/service/sts v1.33.2/go.mod h1:mVggCnIWoM09jP71Wh+ea7+5gAp53q+49wDFs1SW5z8=
github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro=
github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
|
fix
|
update module github.com/aws/aws-sdk-go-v2/config to v1.28.6 (#15202)
|
7e224d53de8a5c43448ffd341f0d9c48abb335ef
|
2024-08-04 19:28:29
|
Sandeep Sukhani
|
fix: try reading chunks which have incorrect offset for blocks (#13720)
| false
|
diff --git a/pkg/chunkenc/memchunk.go b/pkg/chunkenc/memchunk.go
index 3d87aec705dc5..328e91c94deb3 100644
--- a/pkg/chunkenc/memchunk.go
+++ b/pkg/chunkenc/memchunk.go
@@ -441,13 +441,20 @@ func newByteChunk(b []byte, blockSize, targetSize int, fromCheckpoint bool) (*Me
metasOffset := uint64(0)
metasLen := uint64(0)
+ // There is a rare issue where chunks built by Loki have incorrect offset for some blocks which causes Loki to fail to read those chunks.
+ // While the root cause is yet to be identified, we will try to read those problematic chunks using the expected offset for blocks calculated using other relative offsets in the chunk.
+ expectedBlockOffset := 0
if version >= ChunkFormatV4 {
- // version >= 4 starts writing length of sections after their offsets
+ // version >= 4 starts writing length of sections before their offsets
metasLen, metasOffset = readSectionLenAndOffset(chunkMetasSectionIdx)
+ structuredMetadataLength, structuredMetadataOffset := readSectionLenAndOffset(chunkStructuredMetadataSectionIdx)
+ expectedBlockOffset = int(structuredMetadataLength + structuredMetadataOffset + 4)
} else {
// version <= 3 does not store length of metas. metas are followed by metasOffset + hash and then the chunk ends
metasOffset = binary.BigEndian.Uint64(b[len(b)-8:])
metasLen = uint64(len(b)-(8+4)) - metasOffset
+ // version 1 writes blocks after version number while version 2 and 3 write blocks after chunk encoding
+ expectedBlockOffset = len(b) - len(db.b)
}
mb := b[metasOffset : metasOffset+metasLen]
db = decbuf{b: mb}
@@ -476,18 +483,35 @@ func newByteChunk(b []byte, blockSize, targetSize int, fromCheckpoint bool) (*Me
blk.uncompressedSize = db.uvarint()
}
l := db.uvarint()
- if blk.offset+l > len(b) {
- return nil, fmt.Errorf("block %d offset %d + length %d exceeds chunk length %d", i, blk.offset, l, len(b))
- }
- blk.b = b[blk.offset : blk.offset+l]
- // Verify checksums.
- expCRC := binary.BigEndian.Uint32(b[blk.offset+l:])
- if expCRC != crc32.Checksum(blk.b, castagnoliTable) {
- _ = level.Error(util_log.Logger).Log("msg", "Checksum does not match for a block in chunk, this block will be skipped", "err", ErrInvalidChecksum)
- continue
+ invalidBlockErr := validateBlock(b, blk.offset, l)
+ if invalidBlockErr != nil {
+ level.Error(util_log.Logger).Log("msg", "invalid block found", "err", invalidBlockErr)
+ // if block is expected to have different offset than what is encoded, see if we get a valid block using expected offset
+ if blk.offset != expectedBlockOffset {
+ _ = level.Error(util_log.Logger).Log("msg", "block offset does not match expected one, will try reading with expected offset", "actual", blk.offset, "expected", expectedBlockOffset)
+ blk.offset = expectedBlockOffset
+ if err := validateBlock(b, blk.offset, l); err != nil {
+ level.Error(util_log.Logger).Log("msg", "could not find valid block using expected offset", "err", err)
+ } else {
+ invalidBlockErr = nil
+ level.Info(util_log.Logger).Log("msg", "valid block found using expected offset")
+ }
+ }
+
+ // if the block read with expected offset is still invalid, do not continue further
+ if invalidBlockErr != nil {
+ if errors.Is(invalidBlockErr, ErrInvalidChecksum) {
+ expectedBlockOffset += l + 4
+ continue
+ }
+ return nil, invalidBlockErr
+ }
}
+ // next block starts at current block start + current block length + checksum
+ expectedBlockOffset = blk.offset + l + 4
+ blk.b = b[blk.offset : blk.offset+l]
bc.blocks = append(bc.blocks, blk)
// Update the counter used to track the size of cut blocks.
@@ -1696,3 +1720,21 @@ func (e *sampleBufferedIterator) StreamHash() uint64 { return e.extractor.BaseLa
func (e *sampleBufferedIterator) At() logproto.Sample {
return e.cur
}
+
+// validateBlock validates block by doing following checks:
+// 1. Offset+length do not overrun size of the chunk from which we are reading the block.
+// 2. Checksum of the block we will read matches the stored checksum in the chunk.
+func validateBlock(chunkBytes []byte, offset, length int) error {
+ if offset+length > len(chunkBytes) {
+ return fmt.Errorf("offset %d + length %d exceeds chunk length %d", offset, length, len(chunkBytes))
+ }
+
+ blockBytes := chunkBytes[offset : offset+length]
+ // Verify checksums.
+ expCRC := binary.BigEndian.Uint32(chunkBytes[offset+length:])
+ if expCRC != crc32.Checksum(blockBytes, castagnoliTable) {
+ return ErrInvalidChecksum
+ }
+
+ return nil
+}
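validateBlock above is the core of the recovery path: a block is trusted only if its byte range fits inside the chunk and the CRC32 (Castagnoli) stored immediately after it matches. A self-contained sketch of that layout and check, using made-up data rather than a real Loki chunk:

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"hash/crc32"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// validate mimics validateBlock: the payload sits at chunk[offset:offset+length],
// followed immediately by a 4-byte big-endian CRC32 of the payload.
func validate(chunk []byte, offset, length int) error {
	if offset+length+4 > len(chunk) {
		return fmt.Errorf("offset %d + length %d exceeds chunk length %d", offset, length, len(chunk))
	}
	block := chunk[offset : offset+length]
	if binary.BigEndian.Uint32(chunk[offset+length:]) != crc32.Checksum(block, castagnoli) {
		return errors.New("invalid checksum")
	}
	return nil
}

func main() {
	payload := []byte("hello block")
	chunk := make([]byte, len(payload)+4)
	copy(chunk, payload)
	binary.BigEndian.PutUint32(chunk[len(payload):], crc32.Checksum(payload, castagnoli))

	fmt.Println(validate(chunk, 0, len(payload)))   // <nil>: offset is correct
	fmt.Println(validate(chunk, 1, len(payload)-1)) // checksum mismatch: wrong offset, as in the bug
}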
diff --git a/pkg/chunkenc/memchunk_test.go b/pkg/chunkenc/memchunk_test.go
index 6c48a28b0650f..daa97a2616917 100644
--- a/pkg/chunkenc/memchunk_test.go
+++ b/pkg/chunkenc/memchunk_test.go
@@ -5,6 +5,7 @@ import (
"context"
"encoding/binary"
"fmt"
+ "hash"
"math"
"math/rand"
"sort"
@@ -2044,3 +2045,119 @@ func TestMemChunk_IteratorWithStructuredMetadata(t *testing.T) {
})
}
}
+
+func TestDecodeChunkIncorrectBlockOffset(t *testing.T) {
+ // use small block size to build multiple blocks in the test chunk
+ blockSize := 10
+
+ for _, format := range allPossibleFormats {
+ t.Run(fmt.Sprintf("chunkFormat:%v headBlockFmt:%v", format.chunkFormat, format.headBlockFmt), func(t *testing.T) {
+ for incorrectOffsetBlockNum := 0; incorrectOffsetBlockNum < 3; incorrectOffsetBlockNum++ {
+				t.Run(fmt.Sprintf("incorrect offset block: %d", incorrectOffsetBlockNum), func(t *testing.T) {
+ chk := NewMemChunk(format.chunkFormat, EncNone, format.headBlockFmt, blockSize, testTargetSize)
+ ts := time.Now().Unix()
+ for i := 0; i < 3; i++ {
+ dup, err := chk.Append(&logproto.Entry{
+ Timestamp: time.Now(),
+ Line: fmt.Sprintf("%d-%d", ts, i),
+ StructuredMetadata: []logproto.LabelAdapter{
+ {Name: "foo", Value: fmt.Sprintf("%d-%d", ts, i)},
+ },
+ })
+ require.NoError(t, err)
+ require.False(t, dup)
+ }
+
+ require.Len(t, chk.blocks, 3)
+
+ b, err := chk.Bytes()
+ require.NoError(t, err)
+
+ metasOffset := binary.BigEndian.Uint64(b[len(b)-8:])
+
+ w := bytes.NewBuffer(nil)
+ eb := EncodeBufferPool.Get().(*encbuf)
+ defer EncodeBufferPool.Put(eb)
+
+ crc32Hash := crc32HashPool.Get().(hash.Hash32)
+ defer crc32HashPool.Put(crc32Hash)
+
+ crc32Hash.Reset()
+ eb.reset()
+
+ // BEGIN - code copied from writeTo func starting from encoding of block metas to change offset of a block
+ eb.putUvarint(len(chk.blocks))
+
+ for i, b := range chk.blocks {
+ eb.putUvarint(b.numEntries)
+ eb.putVarint64(b.mint)
+ eb.putVarint64(b.maxt)
+ // change offset of one block
+ blockOffset := b.offset
+ if i == incorrectOffsetBlockNum {
+ blockOffset += 5
+ }
+ eb.putUvarint(blockOffset)
+ if chk.format >= ChunkFormatV3 {
+ eb.putUvarint(b.uncompressedSize)
+ }
+ eb.putUvarint(len(b.b))
+ }
+ metasLen := len(eb.get())
+ eb.putHash(crc32Hash)
+
+ _, err = w.Write(eb.get())
+ require.NoError(t, err)
+
+ if chk.format >= ChunkFormatV4 {
+ // Write structured metadata offset and length
+ eb.reset()
+
+ eb.putBE64int(int(binary.BigEndian.Uint64(b[len(b)-32:])))
+ eb.putBE64int(int(binary.BigEndian.Uint64(b[len(b)-24:])))
+ _, err = w.Write(eb.get())
+ require.NoError(t, err)
+ }
+
+ // Write the metasOffset.
+ eb.reset()
+ if chk.format >= ChunkFormatV4 {
+ eb.putBE64int(metasLen)
+ }
+ eb.putBE64int(int(metasOffset))
+ _, err = w.Write(eb.get())
+ require.NoError(t, err)
+ // END - code copied from writeTo func
+
+ // build chunk using pre-block meta section + rewritten remainder of the chunk with incorrect offset for a block
+ chkWithIncorrectOffset := make([]byte, int(metasOffset)+w.Len())
+ copy(chkWithIncorrectOffset, b[:metasOffset])
+ copy(chkWithIncorrectOffset[metasOffset:], w.Bytes())
+
+ // decoding the problematic chunk should succeed
+ decodedChkWithIncorrectOffset, err := newByteChunk(chkWithIncorrectOffset, blockSize, testTargetSize, false)
+ require.NoError(t, err)
+
+ require.Len(t, decodedChkWithIncorrectOffset.blocks, len(chk.blocks))
+
+ // both chunks should have same log lines
+ origChunkItr, err := chk.Iterator(context.Background(), time.Unix(0, 0), time.Unix(0, math.MaxInt64), logproto.FORWARD, log.NewNoopPipeline().ForStream(labels.Labels{}))
+ require.NoError(t, err)
+
+ corruptChunkItr, err := decodedChkWithIncorrectOffset.Iterator(context.Background(), time.Unix(0, 0), time.Unix(0, math.MaxInt64), logproto.FORWARD, log.NewNoopPipeline().ForStream(labels.Labels{}))
+ require.NoError(t, err)
+
+ numEntriesFound := 0
+ for origChunkItr.Next() {
+ numEntriesFound++
+ require.True(t, corruptChunkItr.Next())
+ require.Equal(t, origChunkItr.At(), corruptChunkItr.At())
+ }
+
+ require.False(t, corruptChunkItr.Next())
+ require.Equal(t, 3, numEntriesFound)
+ })
+ }
+ })
+ }
+}
|
fix
|
try reading chunks which have incorrect offset for blocks (#13720)
|
41d9c8b5ecd0ea31cd12f8fce052b47abd341ff8
|
2025-02-10 20:57:25
|
Dylan Guedes
|
fix: Make policy test non-flaky (#16158)
| false
|
diff --git a/pkg/validation/ingestion_policies_test.go b/pkg/validation/ingestion_policies_test.go
index 802a7a03ebfb4..591624dfcd682 100644
--- a/pkg/validation/ingestion_policies_test.go
+++ b/pkg/validation/ingestion_policies_test.go
@@ -31,7 +31,7 @@ func Test_PolicyStreamMapping_PolicyFor(t *testing.T) {
"policy3": []*PriorityStream{
{
Selector: `{qyx="qzx", qox="qox"}`,
- Priority: 1,
+ Priority: 2,
Matchers: []*labels.Matcher{
labels.MustNewMatcher(labels.MatchEqual, "qyx", "qzx"),
labels.MustNewMatcher(labels.MatchEqual, "qox", "qox"),
@@ -51,7 +51,7 @@ func Test_PolicyStreamMapping_PolicyFor(t *testing.T) {
"policy5": []*PriorityStream{
{
Selector: `{qab=~"qzx.*"}`,
- Priority: 1,
+ Priority: 2,
Matchers: []*labels.Matcher{
labels.MustNewMatcher(labels.MatchRegexp, "qab", "qzx.*"),
},
@@ -94,7 +94,7 @@ func Test_PolicyStreamMapping_PolicyFor(t *testing.T) {
require.Equal(t, "policy1", mapping.PolicyFor(labels.FromStrings("foo", "bar")))
// matches both policy2 and policy1 but policy1 has higher priority.
require.Equal(t, "policy1", mapping.PolicyFor(labels.FromStrings("foo", "bar", "daz", "baz")))
- // matches policy3 and policy4 but policy3 appears first.
+	// matches policy3 and policy4 but policy3 has higher priority.
require.Equal(t, "policy3", mapping.PolicyFor(labels.FromStrings("qyx", "qzx", "qox", "qox")))
// matches no policy.
require.Equal(t, "", mapping.PolicyFor(labels.FromStrings("foo", "fooz", "daz", "qux", "quux", "corge")))
|
fix
|
Make policy test non-flaky (#16158)
|
fad06ee692576854ff2098e440237a1223a32715
|
2024-12-19 02:47:55
|
Ned Andreev
|
fix: data race in chunk client hedging tests (#15466)
| false
|
diff --git a/pkg/storage/chunk/client/hedging/hedging.go b/pkg/storage/chunk/client/hedging/hedging.go
index bb4bdf6c9a9c7..879c102acc464 100644
--- a/pkg/storage/chunk/client/hedging/hedging.go
+++ b/pkg/storage/chunk/client/hedging/hedging.go
@@ -64,12 +64,6 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
f.IntVar(&cfg.MaxPerSecond, prefix+"hedge-max-per-second", 5, "The maximum of hedge requests allowed per seconds.")
}
-// Client returns a hedged http client.
-// The client transport will be mutated to use the hedged roundtripper.
-func (cfg *Config) Client(client *http.Client) (*http.Client, error) {
- return cfg.ClientWithRegisterer(client, prometheus.DefaultRegisterer)
-}
-
// ClientWithRegisterer returns a hedged http client with instrumentation registered to the provided registerer.
// The client transport will be mutated to use the hedged roundtripper.
func (cfg *Config) ClientWithRegisterer(client *http.Client, reg prometheus.Registerer) (*http.Client, error) {
diff --git a/pkg/storage/chunk/client/hedging/hedging_test.go b/pkg/storage/chunk/client/hedging/hedging_test.go
index 1baf0f757dbd0..f44423e0345d8 100644
--- a/pkg/storage/chunk/client/hedging/hedging_test.go
+++ b/pkg/storage/chunk/client/hedging/hedging_test.go
@@ -18,22 +18,24 @@ func (fn RoundTripperFunc) RoundTrip(req *http.Request) (*http.Response, error)
return fn(req)
}
-func resetMetrics() {
+func resetMetrics() *prometheus.Registry {
+ //TODO: clean up this massive hack...
reg := prometheus.NewRegistry()
prometheus.DefaultRegisterer = reg
prometheus.DefaultGatherer = reg
initMetrics()
+ return reg
}
func TestHedging(t *testing.T) {
- resetMetrics()
+ reg := resetMetrics()
cfg := &Config{
At: time.Duration(1),
UpTo: 3,
MaxPerSecond: 1000,
}
count := atomic.NewInt32(0)
- client, err := cfg.Client(&http.Client{
+ client, err := cfg.ClientWithRegisterer(&http.Client{
Transport: RoundTripperFunc(func(_ *http.Request) (*http.Response, error) {
count.Inc()
time.Sleep(200 * time.Millisecond)
@@ -41,14 +43,14 @@ func TestHedging(t *testing.T) {
StatusCode: http.StatusOK,
}, nil
}),
- })
+ }, reg)
if err != nil {
t.Fatal(err)
}
_, _ = client.Get("http://example.com")
require.Equal(t, int32(3), count.Load())
- require.NoError(t, testutil.GatherAndCompare(prometheus.DefaultGatherer,
+ require.NoError(t, testutil.GatherAndCompare(reg,
strings.NewReader(`
# HELP hedged_requests_rate_limited_total The total number of hedged requests rejected via rate limiting.
# TYPE hedged_requests_rate_limited_total counter
@@ -61,14 +63,14 @@ hedged_requests_total 2
}
func TestHedgingRateLimit(t *testing.T) {
- resetMetrics()
+ reg := resetMetrics()
cfg := &Config{
At: time.Duration(1),
UpTo: 20,
MaxPerSecond: 1,
}
count := atomic.NewInt32(0)
- client, err := cfg.Client(&http.Client{
+ client, err := cfg.ClientWithRegisterer(&http.Client{
Transport: RoundTripperFunc(func(_ *http.Request) (*http.Response, error) {
count.Inc()
time.Sleep(200 * time.Millisecond)
@@ -76,14 +78,14 @@ func TestHedgingRateLimit(t *testing.T) {
StatusCode: http.StatusOK,
}, nil
}),
- })
+ }, reg)
if err != nil {
t.Fatal(err)
}
_, _ = client.Get("http://example.com")
require.Equal(t, int32(2), count.Load())
- require.NoError(t, testutil.GatherAndCompare(prometheus.DefaultGatherer,
+ require.NoError(t, testutil.GatherAndCompare(reg,
strings.NewReader(`
# HELP hedged_requests_rate_limited_total The total number of hedged requests rejected via rate limiting.
# TYPE hedged_requests_rate_limited_total counter
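
With `Config.Client` removed, every caller now passes an explicit registerer, so parallel tests no longer mutate the shared `prometheus.DefaultRegisterer`. A usage sketch is below; the `Config` fields and the `ClientWithRegisterer` signature follow the diff above, while the import path and surrounding wiring are indicative rather than exact.

```go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/grafana/loki/pkg/storage/chunk/client/hedging"
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Each caller owns its registry; nothing touches the global default,
	// which is what removes the data race between parallel tests.
	reg := prometheus.NewRegistry()

	cfg := &hedging.Config{
		At:           100 * time.Millisecond, // start hedging after this delay
		UpTo:         3,                      // at most 3 attempts in total
		MaxPerSecond: 1000,
	}

	client, err := cfg.ClientWithRegisterer(&http.Client{Timeout: 5 * time.Second}, reg)
	if err != nil {
		log.Fatal(err)
	}

	resp, err := client.Get("http://example.com")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
}
```
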
|
fix
|
data race in chunk client hedging tests (#15466)
|
fdbd66806a63d710c5575ebc15c28ca05717d022
|
2025-02-14 16:21:09
|
Cyril Tovena
|
chore(dataobj): Only stop reading from dataobj reader on EOF (#16274)
| false
|
diff --git a/pkg/dataobj/querier/iter.go b/pkg/dataobj/querier/iter.go
index 4d6f84866935b..8d2a66100d642 100644
--- a/pkg/dataobj/querier/iter.go
+++ b/pkg/dataobj/querier/iter.go
@@ -77,7 +77,7 @@ func newEntryIterator(ctx context.Context,
return nil, err
}
- if n == 0 {
+ if n == 0 && err == io.EOF {
break
}
@@ -299,7 +299,7 @@ func newSampleIterator(ctx context.Context,
}
// Handle end of stream or empty read
- if n == 0 {
+ if n == 0 && err == io.EOF {
iterators = appendIteratorFromSeries(iterators, series)
break
}
diff --git a/pkg/dataobj/querier/metadata.go b/pkg/dataobj/querier/metadata.go
index 4b3d0f49c0f48..586b97e4f5ec5 100644
--- a/pkg/dataobj/querier/metadata.go
+++ b/pkg/dataobj/querier/metadata.go
@@ -197,7 +197,7 @@ func (sp *streamProcessor) processSingleReader(ctx context.Context, reader *data
if err != nil && err != io.EOF {
return err
}
- if n == 0 {
+ if n == 0 && err == io.EOF {
break
}
for _, stream := range streams[:n] {
diff --git a/pkg/dataobj/querier/store.go b/pkg/dataobj/querier/store.go
index a89a6cf9d9251..910f4aa5fc10a 100644
--- a/pkg/dataobj/querier/store.go
+++ b/pkg/dataobj/querier/store.go
@@ -404,7 +404,7 @@ func (s *shardedObject) matchStreams(ctx context.Context) error {
if err != nil && err != io.EOF {
return err
}
- if n == 0 {
+ if n == 0 && err == io.EOF {
break
}
|
chore
|
Only stop reading from dataobj reader on EOF (#16274)
|
1308d903d4d13a8e16397ce6d5f3bcef89125775
|
2024-09-13 20:38:56
|
Christian Haudum
|
chore(ci): Temporarily remove Backport PR action (#14136)
| false
|
diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml
deleted file mode 100644
index 2efd4ef90d4ec..0000000000000
--- a/.github/workflows/backport.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-name: Backport PR Creator
-on:
- pull_request_target:
- types:
- - closed
- - labeled
-
-jobs:
- main:
- runs-on: ubuntu-latest
- steps:
- - name: Checkout Actions
- uses: actions/checkout@v4
- with:
- repository: "grafana/grafana-github-actions"
- path: ./actions
- ref: main
- - name: Install Actions
- run: npm install --production --prefix ./actions
- - name: Run backport
- uses: ./actions/backport
- with:
- metricsWriteAPIKey: ${{secrets.GRAFANA_MISC_STATS_API_KEY}}
- token: ${{secrets.GH_BOT_ACCESS_TOKEN}}
- labelsToAdd: "backport"
- title: "chore: [{{base}}] {{originalTitle}}"
|
chore
|
Temporarily remove Backport PR action (#14136)
|
cafc7b6f87605f5dc1560b5725cd96f7d7e9d444
|
2024-04-09 16:38:06
|
Jack Baldry
|
docs: Fix up storage page front matter (#12532)
| false
|
diff --git a/docs/sources/configure/storage.md b/docs/sources/configure/storage.md
index 7a3433414f0f3..83652e81bd184 100644
--- a/docs/sources/configure/storage.md
+++ b/docs/sources/configure/storage.md
@@ -1,4 +1,6 @@
---
+aliases:
+ - ../storage/ # /docs/loki/latest/storage/
title: Storage
description: Describes Loki storage.
weight: 475
diff --git a/docs/sources/operations/storage/_index.md b/docs/sources/operations/storage/_index.md
index 402378a4289c7..f1947d072b56c 100644
--- a/docs/sources/operations/storage/_index.md
+++ b/docs/sources/operations/storage/_index.md
@@ -2,7 +2,6 @@
title: Manage storage
menuTitle: Storage
description: Describes Loki's storage needs and supported stores.
-weight:
---
# Manage storage
|
docs
|
Fix up storage page front matter (#12532)
|
0c06cf3e0a116d8b033981811a686c63bf085508
|
2024-12-17 21:41:42
|
Alex Burnett
|
docs: fix docs for bloom filters query acceleration hyperlink (#15438)
| false
|
diff --git a/docs/sources/operations/bloom-filters.md b/docs/sources/operations/bloom-filters.md
index bca145a2a981b..e1a09cdebcb07 100644
--- a/docs/sources/operations/bloom-filters.md
+++ b/docs/sources/operations/bloom-filters.md
@@ -191,7 +191,7 @@ Unfortunately, the amount of data each stream has is often unbalanced with the r
Query acceleration introduces a new sharding strategy: `bounded`, which uses blooms to reduce the chunks to be processed right away during the planning phase in the query frontend, as well as evenly distributes the amount of chunks each sharded query will need to process.
-[Query acceleration]: https://grafana.com/docs/loki/<LOKI_VERSION>/query/query-acceleration
+[Query acceleration]: https://grafana.com/docs/loki/<LOKI_VERSION>/query/query_acceleration
[structured metadata]: https://grafana.com/docs/loki/<LOKI_VERSION>/get-started/labels/structured-metadata
[tenant-limits]: https://grafana.com/docs/loki/<LOKI_VERSION>/configure/#limits_config
[bloom-gateway-cfg]: https://grafana.com/docs/loki/<LOKI_VERSION>/configure/#bloom_gateway
diff --git a/docs/sources/query/query_accceleration.md b/docs/sources/query/query_acceleration.md
similarity index 100%
rename from docs/sources/query/query_accceleration.md
rename to docs/sources/query/query_acceleration.md
|
docs
|
fix docs for bloom filters query acceleration hyperlink (#15438)
|
8177cd2a55592855e476fd87353593659a8d00d0
|
2023-08-03 17:53:15
|
dependabot[bot]
|
build(deps): bump github.com/aws/aws-sdk-go from 1.44.311 to 1.44.315 (#10159)
| false
|
diff --git a/go.mod b/go.mod
index 4c59c00b39f32..e9cf1235b08d6 100644
--- a/go.mod
+++ b/go.mod
@@ -16,7 +16,7 @@ require (
github.com/Workiva/go-datastructures v1.1.0
github.com/alicebob/miniredis/v2 v2.30.4
github.com/aliyun/aliyun-oss-go-sdk v2.2.7+incompatible
- github.com/aws/aws-sdk-go v1.44.311
+ github.com/aws/aws-sdk-go v1.44.315
github.com/baidubce/bce-sdk-go v0.9.141
github.com/bmatcuk/doublestar v1.3.4
github.com/buger/jsonparser v1.1.1
diff --git a/go.sum b/go.sum
index 2084f1babae8d..c143a9d52d7ba 100644
--- a/go.sum
+++ b/go.sum
@@ -565,8 +565,8 @@ github.com/aws/aws-sdk-go v1.15.24/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZo
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.34.34/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.44.311 h1:60i8hyVMOXqabKJQPCq4qKRBQ6hRafI/WOcDxGM+J7Q=
-github.com/aws/aws-sdk-go v1.44.311/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.315 h1:kYTC+Y/bJ9M7QQRvkI/LN5OWvhkIOL/YuFFRhS5QAOo=
+github.com/aws/aws-sdk-go v1.44.315/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/aws/aws-sdk-go-v2 v1.16.0 h1:cBAYjiiexRAg9v2z9vb6IdxAa7ef4KCtjW7w7e3GxGo=
github.com/aws/aws-sdk-go-v2 v1.16.0/go.mod h1:lJYcuZZEHWNIb6ugJjbQY1fykdoobWbOS7kJYb4APoI=
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index f8ab885015118..f3d3fe1206a48 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -39,6 +39,7 @@ const (
EuWest1RegionID = "eu-west-1" // Europe (Ireland).
EuWest2RegionID = "eu-west-2" // Europe (London).
EuWest3RegionID = "eu-west-3" // Europe (Paris).
+ IlCentral1RegionID = "il-central-1" // Israel (Tel Aviv).
MeCentral1RegionID = "me-central-1" // Middle East (UAE).
MeSouth1RegionID = "me-south-1" // Middle East (Bahrain).
SaEast1RegionID = "sa-east-1" // South America (Sao Paulo).
@@ -117,7 +118,7 @@ var awsPartition = partition{
DNSSuffix: "amazonaws.com",
RegionRegex: regionRegex{
Regexp: func() *regexp.Regexp {
- reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$")
+ reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$")
return reg
}(),
},
@@ -213,6 +214,9 @@ var awsPartition = partition{
"eu-west-3": region{
Description: "Europe (Paris)",
},
+ "il-central-1": region{
+ Description: "Israel (Tel Aviv)",
+ },
"me-central-1": region{
Description: "Middle East (UAE)",
},
@@ -356,6 +360,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -494,6 +501,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -1439,6 +1449,14 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{
+ Hostname: "api.ecr.il-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "il-central-1",
+ },
+ },
endpointKey{
Region: "me-central-1",
}: endpoint{
@@ -1907,6 +1925,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -2247,6 +2268,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -2390,6 +2414,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -2475,6 +2502,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -2617,6 +2647,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -3387,6 +3420,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -3466,6 +3502,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "athena.ap-south-1.api.aws",
},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.ap-south-2.api.aws",
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -3493,6 +3538,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "athena.ap-southeast-3.api.aws",
},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.ap-southeast-4.api.aws",
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -3511,6 +3565,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "athena.eu-central-1.api.aws",
},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.eu-central-2.api.aws",
+ },
endpointKey{
Region: "eu-north-1",
}: endpoint{},
@@ -3529,6 +3592,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "athena.eu-south-1.api.aws",
},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.eu-south-2.api.aws",
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -3812,6 +3884,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -4735,6 +4810,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -4875,6 +4953,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -5040,6 +5121,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -5195,6 +5279,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -5417,6 +5504,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -5733,6 +5823,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -6851,6 +6944,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -7019,6 +7115,12 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
@@ -8116,6 +8218,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -8208,6 +8313,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -8359,6 +8467,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -8578,6 +8689,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -8587,18 +8701,27 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -8608,6 +8731,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -8741,6 +8867,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -8870,6 +8999,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "local",
}: endpoint{
@@ -9075,6 +9207,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -9252,6 +9387,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -9421,6 +9559,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -9599,6 +9740,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -9717,6 +9861,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -9886,6 +10033,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -10523,6 +10673,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -10691,6 +10844,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -10868,6 +11024,9 @@ var awsPartition = partition{
},
"emr-containers": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
@@ -10901,6 +11060,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -11217,6 +11379,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -11398,6 +11563,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -11630,6 +11798,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -12817,6 +12988,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -12826,6 +13000,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -12889,6 +13066,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -13938,6 +14118,12 @@ var awsPartition = partition{
}: endpoint{
Hostname: "internetmonitor.ca-central-1.api.aws",
},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "internetmonitor-fips.ca-central-1.api.aws",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{
@@ -13978,6 +14164,11 @@ var awsPartition = partition{
}: endpoint{
Hostname: "internetmonitor.eu-west-3.api.aws",
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{
+ Hostname: "internetmonitor.il-central-1.api.aws",
+ },
endpointKey{
Region: "me-central-1",
}: endpoint{
@@ -13998,21 +14189,45 @@ var awsPartition = partition{
}: endpoint{
Hostname: "internetmonitor.us-east-1.api.aws",
},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "internetmonitor-fips.us-east-1.api.aws",
+ },
endpointKey{
Region: "us-east-2",
}: endpoint{
Hostname: "internetmonitor.us-east-2.api.aws",
},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "internetmonitor-fips.us-east-2.api.aws",
+ },
endpointKey{
Region: "us-west-1",
}: endpoint{
Hostname: "internetmonitor.us-west-1.api.aws",
},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "internetmonitor-fips.us-west-1.api.aws",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{
Hostname: "internetmonitor.us-west-2.api.aws",
},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "internetmonitor-fips.us-west-2.api.aws",
+ },
},
},
"iot": service{
@@ -15427,6 +15642,11 @@ var awsPartition = partition{
}: endpoint{
Hostname: "kendra-ranking.eu-west-3.api.aws",
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.il-central-1.api.aws",
+ },
endpointKey{
Region: "me-central-1",
}: endpoint{
@@ -15580,6 +15800,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -16135,6 +16358,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kms-fips.il-central-1.amazonaws.com",
+ },
endpointKey{
Region: "il-central-1-fips",
}: endpoint{
@@ -16142,6 +16374,7 @@ var awsPartition = partition{
CredentialScope: credentialScope{
Region: "il-central-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
Region: "me-central-1",
@@ -16291,6 +16524,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -16300,6 +16536,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -16363,6 +16602,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -16628,6 +16870,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.il-central-1.api.aws",
+ },
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -17263,6 +17514,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -18405,6 +18659,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -18490,6 +18747,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -18953,6 +19213,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -19579,6 +19842,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -20324,6 +20590,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -21263,6 +21532,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -21423,6 +21695,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -21547,6 +21822,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -21987,6 +22265,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -22488,6 +22769,11 @@ var awsPartition = partition{
}: endpoint{
Hostname: "resource-explorer-2.eu-west-3.api.aws",
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{
+ Hostname: "resource-explorer-2.il-central-1.api.aws",
+ },
endpointKey{
Region: "sa-east-1",
}: endpoint{
@@ -22613,6 +22899,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -23110,6 +23399,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -23481,6 +23773,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3.dualstack.il-central-1.amazonaws.com",
+ },
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -24518,6 +24819,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -25053,6 +25357,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -25062,6 +25369,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -25074,12 +25384,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -25384,6 +25700,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "servicediscovery.eu-west-3.api.aws",
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.il-central-1.api.aws",
+ },
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -25897,75 +26222,6 @@ var awsPartition = partition{
},
"sms": service{
Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "sms-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "sms-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "sms-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
@@ -25975,39 +26231,6 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sms-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sms-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sms-fips.us-west-1.amazonaws.com",
- },
endpointKey{
Region: "us-west-2",
}: endpoint{},
@@ -26511,6 +26734,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -26662,6 +26888,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -26825,6 +27054,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -27424,6 +27656,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -27701,6 +27936,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "local",
}: endpoint{
@@ -27804,6 +28042,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -28011,6 +28252,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -28156,6 +28400,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -28265,6 +28512,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -29816,6 +30066,7 @@ var awsPartition = partition{
CredentialScope: credentialScope{
Region: "il-central-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-me-central-1",
@@ -29880,6 +30131,23 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{
+ Hostname: "waf-regional.il-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "il-central-1",
+ },
+ },
+ endpointKey{
+ Region: "il-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "waf-regional-fips.il-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "il-central-1",
+ },
+ },
endpointKey{
Region: "me-central-1",
}: endpoint{
@@ -30530,6 +30798,7 @@ var awsPartition = partition{
CredentialScope: credentialScope{
Region: "il-central-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-me-central-1",
@@ -30594,6 +30863,23 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{
+ Hostname: "wafv2.il-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "il-central-1",
+ },
+ },
+ endpointKey{
+ Region: "il-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "wafv2-fips.il-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "il-central-1",
+ },
+ },
endpointKey{
Region: "me-central-1",
}: endpoint{
@@ -31108,6 +31394,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -32761,9 +33050,6 @@ var awscnPartition = partition{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
},
},
"snowball": service{
@@ -38240,15 +38526,6 @@ var awsusgovPartition = partition{
},
"sms": service{
Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "sms-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
endpointKey{
Region: "fips-us-gov-west-1",
}: endpoint{
@@ -38258,15 +38535,6 @@ var awsusgovPartition = partition{
},
Deprecated: boxedTrue,
},
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sms-fips.us-gov-east-1.amazonaws.com",
- },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
@@ -39836,6 +40104,15 @@ var awsisoPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-us-iso-west-1",
+ }: endpoint{
+ Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
@@ -39845,6 +40122,15 @@ var awsisoPartition = partition{
}: endpoint{
Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov",
},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov",
+ },
},
},
"rds": service{
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
index bd1c349966ea7..4f7ecb098c267 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
-const SDKVersion = "1.44.311"
+const SDKVersion = "1.44.315"
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 9829df4e3bbdc..504b8f32169bd 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -251,7 +251,7 @@ github.com/armon/go-metrics/prometheus
# github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2
## explicit; go 1.13
github.com/asaskevich/govalidator
-# github.com/aws/aws-sdk-go v1.44.311
+# github.com/aws/aws-sdk-go v1.44.315
## explicit; go 1.11
github.com/aws/aws-sdk-go/aws
github.com/aws/aws-sdk-go/aws/arn
|
build
|
bump github.com/aws/aws-sdk-go from 1.44.311 to 1.44.315 (#10159)
|
e2952f9ce19e4f0eb6731952b3095bf27b2cb27d
|
2022-09-07 14:14:31
|
Ludovic Cleroux
|
troubleshooting: instructions for loki + istio (#6205)
| false
|
diff --git a/docs/sources/operations/troubleshooting.md b/docs/sources/operations/troubleshooting.md
index 4250301bca57c..02733ff5a1278 100644
--- a/docs/sources/operations/troubleshooting.md
+++ b/docs/sources/operations/troubleshooting.md
@@ -155,3 +155,28 @@ If you deploy with Helm, use the following command:
```bash
$ helm upgrade --install loki loki/loki --set "loki.tracing.jaegerAgentHost=YOUR_JAEGER_AGENT_HOST"
```
+
+## Running Loki with Istio Sidecars
+
+An Istio sidecar runs alongside a pod. It intercepts all traffic to and from the pod.
+When a pod tries to communicate with another pod using a given protocol, Istio inspects the destination's service using [Protocol Selection](https://istio.io/latest/docs/ops/configuration/traffic-management/protocol-selection/).
+This mechanism uses a convention on the port name (for example, `http-my-port` or `grpc-my-port`)
+to determine how to handle this outgoing traffic. Istio can then do operations such as authorization and smart routing.
+
+This works fine when one pod communicates with another pod using a hostname. However,
+Istio does not allow pods to communicate with other pods using IP addresses,
+unless the traffic type is `tcp`.
+
+Loki internally uses DNS to resolve the IP addresses of the different components.
+Loki attempts to send a request to the IP address of those pods. The
+Loki services have a `grpc` (:9095/:9096) port defined, so Istio will consider
+this to be `grpc` traffic. It will not allow Loki components to reach each other using
+an IP address. So, the traffic will fail, and the ring will remain unhealthy.
+
+The solution to this issue is to add `appProtocol: tcp` to all of the `grpc`
+(:9095) and `grpclb` (:9096) service ports of Loki components. This
+overrides the Istio protocol selection and forces Istio to consider this traffic raw `tcp`, which allows pods to communicate using raw IP addresses.
+
+This disables part of the Istio traffic interception mechanism,
+but still enables mTLS. This allows pods to communicate with each other
+using IP addresses over gRPC.
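
As a sketch of the fix, here are the relevant Service ports expressed with client-go types; a plain YAML manifest would set the same `appProtocol: tcp` field. The port names and numbers follow the text above, and the rest is illustrative.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	tcp := "tcp"
	// Setting appProtocol to "tcp" overrides Istio's name-based protocol
	// detection, so the gRPC ports are treated as raw TCP traffic.
	ports := []corev1.ServicePort{
		{Name: "grpc", Port: 9095, AppProtocol: &tcp},
		{Name: "grpclb", Port: 9096, AppProtocol: &tcp},
	}
	for _, p := range ports {
		fmt.Printf("%s:%d appProtocol=%s\n", p.Name, p.Port, *p.AppProtocol)
	}
}
```
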
|
troubleshooting
|
instructions for loki + istio (#6205)
|
82161fe329f6aef99c52368a47a30f096887d14e
|
2022-11-22 20:26:25
|
Periklis Tsirakidis
|
operator: Fix object storage TLS spec CAKey descriptor (#7744)
| false
|
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index 34ffe82f460f9..4b75cd6e421cc 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,5 +1,6 @@
## Main
+- [7744](https://github.com/grafana/loki/pull/7744) **periklis**: Fix object storage TLS spec CAKey descriptor
- [7716](https://github.com/grafana/loki/pull/7716) **aminesnow**: Migrate API docs generation tool
- [7710](https://github.com/grafana/loki/pull/7710) **periklis**: Fix LokiStackController watches for cluster-scoped resources
- [7682](https://github.com/grafana/loki/pull/7682) **periklis**: Refactor cluster proxy to use configv1.Proxy on OpenShift
diff --git a/operator/apis/loki/v1/lokistack_types.go b/operator/apis/loki/v1/lokistack_types.go
index 30d33941d7882..169a52def98d4 100644
--- a/operator/apis/loki/v1/lokistack_types.go
+++ b/operator/apis/loki/v1/lokistack_types.go
@@ -353,7 +353,7 @@ type ObjectStorageTLSSpec struct {
// +optional
// +kubebuilder:validation:optional
// +kubebuilder:default:=service-ca.crt
- // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:io.kubernetes:ConfigMap",displayName="CA ConfigMap Key"
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="CA ConfigMap Key"
CAKey string `json:"caKey,omitempty"`
// CA is the name of a ConfigMap containing a CA certificate.
// It needs to be in the same namespace as the LokiStack custom resource.
diff --git a/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml
index 977d1f74629e9..1a9e47425397e 100644
--- a/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml
@@ -489,8 +489,6 @@ spec:
It needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Key
path: storage.tls.caKey
- x-descriptors:
- - urn:alm:descriptor:io.kubernetes:ConfigMap
- description: CA is the name of a ConfigMap containing a CA certificate. It
needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Name
diff --git a/operator/config/manifests/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/bases/loki-operator.clusterserviceversion.yaml
index 213ff7ed84089..6920ba8f4b223 100644
--- a/operator/config/manifests/bases/loki-operator.clusterserviceversion.yaml
+++ b/operator/config/manifests/bases/loki-operator.clusterserviceversion.yaml
@@ -344,8 +344,6 @@ spec:
It needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Key
path: storage.tls.caKey
- x-descriptors:
- - urn:alm:descriptor:io.kubernetes:ConfigMap
- description: CA is the name of a ConfigMap containing a CA certificate. It
needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Name
|
operator
|
Fix object storage TLS spec CAKey descriptor (#7744)
|
ffb961c439b086136db84b6296d35e376749b09f
|
2023-03-28 19:22:00
|
Athira Varghese
|
feat(storage): add support for IBM cloud object storage as storage client (#8826)
| false
|
diff --git a/docs/sources/alert/_index.md b/docs/sources/alert/_index.md
index 8ab06ae773885..4df0343cb47f6 100644
--- a/docs/sources/alert/_index.md
+++ b/docs/sources/alert/_index.md
@@ -264,7 +264,7 @@ ruler:
## Ruler storage
-The Ruler supports five kinds of storage: azure, gcs, s3, swift, and local. Most kinds of storage work with the sharded Ruler configuration in an obvious way, i.e. configure all Rulers to use the same backend.
+The Ruler supports the following types of storage: `azure`, `gcs`, `s3`, `swift`, `cos` and `local`. Most kinds of storage work with the sharded Ruler configuration in an obvious way, that is, configure all Rulers to use the same backend.
The local implementation reads the rule files off of the local filesystem. This is a read-only backend that does not support the creation and deletion of rules through the [Ruler API]({{<relref "../api/#ruler">}}). Despite the fact that it reads the local filesystem this method can still be used in a sharded Ruler configuration if the operator takes care to load the same rules to every Ruler. For instance, this could be accomplished by mounting a [Kubernetes ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) onto every Ruler pod.
diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md
index fe25e33e958db..5ea41da6b129b 100644
--- a/docs/sources/configuration/_index.md
+++ b/docs/sources/configuration/_index.md
@@ -779,7 +779,7 @@ The `ruler` block configures the Loki ruler.
# options instead.
storage:
# Method to use for backend rule storage (configdb, azure, gcs, s3, swift,
- # local, bos)
+ # local, bos, cos)
# CLI flag: -ruler.storage.type
[type: <string> | default = ""]
@@ -807,6 +807,10 @@ storage:
# The CLI flags prefix for this block configuration is: ruler.storage
[swift: <swift_storage_config>]
+ # Configures backend rule storage for IBM Cloud Object Storage (COS).
+ # The CLI flags prefix for this block configuration is: ruler.storage
+ [cos: <cos_storage_config>]
+
# Configures backend rule storage for a local file system directory.
local:
# Directory to scan for rules
@@ -1786,6 +1790,10 @@ hedging:
# in period_config.
[named_stores: <named_stores_config>]
+# The cos_storage_config block configures the connection to IBM Cloud Object
+# Storage (COS) backend.
+[cos: <cos_storage_config>]
+
# Cache validity for active index entries. Should be no higher than
# -ingester.max-chunk-idle.
# CLI flag: -store.index-cache-validity
@@ -1808,16 +1816,16 @@ hedging:
# CLI flag: -store.max-chunk-batch-size
[max_chunk_batch_size: <int> | default = 50]
-# Configures storing index in an Object Store (GCS/S3/Azure/Swift/Filesystem) in
-# the form of boltdb files. Required fields only required when boltdb-shipper is
-# defined in config.
+# Configures storing index in an Object Store
+# (GCS/S3/Azure/Swift/COS/Filesystem) in the form of boltdb files. Required
+# fields only required when boltdb-shipper is defined in config.
boltdb_shipper:
# Directory where ingesters would write index files which would then be
# uploaded by shipper to configured storage
# CLI flag: -boltdb.shipper.active-index-directory
[active_index_directory: <string> | default = ""]
- # Shared store for keeping index files. Supported types: gcs, s3, azure,
+ # Shared store for keeping index files. Supported types: gcs, s3, azure, cos,
# filesystem
# CLI flag: -boltdb.shipper.shared-store
[shared_store: <string> | default = ""]
@@ -1881,7 +1889,7 @@ tsdb_shipper:
# CLI flag: -tsdb.shipper.active-index-directory
[active_index_directory: <string> | default = ""]
- # Shared store for keeping index files. Supported types: gcs, s3, azure,
+ # Shared store for keeping index files. Supported types: gcs, s3, azure, cos,
# filesystem
# CLI flag: -tsdb.shipper.shared-store
[shared_store: <string> | default = ""]
@@ -1976,7 +1984,7 @@ The `compactor` block configures the compactor component, which compacts index s
[working_directory: <string> | default = ""]
# The shared store used for storing boltdb files. Supported types: gcs, s3,
-# azure, swift, filesystem, bos.
+# azure, swift, filesystem, bos, cos.
# CLI flag: -boltdb.shipper.compactor.shared-store
[shared_store: <string> | default = ""]
@@ -2973,6 +2981,11 @@ storage:
# CLI flag: -common.storage.hedge-max-per-second
[max_per_second: <int> | default = 5]
+ # The cos_storage_config block configures the connection to IBM Cloud Object
+ # Storage (COS) backend.
+ # The CLI flags prefix for this block configuration is: common.storage
+ [cos: <cos_storage_config>]
+
[persist_tokens: <boolean>]
[replication_factor: <int>]
@@ -4227,6 +4240,77 @@ The `swift_storage_config` block configures the connection to OpenStack Object S
[request_timeout: <duration> | default = 5s]
```
+### cos_storage_config
+
+The `cos_storage_config` block configures the connection to IBM Cloud Object Storage (COS) backend. The supported CLI flags `<prefix>` used to reference this configuration block are:
+
+- `common.storage`
+- `ruler.storage`
+
+
+
+```yaml
+# Set this to `true` to force the request to use path-style addressing.
+# CLI flag: -<prefix>.cos.force-path-style
+[forcepathstyle: <boolean> | default = false]
+
+# Comma separated list of bucket names to evenly distribute chunks over.
+# CLI flag: -<prefix>.cos.buckets
+[bucketnames: <string> | default = ""]
+
+# COS Endpoint to connect to.
+# CLI flag: -<prefix>.cos.endpoint
+[endpoint: <string> | default = ""]
+
+# COS region to use.
+# CLI flag: -<prefix>.cos.region
+[region: <string> | default = ""]
+
+# COS HMAC Access Key ID.
+# CLI flag: -<prefix>.cos.access-key-id
+[access_key_id: <string> | default = ""]
+
+# COS HMAC Secret Access Key.
+# CLI flag: -<prefix>.cos.secret-access-key
+[secret_access_key: <string> | default = ""]
+
+http_config:
+ # The maximum amount of time an idle connection will be held open.
+ # CLI flag: -<prefix>.cos.http.idle-conn-timeout
+ [idle_conn_timeout: <duration> | default = 1m30s]
+
+ # If non-zero, specifies the amount of time to wait for a server's response
+ # headers after fully writing the request.
+ # CLI flag: -<prefix>.cos.http.response-header-timeout
+ [response_header_timeout: <duration> | default = 0s]
+
+# Configures back off when cos get Object.
+backoff_config:
+ # Minimum backoff time when cos get Object.
+ # CLI flag: -<prefix>.cos.min-backoff
+ [min_period: <duration> | default = 100ms]
+
+ # Maximum backoff time when cos get Object.
+ # CLI flag: -<prefix>.cos.max-backoff
+ [max_period: <duration> | default = 3s]
+
+ # Maximum number of times to retry when cos get Object.
+ # CLI flag: -<prefix>.cos.max-retries
+ [max_retries: <int> | default = 5]
+
+# IAM API key to access COS.
+# CLI flag: -<prefix>.cos.api-key
+[api_key: <string> | default = ""]
+
+# COS service instance id to use.
+# CLI flag: -<prefix>.cos.service-instance-id
+[service_instance_id: <string> | default = ""]
+
+# IAM Auth Endpoint for authentication.
+# CLI flag: -<prefix>.cos.auth-endpoint
+[auth_endpoint: <string> | default = "https://iam.cloud.ibm.com/identity/token"]
+```
+
### local_storage_config
The `local_storage_config` block configures the usage of local file system as object storage backend.
@@ -4264,6 +4348,8 @@ Named store from this example can be used by setting object_store to store-1 in
[alibabacloud: <map of string to alibabacloud_storage_config>]
[swift: <map of string to swift_storage_config>]
+
+[cos: <map of string to cos_storage_config>]
```
## Runtime Configuration file
diff --git a/docs/sources/configuration/examples.md b/docs/sources/configuration/examples.md
index 4a2951cf6c16a..534be9f51dc4b 100644
--- a/docs/sources/configuration/examples.md
+++ b/docs/sources/configuration/examples.md
@@ -333,3 +333,58 @@ storage_config:
kms_key_id: 0987dcba-09fe-87dc-65ba-ab0987654321
```
+
+## 11-COS-HMAC-Example.yaml
+
+```yaml
+
+# This partial configuration uses IBM Cloud Object Storage (COS) for chunk storage. HMAC will be used for authenticating with COS.
+
+schema_config:
+ configs:
+ - from: 2020-10-01
+ index:
+ period: 24h
+ prefix: loki_index_
+ object_store: "cos"
+ schema: v11
+ store: "boltdb-shipper"
+
+storage_config:
+ cos:
+ bucketnames: <bucket1, bucket2>
+ endpoint: <endpoint>
+ region: <region>
+ access_key_id: <access_key_id>
+ secret_access_key: <secret_access_key>
+
+```
+
+
+## 12-COS-APIKey-Example.yaml
+
+```yaml
+
+# This partial configuration uses IBM Cloud Object Storage (COS) for chunk storage. APIKey will be used for authenticating with COS.
+
+schema_config:
+ configs:
+ - from: 2020-10-01
+ index:
+ period: 24h
+ prefix: loki_index_
+ object_store: "cos"
+ schema: v11
+ store: "boltdb-shipper"
+
+storage_config:
+ cos:
+ bucketnames: <bucket1, bucket2>
+ endpoint: <endpoint>
+ region: <region>
+ api_key: <api_key_to_authenticate_with_cos>
+ service_instance_id: <cos_service_instance_id>
+ auth_endpoint: <iam_endpoint_for_authentication>
+
+```
+
diff --git a/docs/sources/configuration/examples/11-COS-HMAC-Example.yaml b/docs/sources/configuration/examples/11-COS-HMAC-Example.yaml
new file mode 100644
index 0000000000000..840252b8ffba3
--- /dev/null
+++ b/docs/sources/configuration/examples/11-COS-HMAC-Example.yaml
@@ -0,0 +1,19 @@
+# This partial configuration uses IBM Cloud Object Storage (COS) for chunk storage. HMAC will be used for authenticating with COS.
+
+schema_config:
+ configs:
+ - from: 2020-10-01
+ index:
+ period: 24h
+ prefix: loki_index_
+ object_store: "cos"
+ schema: v11
+ store: "boltdb-shipper"
+
+storage_config:
+ cos:
+ bucketnames: <bucket1, bucket2>
+ endpoint: <endpoint>
+ region: <region>
+ access_key_id: <access_key_id>
+ secret_access_key: <secret_access_key>
diff --git a/docs/sources/configuration/examples/12-COS-APIKey-Example.yaml b/docs/sources/configuration/examples/12-COS-APIKey-Example.yaml
new file mode 100644
index 0000000000000..ae4ff7fe29f8b
--- /dev/null
+++ b/docs/sources/configuration/examples/12-COS-APIKey-Example.yaml
@@ -0,0 +1,20 @@
+# This partial configuration uses IBM Cloud Object Storage (COS) for chunk storage. APIKey will be used for authenticating with COS.
+
+schema_config:
+ configs:
+ - from: 2020-10-01
+ index:
+ period: 24h
+ prefix: loki_index_
+ object_store: "cos"
+ schema: v11
+ store: "boltdb-shipper"
+
+storage_config:
+ cos:
+ bucketnames: <bucket1, bucket2>
+ endpoint: <endpoint>
+ region: <region>
+ api_key: <api_key_to_authenticate_with_cos>
+ service_instance_id: <cos_service_instance_id>
+ auth_endpoint: <iam_endpoint_for_authentication>
diff --git a/docs/sources/installation/tanka.md b/docs/sources/installation/tanka.md
index 486a92bbcf39d..a9419d0d2f9c0 100644
--- a/docs/sources/installation/tanka.md
+++ b/docs/sources/installation/tanka.md
@@ -45,7 +45,7 @@ Revise the YAML contents of `environments/loki/main.jsonnet`, updating these var
- Update the `username`, `password`, and the relevant `htpasswd` variable values.
- Update the S3 or GCS variable values, depending on your object storage type. See [storage_config](/docs/loki/latest/configuration/#storage_config) for more configuration details.
- Remove from the configuration the S3 or GCS object storage variables that are not part of your setup.
-- Update the value of `boltdb_shipper_shared_store` to the type of object storage you are using. Options are `gcs`, `s3`, `azure`, or `filesystem`. Update the `object_store` variable under the `schema_config` section to the same value.
+- Update the value of `boltdb_shipper_shared_store` to the type of object storage you are using. Options are `gcs`, `s3`, `azure`, `cos`, or `filesystem`. Update the `object_store` variable under the `schema_config` section to the same value.
- Update the Promtail configuration `container_root_path` variable's value to reflect your root path for the Docker daemon. Run `docker info | grep "Root Dir"` to acquire your root path.
- Update the `from` value in the Loki `schema_config` section to no more than 14 days prior to the current date. The `from` date represents the first day for which the `schema_config` section is valid. For example, if today is `2021-01-15`, set `from` to `2021-01-01`. This recommendation is based on Loki's default acceptance of log lines up to 14 days in the past. The `reject_old_samples_max_age` configuration variable controls the acceptance range.
diff --git a/docs/sources/operations/storage/_index.md b/docs/sources/operations/storage/_index.md
index d726e1f8b9cf4..36033c1f6952e 100644
--- a/docs/sources/operations/storage/_index.md
+++ b/docs/sources/operations/storage/_index.md
@@ -45,6 +45,7 @@ The following are supported for the chunks:
- [Google Cloud Storage](https://cloud.google.com/storage/)
- [Filesystem]({{<relref "filesystem">}}) (please read more about the filesystem to understand the pros/cons before using with production data)
- [Baidu Object Storage](https://cloud.baidu.com/product/bos.html)
+- [IBM Cloud Object Storage](https://www.ibm.com/cloud/object-storage)
## Cloud Storage Permissions
@@ -109,6 +110,12 @@ Resources: `*`
Resources: `arn:aws:iam::<aws_account_id>:role/<role_name>`
+### IBM Cloud Object Storage
+
+When using IBM Cloud Object Storage (COS) as object storage, the IAM `Writer` role is needed.
+
+See the [IBM Cloud Object Storage section]({{<relref "../../storage/#ibm-cloud-object-storage">}}) on the storage page for a detailed setup guide.
+
## Chunk Format
```
diff --git a/docs/sources/storage/_index.md b/docs/sources/storage/_index.md
index c7d504e39812b..7d0f27656d3fd 100644
--- a/docs/sources/storage/_index.md
+++ b/docs/sources/storage/_index.md
@@ -42,6 +42,9 @@ S3 is AWS's hosted object store. It is a good candidate for a managed object sto
Blob Storage is Microsoft Azure's hosted object store. It is a good candidate for a managed object store, especially when you're already running on Azure, and is production safe.
You can authenticate Blob Storage access by using a storage account name and key or by using a Service Principal.
+### IBM Cloud Object Storage (COS)
+[COS](https://www.ibm.com/cloud/object-storage) is IBM Cloud's hosted object store. It is a good candidate for a managed object store, especially when you're already running on IBM Cloud, and is production safe.
+
### Notable Mentions
You may use any substitutable services, such as those that implement the S3 API like [MinIO](https://min.io/).
@@ -305,6 +308,29 @@ This guide assumes a provisioned EKS cluster.
`bucket_name` variable.
+### IBM Cloud Object Storage
+
+```yaml
+schema_config:
+ configs:
+ - from: 2020-10-01
+ index:
+ period: 24h
+ prefix: loki_index_
+ object_store: "cos"
+ schema: v11
+ store: "boltdb-shipper"
+
+storage_config:
+ cos:
+ bucketnames: <bucket1, bucket2>
+ endpoint: <endpoint>
+ api_key: <api_key_to_authenticate_with_cos>
+ region: <region>
+ service_instance_id: <cos_service_instance_id>
+ auth_endpoint: <iam_endpoint_for_authentication>
+```
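+
+To authenticate with HMAC credentials instead of an IAM API key, replace `api_key`, `service_instance_id`, and `auth_endpoint` with `access_key_id` and `secret_access_key`.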
+
### On prem deployment (Cassandra+Cassandra)
**Keeping this for posterity, but this is likely not a common config. Cassandra should work and could be faster in some situations but is likely much more expensive.**
diff --git a/docs/sources/upgrading/_index.md b/docs/sources/upgrading/_index.md
index 507db423cf704..899f3974e1bb4 100644
--- a/docs/sources/upgrading/_index.md
+++ b/docs/sources/upgrading/_index.md
@@ -353,7 +353,7 @@ Following 2 compactor configs that were defined as command line arguments in jso
[working_directory: <string>]
# The shared store used for storing boltdb files.
-# Supported types: gcs, s3, azure, swift, filesystem.
+# Supported types: gcs, s3, azure, swift, cos, filesystem.
# CLI flag: -boltdb.shipper.compactor.shared-store
[shared_store: <string>]
```
diff --git a/go.mod b/go.mod
index b1cc487b4937a..0db151b3eaf70 100644
--- a/go.mod
+++ b/go.mod
@@ -114,6 +114,7 @@ require (
require (
github.com/Azure/go-autorest/autorest v0.11.28
+ github.com/IBM/ibm-cos-sdk-go v1.9.4
github.com/fsnotify/fsnotify v1.6.0
github.com/grafana/loki/pkg/push v0.0.0-20230127102416-571f88bc5765
github.com/heroku/x v0.0.55
diff --git a/go.sum b/go.sum
index 4748da98efd5a..10c4f254670db 100644
--- a/go.sum
+++ b/go.sum
@@ -150,6 +150,8 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
github.com/DataDog/datadog-go v0.0.0-20160329135253-cc2f4770f4d6/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM=
+github.com/IBM/ibm-cos-sdk-go v1.9.4 h1:jGIcufCP0ys7QFJKEYSy/0EWn3sl5kOo2LKfa6x4gHk=
+github.com/IBM/ibm-cos-sdk-go v1.9.4/go.mod h1:1VnKWJhPE536IvitwDxZFH9ycmj/7VDOiw8Mjljb160=
github.com/Jeffail/gabs v1.1.0/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/MasslessParticle/azure-storage-blob-go v0.14.1-0.20220216145902-b5e698eff68e h1:HisBR+gQKIwJqDe1iNVqUDk+GTRE2IZAbl+fLoDKNBs=
@@ -1665,6 +1667,7 @@ golang.org/x/net v0.0.0-20220725212005-46097bf591d3/go.mod h1:AaygXjzTFtRAg2ttMY
golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/oauth2 v0.0.0-20170807180024-9a379c6b3e95/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1822,12 +1825,14 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1842,6 +1847,7 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
diff --git a/pkg/loki/common/common.go b/pkg/loki/common/common.go
index 29f39fe15b74b..501d39c498889 100644
--- a/pkg/loki/common/common.go
+++ b/pkg/loki/common/common.go
@@ -13,6 +13,7 @@ import (
"github.com/grafana/loki/pkg/storage/chunk/client/gcp"
"github.com/grafana/loki/pkg/storage/chunk/client/hedging"
"github.com/grafana/loki/pkg/storage/chunk/client/openstack"
+ "github.com/grafana/loki/pkg/storage/chunk/client/ibmcloud"
"github.com/grafana/loki/pkg/util"
util_log "github.com/grafana/loki/pkg/util/log"
)
@@ -74,6 +75,7 @@ type Storage struct {
Swift openstack.SwiftConfig `yaml:"swift"`
FSConfig FilesystemConfig `yaml:"filesystem"`
Hedging hedging.Config `yaml:"hedging"`
+ COS ibmcloud.COSConfig `yaml:"cos"`
}
func (s *Storage) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
@@ -85,6 +87,7 @@ func (s *Storage) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
s.BOS.RegisterFlagsWithPrefix(prefix, f)
s.FSConfig.RegisterFlagsWithPrefix(prefix, f)
s.Hedging.RegisterFlagsWithPrefix(prefix, f)
+ s.COS.RegisterFlagsWithPrefix(prefix, f)
}
type FilesystemConfig struct {
diff --git a/pkg/ruler/base/storage.go b/pkg/ruler/base/storage.go
index 5ccb19fce630a..60841ff019fe7 100644
--- a/pkg/ruler/base/storage.go
+++ b/pkg/ruler/base/storage.go
@@ -25,6 +25,7 @@ import (
"github.com/grafana/loki/pkg/storage/chunk/client/baidubce"
"github.com/grafana/loki/pkg/storage/chunk/client/gcp"
"github.com/grafana/loki/pkg/storage/chunk/client/hedging"
+ "github.com/grafana/loki/pkg/storage/chunk/client/ibmcloud"
"github.com/grafana/loki/pkg/storage/chunk/client/openstack"
)
@@ -40,6 +41,7 @@ type RuleStoreConfig struct {
S3 aws.S3Config `yaml:"s3" doc:"description=Configures backend rule storage for S3."`
BOS baidubce.BOSStorageConfig `yaml:"bos" doc:"description=Configures backend rule storage for Baidu Object Storage (BOS)."`
Swift openstack.SwiftConfig `yaml:"swift" doc:"description=Configures backend rule storage for Swift."`
+ COS ibmcloud.COSConfig `yaml:"cos" doc:"description=Configures backend rule storage for IBM Cloud Object Storage (COS)."`
Local local.Config `yaml:"local" doc:"description=Configures backend rule storage for a local file system directory."`
mock rulestore.RuleStore `yaml:"-"`
@@ -54,7 +56,8 @@ func (cfg *RuleStoreConfig) RegisterFlags(f *flag.FlagSet) {
cfg.Swift.RegisterFlagsWithPrefix("ruler.storage.", f)
cfg.Local.RegisterFlagsWithPrefix("ruler.storage.", f)
cfg.BOS.RegisterFlagsWithPrefix("ruler.storage.", f)
- f.StringVar(&cfg.Type, "ruler.storage.type", "", "Method to use for backend rule storage (configdb, azure, gcs, s3, swift, local, bos)")
+ cfg.COS.RegisterFlagsWithPrefix("ruler.storage.", f)
+ f.StringVar(&cfg.Type, "ruler.storage.type", "", "Method to use for backend rule storage (configdb, azure, gcs, s3, swift, local, bos, cos)")
}
// Validate config and returns error on failure
@@ -102,6 +105,8 @@ func NewLegacyRuleStore(cfg RuleStoreConfig, hedgeCfg hedging.Config, clientMetr
client, err = baidubce.NewBOSObjectStorage(&cfg.BOS)
case "swift":
client, err = openstack.NewSwiftObjectClient(cfg.Swift, hedgeCfg)
+ case "cos":
+ client, err = ibmcloud.NewCOSObjectClient(cfg.COS, hedgeCfg)
case "local":
return local.NewLocalRulesClient(cfg.Local, loader)
default:
diff --git a/pkg/storage/chunk/client/ibmcloud/cos_object_client.go b/pkg/storage/chunk/client/ibmcloud/cos_object_client.go
new file mode 100644
index 0000000000000..bfa8543af74c5
--- /dev/null
+++ b/pkg/storage/chunk/client/ibmcloud/cos_object_client.go
@@ -0,0 +1,379 @@
+package ibmcloud
+
+import (
+ "context"
+ "flag"
+ "hash/fnv"
+ "io"
+ "net"
+ "net/http"
+ "strings"
+ "time"
+
+ ibm "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam"
+ "github.com/IBM/ibm-cos-sdk-go/aws/session"
+ cos "github.com/IBM/ibm-cos-sdk-go/service/s3"
+ cosiface "github.com/IBM/ibm-cos-sdk-go/service/s3/s3iface"
+
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/grafana/dskit/backoff"
+	"github.com/grafana/dskit/flagext"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/weaveworks/common/instrument"
+
+	"github.com/grafana/loki/pkg/storage/chunk/client"
+	"github.com/grafana/loki/pkg/storage/chunk/client/hedging"
+)
+
+const defaultCOSAuthEndpoint = "https://iam.cloud.ibm.com/identity/token"
+
+var (
+ errInvalidCOSHMACCredentials = errors.New("must supply both an Access Key ID and Secret Access Key or neither")
+ errEmptyRegion = errors.New("region should not be empty")
+ errEmptyEndpoint = errors.New("endpoint should not be empty")
+ errEmptyBucket = errors.New("at least one bucket name must be specified")
+ errCOSConfig = "failed to build cos config"
+ errServiceInstanceID = errors.New("must supply ServiceInstanceID")
+	errInvalidCredentials        = errors.New("must supply either an Access Key ID and Secret Access Key or an API Key")
+)
+
+var cosRequestDuration = instrument.NewHistogramCollector(prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: "loki",
+ Name: "cos_request_duration_seconds",
+ Help: "Time spent doing cos requests.",
+ Buckets: []float64{.025, .05, .1, .25, .5, 1, 2},
+}, []string{"operation", "status_code"}))
+
+// InjectRequestMiddleware gives users of this client the ability to make arbitrary
+// changes to outgoing requests.
+type InjectRequestMiddleware func(next http.RoundTripper) http.RoundTripper
+
+func init() {
+ cosRequestDuration.Register()
+}
+
+// COSConfig specifies config for storing chunks on IBM cos.
+type COSConfig struct {
+ ForcePathStyle bool `yaml:"forcepathstyle"`
+ BucketNames string `yaml:"bucketnames"`
+ Endpoint string `yaml:"endpoint"`
+ Region string `yaml:"region"`
+ AccessKeyID string `yaml:"access_key_id"`
+ SecretAccessKey flagext.Secret `yaml:"secret_access_key"`
+ HTTPConfig HTTPConfig `yaml:"http_config"`
+	BackoffConfig     backoff.Config `yaml:"backoff_config" doc:"description=Configures backoff for retrying failed COS GetObject requests."`
+ APIKey flagext.Secret `yaml:"api_key"`
+ ServiceInstanceID string `yaml:"service_instance_id"`
+ AuthEndpoint string `yaml:"auth_endpoint"`
+}
+
+// HTTPConfig stores the http.Transport configuration
+type HTTPConfig struct {
+ IdleConnTimeout time.Duration `yaml:"idle_conn_timeout"`
+ ResponseHeaderTimeout time.Duration `yaml:"response_header_timeout"`
+}
+
+// RegisterFlags adds the flags required to config this to the given FlagSet
+func (cfg *COSConfig) RegisterFlags(f *flag.FlagSet) {
+ cfg.RegisterFlagsWithPrefix("", f)
+}
+
+// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet with a specified prefix
+func (cfg *COSConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
+ f.BoolVar(&cfg.ForcePathStyle, prefix+"cos.force-path-style", false, "Set this to `true` to force the request to use path-style addressing.")
+	f.StringVar(&cfg.BucketNames, prefix+"cos.buckets", "", "Comma-separated list of bucket names to evenly distribute chunks over.")
+
+ f.StringVar(&cfg.Endpoint, prefix+"cos.endpoint", "", "COS Endpoint to connect to.")
+ f.StringVar(&cfg.Region, prefix+"cos.region", "", "COS region to use.")
+ f.StringVar(&cfg.AccessKeyID, prefix+"cos.access-key-id", "", "COS HMAC Access Key ID.")
+ f.Var(&cfg.SecretAccessKey, prefix+"cos.secret-access-key", "COS HMAC Secret Access Key.")
+
+ f.DurationVar(&cfg.HTTPConfig.IdleConnTimeout, prefix+"cos.http.idle-conn-timeout", 90*time.Second, "The maximum amount of time an idle connection will be held open.")
+ f.DurationVar(&cfg.HTTPConfig.ResponseHeaderTimeout, prefix+"cos.http.response-header-timeout", 0, "If non-zero, specifies the amount of time to wait for a server's response headers after fully writing the request.")
+
+	f.DurationVar(&cfg.BackoffConfig.MinBackoff, prefix+"cos.min-backoff", 100*time.Millisecond, "Minimum backoff time for COS GetObject retries.")
+	f.DurationVar(&cfg.BackoffConfig.MaxBackoff, prefix+"cos.max-backoff", 3*time.Second, "Maximum backoff time for COS GetObject retries.")
+	f.IntVar(&cfg.BackoffConfig.MaxRetries, prefix+"cos.max-retries", 5, "Maximum number of times to retry a COS GetObject request.")
+
+ f.Var(&cfg.APIKey, prefix+"cos.api-key", "IAM API key to access COS.")
+ f.StringVar(&cfg.AuthEndpoint, prefix+"cos.auth-endpoint", defaultCOSAuthEndpoint, "IAM Auth Endpoint for authentication.")
+	f.StringVar(&cfg.ServiceInstanceID, prefix+"cos.service-instance-id", "", "COS service instance ID to use.")
+}
+
+type COSObjectClient struct {
+ cfg COSConfig
+
+ bucketNames []string
+ cos cosiface.S3API
+ hedgedCOS cosiface.S3API
+}
+
+// NewCOSObjectClient makes a new COS backed ObjectClient.
+func NewCOSObjectClient(cfg COSConfig, hedgingCfg hedging.Config) (*COSObjectClient, error) {
+ bucketNames, err := buckets(cfg)
+ if err != nil {
+ return nil, err
+ }
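+	// Build two clients: a plain one for writes, deletes, and lists, and a
+	// hedged one used by GetObject to cut tail latency on reads.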
+ cosClient, err := buildCOSClient(cfg, hedgingCfg, false)
+ if err != nil {
+ return nil, errors.Wrap(err, errCOSConfig)
+ }
+ cosClientHedging, err := buildCOSClient(cfg, hedgingCfg, true)
+ if err != nil {
+ return nil, errors.Wrap(err, errCOSConfig)
+ }
+ client := COSObjectClient{
+ cfg: cfg,
+ cos: cosClient,
+ hedgedCOS: cosClientHedging,
+ bucketNames: bucketNames,
+ }
+ return &client, nil
+}
+
+func validate(cfg COSConfig) error {
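+	// Credentials must be either an HMAC pair (Access Key ID plus Secret
+	// Access Key) or an IAM API key; a partial HMAC pair is rejected.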
+ if (cfg.AccessKeyID == "" && cfg.SecretAccessKey.String() == "") && cfg.APIKey.String() == "" {
+ return errInvalidCredentials
+ }
+
+ if cfg.AccessKeyID != "" && cfg.SecretAccessKey.String() == "" ||
+ cfg.AccessKeyID == "" && cfg.SecretAccessKey.String() != "" {
+ return errInvalidCOSHMACCredentials
+ }
+
+ if cfg.Region == "" {
+ return errEmptyRegion
+ }
+
+ if cfg.Endpoint == "" {
+ return errEmptyEndpoint
+ }
+
+ if cfg.APIKey.String() != "" && cfg.AuthEndpoint == "" {
+ cfg.AuthEndpoint = defaultCOSAuthEndpoint
+ }
+
+ if cfg.APIKey.String() != "" && cfg.ServiceInstanceID == "" {
+ return errServiceInstanceID
+ }
+ return nil
+}
+
+func getCreds(cfg COSConfig) *credentials.Credentials {
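+	// An IAM API key takes precedence over HMAC credentials when both are set.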
+ if cfg.APIKey.String() != "" {
+ return ibmiam.NewStaticCredentials(ibm.NewConfig(),
+ cfg.AuthEndpoint, cfg.APIKey.String(), cfg.ServiceInstanceID)
+ }
+ if cfg.AccessKeyID != "" && cfg.SecretAccessKey.String() != "" {
+ return credentials.NewStaticCredentials(cfg.AccessKeyID, cfg.SecretAccessKey.String(), "")
+ }
+ return nil
+}
+
+func buildCOSClient(cfg COSConfig, hedgingCfg hedging.Config, hedging bool) (*cos.S3, error) {
+ var err error
+ if err = validate(cfg); err != nil {
+ return nil, err
+ }
+ cosConfig := &ibm.Config{}
+
+ cosConfig = cosConfig.WithMaxRetries(0) // We do our own retries, so we can monitor them
+ cosConfig = cosConfig.WithS3ForcePathStyle(cfg.ForcePathStyle) // support for Path Style cos url if has the flag
+
+ cosConfig = cosConfig.WithEndpoint(cfg.Endpoint)
+
+ cosConfig = cosConfig.WithRegion(cfg.Region)
+
+ cosConfig = cosConfig.WithCredentials(getCreds(cfg))
+
+ transport := http.RoundTripper(&http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }).DialContext,
+ MaxIdleConns: 200,
+ IdleConnTimeout: cfg.HTTPConfig.IdleConnTimeout,
+ MaxIdleConnsPerHost: 200,
+ TLSHandshakeTimeout: 3 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ ResponseHeaderTimeout: cfg.HTTPConfig.ResponseHeaderTimeout,
+ })
+
+ httpClient := &http.Client{
+ Transport: transport,
+ }
+
+ if hedging {
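+		// Wrap the HTTP client so duplicate (hedged) requests can be issued
+		// to reduce tail latency on slow reads.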
+ httpClient, err = hedgingCfg.ClientWithRegisterer(httpClient, prometheus.WrapRegistererWithPrefix("loki_", prometheus.DefaultRegisterer))
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ cosConfig = cosConfig.WithHTTPClient(httpClient)
+
+ sess, err := session.NewSession(cosConfig)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to create new cos session")
+ }
+
+ cosClient := cos.New(sess)
+
+ return cosClient, nil
+}
+
+func buckets(cfg COSConfig) ([]string, error) {
+ // bucketnames
+ var bucketNames []string
+
+ if cfg.BucketNames != "" {
+		bucketNames = strings.Split(cfg.BucketNames, ",") // comma-separated list of bucket names
+ }
+
+ if len(bucketNames) == 0 {
+ return nil, errEmptyBucket
+ }
+ return bucketNames, nil
+}
+
+// bucketFromKey maps a key to a bucket name
+func (c *COSObjectClient) bucketFromKey(key string) string {
+ if len(c.bucketNames) == 0 {
+ return ""
+ }
+
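+	// FNV-32a is cheap and deterministic, so a given key always maps to the
+	// same bucket.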
+ hasher := fnv.New32a()
+ hasher.Write([]byte(key)) //nolint: errcheck
+ hash := hasher.Sum32()
+
+ return c.bucketNames[hash%uint32(len(c.bucketNames))]
+}
+
+// Stop fulfills the chunk.ObjectClient interface
+func (c *COSObjectClient) Stop() {}
+
+// DeleteObject deletes the specified objectKey from the appropriate S3 bucket
+func (c *COSObjectClient) DeleteObject(ctx context.Context, objectKey string) error {
+ return instrument.CollectedRequest(ctx, "COS.DeleteObject", cosRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+ deleteObjectInput := &cos.DeleteObjectInput{
+ Bucket: ibm.String(c.bucketFromKey(objectKey)),
+ Key: ibm.String(objectKey),
+ }
+
+ _, err := c.cos.DeleteObjectWithContext(ctx, deleteObjectInput)
+ return err
+ })
+}
+
+// GetObject returns a reader and the size for the specified object key from the configured S3 bucket.
+func (c *COSObjectClient) GetObject(ctx context.Context, objectKey string) (io.ReadCloser, int64, error) {
+
+ var resp *cos.GetObjectOutput
+
+ // Map the key into a bucket
+ bucket := c.bucketFromKey(objectKey)
+
+ retries := backoff.New(ctx, c.cfg.BackoffConfig)
+ err := ctx.Err()
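+	// Retry transient failures with the configured exponential backoff until
+	// the retry budget is exhausted or the context is cancelled.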
+ for retries.Ongoing() {
+ if ctx.Err() != nil {
+ return nil, 0, errors.Wrap(ctx.Err(), "ctx related error during cos getObject")
+ }
+ err = instrument.CollectedRequest(ctx, "COS.GetObject", cosRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+ var requestErr error
+ resp, requestErr = c.hedgedCOS.GetObjectWithContext(ctx, &cos.GetObjectInput{
+ Bucket: ibm.String(bucket),
+ Key: ibm.String(objectKey),
+ })
+ return requestErr
+ })
+		if err == nil && resp.Body != nil {
+			var size int64
+			if resp.ContentLength != nil {
+				size = *resp.ContentLength
+			}
+			return resp.Body, size, nil
+		}
+ retries.Wait()
+ }
+ return nil, 0, errors.Wrap(err, "failed to get cos object")
+}
+
+// PutObject into the store
+func (c *COSObjectClient) PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error {
+ return instrument.CollectedRequest(ctx, "COS.PutObject", cosRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+ putObjectInput := &cos.PutObjectInput{
+ Body: object,
+ Bucket: ibm.String(c.bucketFromKey(objectKey)),
+ Key: ibm.String(objectKey),
+ }
+
+ _, err := c.cos.PutObjectWithContext(ctx, putObjectInput)
+ return err
+ })
+}
+
+// List implements chunk.ObjectClient.
+func (c *COSObjectClient) List(ctx context.Context, prefix, delimiter string) ([]client.StorageObject, []client.StorageCommonPrefix, error) {
+ var storageObjects []client.StorageObject
+ var commonPrefixes []client.StorageCommonPrefix
+
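+	// Walk every configured bucket, since bucketFromKey spreads objects
+	// across all of them.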
+ for i := range c.bucketNames {
+ err := instrument.CollectedRequest(ctx, "COS.List", cosRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+ input := cos.ListObjectsV2Input{
+ Bucket: ibm.String(c.bucketNames[i]),
+ Prefix: ibm.String(prefix),
+ Delimiter: ibm.String(delimiter),
+ }
+
+ for {
+ output, err := c.cos.ListObjectsV2WithContext(ctx, &input)
+ if err != nil {
+ return err
+ }
+
+ for _, content := range output.Contents {
+ storageObjects = append(storageObjects, client.StorageObject{
+ Key: *content.Key,
+ ModifiedAt: *content.LastModified,
+ })
+ }
+
+ for _, commonPrefix := range output.CommonPrefixes {
+ commonPrefixes = append(commonPrefixes, client.StorageCommonPrefix(ibm.StringValue(commonPrefix.Prefix)))
+ }
+
+ if output.IsTruncated == nil || !*output.IsTruncated {
+ // No more results to fetch
+ break
+ }
+ if output.NextContinuationToken == nil {
+ // No way to continue
+ break
+ }
+ input.SetContinuationToken(*output.NextContinuationToken)
+ }
+
+ return nil
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ return storageObjects, commonPrefixes, nil
+}
+
+// IsObjectNotFoundErr returns true if error means that object is not found. Relevant to GetObject and DeleteObject operations.
+func (c *COSObjectClient) IsObjectNotFoundErr(err error) bool {
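+	// COS is S3-compatible, so a missing object surfaces as the standard
+	// S3 NoSuchKey error code.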
+ if aerr, ok := errors.Cause(err).(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey {
+ return true
+ }
+
+ return false
+}
diff --git a/pkg/storage/chunk/client/ibmcloud/cos_object_client_test.go b/pkg/storage/chunk/client/ibmcloud/cos_object_client_test.go
new file mode 100644
index 0000000000000..7029785230da2
--- /dev/null
+++ b/pkg/storage/chunk/client/ibmcloud/cos_object_client_test.go
@@ -0,0 +1,520 @@
+package ibmcloud
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"sort"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/token"
+	"github.com/IBM/ibm-cos-sdk-go/aws/request"
+	"github.com/IBM/ibm-cos-sdk-go/service/s3"
+	"github.com/IBM/ibm-cos-sdk-go/service/s3/s3iface"
+	"github.com/grafana/dskit/backoff"
+	"github.com/grafana/dskit/flagext"
+	"github.com/pkg/errors"
+	"github.com/stretchr/testify/require"
+
+	"github.com/grafana/loki/pkg/storage/chunk/client"
+	"github.com/grafana/loki/pkg/storage/chunk/client/hedging"
+)
+
+var (
+ bucket = "test"
+ timestamp = time.Now().Local()
+
+ testData = map[string][]byte{
+ "key-1": []byte("test data 1"),
+ "key-2": []byte("test data 2"),
+ "key-3": []byte("test data 3"),
+ }
+
+	testDeleteData = map[string][]byte{
+		"key-1": []byte("test data 1"),
+	}
+
+ testListData = map[string][]byte{
+ "key-1": []byte("test data 1"),
+ "key-2": []byte("test data 2"),
+ "key-3": []byte("test data 3"),
+ }
+
+ errMissingBucket = errors.New("bucket not found")
+ errMissingKey = errors.New("key not found")
+	errMissingObject = errors.New("object data not found")
+)
+
+type mockCosClient struct {
+ s3iface.S3API
+ data map[string][]byte
+ bucket string
+}
+
+func newMockCosClient(data map[string][]byte) *mockCosClient {
+ return &mockCosClient{
+ data: data,
+ bucket: bucket,
+ }
+}
+
+func (cosClient *mockCosClient) GetObjectWithContext(ctx context.Context, input *s3.GetObjectInput, opts ...request.Option) (*s3.GetObjectOutput, error) {
+ if *input.Bucket != cosClient.bucket {
+ return &s3.GetObjectOutput{}, errMissingBucket
+ }
+
+ data, ok := cosClient.data[*input.Key]
+ if !ok {
+ return &s3.GetObjectOutput{}, errMissingKey
+ }
+
+ contentLength := int64(len(data))
+ body := io.NopCloser(bytes.NewReader(data))
+ output := s3.GetObjectOutput{
+ Body: body,
+ ContentLength: &contentLength,
+ }
+
+ return &output, nil
+}
+
+func (cosClient *mockCosClient) PutObjectWithContext(ctx context.Context, input *s3.PutObjectInput, opts ...request.Option) (*s3.PutObjectOutput, error) {
+ if *input.Bucket != cosClient.bucket {
+ return &s3.PutObjectOutput{}, errMissingBucket
+ }
+
+ dataBytes, err := io.ReadAll(input.Body)
+ if err != nil {
+ return &s3.PutObjectOutput{}, errMissingObject
+ }
+
+ if string(dataBytes) == "" {
+ return &s3.PutObjectOutput{}, errMissingObject
+ }
+
+ _, ok := cosClient.data[*input.Key]
+ if !ok {
+ cosClient.data[*input.Key] = dataBytes
+ }
+
+ return &s3.PutObjectOutput{}, nil
+}
+
+func (cosClient *mockCosClient) DeleteObjectWithContext(ctx context.Context, input *s3.DeleteObjectInput, opts ...request.Option) (*s3.DeleteObjectOutput, error) {
+ if *input.Bucket != cosClient.bucket {
+ return &s3.DeleteObjectOutput{}, errMissingBucket
+ }
+
+ if _, ok := cosClient.data[*input.Key]; !ok {
+ return &s3.DeleteObjectOutput{}, errMissingObject
+ }
+
+ delete(cosClient.data, *input.Key)
+
+ return &s3.DeleteObjectOutput{}, nil
+}
+
+func (cosClient *mockCosClient) ListObjectsV2WithContext(ctx context.Context, input *s3.ListObjectsV2Input, opts ...request.Option) (*s3.ListObjectsV2Output, error) {
+ if *input.Bucket != cosClient.bucket {
+ return &s3.ListObjectsV2Output{}, errMissingBucket
+ }
+
+ var objects []*s3.Object
+ if *input.Prefix != "key" {
+ return &s3.ListObjectsV2Output{
+ Contents: objects,
+ }, nil
+ }
+
+ for object := range cosClient.data {
+ key := object
+ objects = append(objects, &s3.Object{
+ Key: &key,
+			LastModified: &timestamp,
+ })
+ }
+
+ return &s3.ListObjectsV2Output{
+ Contents: objects,
+ }, nil
+}
+
+func Test_COSConfig(t *testing.T) {
+ tests := []struct {
+ name string
+ cosConfig COSConfig
+ expectedError error
+ }{
+ {
+ "empty accessKeyID and secretAccessKey",
+ COSConfig{
+ BucketNames: "test",
+ Endpoint: "test",
+ Region: "dummy",
+ AccessKeyID: "dummy",
+ },
+ errors.Wrap(errInvalidCOSHMACCredentials, errCOSConfig),
+ },
+ {
+ "region is empty",
+ COSConfig{
+ BucketNames: "test",
+ Endpoint: "test",
+ Region: "",
+ AccessKeyID: "dummy",
+ SecretAccessKey: flagext.SecretWithValue("dummy"),
+ },
+ errors.Wrap(errEmptyRegion, errCOSConfig),
+ },
+ {
+ "endpoint is empty",
+ COSConfig{
+ BucketNames: "test",
+ Endpoint: "",
+ Region: "dummy",
+ AccessKeyID: "dummy",
+ SecretAccessKey: flagext.SecretWithValue("dummy"),
+ },
+ errors.Wrap(errEmptyEndpoint, errCOSConfig),
+ },
+ {
+ "bucket is empty",
+ COSConfig{
+ BucketNames: "",
+ Endpoint: "",
+ Region: "dummy",
+ AccessKeyID: "dummy",
+ SecretAccessKey: flagext.SecretWithValue("dummy"),
+ },
+ errEmptyBucket,
+ },
+ {
+ "Access key ID and Secret Access key and APIKey is empty",
+ COSConfig{
+ BucketNames: "test",
+ Endpoint: "test",
+ Region: "dummy",
+ ServiceInstanceID: "dummy",
+ AuthEndpoint: "dummy",
+ },
+ errors.Wrap(errInvalidCredentials, errCOSConfig),
+ },
+ {
+ "Service Instance ID is empty",
+ COSConfig{
+ BucketNames: "test",
+ Endpoint: "test",
+ Region: "dummy",
+ AccessKeyID: "dummy",
+ SecretAccessKey: flagext.SecretWithValue("dummy"),
+ APIKey: flagext.SecretWithValue("dummy"),
+ ServiceInstanceID: "",
+ AuthEndpoint: "dummy",
+ },
+ errors.Wrap(errServiceInstanceID, errCOSConfig),
+ },
+ {
+ "valid config",
+ COSConfig{
+ BucketNames: "test",
+ Endpoint: "test",
+ Region: "dummy",
+ AccessKeyID: "dummy",
+ SecretAccessKey: flagext.SecretWithValue("dummy"),
+ },
+ nil,
+ },
+ {
+ "valid config with APIKey",
+ COSConfig{
+ BucketNames: "test",
+ Endpoint: "test",
+ Region: "dummy",
+ APIKey: flagext.SecretWithValue("dummy"),
+ ServiceInstanceID: "dummy",
+ AuthEndpoint: "dummy",
+ },
+ nil,
+ },
+ }
+ for _, tt := range tests {
+ cosClient, err := NewCOSObjectClient(tt.cosConfig, hedging.Config{})
+ if tt.expectedError != nil {
+ require.Equal(t, tt.expectedError.Error(), err.Error())
+ continue
+ }
+ require.NotNil(t, cosClient.cos)
+ require.NotNil(t, cosClient.hedgedCOS)
+ require.Equal(t, []string{tt.cosConfig.BucketNames}, cosClient.bucketNames)
+ }
+}
+
+func Test_GetObject(t *testing.T) {
+ tests := []struct {
+ key string
+ wantBytes []byte
+ wantErr error
+ }{
+ {
+ "key-1",
+ []byte("test data 1"),
+ nil,
+ },
+ {
+ "key-0",
+ nil,
+ errors.Wrap(errMissingKey, "failed to get cos object"),
+ },
+ }
+
+ for _, tt := range tests {
+ cosConfig := COSConfig{
+ BucketNames: bucket,
+ Endpoint: "test",
+ Region: "dummy",
+ AccessKeyID: "dummy",
+ SecretAccessKey: flagext.SecretWithValue("dummy"),
+ BackoffConfig: backoff.Config{
+ MaxRetries: 1,
+ },
+ }
+
+ cosClient, err := NewCOSObjectClient(cosConfig, hedging.Config{})
+ require.NoError(t, err)
+
+ cosClient.hedgedCOS = newMockCosClient(testData)
+
+ reader, _, err := cosClient.GetObject(context.Background(), tt.key)
+ if tt.wantErr != nil {
+ require.Equal(t, tt.wantErr.Error(), err.Error())
+ continue
+ }
+ require.NoError(t, err)
+
+ data, err := io.ReadAll(reader)
+ require.NoError(t, err)
+ require.Equal(t, tt.wantBytes, data)
+ }
+}
+
+func Test_PutObject(t *testing.T) {
+ tests := []struct {
+ key string
+ Body []byte
+ wantBytes []byte
+ wantErr error
+ }{
+ {
+ "key-5",
+ []byte("test data 5"),
+ []byte("test data 5"),
+ nil,
+ },
+ }
+
+ for _, tt := range tests {
+ cosConfig := COSConfig{
+ BucketNames: bucket,
+ Endpoint: "test",
+ Region: "dummy",
+ AccessKeyID: "dummy",
+ SecretAccessKey: flagext.SecretWithValue("dummy"),
+ BackoffConfig: backoff.Config{
+ MaxRetries: 1,
+ },
+ }
+
+ cosClient, err := NewCOSObjectClient(cosConfig, hedging.Config{})
+ require.NoError(t, err)
+
+ cosClient.cos = newMockCosClient(testData)
+
+ body := bytes.NewReader(tt.Body)
+
+ err = cosClient.PutObject(context.Background(), tt.key, body)
+ if tt.wantErr != nil {
+ require.Equal(t, tt.wantErr.Error(), err.Error())
+ continue
+ }
+ require.NoError(t, err)
+
+ cosClient.hedgedCOS = newMockCosClient(testData)
+
+ reader, _, err := cosClient.GetObject(context.Background(), tt.key)
+ if tt.wantErr != nil {
+ require.Equal(t, tt.wantErr.Error(), err.Error())
+ continue
+ }
+ require.NoError(t, err)
+
+ data, err := io.ReadAll(reader)
+ require.NoError(t, err)
+ require.Equal(t, tt.Body, data)
+ }
+}
+
+func Test_DeleteObject(t *testing.T) {
+ tests := []struct {
+ key string
+ wantErr error
+ wantGetErr error
+ }{
+ {
+ "key-1",
+ errMissingObject,
+ errors.Wrap(errMissingKey, "failed to get cos object"),
+ },
+ }
+
+ for _, tt := range tests {
+ cosConfig := COSConfig{
+ BucketNames: bucket,
+ Endpoint: "test",
+ Region: "dummy",
+ AccessKeyID: "dummy",
+ SecretAccessKey: flagext.SecretWithValue("dummy"),
+ BackoffConfig: backoff.Config{
+ MaxRetries: 1,
+ },
+ }
+ cosClient, err := NewCOSObjectClient(cosConfig, hedging.Config{})
+ require.NoError(t, err)
+
+ cosClient.cos = newMockCosClient(testDeleteData)
+
+ err = cosClient.DeleteObject(context.Background(), tt.key)
+ require.NoError(t, err)
+
+ cosClient.hedgedCOS = newMockCosClient(testDeleteData)
+
+		// call GetObject() to confirm the deleted object no longer exists in the bucket
+ reader, _, err := cosClient.GetObject(context.Background(), tt.key)
+ if tt.wantGetErr != nil {
+ require.Equal(t, tt.wantGetErr.Error(), err.Error())
+ continue
+ }
+ require.Nil(t, reader)
+
+ err = cosClient.DeleteObject(context.Background(), tt.key)
+ if tt.wantErr != nil {
+ require.Equal(t, tt.wantErr.Error(), err.Error())
+ continue
+ }
+ }
+}
+
+func Test_List(t *testing.T) {
+ tests := []struct {
+ prefix string
+ delimiter string
+ storageObj []client.StorageObject
+ wantErr error
+ }{
+ {
+ "key",
+ "-",
+ []client.StorageObject{{Key: "key-1", ModifiedAt: timestamp}, {Key: "key-2", ModifiedAt: timestamp}, {Key: "key-3", ModifiedAt: timestamp}},
+ nil,
+ },
+ {
+ "test",
+ "/",
+ nil,
+ nil,
+ },
+ }
+
+ for _, tt := range tests {
+ cosConfig := COSConfig{
+ BucketNames: bucket,
+ Endpoint: "test",
+ Region: "dummy",
+ AccessKeyID: "dummy",
+ SecretAccessKey: flagext.SecretWithValue("dummy"),
+ BackoffConfig: backoff.Config{
+ MaxRetries: 1,
+ },
+ }
+
+ cosClient, err := NewCOSObjectClient(cosConfig, hedging.Config{})
+ require.NoError(t, err)
+
+ cosClient.cos = newMockCosClient(testListData)
+
+ storageObj, _, err := cosClient.List(context.Background(), tt.prefix, tt.delimiter)
+ if tt.wantErr != nil {
+ require.Equal(t, tt.wantErr.Error(), err.Error())
+ continue
+ }
+ require.NoError(t, err)
+
+ sort.Slice(storageObj, func(i, j int) bool {
+ return storageObj[i].Key < storageObj[j].Key
+ })
+
+ require.Equal(t, tt.storageObj, storageObj)
+ }
+}
+
+func Test_APIKeyAuth(t *testing.T) {
+ testToken := "test"
+ tokenType := "Bearer"
+ resp := "testGet"
+
+ cosSvr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Header.Get("Authorization") != fmt.Sprintf("%s %s", tokenType, testToken) {
+ w.WriteHeader(http.StatusUnauthorized)
+ return
+ }
+ w.WriteHeader(http.StatusOK)
+ fmt.Fprintln(w, resp)
+ }))
+ defer cosSvr.Close()
+
+ authServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ token := token.Token{
+ AccessToken: testToken,
+ RefreshToken: "test",
+ TokenType: tokenType,
+ ExpiresIn: int64((time.Hour * 24).Seconds()),
+ Expiration: time.Now().Add(time.Hour * 24).Unix(),
+ }
+
+ data, err := json.Marshal(token)
+ require.NoError(t, err)
+
+ w.WriteHeader(http.StatusAccepted)
+ _, err = w.Write(data)
+ require.NoError(t, err)
+ }))
+
+ defer authServer.Close()
+
+ cosConfig := COSConfig{
+ BucketNames: "dummy",
+ Endpoint: cosSvr.URL,
+ Region: "dummy",
+ APIKey: flagext.SecretWithValue("dummy"),
+ ServiceInstanceID: "test",
+ ForcePathStyle: true,
+ AuthEndpoint: authServer.URL,
+ BackoffConfig: backoff.Config{
+ MaxRetries: 1,
+ },
+ }
+
+ cosClient, err := NewCOSObjectClient(cosConfig, hedging.Config{})
+ require.NoError(t, err)
+
+ reader, _, err := cosClient.GetObject(context.Background(), "key-1")
+ require.NoError(t, err)
+
+ data, err := io.ReadAll(reader)
+ require.NoError(t, err)
+ require.Equal(t, resp, strings.Trim(string(data), "\n"))
+}
diff --git a/pkg/storage/config/schema_config.go b/pkg/storage/config/schema_config.go
index 1c49983ac6005..ff20ebb72ea03 100644
--- a/pkg/storage/config/schema_config.go
+++ b/pkg/storage/config/schema_config.go
@@ -40,6 +40,7 @@ const (
StorageTypeLocal = "local"
StorageTypeS3 = "s3"
StorageTypeSwift = "swift"
+ StorageTypeCOS = "cos"
// BoltDBShipperType holds the index type for using boltdb with shipper which keeps flushing them to a shared storage
BoltDBShipperType = "boltdb-shipper"
TSDBType = "tsdb"
diff --git a/pkg/storage/factory.go b/pkg/storage/factory.go
index bfdfba313dced..d56d97b94caf4 100644
--- a/pkg/storage/factory.go
+++ b/pkg/storage/factory.go
@@ -21,6 +21,7 @@ import (
"github.com/grafana/loki/pkg/storage/chunk/client/gcp"
"github.com/grafana/loki/pkg/storage/chunk/client/grpc"
"github.com/grafana/loki/pkg/storage/chunk/client/hedging"
+ "github.com/grafana/loki/pkg/storage/chunk/client/ibmcloud"
"github.com/grafana/loki/pkg/storage/chunk/client/local"
"github.com/grafana/loki/pkg/storage/chunk/client/openstack"
"github.com/grafana/loki/pkg/storage/chunk/client/testutils"
@@ -67,6 +68,7 @@ type NamedStores struct {
GCS map[string]gcp.GCSConfig `yaml:"gcs"`
AlibabaCloud map[string]alibaba.OssConfig `yaml:"alibabacloud"`
Swift map[string]openstack.SwiftConfig `yaml:"swift"`
+ COS map[string]ibmcloud.COSConfig `yaml:"cos"`
// contains mapping from named store reference name to store type
storeType map[string]string `yaml:"-"`
@@ -178,15 +180,15 @@ type Config struct {
GrpcConfig grpc.Config `yaml:"grpc_store"`
Hedging hedging.Config `yaml:"hedging"`
NamedStores NamedStores `yaml:"named_stores"`
-
- IndexCacheValidity time.Duration `yaml:"index_cache_validity"`
+ COSConfig ibmcloud.COSConfig `yaml:"cos"`
+ IndexCacheValidity time.Duration `yaml:"index_cache_validity"`
IndexQueriesCacheConfig cache.Config `yaml:"index_queries_cache_config"`
DisableBroadIndexQueries bool `yaml:"disable_broad_index_queries"`
MaxParallelGetChunk int `yaml:"max_parallel_get_chunk"`
MaxChunkBatchSize int `yaml:"max_chunk_batch_size"`
- BoltDBShipperConfig shipper.Config `yaml:"boltdb_shipper" doc:"description=Configures storing index in an Object Store (GCS/S3/Azure/Swift/Filesystem) in the form of boltdb files. Required fields only required when boltdb-shipper is defined in config."`
+ BoltDBShipperConfig shipper.Config `yaml:"boltdb_shipper" doc:"description=Configures storing index in an Object Store (GCS/S3/Azure/Swift/COS/Filesystem) in the form of boltdb files. Required fields only required when boltdb-shipper is defined in config."`
TSDBShipperConfig indexshipper.Config `yaml:"tsdb_shipper"`
// Config for using AsyncStore when using async index stores like `boltdb-shipper`.
@@ -200,6 +202,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
cfg.AWSStorageConfig.RegisterFlags(f)
cfg.AzureStorageConfig.RegisterFlags(f)
cfg.BOSStorageConfig.RegisterFlags(f)
+ cfg.COSConfig.RegisterFlags(f)
cfg.GCPStorageConfig.RegisterFlags(f)
cfg.GCSConfig.RegisterFlags(f)
cfg.CassandraStorageConfig.RegisterFlags(f)
@@ -380,8 +383,14 @@ func NewChunkClient(name string, cfg Config, schemaCfg config.SchemaConfig, clie
return client.NewClientWithMaxParallel(c, client.FSEncoder, cfg.MaxParallelGetChunk, schemaCfg), nil
case config.StorageTypeGrpc:
return grpc.NewStorageClient(cfg.GrpcConfig, schemaCfg)
+ case config.StorageTypeCOS:
+ c, err := NewObjectClient(name, cfg, clientMetrics)
+ if err != nil {
+ return nil, err
+ }
+ return client.NewClientWithMaxParallel(c, nil, cfg.MaxParallelGetChunk, schemaCfg), nil
default:
- return nil, fmt.Errorf("Unrecognized storage client %v, choose one of: %v, %v, %v, %v, %v, %v, %v, %v", name, config.StorageTypeAWS, config.StorageTypeAzure, config.StorageTypeCassandra, config.StorageTypeInMemory, config.StorageTypeGCP, config.StorageTypeBigTable, config.StorageTypeBigTableHashed, config.StorageTypeGrpc)
+ return nil, fmt.Errorf("Unrecognized storage client %v, choose one of: %v, %v, %v, %v, %v, %v, %v, %v, %v", name, config.StorageTypeAWS, config.StorageTypeAzure, config.StorageTypeCassandra, config.StorageTypeInMemory, config.StorageTypeGCP, config.StorageTypeBigTable, config.StorageTypeBigTableHashed, config.StorageTypeGrpc, config.StorageTypeCOS)
}
}
@@ -548,7 +557,18 @@ func NewObjectClient(name string, cfg Config, clientMetrics ClientMetrics) (clie
}
return baidubce.NewBOSObjectStorage(&bosCfg)
+
+ case config.StorageTypeCOS:
+ cosCfg := cfg.COSConfig
+ if namedStore != "" {
+ var ok bool
+ cosCfg, ok = cfg.NamedStores.COS[namedStore]
+ if !ok {
+ return nil, fmt.Errorf("Unrecognized named cos storage config %s", name)
+ }
+ }
+ return ibmcloud.NewCOSObjectClient(cosCfg, cfg.Hedging)
default:
- return nil, fmt.Errorf("Unrecognized storage client %v, choose one of: %v, %v, %v, %v, %v", name, config.StorageTypeAWS, config.StorageTypeS3, config.StorageTypeGCS, config.StorageTypeAzure, config.StorageTypeFileSystem)
+ return nil, fmt.Errorf("Unrecognized storage client %v, choose one of: %v, %v, %v, %v, %v, %v", name, config.StorageTypeAWS, config.StorageTypeS3, config.StorageTypeGCS, config.StorageTypeAzure, config.StorageTypeFileSystem, config.StorageTypeCOS)
}
}
diff --git a/pkg/storage/stores/indexshipper/compactor/compactor.go b/pkg/storage/stores/indexshipper/compactor/compactor.go
index d6f40c2c3c1a3..00dbd243e1c22 100644
--- a/pkg/storage/stores/indexshipper/compactor/compactor.go
+++ b/pkg/storage/stores/indexshipper/compactor/compactor.go
@@ -96,7 +96,7 @@ type Config struct {
// RegisterFlags registers flags.
func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.StringVar(&cfg.WorkingDirectory, "boltdb.shipper.compactor.working-directory", "", "Directory where files can be downloaded for compaction.")
- f.StringVar(&cfg.SharedStoreType, "boltdb.shipper.compactor.shared-store", "", "The shared store used for storing boltdb files. Supported types: gcs, s3, azure, swift, filesystem, bos.")
+ f.StringVar(&cfg.SharedStoreType, "boltdb.shipper.compactor.shared-store", "", "The shared store used for storing boltdb files. Supported types: gcs, s3, azure, swift, filesystem, bos, cos.")
f.StringVar(&cfg.SharedStoreKeyPrefix, "boltdb.shipper.compactor.shared-store.key-prefix", "index/", "Prefix to add to object keys in shared store. Path separator(if any) should always be a '/'. Prefix should never start with a separator but should always end with it.")
f.DurationVar(&cfg.CompactionInterval, "boltdb.shipper.compactor.compaction-interval", 10*time.Minute, "Interval at which to re-run the compaction operation.")
f.DurationVar(&cfg.ApplyRetentionInterval, "boltdb.shipper.compactor.apply-retention-interval", 0, "Interval at which to apply/enforce retention. 0 means run at same interval as compaction. If non-zero, it should always be a multiple of compaction interval.")
diff --git a/pkg/storage/stores/indexshipper/shipper.go b/pkg/storage/stores/indexshipper/shipper.go
index 14d170659a992..f45ccc45f1dc7 100644
--- a/pkg/storage/stores/indexshipper/shipper.go
+++ b/pkg/storage/stores/indexshipper/shipper.go
@@ -79,7 +79,7 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
cfg.IndexGatewayClientConfig.RegisterFlagsWithPrefix(prefix+"shipper.index-gateway-client", f)
f.StringVar(&cfg.ActiveIndexDirectory, prefix+"shipper.active-index-directory", "", "Directory where ingesters would write index files which would then be uploaded by shipper to configured storage")
- f.StringVar(&cfg.SharedStoreType, prefix+"shipper.shared-store", "", "Shared store for keeping index files. Supported types: gcs, s3, azure, filesystem")
+ f.StringVar(&cfg.SharedStoreType, prefix+"shipper.shared-store", "", "Shared store for keeping index files. Supported types: gcs, s3, azure, cos, filesystem")
f.StringVar(&cfg.SharedStoreKeyPrefix, prefix+"shipper.shared-store.key-prefix", "index/", "Prefix to add to Object Keys in Shared store. Path separator(if any) should always be a '/'. Prefix should never start with a separator but should always end with it")
f.StringVar(&cfg.CacheLocation, prefix+"shipper.cache-location", "", "Cache location for restoring index files from storage for queries")
f.DurationVar(&cfg.CacheTTL, prefix+"shipper.cache-ttl", 24*time.Hour, "TTL for index files restored in cache for queries")
diff --git a/tools/doc-generator/parse/root_blocks.go b/tools/doc-generator/parse/root_blocks.go
index 1c3e2b269ef01..7aa636b30cab7 100644
--- a/tools/doc-generator/parse/root_blocks.go
+++ b/tools/doc-generator/parse/root_blocks.go
@@ -29,6 +29,7 @@ import (
"github.com/grafana/loki/pkg/storage/chunk/client/azure"
"github.com/grafana/loki/pkg/storage/chunk/client/baidubce"
"github.com/grafana/loki/pkg/storage/chunk/client/gcp"
+ "github.com/grafana/loki/pkg/storage/chunk/client/ibmcloud"
"github.com/grafana/loki/pkg/storage/chunk/client/local"
"github.com/grafana/loki/pkg/storage/chunk/client/openstack"
storage_config "github.com/grafana/loki/pkg/storage/config"
@@ -226,6 +227,11 @@ var (
StructType: reflect.TypeOf(openstack.SwiftConfig{}),
Desc: "The swift_storage_config block configures the connection to OpenStack Object Storage (Swift) object storage backend.",
},
+ {
+ Name: "cos_storage_config",
+ StructType: reflect.TypeOf(ibmcloud.COSConfig{}),
+ Desc: "The cos_storage_config block configures the connection to IBM Cloud Object Storage (COS) backend.",
+ },
{
Name: "local_storage_config",
StructType: reflect.TypeOf(local.FSConfig{}),
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/LICENSE.txt b/vendor/github.com/IBM/ibm-cos-sdk-go/LICENSE.txt
new file mode 100644
index 0000000000000..d645695673349
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/NOTICE.txt b/vendor/github.com/IBM/ibm-cos-sdk-go/NOTICE.txt
new file mode 100644
index 0000000000000..899129ecc465b
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/NOTICE.txt
@@ -0,0 +1,3 @@
+AWS SDK for Go
+Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+Copyright 2014-2015 Stripe, Inc.
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/arn/arn.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/arn/arn.go
new file mode 100644
index 0000000000000..1c4967429032d
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/arn/arn.go
@@ -0,0 +1,93 @@
+// Package arn provides a parser for interacting with Amazon Resource Names.
+package arn
+
+import (
+ "errors"
+ "strings"
+)
+
+const (
+ arnDelimiter = ":"
+ arnSections = 6
+ arnPrefix = "arn:"
+
+ // zero-indexed
+ sectionPartition = 1
+ sectionService = 2
+ sectionRegion = 3
+ sectionAccountID = 4
+ sectionResource = 5
+
+ // errors
+ invalidPrefix = "arn: invalid prefix"
+ invalidSections = "arn: not enough sections"
+)
+
+// ARN captures the individual fields of an Amazon Resource Name.
+// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information.
+type ARN struct {
+ // The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in
+ // other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China
+ // (Beijing) region is "aws-cn".
+ Partition string
+
+ // The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of
+ // namespaces, see
+ // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces.
+ Service string
+
+ // The region the resource resides in. Note that the ARNs for some resources do not require a region, so this
+ // component might be omitted.
+ Region string
+
+ // The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the
+ // ARNs for some resources don't require an account number, so this component might be omitted.
+ AccountID string
+
+ // The content of this part of the ARN varies by service. It often includes an indicator of the type of resource —
+ // for example, an IAM user or Amazon RDS database - followed by a slash (/) or a colon (:), followed by the
+	// resource name itself. Some services allow paths for resource names, as described in
+ // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths.
+ Resource string
+}
+
+// Parse parses an ARN into its constituent parts.
+//
+// Some example ARNs:
+// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment
+// arn:aws:iam::123456789012:user/David
+// arn:aws:rds:eu-west-1:123456789012:db:mysql-db
+// arn:aws:s3:::my_corporate_bucket/exampleobject.png
+func Parse(arn string) (ARN, error) {
+ if !strings.HasPrefix(arn, arnPrefix) {
+ return ARN{}, errors.New(invalidPrefix)
+ }
+ sections := strings.SplitN(arn, arnDelimiter, arnSections)
+ if len(sections) != arnSections {
+ return ARN{}, errors.New(invalidSections)
+ }
+ return ARN{
+ Partition: sections[sectionPartition],
+ Service: sections[sectionService],
+ Region: sections[sectionRegion],
+ AccountID: sections[sectionAccountID],
+ Resource: sections[sectionResource],
+ }, nil
+}
+
+// IsARN returns whether the given string is an ARN by looking for
+// whether the string starts with "arn:" and contains the correct number
+// of sections delimited by colons(:).
+func IsARN(arn string) bool {
+ return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1
+}
+
+// String returns the canonical representation of the ARN
+func (arn ARN) String() string {
+ return arnPrefix +
+ arn.Partition + arnDelimiter +
+ arn.Service + arnDelimiter +
+ arn.Region + arnDelimiter +
+ arn.AccountID + arnDelimiter +
+ arn.Resource
+}
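+
+// Illustrative usage sketch (not part of the upstream source); it shows one
+// way the helpers above fit together, and the ARN value is made up:
+//
+//    input := "arn:aws:s3:::my_corporate_bucket/exampleobject.png"
+//    if arn.IsARN(input) {
+//        parsed, err := arn.Parse(input)
+//        if err != nil {
+//            // handle malformed ARN
+//        }
+//        // parsed.Service == "s3"; parsed.String() round-trips to input.
+//        _ = parsed
+//    }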
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awserr/error.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awserr/error.go
new file mode 100644
index 0000000000000..99849c0e19c00
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awserr/error.go
@@ -0,0 +1,164 @@
+// Package awserr represents API error interface accessors for the SDK.
+package awserr
+
+// An Error wraps lower level errors with code, message and an original error.
+// The underlying concrete error type may also satisfy other interfaces which
+// can be used to obtain more specific information about the error.
+//
+// Calling Error() or String() will always include the full information about
+// an error based on its underlying type.
+//
+// Example:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if awsErr, ok := err.(awserr.Error); ok {
+// // Get error details
+// log.Println("Error:", awsErr.Code(), awsErr.Message())
+//
+// // Prints out full error message, including original error if there was one.
+// log.Println("Error:", awsErr.Error())
+//
+// // Get original error
+// if origErr := awsErr.OrigErr(); origErr != nil {
+// // operate on original error.
+// }
+// } else {
+// fmt.Println(err.Error())
+// }
+// }
+//
+type Error interface {
+ // Satisfy the generic error interface.
+ error
+
+ // Returns the short phrase depicting the classification of the error.
+ Code() string
+
+ // Returns the error details message.
+ Message() string
+
+ // Returns the original error if one was set. Nil is returned if not set.
+ OrigErr() error
+}
+
+// BatchError is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Deprecated: Replaced with BatchedErrors. Only defined for backwards
+// compatibility.
+type BatchError interface {
+ // Satisfy the generic error interface.
+ error
+
+ // Returns the short phrase depicting the classification of the error.
+ Code() string
+
+ // Returns the error details message.
+ Message() string
+
+ // Returns the original error if one was set. Nil is returned if not set.
+ OrigErrs() []error
+}
+
+// BatchedErrors is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Replaces BatchError
+type BatchedErrors interface {
+ // Satisfy the base Error interface.
+ Error
+
+ // Returns the original error if one was set. Nil is returned if not set.
+ OrigErrs() []error
+}
+
+// New returns an Error object described by the code, message, and origErr.
+//
+// If origErr is non-nil it is recorded as the wrapped original error, and can
+// be retrieved from the returned value via OrigErr.
+func New(code, message string, origErr error) Error {
+ var errs []error
+ if origErr != nil {
+ errs = append(errs, origErr)
+ }
+ return newBaseError(code, message, errs)
+}
+
+// NewBatchError returns an BatchedErrors with a collection of errors as an
+// array of errors.
+func NewBatchError(code, message string, errs []error) BatchedErrors {
+ return newBaseError(code, message, errs)
+}
+
+// A RequestFailure is an interface to extract request failure information from
+// an Error such as the request ID of the failed request returned by a service.
+// RequestFailures may not always have a requestID value if the request failed
+// prior to reaching the service such as a connection error.
+//
+// Example:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if reqerr, ok := err.(RequestFailure); ok {
+// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
+// } else {
+// log.Println("Error:", err.Error())
+// }
+// }
+//
+// Combined with awserr.Error:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if awsErr, ok := err.(awserr.Error); ok {
+// // Generic AWS Error with Code, Message, and original error (if any)
+// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+//
+// if reqErr, ok := err.(awserr.RequestFailure); ok {
+// // A service error occurred
+// fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
+// }
+// } else {
+// fmt.Println(err.Error())
+// }
+// }
+//
+type RequestFailure interface {
+ Error
+
+ // The status code of the HTTP response.
+ StatusCode() int
+
+ // The request ID returned by the service for a request failure. This will
+	// be empty if no request ID is available, such as when the request failed
+	// due to a connection error.
+ RequestID() string
+}
+
+// NewRequestFailure returns a wrapped error with additional information for
+// request status code, and service requestID.
+//
+// Should be used to wrap all requests which involve service requests, even
+// if the request failed without a service response but had an HTTP status
+// code that may be meaningful.
+func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
+ return newRequestError(err, statusCode, reqID)
+}
+
+// UnmarshalError provides the interface for the SDK failing to unmarshal data.
+type UnmarshalError interface {
+ awsError
+ Bytes() []byte
+}
+
+// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding
+// the bytes that fail to unmarshal to the error.
+func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError {
+ return &unmarshalError{
+ awsError: New("UnmarshalError", msg, err),
+ bytes: bytes,
+ }
+}
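+
+// Illustrative sketch (not part of the upstream source) of composing the
+// constructors above; the code, message, and variables are placeholders:
+//
+//    base := awserr.New("AccessDenied", "no credentials found", origErr)
+//    failure := awserr.NewRequestFailure(base, 403, "REQ-1234")
+//    fmt.Println(failure.Code(), failure.StatusCode(), failure.RequestID())
+//
+//    // later, a caller can recover the detail via a type assertion:
+//    // if rf, ok := err.(awserr.RequestFailure); ok { ... }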
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awserr/types.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awserr/types.go
new file mode 100644
index 0000000000000..9cf7eaf4007f5
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awserr/types.go
@@ -0,0 +1,221 @@
+package awserr
+
+import (
+ "encoding/hex"
+ "fmt"
+)
+
+// SprintError returns a string of the formatted error code.
+//
+// Both extra and origErr are optional. If they are included, their lines
+// are added to the message; otherwise they are omitted.
+func SprintError(code, message, extra string, origErr error) string {
+ msg := fmt.Sprintf("%s: %s", code, message)
+ if extra != "" {
+ msg = fmt.Sprintf("%s\n\t%s", msg, extra)
+ }
+ if origErr != nil {
+ msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
+ }
+ return msg
+}
+
+// A baseError wraps the code and message which defines an error. It also
+// can be used to wrap an original error object.
+//
+// Should be used as the root for errors satisfying the awserr.Error. Also
+// for any error which does not fit into a specific error wrapper type.
+type baseError struct {
+ // Classification of error
+ code string
+
+ // Detailed information about error
+ message string
+
+ // Optional original error this error is based off of. Allows building
+ // chained errors.
+ errs []error
+}
+
+// newBaseError returns an error object for the code, message, and errors.
+//
+// code is a short, whitespace-free phrase depicting the classification of
+// the error that is being created.
+//
+// message is the free flow string containing detailed information about the
+// error.
+//
+// origErrs is the list of error objects which will be nested under the new
+// error to be returned.
+func newBaseError(code, message string, origErrs []error) *baseError {
+ b := &baseError{
+ code: code,
+ message: message,
+ errs: origErrs,
+ }
+
+ return b
+}
+
+// Error returns the string representation of the error.
+//
+// See ErrorWithExtra for formatting.
+//
+// Satisfies the error interface.
+func (b baseError) Error() string {
+ size := len(b.errs)
+ if size > 0 {
+ return SprintError(b.code, b.message, "", errorList(b.errs))
+ }
+
+ return SprintError(b.code, b.message, "", nil)
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (b baseError) String() string {
+ return b.Error()
+}
+
+// Code returns the short phrase depicting the classification of the error.
+func (b baseError) Code() string {
+ return b.code
+}
+
+// Message returns the error details message.
+func (b baseError) Message() string {
+ return b.message
+}
+
+// OrigErr returns the original error if one was set. Nil is returned if no
+// error was set. This only returns the first element in the list. If the full
+// list is needed, use BatchedErrors.
+func (b baseError) OrigErr() error {
+ switch len(b.errs) {
+ case 0:
+ return nil
+ case 1:
+ return b.errs[0]
+ default:
+ if err, ok := b.errs[0].(Error); ok {
+ return NewBatchError(err.Code(), err.Message(), b.errs[1:])
+ }
+ return NewBatchError("BatchedErrors",
+ "multiple errors occurred", b.errs)
+ }
+}
+
+// OrigErrs returns the original errors if one was set. An empty slice is
+// returned if no error was set.
+func (b baseError) OrigErrs() []error {
+ return b.errs
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError Error
+
+// A requestError wraps a request or service error.
+//
+// Composed of baseError for code, message, and original error.
+type requestError struct {
+ awsError
+ statusCode int
+ requestID string
+ bytes []byte
+}
+
+// newRequestError returns a wrapped error with additional information for
+// request status code, and service requestID.
+//
+// Should be used to wrap all requests which involve service requests, even
+// if the request failed without a service response but had an HTTP status
+// code that may be meaningful.
+//
+// Also wraps original errors via the baseError.
+func newRequestError(err Error, statusCode int, requestID string) *requestError {
+ return &requestError{
+ awsError: err,
+ statusCode: statusCode,
+ requestID: requestID,
+ }
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (r requestError) Error() string {
+ extra := fmt.Sprintf("status code: %d, request id: %s",
+ r.statusCode, r.requestID)
+ return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (r requestError) String() string {
+ return r.Error()
+}
+
+// StatusCode returns the wrapped status code for the error
+func (r requestError) StatusCode() int {
+ return r.statusCode
+}
+
+// RequestID returns the wrapped requestID
+func (r requestError) RequestID() string {
+ return r.requestID
+}
+
+// OrigErrs returns the original errors if one was set. An empty slice is
+// returned if no error was set.
+func (r requestError) OrigErrs() []error {
+ if b, ok := r.awsError.(BatchedErrors); ok {
+ return b.OrigErrs()
+ }
+ return []error{r.OrigErr()}
+}
+
+type unmarshalError struct {
+ awsError
+ bytes []byte
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (e unmarshalError) Error() string {
+ extra := hex.Dump(e.bytes)
+ return SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (e unmarshalError) String() string {
+ return e.Error()
+}
+
+// Bytes returns the bytes that failed to unmarshal.
+func (e unmarshalError) Bytes() []byte {
+ return e.bytes
+}
+
+// An error list that satisfies the golang interface
+type errorList []error
+
+// Error returns the string representation of the error.
+//
+// Satisfies the error interface.
+func (e errorList) Error() string {
+ msg := ""
+ // How do we want to handle the array size being zero
+ if size := len(e); size > 0 {
+ for i := 0; i < size; i++ {
+ msg += e[i].Error()
+ // We check the next index to see if it is within the slice.
+ // If it is, then we append a newline. We do this, because unit tests
+ // could be broken with the additional '\n'
+ if i+1 < size {
+ msg += "\n"
+ }
+ }
+ }
+ return msg
+}
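+
+// Illustrative sketch (not part of the upstream source); err1 and err2 are
+// placeholder errors. Error() prints every wrapped error, newline separated:
+//
+//    batch := awserr.NewBatchError("BatchedErrors", "two failures", []error{err1, err2})
+//    fmt.Println(batch.Error())
+//    fmt.Println(len(batch.OrigErrs())) // 2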
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/copy.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/copy.go
new file mode 100644
index 0000000000000..1a3d106d5c1bb
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/copy.go
@@ -0,0 +1,108 @@
+package awsutil
+
+import (
+ "io"
+ "reflect"
+ "time"
+)
+
+// Copy deeply copies a src structure to dst. Useful for copying request and
+// response structures.
+//
+// Can copy between structs of different type, but will only copy fields which
+// are assignable, and exist in both structs. Fields which are not assignable,
+// or do not exist in both structs are ignored.
+func Copy(dst, src interface{}) {
+ dstval := reflect.ValueOf(dst)
+ if !dstval.IsValid() {
+ panic("Copy dst cannot be nil")
+ }
+
+ rcopy(dstval, reflect.ValueOf(src), true)
+}
+
+// CopyOf returns a copy of src while also allocating the memory for dst.
+// src must be a pointer type or this operation will fail.
+func CopyOf(src interface{}) (dst interface{}) {
+ dsti := reflect.New(reflect.TypeOf(src).Elem())
+ dst = dsti.Interface()
+ rcopy(dsti, reflect.ValueOf(src), true)
+ return
+}
+
+// rcopy performs a recursive copy of values from the source to destination.
+//
+// root is used to skip certain aspects of the copy which are not valid
+// for the root node of an object.
+func rcopy(dst, src reflect.Value, root bool) {
+ if !src.IsValid() {
+ return
+ }
+
+ switch src.Kind() {
+ case reflect.Ptr:
+ if _, ok := src.Interface().(io.Reader); ok {
+ if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
+ dst.Elem().Set(src)
+ } else if dst.CanSet() {
+ dst.Set(src)
+ }
+ } else {
+ e := src.Type().Elem()
+ if dst.CanSet() && !src.IsNil() {
+ if _, ok := src.Interface().(*time.Time); !ok {
+ dst.Set(reflect.New(e))
+ } else {
+ tempValue := reflect.New(e)
+ tempValue.Elem().Set(src.Elem())
+ // Sets time.Time's unexported values
+ dst.Set(tempValue)
+ }
+ }
+ if src.Elem().IsValid() {
+ // Keep the current root state since the depth hasn't changed
+ rcopy(dst.Elem(), src.Elem(), root)
+ }
+ }
+ case reflect.Struct:
+ t := dst.Type()
+ for i := 0; i < t.NumField(); i++ {
+ name := t.Field(i).Name
+ srcVal := src.FieldByName(name)
+ dstVal := dst.FieldByName(name)
+ if srcVal.IsValid() && dstVal.CanSet() {
+ rcopy(dstVal, srcVal, false)
+ }
+ }
+ case reflect.Slice:
+ if src.IsNil() {
+ break
+ }
+
+ s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
+ dst.Set(s)
+ for i := 0; i < src.Len(); i++ {
+ rcopy(dst.Index(i), src.Index(i), false)
+ }
+ case reflect.Map:
+ if src.IsNil() {
+ break
+ }
+
+ s := reflect.MakeMap(src.Type())
+ dst.Set(s)
+ for _, k := range src.MapKeys() {
+ v := src.MapIndex(k)
+ v2 := reflect.New(v.Type()).Elem()
+ rcopy(v2, v, false)
+ dst.SetMapIndex(k, v2)
+ }
+ default:
+ // Assign the value if possible. If its not assignable, the value would
+ // need to be converted and the impact of that may be unexpected, or is
+ // not compatible with the dst type.
+ if src.Type().AssignableTo(dst.Type()) {
+ dst.Set(src)
+ }
+ }
+}
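+
+// Illustrative sketch (not part of the upstream source); Foo is a
+// hypothetical type:
+//
+//    type Foo struct {
+//        Name *string
+//        Tags []string
+//    }
+//
+//    src := &Foo{Name: aws.String("a"), Tags: []string{"x"}}
+//    dst := awsutil.CopyOf(src).(*Foo)
+//    // dst.Name and dst.Tags are fresh allocations; mutating them does not
+//    // affect src.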
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/equal.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/equal.go
new file mode 100644
index 0000000000000..142a7a01c527d
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/equal.go
@@ -0,0 +1,27 @@
+package awsutil
+
+import (
+ "reflect"
+)
+
+// DeepEqual returns whether the two values are deeply equal, like reflect.DeepEqual.
+// In addition to this, this method will also dereference the input values if
+// possible so the DeepEqual performed will not fail if one parameter is a
+// pointer and the other is not.
+//
+// DeepEqual will not perform indirection of nested values of the input parameters.
+func DeepEqual(a, b interface{}) bool {
+ ra := reflect.Indirect(reflect.ValueOf(a))
+ rb := reflect.Indirect(reflect.ValueOf(b))
+
+ if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
+ // If the elements are both nil, and of the same type they are equal
+ // If they are of different types they are not equal
+ return reflect.TypeOf(a) == reflect.TypeOf(b)
+ } else if raValid != rbValid {
+ // Both values must be valid to be equal
+ return false
+ }
+
+ return reflect.DeepEqual(ra.Interface(), rb.Interface())
+}
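+
+// Illustrative sketch (not part of the upstream source). Unlike
+// reflect.DeepEqual, the top-level pointers are dereferenced first:
+//
+//    s := "hello"
+//    awsutil.DeepEqual(&s, "hello")  // true: &s is indirected to "hello"
+//    reflect.DeepEqual(&s, "hello")  // false: *string and string differ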
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/path_value.go
new file mode 100644
index 0000000000000..a4eb6a7f43aae
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/path_value.go
@@ -0,0 +1,221 @@
+package awsutil
+
+import (
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/jmespath/go-jmespath"
+)
+
+var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
+
+// rValuesAtPath returns a slice of values found in value v. The values
+// in v are explored recursively so all nested values are collected.
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
+ pathparts := strings.Split(path, "||")
+ if len(pathparts) > 1 {
+ for _, pathpart := range pathparts {
+ vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
+ if len(vals) > 0 {
+ return vals
+ }
+ }
+ return nil
+ }
+
+ values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
+ components := strings.Split(path, ".")
+ for len(values) > 0 && len(components) > 0 {
+ var index *int64
+ var indexStar bool
+ c := strings.TrimSpace(components[0])
+ if c == "" { // no actual component, illegal syntax
+ return nil
+ } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
+ // TODO normalize case for user
+ return nil // don't support unexported fields
+ }
+
+ // parse this component
+ if m := indexRe.FindStringSubmatch(c); m != nil {
+ c = m[1]
+ if m[2] == "" {
+ index = nil
+ indexStar = true
+ } else {
+ i, _ := strconv.ParseInt(m[2], 10, 32)
+ index = &i
+ indexStar = false
+ }
+ }
+
+ nextvals := []reflect.Value{}
+ for _, value := range values {
+ // pull component name out of struct member
+ if value.Kind() != reflect.Struct {
+ continue
+ }
+
+ if c == "*" { // pull all members
+ for i := 0; i < value.NumField(); i++ {
+ if f := reflect.Indirect(value.Field(i)); f.IsValid() {
+ nextvals = append(nextvals, f)
+ }
+ }
+ continue
+ }
+
+ value = value.FieldByNameFunc(func(name string) bool {
+ if c == name {
+ return true
+ } else if !caseSensitive && strings.EqualFold(name, c) {
+ return true
+ }
+ return false
+ })
+
+ if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
+ if !value.IsNil() {
+ value.Set(reflect.Zero(value.Type()))
+ }
+ return []reflect.Value{value}
+ }
+
+ if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
+ // TODO if the value is the terminus it should not be created
+ // if the value to be set to its position is nil.
+ value.Set(reflect.New(value.Type().Elem()))
+ value = value.Elem()
+ } else {
+ value = reflect.Indirect(value)
+ }
+
+ if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+ if !createPath && value.IsNil() {
+ value = reflect.ValueOf(nil)
+ }
+ }
+
+ if value.IsValid() {
+ nextvals = append(nextvals, value)
+ }
+ }
+ values = nextvals
+
+ if indexStar || index != nil {
+ nextvals = []reflect.Value{}
+ for _, valItem := range values {
+ value := reflect.Indirect(valItem)
+ if value.Kind() != reflect.Slice {
+ continue
+ }
+
+ if indexStar { // grab all indices
+ for i := 0; i < value.Len(); i++ {
+ idx := reflect.Indirect(value.Index(i))
+ if idx.IsValid() {
+ nextvals = append(nextvals, idx)
+ }
+ }
+ continue
+ }
+
+ // pull out index
+ i := int(*index)
+ if i >= value.Len() { // check out of bounds
+ if createPath {
+ // TODO resize slice
+ } else {
+ continue
+ }
+ } else if i < 0 { // support negative indexing
+ i = value.Len() + i
+ }
+ value = reflect.Indirect(value.Index(i))
+
+ if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+ if !createPath && value.IsNil() {
+ value = reflect.ValueOf(nil)
+ }
+ }
+
+ if value.IsValid() {
+ nextvals = append(nextvals, value)
+ }
+ }
+ values = nextvals
+ }
+
+ components = components[1:]
+ }
+ return values
+}
+
+// ValuesAtPath returns a list of values at the case insensitive lexical
+// path inside of a structure.
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
+ result, err := jmespath.Search(path, i)
+ if err != nil {
+ return nil, err
+ }
+
+ v := reflect.ValueOf(result)
+ if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
+ return nil, nil
+ }
+ if s, ok := result.([]interface{}); ok {
+ return s, err
+ }
+ if v.Kind() == reflect.Map && v.Len() == 0 {
+ return nil, nil
+ }
+ if v.Kind() == reflect.Slice {
+ out := make([]interface{}, v.Len())
+ for i := 0; i < v.Len(); i++ {
+ out[i] = v.Index(i).Interface()
+ }
+ return out, nil
+ }
+
+ return []interface{}{result}, nil
+}
+
+// SetValueAtPath sets a value at the case insensitive lexical path inside
+// of a structure.
+func SetValueAtPath(i interface{}, path string, v interface{}) {
+ rvals := rValuesAtPath(i, path, true, false, v == nil)
+ for _, rval := range rvals {
+ if rval.Kind() == reflect.Ptr && rval.IsNil() {
+ continue
+ }
+ setValue(rval, v)
+ }
+}
+
+func setValue(dstVal reflect.Value, src interface{}) {
+ if dstVal.Kind() == reflect.Ptr {
+ dstVal = reflect.Indirect(dstVal)
+ }
+ srcVal := reflect.ValueOf(src)
+
+ if !srcVal.IsValid() { // src is literal nil
+ if dstVal.CanAddr() {
+ // Convert to pointer so that pointer's value can be nil'ed
+ // dstVal = dstVal.Addr()
+ }
+ dstVal.Set(reflect.Zero(dstVal.Type()))
+
+ } else if srcVal.Kind() == reflect.Ptr {
+ if srcVal.IsNil() {
+ srcVal = reflect.Zero(dstVal.Type())
+ } else {
+ srcVal = reflect.ValueOf(src).Elem()
+ }
+ dstVal.Set(srcVal)
+ } else {
+ dstVal.Set(srcVal)
+ }
+
+}
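+
+// Illustrative sketch (not part of the upstream source); Bucket is a
+// hypothetical struct with a Name *string field:
+//
+//    b := &Bucket{}
+//    awsutil.SetValueAtPath(b, "Name", "my-bucket") // allocates b.Name and sets it
+//    vals, err := awsutil.ValuesAtPath(b, "Name")   // JMESPath lookup
+//    // vals[0] is the *string field; err reports an invalid path expression.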
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/prettify.go
new file mode 100644
index 0000000000000..11d4240d6143c
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/prettify.go
@@ -0,0 +1,123 @@
+package awsutil
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+)
+
+// Prettify returns the string representation of a value.
+func Prettify(i interface{}) string {
+ var buf bytes.Buffer
+ prettify(reflect.ValueOf(i), 0, &buf)
+ return buf.String()
+}
+
+// prettify will recursively walk value v to build a textual
+// representation of the value.
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ strtype := v.Type().String()
+ if strtype == "time.Time" {
+ fmt.Fprintf(buf, "%s", v.Interface())
+ break
+ } else if strings.HasPrefix(strtype, "io.") {
+ buf.WriteString("<buffer>")
+ break
+ }
+
+ buf.WriteString("{\n")
+
+ names := []string{}
+ for i := 0; i < v.Type().NumField(); i++ {
+ name := v.Type().Field(i).Name
+ f := v.Field(i)
+ if name[0:1] == strings.ToLower(name[0:1]) {
+ continue // ignore unexported fields
+ }
+ if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+ continue // ignore unset fields
+ }
+ names = append(names, name)
+ }
+
+ for i, n := range names {
+ val := v.FieldByName(n)
+ ft, ok := v.Type().FieldByName(n)
+ if !ok {
+ panic(fmt.Sprintf("expected to find field %v on type %v, but was not found", n, v.Type()))
+ }
+
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(n + ": ")
+
+ if tag := ft.Tag.Get("sensitive"); tag == "true" {
+ buf.WriteString("<sensitive>")
+ } else {
+ prettify(val, indent+2, buf)
+ }
+
+ if i < len(names)-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ case reflect.Slice:
+ strtype := v.Type().String()
+ if strtype == "[]uint8" {
+ fmt.Fprintf(buf, "<binary> len %d", v.Len())
+ break
+ }
+
+ nl, id, id2 := "", "", ""
+ if v.Len() > 3 {
+ nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+ }
+ buf.WriteString("[" + nl)
+ for i := 0; i < v.Len(); i++ {
+ buf.WriteString(id2)
+ prettify(v.Index(i), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString("," + nl)
+ }
+ }
+
+ buf.WriteString(nl + id + "]")
+ case reflect.Map:
+ buf.WriteString("{\n")
+
+ for i, k := range v.MapKeys() {
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(k.String() + ": ")
+ prettify(v.MapIndex(k), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ default:
+ if !v.IsValid() {
+ fmt.Fprint(buf, "<invalid value>")
+ return
+ }
+ format := "%v"
+ switch v.Interface().(type) {
+ case string:
+ format = "%q"
+ case io.ReadSeeker, io.Reader:
+ format = "buffer(%p)"
+ }
+ fmt.Fprintf(buf, format, v.Interface())
+ }
+}
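+
+// Illustrative sketch (not part of the upstream source); Cred is a
+// hypothetical type. Fields tagged `sensitive:"true"` are redacted:
+//
+//    type Cred struct {
+//        User   *string
+//        Secret *string `sensitive:"true"`
+//    }
+//
+//    fmt.Println(awsutil.Prettify(&Cred{User: aws.String("u"), Secret: aws.String("s")}))
+//    // {
+//    //   User: "u",
+//    //   Secret: <sensitive>
+//    // }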
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/string_value.go
new file mode 100644
index 0000000000000..3f7cffd957997
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/awsutil/string_value.go
@@ -0,0 +1,90 @@
+package awsutil
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// StringValue returns the string representation of a value.
+//
+// Deprecated: Use Prettify instead.
+func StringValue(i interface{}) string {
+ var buf bytes.Buffer
+ stringValue(reflect.ValueOf(i), 0, &buf)
+ return buf.String()
+}
+
+func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ buf.WriteString("{\n")
+
+ for i := 0; i < v.Type().NumField(); i++ {
+ ft := v.Type().Field(i)
+ fv := v.Field(i)
+
+ if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
+ continue // ignore unexported fields
+ }
+ if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
+ continue // ignore unset fields
+ }
+
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(ft.Name + ": ")
+
+ if tag := ft.Tag.Get("sensitive"); tag == "true" {
+ buf.WriteString("<sensitive>")
+ } else {
+ stringValue(fv, indent+2, buf)
+ }
+
+ buf.WriteString(",\n")
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ case reflect.Slice:
+ nl, id, id2 := "", "", ""
+ if v.Len() > 3 {
+ nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+ }
+ buf.WriteString("[" + nl)
+ for i := 0; i < v.Len(); i++ {
+ buf.WriteString(id2)
+ stringValue(v.Index(i), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString("," + nl)
+ }
+ }
+
+ buf.WriteString(nl + id + "]")
+ case reflect.Map:
+ buf.WriteString("{\n")
+
+ for i, k := range v.MapKeys() {
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(k.String() + ": ")
+ stringValue(v.MapIndex(k), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ default:
+ format := "%v"
+ switch v.Interface().(type) {
+ case string:
+ format = "%q"
+ }
+ fmt.Fprintf(buf, format, v.Interface())
+ }
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/client.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/client.go
new file mode 100644
index 0000000000000..41af3a3343916
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/client.go
@@ -0,0 +1,94 @@
+package client
+
+import (
+ "fmt"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/client/metadata"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// A Config provides configuration to a service client instance.
+type Config struct {
+ Config *aws.Config
+ Handlers request.Handlers
+ PartitionID string
+ Endpoint string
+ SigningRegion string
+ SigningName string
+ ResolvedRegion string
+
+ // States that the signing name did not come from a modeled source but
+ // was derived based on other data. Used by service client constructors
+	// to determine if the signing name can be overridden based on metadata the
+ // service has.
+ SigningNameDerived bool
+}
+
+// ConfigProvider provides a generic way for a service client to receive
+// the ClientConfig without circular dependencies.
+type ConfigProvider interface {
+ ClientConfig(serviceName string, cfgs ...*aws.Config) Config
+}
+
+// ConfigNoResolveEndpointProvider is the same as ConfigProvider, except it will not
+// resolve the endpoint automatically. The service client's endpoint must be
+// provided via the aws.Config.Endpoint field.
+type ConfigNoResolveEndpointProvider interface {
+ ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
+}
+
+// A Client implements the base client request and response handling
+// used by all service clients.
+type Client struct {
+ request.Retryer
+ metadata.ClientInfo
+
+ Config aws.Config
+ Handlers request.Handlers
+}
+
+// New will return a pointer to a new initialized service client.
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
+ svc := &Client{
+ Config: cfg,
+ ClientInfo: info,
+ Handlers: handlers.Copy(),
+ }
+
+ switch retryer, ok := cfg.Retryer.(request.Retryer); {
+ case ok:
+ svc.Retryer = retryer
+ case cfg.Retryer != nil && cfg.Logger != nil:
+ s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
+ cfg.Logger.Log(s)
+ fallthrough
+ default:
+ maxRetries := aws.IntValue(cfg.MaxRetries)
+ if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
+ maxRetries = DefaultRetryerMaxNumRetries
+ }
+ svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
+ }
+
+ svc.AddDebugHandlers()
+
+ for _, option := range options {
+ option(svc)
+ }
+
+ return svc
+}
+
+// NewRequest returns a new Request pointer for the service API
+// operation and parameters.
+func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
+ return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
+}
+
+// AddDebugHandlers injects debug logging handlers into the service to log request
+// debug information.
+func (c *Client) AddDebugHandlers() {
+ c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
+ c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
+}
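+
+// Illustrative sketch (not part of the upstream source) of the retryer
+// selection in New above; info and handlers are elided placeholders:
+//
+//    cfg := aws.Config{MaxRetries: aws.Int(5)}
+//    c := client.New(cfg, info, handlers)
+//    // cfg.Retryer is nil, so c.Retryer is DefaultRetryer{NumMaxRetries: 5}.
+//    // A cfg.Retryer that does not implement request.Retryer is ignored with
+//    // a warning (when a Logger is set) and the same default applies.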
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/default_retryer.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/default_retryer.go
new file mode 100644
index 0000000000000..9d9ef8921e677
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/default_retryer.go
@@ -0,0 +1,177 @@
+package client
+
+import (
+ "math"
+ "strconv"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+ "github.com/IBM/ibm-cos-sdk-go/internal/sdkrand"
+)
+
+// DefaultRetryer implements basic retry logic using exponential backoff for
+// most services. If you want to implement custom retry logic, you can implement the
+// request.Retryer interface.
+//
+type DefaultRetryer struct {
+	// NumMaxRetries is the maximum number of retries that will be performed.
+ // By default, this is zero.
+ NumMaxRetries int
+
+ // MinRetryDelay is the minimum retry delay after which retry will be performed.
+ // If not set, the value is 0ns.
+ MinRetryDelay time.Duration
+
+	// MinThrottleDelay is the minimum retry delay when throttled.
+ // If not set, the value is 0ns.
+ MinThrottleDelay time.Duration
+
+	// MaxRetryDelay is the maximum retry delay; computed delays are capped at this value.
+ // If not set, the value is 0ns.
+ MaxRetryDelay time.Duration
+
+ // MaxThrottleDelay is the maximum retry delay when throttled.
+ // If not set, the value is 0ns.
+ MaxThrottleDelay time.Duration
+}
+
+const (
+ // DefaultRetryerMaxNumRetries sets maximum number of retries
+ DefaultRetryerMaxNumRetries = 3
+
+ // DefaultRetryerMinRetryDelay sets minimum retry delay
+ DefaultRetryerMinRetryDelay = 30 * time.Millisecond
+
+ // DefaultRetryerMinThrottleDelay sets minimum delay when throttled
+ DefaultRetryerMinThrottleDelay = 500 * time.Millisecond
+
+ // DefaultRetryerMaxRetryDelay sets maximum retry delay
+ DefaultRetryerMaxRetryDelay = 300 * time.Second
+
+ // DefaultRetryerMaxThrottleDelay sets maximum delay when throttled
+ DefaultRetryerMaxThrottleDelay = 300 * time.Second
+)
+
+// MaxRetries returns the maximum number of retries the service will use to make
+// an individual API request.
+func (d DefaultRetryer) MaxRetries() int {
+ return d.NumMaxRetries
+}
+
+// setRetryerDefaults sets the default values of the retryer if not set
+func (d *DefaultRetryer) setRetryerDefaults() {
+ if d.MinRetryDelay == 0 {
+ d.MinRetryDelay = DefaultRetryerMinRetryDelay
+ }
+ if d.MaxRetryDelay == 0 {
+ d.MaxRetryDelay = DefaultRetryerMaxRetryDelay
+ }
+ if d.MinThrottleDelay == 0 {
+ d.MinThrottleDelay = DefaultRetryerMinThrottleDelay
+ }
+ if d.MaxThrottleDelay == 0 {
+ d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay
+ }
+}
+
+// RetryRules returns the delay duration before retrying this request again
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
+
+ // if number of max retries is zero, no retries will be performed.
+ if d.NumMaxRetries == 0 {
+ return 0
+ }
+
+ // Sets default value for retryer members
+ d.setRetryerDefaults()
+
+ // minDelay is the minimum retryer delay
+ minDelay := d.MinRetryDelay
+
+ var initialDelay time.Duration
+
+ isThrottle := r.IsErrorThrottle()
+ if isThrottle {
+ if delay, ok := getRetryAfterDelay(r); ok {
+ initialDelay = delay
+ }
+ minDelay = d.MinThrottleDelay
+ }
+
+ retryCount := r.RetryCount
+
+ // maxDelay the maximum retryer delay
+ maxDelay := d.MaxRetryDelay
+
+ if isThrottle {
+ maxDelay = d.MaxThrottleDelay
+ }
+
+ var delay time.Duration
+
+ // Logic to cap the retry count based on the minDelay provided
+ actualRetryCount := int(math.Log2(float64(minDelay))) + 1
+ if actualRetryCount < 63-retryCount {
+ delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay)
+ if delay > maxDelay {
+ delay = getJitterDelay(maxDelay / 2)
+ }
+ } else {
+ delay = getJitterDelay(maxDelay / 2)
+ }
+ return delay + initialDelay
+}
+
+// getJitterDelay returns a jittered delay for retry
+func getJitterDelay(duration time.Duration) time.Duration {
+ return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration))
+}
+
+// ShouldRetry returns true if the request should be retried.
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+
+ // ShouldRetry returns false if number of max retries is 0.
+ if d.NumMaxRetries == 0 {
+ return false
+ }
+
+ // If one of the other handlers already set the retry state
+ // we don't want to override it based on the service's state
+ if r.Retryable != nil {
+ return *r.Retryable
+ }
+ return r.IsErrorRetryable() || r.IsErrorThrottle()
+}
+
+// This will look in the Retry-After header, RFC 7231, for how long
+// it will wait before attempting another request
+func getRetryAfterDelay(r *request.Request) (time.Duration, bool) {
+ if !canUseRetryAfterHeader(r) {
+ return 0, false
+ }
+
+ delayStr := r.HTTPResponse.Header.Get("Retry-After")
+ if len(delayStr) == 0 {
+ return 0, false
+ }
+
+ delay, err := strconv.Atoi(delayStr)
+ if err != nil {
+ return 0, false
+ }
+
+ return time.Duration(delay) * time.Second, true
+}
+
+// Will look at the status code to see if the retry header pertains to
+// the status code.
+func canUseRetryAfterHeader(r *request.Request) bool {
+ switch r.HTTPResponse.StatusCode {
+ case 429:
+ case 503:
+ default:
+ return false
+ }
+
+ return true
+}
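+
+// Illustrative sketch (not part of the upstream source) of configuring the
+// retryer. With these values the n-th retry sleeps roughly
+// 2^n * [MinRetryDelay, 2*MinRetryDelay), capped by MaxRetryDelay:
+//
+//    cfg := &aws.Config{Retryer: client.DefaultRetryer{
+//        NumMaxRetries: 4,
+//        MinRetryDelay: 50 * time.Millisecond,
+//        MaxRetryDelay: 5 * time.Second,
+//    }}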
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/logger.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/logger.go
new file mode 100644
index 0000000000000..93c5713e78167
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/logger.go
@@ -0,0 +1,206 @@
+package client
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http/httputil"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+const logReqMsg = `DEBUG: Request %s/%s Details:
+---[ REQUEST POST-SIGN ]-----------------------------
+%s
+-----------------------------------------------------`
+
+const logReqErrMsg = `DEBUG ERROR: Request %s/%s:
+---[ REQUEST DUMP ERROR ]-----------------------------
+%s
+------------------------------------------------------`
+
+type logWriter struct {
+ // Logger is what we will use to log the payload of a response.
+ Logger aws.Logger
+ // buf stores the contents of what has been read
+ buf *bytes.Buffer
+}
+
+func (logger *logWriter) Write(b []byte) (int, error) {
+ return logger.buf.Write(b)
+}
+
+type teeReaderCloser struct {
+ // io.Reader will be a tee reader that is used during logging.
+ // This structure will read from a body and write the contents to a logger.
+ io.Reader
+ // Source is used just to close when we are done reading.
+ Source io.ReadCloser
+}
+
+func (reader *teeReaderCloser) Close() error {
+ return reader.Source.Close()
+}
+
+// LogHTTPRequestHandler is an SDK request handler to log the HTTP request sent
+// to a service. Will include the HTTP request body if the LogLevel of the
+// request matches LogDebugWithHTTPBody.
+var LogHTTPRequestHandler = request.NamedHandler{
+ Name: "awssdk.client.LogRequest",
+ Fn: logRequest,
+}
+
+func logRequest(r *request.Request) {
+ if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil {
+ return
+ }
+
+ logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
+ bodySeekable := aws.IsReaderSeekable(r.Body)
+
+ b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
+ if err != nil {
+ r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, err))
+ return
+ }
+
+ if logBody {
+ if !bodySeekable {
+ r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body))
+ }
+ // Reset the request body because dumpRequest will re-wrap the
+ // r.HTTPRequest's Body as a NoOpCloser and will not be reset after
+ // read by the HTTP client reader.
+ if err := r.Error; err != nil {
+ r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, err))
+ return
+ }
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
+}
+
+// LogHTTPRequestHeaderHandler is an SDK request handler to log the HTTP request sent
+// to a service. Will only log the HTTP request's headers. The request payload
+// will not be read.
+var LogHTTPRequestHeaderHandler = request.NamedHandler{
+ Name: "awssdk.client.LogRequestHeader",
+ Fn: logRequestHeader,
+}
+
+func logRequestHeader(r *request.Request) {
+ if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil {
+ return
+ }
+
+ b, err := httputil.DumpRequestOut(r.HTTPRequest, false)
+ if err != nil {
+ r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, err))
+ return
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
+}
+
+const logRespMsg = `DEBUG: Response %s/%s Details:
+---[ RESPONSE ]--------------------------------------
+%s
+-----------------------------------------------------`
+
+const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
+---[ RESPONSE DUMP ERROR ]-----------------------------
+%s
+-----------------------------------------------------`
+
+// LogHTTPResponseHandler is an SDK request handler to log the HTTP response
+// received from a service. Will include the HTTP response body if the LogLevel
+// of the request matches LogDebugWithHTTPBody.
+var LogHTTPResponseHandler = request.NamedHandler{
+ Name: "awssdk.client.LogResponse",
+ Fn: logResponse,
+}
+
+func logResponse(r *request.Request) {
+ if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil {
+ return
+ }
+
+ lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}
+
+ if r.HTTPResponse == nil {
+ lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil"))
+ return
+ }
+
+ logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
+ if logBody {
+ r.HTTPResponse.Body = &teeReaderCloser{
+ Reader: io.TeeReader(r.HTTPResponse.Body, lw),
+ Source: r.HTTPResponse.Body,
+ }
+ }
+
+ handlerFn := func(req *request.Request) {
+ b, err := httputil.DumpResponse(req.HTTPResponse, false)
+ if err != nil {
+ lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
+ req.ClientInfo.ServiceName, req.Operation.Name, err))
+ return
+ }
+
+ lw.Logger.Log(fmt.Sprintf(logRespMsg,
+ req.ClientInfo.ServiceName, req.Operation.Name, string(b)))
+
+ if logBody {
+ b, err := ioutil.ReadAll(lw.buf)
+ if err != nil {
+ lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
+ req.ClientInfo.ServiceName, req.Operation.Name, err))
+ return
+ }
+
+ lw.Logger.Log(string(b))
+ }
+ }
+
+ const handlerName = "awsdk.client.LogResponse.ResponseBody"
+
+ r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{
+ Name: handlerName, Fn: handlerFn,
+ })
+ r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{
+ Name: handlerName, Fn: handlerFn,
+ })
+}
+
+// LogHTTPResponseHeaderHandler is an SDK request handler to log the HTTP
+// response received from a service. Will only log the HTTP response's headers.
+// The response payload will not be read.
+var LogHTTPResponseHeaderHandler = request.NamedHandler{
+ Name: "awssdk.client.LogResponseHeader",
+ Fn: logResponseHeader,
+}
+
+func logResponseHeader(r *request.Request) {
+ if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil {
+ return
+ }
+
+ b, err := httputil.DumpResponse(r.HTTPResponse, false)
+ if err != nil {
+ r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, err))
+ return
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf(logRespMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
+}
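+
+// Illustrative sketch (not part of the upstream source). The handlers above
+// only fire once the config log level is raised; LogDebugWithHTTPBody also
+// dumps the request and response bodies:
+//
+//    cfg := &aws.Config{
+//        LogLevel: aws.LogLevel(aws.LogDebugWithHTTPBody),
+//        Logger:   aws.NewDefaultLogger(), // writes to standard out
+//    }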
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/metadata/client_info.go
new file mode 100644
index 0000000000000..a7530ebb38996
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/metadata/client_info.go
@@ -0,0 +1,15 @@
+package metadata
+
+// ClientInfo wraps immutable data from the client.Client structure.
+type ClientInfo struct {
+ ServiceName string
+ ServiceID string
+ APIVersion string
+ PartitionID string
+ Endpoint string
+ SigningName string
+ SigningRegion string
+ JSONVersion string
+ TargetPrefix string
+ ResolvedRegion string
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/no_op_retryer.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/no_op_retryer.go
new file mode 100644
index 0000000000000..8c8ac064791f0
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/client/no_op_retryer.go
@@ -0,0 +1,28 @@
+package client
+
+import (
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// NoOpRetryer provides a retryer that performs no retries.
+// It should be used when we do not want retries to be performed.
+type NoOpRetryer struct{}
+
+// MaxRetries returns the maximum number of retries the service will use to make
+// an individual API request; for NoOpRetryer this is always zero.
+func (d NoOpRetryer) MaxRetries() int {
+ return 0
+}
+
+// ShouldRetry will always return false for NoOpRetryer, as it should never retry.
+func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool {
+ return false
+}
+
+// RetryRules returns the delay duration before retrying this request again;
+// since NoOpRetryer does not retry, RetryRules always returns 0.
+func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration {
+ return 0
+}
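+
+// Illustrative sketch (not part of the upstream source): fail fast and never
+// retry by plugging the retryer into a config.
+//
+//    cfg := &aws.Config{Retryer: client.NoOpRetryer{}}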
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/config.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/config.go
new file mode 100644
index 0000000000000..00d8491336f44
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/config.go
@@ -0,0 +1,612 @@
+package aws
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+ "github.com/IBM/ibm-cos-sdk-go/aws/endpoints"
+)
+
+// UseServiceDefaultRetries instructs the config to use the service's own
+// default number of retries. This will be the default action if
+// Config.MaxRetries is also nil.
+const UseServiceDefaultRetries = -1
+
+// RequestRetryer is an alias for a type that implements the request.Retryer
+// interface.
+type RequestRetryer interface{}
+
+// A Config provides service configuration for service clients. By default,
+// all clients will use the defaults.DefaultConfig structure.
+//
+// // Create Session with MaxRetries configuration to be shared by multiple
+// // service clients.
+// sess := session.Must(session.NewSession(&aws.Config{
+// MaxRetries: aws.Int(3),
+// }))
+//
+// // Create S3 service client with a specific Region.
+// svc := s3.New(sess, &aws.Config{
+// Region: aws.String("us-west-2"),
+// })
+type Config struct {
+ // Enables verbose error printing of all credential chain errors.
+ // Should be used when wanting to see all errors while attempting to
+ // retrieve credentials.
+ CredentialsChainVerboseErrors *bool
+
+ // The credentials object to use when signing requests. Defaults to a
+ // chain of credential providers to search for credentials in environment
+ // variables, shared credential file, and EC2 Instance Roles.
+ Credentials *credentials.Credentials
+
+ // An optional endpoint URL (hostname only or fully qualified URI)
+ // that overrides the default generated endpoint for a client. Set this
+ // to `nil` or the value to `""` to use the default generated endpoint.
+ //
+ // Note: You must still provide a `Region` value when specifying an
+ // endpoint for a client.
+ Endpoint *string
+
+ // The resolver to use for looking up endpoints for AWS service clients
+ // to use based on region.
+ EndpointResolver endpoints.Resolver
+
+ // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call
+ // ShouldRetry regardless of whether or not if request.Retryable is set.
+ // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck
+ // is not set, then ShouldRetry will only be called if request.Retryable is nil.
+ // Proper handling of the request.Retryable field is important when setting this field.
+ EnforceShouldRetryCheck *bool
+
+ // The region to send requests to. This parameter is required and must
+ // be configured globally or on a per-client basis unless otherwise
+ // noted. A full list of regions is found in the "Regions and Endpoints"
+ // document.
+ //
+ // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS
+ // Regions and Endpoints.
+ Region *string
+
+ // Set this to `true` to disable SSL when sending requests. Defaults
+ // to `false`.
+ DisableSSL *bool
+
+ // The HTTP client to use when sending requests. Defaults to
+ // `http.DefaultClient`.
+ HTTPClient *http.Client
+
+ // An integer value representing the logging level. The default log level
+ // is zero (LogOff), which represents no logging. To enable logging set
+ // to a LogLevel Value.
+ LogLevel *LogLevelType
+
+ // The logger writer interface to write logging messages to. Defaults to
+ // standard out.
+ Logger Logger
+
+ // The maximum number of times that a request will be retried for failures.
+ // Defaults to -1, which defers the max retry setting to the service
+ // specific configuration.
+ MaxRetries *int
+
+ // Retryer guides how HTTP requests should be retried in case of
+ // recoverable failures.
+ //
+ // When nil or the value does not implement the request.Retryer interface,
+ // the client.DefaultRetryer will be used.
+ //
+ // When both Retryer and MaxRetries are non-nil, the former is used and
+ // the latter ignored.
+ //
+ // To set the Retryer field in a type-safe manner and with chaining, use
+ // the request.WithRetryer helper function:
+ //
+ // cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
+ //
+ Retryer RequestRetryer
+
+ // Disables semantic parameter validation, which validates input for
+ // missing required fields and/or other semantic request input errors.
+ DisableParamValidation *bool
+
+ // Disables the computation of request and response checksums, e.g.,
+ // CRC32 checksums in Amazon DynamoDB.
+ DisableComputeChecksums *bool
+
+ // Set this to `true` to force the request to use path-style addressing,
+ // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
+ // will use virtual hosted bucket addressing when possible
+ // (`http://BUCKET.s3.amazonaws.com/KEY`).
+ //
+ // Note: This configuration option is specific to the Amazon S3 service.
+ //
+ // See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
+ // for Amazon S3: Virtual Hosting of Buckets
+ S3ForcePathStyle *bool
+
+ // Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
+ // header to PUT requests over 2MB of content. 100-Continue instructs the
+ // HTTP client not to send the body until the service responds with a
+ // `continue` status. This is useful to prevent sending the request body
+	// until after the request is authenticated and validated.
+ //
+ // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
+ //
+ // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
+ // `ExpectContinueTimeout` for information on adjusting the continue wait
+ // timeout. https://golang.org/pkg/net/http/#Transport
+ //
+ // You should use this flag to disable 100-Continue if you experience issues
+ // with proxies or third party S3 compatible services.
+ S3Disable100Continue *bool
+
+	// Set this to `true` to enable the S3 Accelerate feature. All operations
+	// compatible with S3 Accelerate will use the accelerate endpoint for
+	// requests. Requests not compatible will fall back to normal S3 requests.
+	//
+	// The bucket must be enabled for accelerate before it can be used with an
+	// S3 client that has accelerate enabled. If the bucket is not enabled for
+	// accelerate, an error will be returned. The bucket name must also be DNS
+	// compatible to work with accelerate.
+ S3UseAccelerate *bool
+
+	// S3DisableContentMD5Validation config option is temporarily disabled
+	// for S3 GetObject API calls (#1837).
+ //
+ // Set this to `true` to disable the S3 service client from automatically
+ // adding the ContentMD5 to S3 Object Put and Upload API calls. This option
+ // will also disable the SDK from performing object ContentMD5 validation
+ // on GetObject API calls.
+ S3DisableContentMD5Validation *bool
+
+	// Set this to `true` to have the S3 service client use the region specified
+ // in the ARN, when an ARN is provided as an argument to a bucket parameter.
+ S3UseARNRegion *bool
+
+ // Set this to `true` to enable the SDK to unmarshal API response header maps to
+ // normalized lower case map keys.
+ //
+ // For example S3's X-Amz-Meta prefixed header will be unmarshaled to lower case
+ // Metadata member's map keys. The value of the header in the map is unaffected.
+ //
+	// The AWS SDK for Go v2 uses lower case header maps by default. The v1
+	// SDK provides this option as an opt-in, for backwards compatibility.
+ LowerCaseHeaderMaps *bool
+
+ // Set this to `true` to disable the EC2Metadata client from overriding the
+ // default http.Client's Timeout. This is helpful if you do not want the
+	// EC2Metadata client to create a new http.Client. This option is only
+ // meaningful if you're not already using a custom HTTP client with the
+ // SDK. Enabled by default.
+ //
+ // Must be set and provided to the session.NewSession() in order to disable
+ // the EC2Metadata overriding the timeout for default credentials chain.
+ //
+ // Example:
+ // sess := session.Must(session.NewSession(aws.NewConfig()
+ // .WithEC2MetadataDisableTimeoutOverride(true)))
+ //
+ // svc := s3.New(sess)
+ //
+ EC2MetadataDisableTimeoutOverride *bool
+
+ // Instructs the endpoint to be generated for a service client to
+ // be the dual stack endpoint. The dual stack endpoint will support
+ // both IPv4 and IPv6 addressing.
+ //
+	// Setting this for a service which does not support dual stack will cause
+	// requests to fail. It is not recommended to set this value on the session,
+	// as it will apply to all service clients created with the session, even
+	// services which don't support dual stack endpoints.
+ //
+ // If the Endpoint config value is also provided the UseDualStack flag
+ // will be ignored.
+ //
+	// Only supported when configured on the service client, for example:
+ //
+ // sess := session.Must(session.NewSession())
+ //
+ // svc := s3.New(sess, &aws.Config{
+ // UseDualStack: aws.Bool(true),
+ // })
+ //
+ // Deprecated: This option will continue to function for S3 and S3 Control for backwards compatibility.
+ // UseDualStackEndpoint should be used to enable usage of a service's dual-stack endpoint for all service clients
+ // moving forward. For S3 and S3 Control, when UseDualStackEndpoint is set to a non-zero value it takes higher
+	// precedence than this option.
+ UseDualStack *bool
+
+ // Sets the resolver to resolve a dual-stack endpoint for the service.
+ UseDualStackEndpoint endpoints.DualStackEndpointState
+
+ // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
+ // IBM Unsupported
+	// UseFIPSEndpoint endpoints.FIPSEndpointState
+
+	// SleepDelay is an override for the func the SDK will call when sleeping
+ // during the lifecycle of a request. Specifically this will be used for
+ // request delays. This value should only be used for testing. To adjust
+ // the delay of a request see the aws/client.DefaultRetryer and
+ // aws/request.Retryer.
+ //
+ // SleepDelay will prevent any Context from being used for canceling retry
+ // delay of an API operation. It is recommended to not use SleepDelay at all
+ // and specify a Retryer instead.
+ SleepDelay func(time.Duration)
+
+	// DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests.
+	// It defaults to false, and would only be used for empty directory names in s3 requests.
+ //
+ // Example:
+ // sess := session.Must(session.NewSession(&aws.Config{
+ // DisableRestProtocolURICleaning: aws.Bool(true),
+ // }))
+ //
+ // svc := s3.New(sess)
+ // out, err := svc.GetObject(&s3.GetObjectInput {
+ // Bucket: aws.String("bucketname"),
+ // Key: aws.String("//foo//bar//moo"),
+ // })
+ DisableRestProtocolURICleaning *bool
+
+ // EnableEndpointDiscovery will allow for endpoint discovery on operations that
+ // have the definition in its model. By default, endpoint discovery is off.
+ // To use EndpointDiscovery, Endpoint should be unset or set to an empty string.
+ //
+ // Example:
+ // sess := session.Must(session.NewSession(&aws.Config{
+ // EnableEndpointDiscovery: aws.Bool(true),
+ // }))
+ //
+ // svc := s3.New(sess)
+ // out, err := svc.GetObject(&s3.GetObjectInput {
+ // Bucket: aws.String("bucketname"),
+ // Key: aws.String("/foo/bar/moo"),
+ // })
+ EnableEndpointDiscovery *bool
+
+ // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing
+ // request endpoint hosts with modeled information.
+ //
+ // Disabling this feature is useful when you want to use local endpoints
+ // for testing that do not support the modeled host prefix pattern.
+ DisableEndpointHostPrefix *bool
+
+ // S3UsEast1RegionalEndpoint will enable regional or legacy endpoint resolving
+ S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint
+}
+
+// NewConfig returns a new Config pointer that can be chained with builder
+// methods to set multiple configuration values inline without using pointers.
+//
+// // Create Session with MaxRetries configuration to be shared by multiple
+// // service clients.
+// sess := session.Must(session.NewSession(aws.NewConfig().
+// WithMaxRetries(3),
+// ))
+//
+// // Create S3 service client with a specific Region.
+// svc := s3.New(sess, aws.NewConfig().
+// WithRegion("us-west-2"),
+// )
+func NewConfig() *Config {
+ return &Config{}
+}
+
+// WithCredentialsChainVerboseErrors sets a config CredentialsChainVerboseErrors
+// value returning a Config pointer for chaining.
+func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
+ c.CredentialsChainVerboseErrors = &verboseErrs
+ return c
+}
+
+// WithCredentials sets a config Credentials value returning a Config pointer
+// for chaining.
+func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
+ c.Credentials = creds
+ return c
+}
+
+// WithEndpoint sets a config Endpoint value returning a Config pointer for
+// chaining.
+func (c *Config) WithEndpoint(endpoint string) *Config {
+ c.Endpoint = &endpoint
+ return c
+}
+
+// WithEndpointResolver sets a config EndpointResolver value returning a
+// Config pointer for chaining.
+func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config {
+ c.EndpointResolver = resolver
+ return c
+}
+
+// WithRegion sets a config Region value returning a Config pointer for
+// chaining.
+func (c *Config) WithRegion(region string) *Config {
+ c.Region = ®ion
+ return c
+}
+
+// WithDisableSSL sets a config DisableSSL value returning a Config pointer
+// for chaining.
+func (c *Config) WithDisableSSL(disable bool) *Config {
+ c.DisableSSL = &disable
+ return c
+}
+
+// WithHTTPClient sets a config HTTPClient value returning a Config pointer
+// for chaining.
+func (c *Config) WithHTTPClient(client *http.Client) *Config {
+ c.HTTPClient = client
+ return c
+}
+
+// WithMaxRetries sets a config MaxRetries value returning a Config pointer
+// for chaining.
+func (c *Config) WithMaxRetries(max int) *Config {
+ c.MaxRetries = &max
+ return c
+}
+
+// WithDisableParamValidation sets a config DisableParamValidation value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableParamValidation(disable bool) *Config {
+ c.DisableParamValidation = &disable
+ return c
+}
+
+// WithDisableComputeChecksums sets a config DisableComputeChecksums value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
+ c.DisableComputeChecksums = &disable
+ return c
+}
+
+// WithLogLevel sets a config LogLevel value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogLevel(level LogLevelType) *Config {
+ c.LogLevel = &level
+ return c
+}
+
+// WithLogger sets a config Logger value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogger(logger Logger) *Config {
+ c.Logger = logger
+ return c
+}
+
+// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
+// pointer for chaining.
+func (c *Config) WithS3ForcePathStyle(force bool) *Config {
+ c.S3ForcePathStyle = &force
+ return c
+}
+
+// WithS3Disable100Continue sets a config S3Disable100Continue value returning
+// a Config pointer for chaining.
+func (c *Config) WithS3Disable100Continue(disable bool) *Config {
+ c.S3Disable100Continue = &disable
+ return c
+}
+
+// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
+// pointer for chaining.
+func (c *Config) WithS3UseAccelerate(enable bool) *Config {
+ c.S3UseAccelerate = &enable
+ return c
+}
+
+// WithS3DisableContentMD5Validation sets a config
+// S3DisableContentMD5Validation value returning a Config pointer for chaining.
+func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config {
+ c.S3DisableContentMD5Validation = &enable
+ return c
+}
+
+// WithS3UseARNRegion sets a config S3UseARNRegion value
+// returning a Config pointer for chaining.
+func (c *Config) WithS3UseARNRegion(enable bool) *Config {
+ c.S3UseARNRegion = &enable
+ return c
+}
+
+// WithUseDualStack sets a config UseDualStack value returning a Config
+// pointer for chaining.
+func (c *Config) WithUseDualStack(enable bool) *Config {
+ c.UseDualStack = &enable
+ return c
+}
+
+// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
+// returning a Config pointer for chaining.
+func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
+ c.EC2MetadataDisableTimeoutOverride = &enable
+ return c
+}
+
+// WithSleepDelay overrides the function used to sleep while waiting for the
+// next retry. Defaults to time.Sleep.
+func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
+ c.SleepDelay = fn
+ return c
+}
+
+// WithEndpointDiscovery will set whether or not to use endpoint discovery.
+func (c *Config) WithEndpointDiscovery(t bool) *Config {
+ c.EnableEndpointDiscovery = &t
+ return c
+}
+
+// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix
+// when making requests.
+func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config {
+ c.DisableEndpointHostPrefix = &t
+ return c
+}
+
+// WithS3UsEast1RegionalEndpoint will set whether or not to use the regional endpoint flag
+// when resolving the endpoint for a service.
+func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEndpoint) *Config {
+ c.S3UsEast1RegionalEndpoint = sre
+ return c
+}
+
+// WithLowerCaseHeaderMaps sets a config LowerCaseHeaderMaps value
+// returning a Config pointer for chaining.
+func (c *Config) WithLowerCaseHeaderMaps(t bool) *Config {
+ c.LowerCaseHeaderMaps = &t
+ return c
+}
+
+// WithDisableRestProtocolURICleaning sets a config DisableRestProtocolURICleaning value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableRestProtocolURICleaning(t bool) *Config {
+ c.DisableRestProtocolURICleaning = &t
+ return c
+}
+
+// MergeIn merges the passed in configs into the existing config object.
+func (c *Config) MergeIn(cfgs ...*Config) {
+ for _, other := range cfgs {
+ mergeInConfig(c, other)
+ }
+}
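+
+// Illustrative usage sketch (not part of the SDK): merging is last-writer-wins
+// per field, and only non-nil fields of the merged configs overwrite the
+// destination:
+//
+//	base := aws.NewConfig().WithRegion("us-south").WithMaxRetries(3)
+//	base.MergeIn(aws.NewConfig().WithMaxRetries(10))
+//	// base.Region is still "us-south"; base.MaxRetries is now 10.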
+
+func mergeInConfig(dst *Config, other *Config) {
+ if other == nil {
+ return
+ }
+
+ if other.CredentialsChainVerboseErrors != nil {
+ dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
+ }
+
+ if other.Credentials != nil {
+ dst.Credentials = other.Credentials
+ }
+
+ if other.Endpoint != nil {
+ dst.Endpoint = other.Endpoint
+ }
+
+ if other.EndpointResolver != nil {
+ dst.EndpointResolver = other.EndpointResolver
+ }
+
+ if other.Region != nil {
+ dst.Region = other.Region
+ }
+
+ if other.DisableSSL != nil {
+ dst.DisableSSL = other.DisableSSL
+ }
+
+ if other.HTTPClient != nil {
+ dst.HTTPClient = other.HTTPClient
+ }
+
+ if other.LogLevel != nil {
+ dst.LogLevel = other.LogLevel
+ }
+
+ if other.Logger != nil {
+ dst.Logger = other.Logger
+ }
+
+ if other.MaxRetries != nil {
+ dst.MaxRetries = other.MaxRetries
+ }
+
+ if other.Retryer != nil {
+ dst.Retryer = other.Retryer
+ }
+
+ if other.DisableParamValidation != nil {
+ dst.DisableParamValidation = other.DisableParamValidation
+ }
+
+ if other.DisableComputeChecksums != nil {
+ dst.DisableComputeChecksums = other.DisableComputeChecksums
+ }
+
+ if other.S3ForcePathStyle != nil {
+ dst.S3ForcePathStyle = other.S3ForcePathStyle
+ }
+
+ if other.S3Disable100Continue != nil {
+ dst.S3Disable100Continue = other.S3Disable100Continue
+ }
+
+ if other.S3UseAccelerate != nil {
+ dst.S3UseAccelerate = other.S3UseAccelerate
+ }
+
+ if other.S3DisableContentMD5Validation != nil {
+ dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation
+ }
+
+ if other.S3UseARNRegion != nil {
+ dst.S3UseARNRegion = other.S3UseARNRegion
+ }
+
+ if other.UseDualStack != nil {
+ dst.UseDualStack = other.UseDualStack
+ }
+
+ if other.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset {
+ dst.UseDualStackEndpoint = other.UseDualStackEndpoint
+ }
+
+ if other.EC2MetadataDisableTimeoutOverride != nil {
+ dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
+ }
+
+ if other.SleepDelay != nil {
+ dst.SleepDelay = other.SleepDelay
+ }
+
+ if other.DisableRestProtocolURICleaning != nil {
+ dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning
+ }
+
+ if other.EnforceShouldRetryCheck != nil {
+ dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
+ }
+
+ if other.EnableEndpointDiscovery != nil {
+ dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery
+ }
+
+ if other.DisableEndpointHostPrefix != nil {
+ dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix
+ }
+
+ if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint {
+ dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint
+ }
+
+ if other.LowerCaseHeaderMaps != nil {
+ dst.LowerCaseHeaderMaps = other.LowerCaseHeaderMaps
+ }
+}
+
+// Copy will return a shallow copy of the Config object. If any additional
+// configurations are provided they will be merged into the new config returned.
+func (c *Config) Copy(cfgs ...*Config) *Config {
+ dst := &Config{}
+ dst.MergeIn(c)
+
+ for _, cfg := range cfgs {
+ dst.MergeIn(cfg)
+ }
+
+ return dst
+}
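+
+// Illustrative note (base is a hypothetical caller-side *Config): Copy is
+// shallow, so pointer fields such as Credentials and HTTPClient are shared
+// between the original and the copy:
+//
+//	derived := base.Copy(aws.NewConfig().WithMaxRetries(0))
+//	// derived.HTTPClient == base.HTTPClient; only MaxRetries differs.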
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/context_1_5.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/context_1_5.go
new file mode 100644
index 0000000000000..89aad2c677135
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/context_1_5.go
@@ -0,0 +1,38 @@
+//go:build !go1.9
+// +build !go1.9
+
+package aws
+
+import "time"
+
+// Context is a copy of the Go v1.7 stdlib's context.Context interface.
+// It is represented as an SDK interface to enable you to use the "WithContext"
+// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context interface {
+ // Deadline returns the time when work done on behalf of this context
+ // should be canceled. Deadline returns ok==false when no deadline is
+ // set. Successive calls to Deadline return the same results.
+ Deadline() (deadline time.Time, ok bool)
+
+ // Done returns a channel that's closed when work done on behalf of this
+ // context should be canceled. Done may return nil if this context can
+ // never be canceled. Successive calls to Done return the same value.
+ Done() <-chan struct{}
+
+ // Err returns a non-nil error value after Done is closed. Err returns
+ // Canceled if the context was canceled or DeadlineExceeded if the
+ // context's deadline passed. No other values for Err are defined.
+ // After Done is closed, successive calls to Err return the same value.
+ Err() error
+
+ // Value returns the value associated with this context for key, or nil
+ // if no value is associated with key. Successive calls to Value with
+ // the same key returns the same result.
+ //
+ // Use context values only for request-scoped data that transits
+ // processes and API boundaries, not for passing optional parameters to
+ // functions.
+ Value(key interface{}) interface{}
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/context_1_9.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/context_1_9.go
new file mode 100644
index 0000000000000..6ee9ddd18bb99
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/context_1_9.go
@@ -0,0 +1,12 @@
+//go:build go1.9
+// +build go1.9
+
+package aws
+
+import "context"
+
+// Context is an alias of the Go stdlib's context.Context interface.
+// It can be used within the SDK's API operation "WithContext" methods.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context = context.Context
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/context_background_1_7.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/context_background_1_7.go
new file mode 100644
index 0000000000000..9975d561bb28f
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/context_background_1_7.go
@@ -0,0 +1,21 @@
+//go:build go1.7
+// +build go1.7
+
+package aws
+
+import "context"
+
+// BackgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func BackgroundContext() Context {
+ return context.Background()
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/context_sleep.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/context_sleep.go
new file mode 100644
index 0000000000000..304fd156120c1
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/context_sleep.go
@@ -0,0 +1,24 @@
+package aws
+
+import (
+ "time"
+)
+
+// SleepWithContext will wait until the timer duration expires or the context
+// is canceled, whichever happens first. If the context is canceled, the
+// context's error will be returned.
+//
+// Expects Context to always return a non-nil error if the Done channel is closed.
+func SleepWithContext(ctx Context, dur time.Duration) error {
+ t := time.NewTimer(dur)
+ defer t.Stop()
+
+ select {
+ case <-t.C:
+ break
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ return nil
+}
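+
+// Illustrative usage sketch (assumes the caller imports "context" and "time"):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+//	defer cancel()
+//	if err := aws.SleepWithContext(ctx, time.Second); err != nil {
+//		// the context deadline elapsed before the full delay completed
+//	}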
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/convert_types.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/convert_types.go
new file mode 100644
index 0000000000000..4e076c1837a74
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/convert_types.go
@@ -0,0 +1,918 @@
+package aws
+
+import "time"
+
+// String returns a pointer to the string value passed in.
+func String(v string) *string {
+ return &v
+}
+
+// StringValue returns the value of the string pointer passed in or
+// "" if the pointer is nil.
+func StringValue(v *string) string {
+ if v != nil {
+ return *v
+ }
+ return ""
+}
+
+// StringSlice converts a slice of string values into a slice of
+// string pointers
+func StringSlice(src []string) []*string {
+ dst := make([]*string, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// StringValueSlice converts a slice of string pointers into a slice of
+// string values
+func StringValueSlice(src []*string) []string {
+ dst := make([]string, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// StringMap converts a string map of string values into a string
+// map of string pointers
+func StringMap(src map[string]string) map[string]*string {
+ dst := make(map[string]*string)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// StringValueMap converts a string map of string pointers into a string
+// map of string values
+func StringValueMap(src map[string]*string) map[string]string {
+ dst := make(map[string]string)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
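+
+// Illustrative usage sketch for the pointer/value helpers in this file;
+// the same pattern repeats for every scalar type below:
+//
+//	bucket := aws.String("my-bucket") // *string from a value
+//	name := aws.StringValue(bucket)   // value back, "" if the pointer is nil
+//	ptrs := aws.StringSlice([]string{"a", "b"})
+//	vals := aws.StringValueSlice(ptrs)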
+
+// Bool returns a pointer to the bool value passed in.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// BoolValue returns the value of the bool pointer passed in or
+// false if the pointer is nil.
+func BoolValue(v *bool) bool {
+ if v != nil {
+ return *v
+ }
+ return false
+}
+
+// BoolSlice converts a slice of bool values into a slice of
+// bool pointers
+func BoolSlice(src []bool) []*bool {
+ dst := make([]*bool, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// BoolValueSlice converts a slice of bool pointers into a slice of
+// bool values
+func BoolValueSlice(src []*bool) []bool {
+ dst := make([]bool, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// BoolMap converts a string map of bool values into a string
+// map of bool pointers
+func BoolMap(src map[string]bool) map[string]*bool {
+ dst := make(map[string]*bool)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// BoolValueMap converts a string map of bool pointers into a string
+// map of bool values
+func BoolValueMap(src map[string]*bool) map[string]bool {
+ dst := make(map[string]bool)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int returns a pointer to the int value passed in.
+func Int(v int) *int {
+ return &v
+}
+
+// IntValue returns the value of the int pointer passed in or
+// 0 if the pointer is nil.
+func IntValue(v *int) int {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// IntSlice converts a slice of int values into a slice of
+// int pointers
+func IntSlice(src []int) []*int {
+ dst := make([]*int, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// IntValueSlice converts a slice of int pointers into a slice of
+// int values
+func IntValueSlice(src []*int) []int {
+ dst := make([]int, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// IntMap converts a string map of int values into a string
+// map of int pointers
+func IntMap(src map[string]int) map[string]*int {
+ dst := make(map[string]*int)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// IntValueMap converts a string map of int pointers into a string
+// map of int values
+func IntValueMap(src map[string]*int) map[string]int {
+ dst := make(map[string]int)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Uint returns a pointer to the uint value passed in.
+func Uint(v uint) *uint {
+ return &v
+}
+
+// UintValue returns the value of the uint pointer passed in or
+// 0 if the pointer is nil.
+func UintValue(v *uint) uint {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// UintSlice converts a slice of uint values into a slice of
+// uint pointers
+func UintSlice(src []uint) []*uint {
+ dst := make([]*uint, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// UintValueSlice converts a slice of uint pointers into a slice of
+// uint values
+func UintValueSlice(src []*uint) []uint {
+ dst := make([]uint, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// UintMap converts a string map of uint values into a string
+// map of uint pointers
+func UintMap(src map[string]uint) map[string]*uint {
+ dst := make(map[string]*uint)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// UintValueMap converts a string map of uint pointers into a string
+// map of uint values
+func UintValueMap(src map[string]*uint) map[string]uint {
+ dst := make(map[string]uint)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int8 returns a pointer to the int8 value passed in.
+func Int8(v int8) *int8 {
+ return &v
+}
+
+// Int8Value returns the value of the int8 pointer passed in or
+// 0 if the pointer is nil.
+func Int8Value(v *int8) int8 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Int8Slice converts a slice of int8 values into a slice of
+// int8 pointers
+func Int8Slice(src []int8) []*int8 {
+ dst := make([]*int8, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Int8ValueSlice converts a slice of int8 pointers into a slice of
+// int8 values
+func Int8ValueSlice(src []*int8) []int8 {
+ dst := make([]int8, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Int8Map converts a string map of int8 values into a string
+// map of int8 pointers
+func Int8Map(src map[string]int8) map[string]*int8 {
+ dst := make(map[string]*int8)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Int8ValueMap converts a string map of int8 pointers into a string
+// map of int8 values
+func Int8ValueMap(src map[string]*int8) map[string]int8 {
+ dst := make(map[string]int8)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int16 returns a pointer to the int16 value passed in.
+func Int16(v int16) *int16 {
+ return &v
+}
+
+// Int16Value returns the value of the int16 pointer passed in or
+// 0 if the pointer is nil.
+func Int16Value(v *int16) int16 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Int16Slice converts a slice of int16 values into a slice of
+// int16 pointers
+func Int16Slice(src []int16) []*int16 {
+ dst := make([]*int16, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Int16ValueSlice converts a slice of int16 pointers into a slice of
+// int16 values
+func Int16ValueSlice(src []*int16) []int16 {
+ dst := make([]int16, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Int16Map converts a string map of int16 values into a string
+// map of int16 pointers
+func Int16Map(src map[string]int16) map[string]*int16 {
+ dst := make(map[string]*int16)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Int16ValueMap converts a string map of int16 pointers into a string
+// map of int16 values
+func Int16ValueMap(src map[string]*int16) map[string]int16 {
+ dst := make(map[string]int16)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int32 returns a pointer to the int32 value passed in.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int32Value returns the value of the int32 pointer passed in or
+// 0 if the pointer is nil.
+func Int32Value(v *int32) int32 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Int32Slice converts a slice of int32 values into a slice of
+// int32 pointers
+func Int32Slice(src []int32) []*int32 {
+ dst := make([]*int32, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Int32ValueSlice converts a slice of int32 pointers into a slice of
+// int32 values
+func Int32ValueSlice(src []*int32) []int32 {
+ dst := make([]int32, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Int32Map converts a string map of int32 values into a string
+// map of int32 pointers
+func Int32Map(src map[string]int32) map[string]*int32 {
+ dst := make(map[string]*int32)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Int32ValueMap converts a string map of int32 pointers into a string
+// map of int32 values
+func Int32ValueMap(src map[string]*int32) map[string]int32 {
+ dst := make(map[string]int32)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int64 returns a pointer to the int64 value passed in.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Int64Value returns the value of the int64 pointer passed in or
+// 0 if the pointer is nil.
+func Int64Value(v *int64) int64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Int64Slice converts a slice of int64 values into a slice of
+// int64 pointers
+func Int64Slice(src []int64) []*int64 {
+ dst := make([]*int64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Int64ValueSlice converts a slice of int64 pointers into a slice of
+// int64 values
+func Int64ValueSlice(src []*int64) []int64 {
+ dst := make([]int64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Int64Map converts a string map of int64 values into a string
+// map of int64 pointers
+func Int64Map(src map[string]int64) map[string]*int64 {
+ dst := make(map[string]*int64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Int64ValueMap converts a string map of int64 pointers into a string
+// map of int64 values
+func Int64ValueMap(src map[string]*int64) map[string]int64 {
+ dst := make(map[string]int64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Uint8 returns a pointer to the uint8 value passed in.
+func Uint8(v uint8) *uint8 {
+ return &v
+}
+
+// Uint8Value returns the value of the uint8 pointer passed in or
+// 0 if the pointer is nil.
+func Uint8Value(v *uint8) uint8 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Uint8Slice converts a slice of uint8 values into a slice of
+// uint8 pointers
+func Uint8Slice(src []uint8) []*uint8 {
+ dst := make([]*uint8, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Uint8ValueSlice converts a slice of uint8 pointers into a slice of
+// uint8 values
+func Uint8ValueSlice(src []*uint8) []uint8 {
+ dst := make([]uint8, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Uint8Map converts a string map of uint8 values into a string
+// map of uint8 pointers
+func Uint8Map(src map[string]uint8) map[string]*uint8 {
+ dst := make(map[string]*uint8)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Uint8ValueMap converts a string map of uint8 pointers into a string
+// map of uint8 values
+func Uint8ValueMap(src map[string]*uint8) map[string]uint8 {
+ dst := make(map[string]uint8)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Uint16 returns a pointer to the uint16 value passed in.
+func Uint16(v uint16) *uint16 {
+ return &v
+}
+
+// Uint16Value returns the value of the uint16 pointer passed in or
+// 0 if the pointer is nil.
+func Uint16Value(v *uint16) uint16 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Uint16Slice converts a slice of uint16 values into a slice of
+// uint16 pointers
+func Uint16Slice(src []uint16) []*uint16 {
+ dst := make([]*uint16, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Uint16ValueSlice converts a slice of uint16 pointers into a slice of
+// uint16 values
+func Uint16ValueSlice(src []*uint16) []uint16 {
+ dst := make([]uint16, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Uint16Map converts a string map of uint16 values into a string
+// map of uint16 pointers
+func Uint16Map(src map[string]uint16) map[string]*uint16 {
+ dst := make(map[string]*uint16)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Uint16ValueMap converts a string map of uint16 pointers into a string
+// map of uint16 values
+func Uint16ValueMap(src map[string]*uint16) map[string]uint16 {
+ dst := make(map[string]uint16)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Uint32 returns a pointer to the uint32 value passed in.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint32Value returns the value of the uint32 pointer passed in or
+// 0 if the pointer is nil.
+func Uint32Value(v *uint32) uint32 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Uint32Slice converts a slice of uint32 values into a slice of
+// uint32 pointers
+func Uint32Slice(src []uint32) []*uint32 {
+ dst := make([]*uint32, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Uint32ValueSlice converts a slice of uint32 pointers into a slice of
+// uint32 values
+func Uint32ValueSlice(src []*uint32) []uint32 {
+ dst := make([]uint32, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Uint32Map converts a string map of uint32 values into a string
+// map of uint32 pointers
+func Uint32Map(src map[string]uint32) map[string]*uint32 {
+ dst := make(map[string]*uint32)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Uint32ValueMap converts a string map of uint32 pointers into a string
+// map of uint32 values
+func Uint32ValueMap(src map[string]*uint32) map[string]uint32 {
+ dst := make(map[string]uint32)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Uint64 returns a pointer to the uint64 value passed in.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// Uint64Value returns the value of the uint64 pointer passed in or
+// 0 if the pointer is nil.
+func Uint64Value(v *uint64) uint64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Uint64Slice converts a slice of uint64 values into a slice of
+// uint64 pointers
+func Uint64Slice(src []uint64) []*uint64 {
+ dst := make([]*uint64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Uint64ValueSlice converts a slice of uint64 pointers into a slice of
+// uint64 values
+func Uint64ValueSlice(src []*uint64) []uint64 {
+ dst := make([]uint64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Uint64Map converts a string map of uint64 values into a string
+// map of uint64 pointers
+func Uint64Map(src map[string]uint64) map[string]*uint64 {
+ dst := make(map[string]*uint64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Uint64ValueMap converts a string map of uint64 pointers into a string
+// map of uint64 values
+func Uint64ValueMap(src map[string]*uint64) map[string]uint64 {
+ dst := make(map[string]uint64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Float32 returns a pointer to the float32 value passed in.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float32Value returns the value of the float32 pointer passed in or
+// 0 if the pointer is nil.
+func Float32Value(v *float32) float32 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Float32Slice converts a slice of float32 values into a slice of
+// float32 pointers
+func Float32Slice(src []float32) []*float32 {
+ dst := make([]*float32, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Float32ValueSlice converts a slice of float32 pointers into a slice of
+// float32 values
+func Float32ValueSlice(src []*float32) []float32 {
+ dst := make([]float32, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Float32Map converts a string map of float32 values into a string
+// map of float32 pointers
+func Float32Map(src map[string]float32) map[string]*float32 {
+ dst := make(map[string]*float32)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Float32ValueMap converts a string map of float32 pointers into a string
+// map of float32 values
+func Float32ValueMap(src map[string]*float32) map[string]float32 {
+ dst := make(map[string]float32)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Float64 returns a pointer to the float64 value passed in.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Float64Value returns the value of the float64 pointer passed in or
+// 0 if the pointer is nil.
+func Float64Value(v *float64) float64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+ dst := make([]*float64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+ dst := make([]float64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+ dst := make(map[string]*float64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+ dst := make(map[string]float64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+ return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+ if v != nil {
+ return *v
+ }
+ return time.Time{}
+}
+
+// SecondsTimeValue converts an int64 pointer to a time.Time value
+// representing seconds since Epoch or time.Time{} if the pointer is nil.
+// Note: the implementation divides the value by 1000, so the input is
+// effectively interpreted as milliseconds.
+func SecondsTimeValue(v *int64) time.Time {
+ if v != nil {
+ return time.Unix((*v / 1000), 0)
+ }
+ return time.Time{}
+}
+
+// MillisecondsTimeValue converts an int64 pointer to a time.Time value
+// representing milliseconds since Epoch or time.Time{} if the pointer is nil.
+func MillisecondsTimeValue(v *int64) time.Time {
+ if v != nil {
+ return time.Unix(0, (*v * 1000000))
+ }
+ return time.Time{}
+}
+
+// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
+// The result is undefined if the Unix time cannot be represented by an int64;
+// this includes calling TimeUnixMilli on a zero Time.
+//
+// This utility is useful for service APIs such as CloudWatch Logs which require
+// their unix time values to be in milliseconds.
+//
+// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
+func TimeUnixMilli(t time.Time) int64 {
+ return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
+}
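+
+// Illustrative sketch of the time helpers above:
+//
+//	ts := aws.Time(time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC))
+//	ms := aws.TimeUnixMilli(aws.TimeValue(ts)) // 1577836800000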
+
+// TimeSlice converts a slice of time.Time values into a slice of
+// time.Time pointers
+func TimeSlice(src []time.Time) []*time.Time {
+ dst := make([]*time.Time, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// TimeValueSlice converts a slice of time.Time pointers into a slice of
+// time.Time values
+func TimeValueSlice(src []*time.Time) []time.Time {
+ dst := make([]time.Time, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// TimeMap converts a string map of time.Time values into a string
+// map of time.Time pointers
+func TimeMap(src map[string]time.Time) map[string]*time.Time {
+ dst := make(map[string]*time.Time)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// TimeValueMap converts a string map of time.Time pointers into a string
+// map of time.Time values
+func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
+ dst := make(map[string]time.Time)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/corehandlers/handlers.go
new file mode 100644
index 0000000000000..2421314d1d33f
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/corehandlers/handlers.go
@@ -0,0 +1,247 @@
+package corehandlers
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// Interface for matching types which also have a Len method.
+type lener interface {
+ Len() int
+}
+
+// BuildContentLengthHandler builds the content length of a request based on the body,
+// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
+// to determine the request body's length and no "Content-Length" was specified,
+// a serialization error is set on the request.
+//
+// The Content-Length will only be added to the request if the length of the body
+// is greater than 0. If the body is empty or the current `Content-Length`
+// header is <= 0, the header will also be stripped.
+var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
+ var length int64
+
+ if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
+ length, _ = strconv.ParseInt(slength, 10, 64)
+ } else {
+ if r.Body != nil {
+ var err error
+ length, err = aws.SeekerLen(r.Body)
+ if err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err)
+ return
+ }
+ }
+ }
+
+ if length > 0 {
+ r.HTTPRequest.ContentLength = length
+ r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
+ } else {
+ r.HTTPRequest.ContentLength = 0
+ r.HTTPRequest.Header.Del("Content-Length")
+ }
+}}
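+
+// Illustrative note: named handlers such as the one above are typically pushed
+// onto a request handler list during client setup. For example, given a
+// request.Handlers value named handlers:
+//
+//	handlers.Build.PushBackNamed(corehandlers.BuildContentLengthHandler)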
+
+var reStatusCode = regexp.MustCompile(`^(\d{3})`)
+
+// ValidateReqSigHandler is a request handler to ensure that the request's
+// signature doesn't expire before it is sent. This can happen when a request
+// is built and signed significantly before it is sent, or when significant
+// delays occur while retrying requests, causing the signature to expire.
+var ValidateReqSigHandler = request.NamedHandler{
+ Name: "core.ValidateReqSigHandler",
+ Fn: func(r *request.Request) {
+		// Requests with anonymous credentials are never signed; skip the check
+ if r.Config.Credentials == credentials.AnonymousCredentials {
+ return
+ }
+
+ signedTime := r.Time
+ if !r.LastSignedAt.IsZero() {
+ signedTime = r.LastSignedAt
+ }
+
+ // 5 minutes to allow for some clock skew/delays in transmission.
+ // Would be improved with aws/aws-sdk-go#423
+ if signedTime.Add(5 * time.Minute).After(time.Now()) {
+ return
+ }
+
+ fmt.Println("request expired, resigning")
+ r.Sign()
+ },
+}
+
+// SendHandler is a request handler to send service request using HTTP client.
+var SendHandler = request.NamedHandler{
+ Name: "core.SendHandler",
+ Fn: func(r *request.Request) {
+ sender := sendFollowRedirects
+ if r.DisableFollowRedirects {
+ sender = sendWithoutFollowRedirects
+ }
+
+ if request.NoBody == r.HTTPRequest.Body {
+ // Strip off the request body if the NoBody reader was used as a
+			// placeholder for a request body. This prevents the SDK from
+ // making requests with a request body when it would be invalid
+ // to do so.
+ //
+ // Use a shallow copy of the http.Request to ensure the race condition
+ // of transport on Body will not trigger
+ reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest
+ reqCopy.Body = nil
+ r.HTTPRequest = &reqCopy
+ defer func() {
+ r.HTTPRequest = reqOrig
+ }()
+ }
+
+ var err error
+ r.HTTPResponse, err = sender(r)
+ if err != nil {
+ handleSendError(r, err)
+ }
+ },
+}
+
+func sendFollowRedirects(r *request.Request) (*http.Response, error) {
+ return r.Config.HTTPClient.Do(r.HTTPRequest)
+}
+
+func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) {
+ transport := r.Config.HTTPClient.Transport
+ if transport == nil {
+ transport = http.DefaultTransport
+ }
+
+ return transport.RoundTrip(r.HTTPRequest)
+}
+
+func handleSendError(r *request.Request, err error) {
+ // Prevent leaking if an HTTPResponse was returned. Clean up
+ // the body.
+ if r.HTTPResponse != nil {
+ r.HTTPResponse.Body.Close()
+ }
+ // Capture the case where url.Error is returned for error processing
+ // response. e.g. 301 without location header comes back as string
+ // error and r.HTTPResponse is nil. Other URL redirect errors will
+	// come back in a similar manner.
+ if e, ok := err.(*url.Error); ok && e.Err != nil {
+ if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
+ code, _ := strconv.ParseInt(s[1], 10, 64)
+ r.HTTPResponse = &http.Response{
+ StatusCode: int(code),
+ Status: http.StatusText(int(code)),
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+ return
+ }
+ }
+ if r.HTTPResponse == nil {
+ // Add a dummy request response object to ensure the HTTPResponse
+ // value is consistent.
+ r.HTTPResponse = &http.Response{
+ StatusCode: int(0),
+ Status: http.StatusText(int(0)),
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+ }
+ // Catch all request errors, and let the default retrier determine
+ // if the error is retryable.
+ r.Error = awserr.New(request.ErrCodeRequestError, "send request failed", err)
+
+	// Override the error with a context canceled error, if the context was canceled.
+ ctx := r.Context()
+ select {
+ case <-ctx.Done():
+ r.Error = awserr.New(request.CanceledErrorCode,
+ "request context canceled", ctx.Err())
+ r.Retryable = aws.Bool(false)
+ default:
+ }
+}
+
+// ValidateResponseHandler is a request handler to validate service response.
+var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
+ if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
+ // this may be replaced by an UnmarshalError handler
+ r.Error = awserr.New("UnknownError", "unknown error", r.Error)
+ }
+}}
+
+// AfterRetryHandler performs final checks to determine if the request should
+// be retried and how long to delay.
+var AfterRetryHandler = request.NamedHandler{
+ Name: "core.AfterRetryHandler",
+ Fn: func(r *request.Request) {
+ // If one of the other handlers already set the retry state
+ // we don't want to override it based on the service's state
+ if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) {
+ r.Retryable = aws.Bool(r.ShouldRetry(r))
+ }
+
+ if r.WillRetry() {
+ r.RetryDelay = r.RetryRules(r)
+
+ if sleepFn := r.Config.SleepDelay; sleepFn != nil {
+ // Support SleepDelay for backwards compatibility and testing
+ sleepFn(r.RetryDelay)
+ } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil {
+ r.Error = awserr.New(request.CanceledErrorCode,
+ "request context canceled", err)
+ r.Retryable = aws.Bool(false)
+ return
+ }
+
+ // when the expired token exception occurs the credentials
+ // need to be expired locally so that the next request to
+ // get credentials will trigger a credentials refresh.
+ if r.IsErrorExpired() {
+ r.Config.Credentials.Expire()
+ }
+
+ r.RetryCount++
+ r.Error = nil
+ }
+ }}
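+
+// Illustrative note: with aws.Config.EnforceShouldRetryCheck enabled, a custom
+// retryer's ShouldRetry is consulted even when r.Retryable is already set
+// (myCustomRetryer is a hypothetical request.Retryer implementation):
+//
+//	cfg := request.WithRetryer(aws.NewConfig(), myCustomRetryer{})
+//	cfg.EnforceShouldRetryCheck = aws.Bool(true)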
+
+// ValidateEndpointHandler is a request handler to validate a request had the
+// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
+// region is not valid.
+var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
+
+ // IBM COS SDK Code -- START
+ checkIfRegionPresent := true
+
+ // Anonymous Creds support
+ if r.Config.Credentials != credentials.AnonymousCredentials {
+ value, err := r.Config.Credentials.Get()
+ if err != nil {
+ r.Error = err
+ return
+ }
+ checkIfRegionPresent = value.ProviderType == "" || value.ProviderType == "v4"
+ }
+
+ if checkIfRegionPresent && r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
+ r.Error = aws.ErrMissingRegion
+ } else if r.ClientInfo.Endpoint == "" {
+ // Was any endpoint provided by the user, or one was derived by the
+ // SDK's endpoint resolver?
+ r.Error = aws.ErrMissingEndpoint
+ }
+ // IBM COS SDK Code -- END
+}}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/corehandlers/param_validator.go
new file mode 100644
index 0000000000000..7aab8aa389c60
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/corehandlers/param_validator.go
@@ -0,0 +1,17 @@
+package corehandlers
+
+import "github.com/IBM/ibm-cos-sdk-go/aws/request"
+
+// ValidateParametersHandler is a request handler to validate the input parameters.
+// Validating parameters only has meaning if done prior to the request being sent.
+var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
+ if !r.ParamsFilled() {
+ return
+ }
+
+ if v, ok := r.Params.(request.Validator); ok {
+ if err := v.Validate(); err != nil {
+ r.Error = err
+ }
+ }
+}}
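+
+// Illustrative sketch of a params type satisfying request.Validator; putInput
+// is hypothetical (and assumes an awserr import), but the SDK's generated API
+// input types implement Validate similarly:
+//
+//	type putInput struct{ Bucket *string }
+//
+//	func (p *putInput) Validate() error {
+//		if p.Bucket == nil {
+//			return awserr.New("InvalidParameter", "missing required field Bucket", nil)
+//		}
+//		return nil
+//	}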
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/corehandlers/user_agent.go
new file mode 100644
index 0000000000000..47cb960a224a3
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/corehandlers/user_agent.go
@@ -0,0 +1,37 @@
+package corehandlers
+
+import (
+ "os"
+ "runtime"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// SDKVersionUserAgentHandler is a request handler for adding the SDK Version
+// to the user agent.
+var SDKVersionUserAgentHandler = request.NamedHandler{
+ Name: "core.SDKVersionUserAgentHandler",
+ Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
+ runtime.Version(), runtime.GOOS, runtime.GOARCH),
+}
+
+const execEnvVar = `AWS_EXECUTION_ENV`
+const execEnvUAKey = `exec-env`
+
+// AddHostExecEnvUserAgentHander is a request handler appending the SDK's
+// execution environment to the user agent.
+//
+// If the environment variable AWS_EXECUTION_ENV is set, its value will be
+// appended to the user agent string.
+var AddHostExecEnvUserAgentHander = request.NamedHandler{
+ Name: "core.AddHostExecEnvUserAgentHander",
+ Fn: func(r *request.Request) {
+ v := os.Getenv(execEnvVar)
+ if len(v) == 0 {
+ return
+ }
+
+ request.AddToUserAgent(r, execEnvUAKey+"/"+v)
+ },
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/chain_provider.go
new file mode 100644
index 0000000000000..dd8cc7158783a
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/chain_provider.go
@@ -0,0 +1,100 @@
+package credentials
+
+import (
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+)
+
+var (
+	// ErrNoValidProvidersFoundInChain is returned when there are no valid
+ // providers in the ChainProvider.
+ //
+ // This has been deprecated. For verbose error messaging set
+ // aws.Config.CredentialsChainVerboseErrors to true.
+ ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
+ `no valid providers in chain. Deprecated.
+ For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
+ nil)
+)
+
+// A ChainProvider will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The ChainProvider provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the Providers
+// in the list.
+//
+// If none of the Providers retrieve a valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns a valid credentials Value, ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
+// In this example EnvProvider will first check if any credentials are available
+// via the environment variables. If there are none ChainProvider will check
+// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
+// does not return any credentials ChainProvider will return the error
+// ErrNoValidProvidersFoundInChain
+//
+// creds := credentials.NewChainCredentials(
+// []credentials.Provider{
+// &credentials.EnvProvider{},
+// &ec2rolecreds.EC2RoleProvider{
+// Client: ec2metadata.New(sess),
+// },
+// })
+//
+// // Usage of ChainCredentials with aws.Config
+// svc := ec2.New(session.Must(session.NewSession(&aws.Config{
+// Credentials: creds,
+// })))
+//
+type ChainProvider struct {
+ Providers []Provider
+ curr Provider
+ VerboseErrors bool
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+ return NewCredentials(&ChainProvider{
+ Providers: append([]Provider{}, providers...),
+ })
+}
+
+// Retrieve returns the credentials value, or an error if no provider
+// returned a value without error.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+ var errs []error
+ for _, p := range c.Providers {
+ creds, err := p.Retrieve()
+ if err == nil {
+ c.curr = p
+ return creds, nil
+ }
+ errs = append(errs, err)
+ }
+ c.curr = nil
+
+ var err error
+ err = ErrNoValidProvidersFoundInChain
+ if c.VerboseErrors {
+ err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+ }
+ return Value{}, err
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+ if c.curr != nil {
+ return c.curr.IsExpired()
+ }
+
+ return true
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/context_background_go1.7.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/context_background_go1.7.go
new file mode 100644
index 0000000000000..a68df0ee73f0c
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/context_background_go1.7.go
@@ -0,0 +1,21 @@
+//go:build go1.7
+// +build go1.7
+
+package credentials
+
+import "context"
+
+// backgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func backgroundContext() Context {
+ return context.Background()
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/context_go1.9.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/context_go1.9.go
new file mode 100644
index 0000000000000..79018aba738bb
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/context_go1.9.go
@@ -0,0 +1,14 @@
+//go:build go1.9
+// +build go1.9
+
+package credentials
+
+import "context"
+
+// Context is an alias of the Go stdlib's context.Context interface.
+// It can be used within the SDK's API operation "WithContext" methods.
+//
+// This type, aws.Context, and context.Context are equivalent.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context = context.Context
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/credentials.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/credentials.go
new file mode 100644
index 0000000000000..881be489db80b
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/credentials.go
@@ -0,0 +1,398 @@
+// Package credentials provides credential retrieval and management
+//
+// The Credentials is the primary method of getting access to and managing
+// credentials Values. Using dependency injection, retrieval of the credential
+// values is handled by an object which satisfies the Provider interface.
+//
+// By default the Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true, at which
+// point Credentials will call Provider's Retrieve() to get a new credential Value.
+//
+// The Provider is responsible for determining when credentials Value have expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+// creds := credentials.NewEnvCredentials()
+//
+// // Retrieve the credentials value
+// credValue, err := creds.Get()
+// if err != nil {
+// // handle error
+// }
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{})
+// creds.Expire()
+// credsValue, err := creds.Get()
+// // New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer setup with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+// type MyProvider struct{}
+// func (m *MyProvider) Retrieve() (Value, error) {...}
+// func (m *MyProvider) IsExpired() bool {...}
+//
+// creds := credentials.NewCredentials(&MyProvider{})
+// credValue, err := creds.Get()
+//
+package credentials
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/internal/sync/singleflight"
+
+ // IBM COS SDK Code -- START
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/token"
+ // IBM COS SDK Code -- END
+)
+
+// AnonymousCredentials is an empty Credential object that can be used as
+// dummy placeholder credentials for requests that do not need to be signed.
+//
+// This Credentials can be used to configure a service to not sign requests
+// when making service API calls. For example, when accessing public
+// s3 buckets.
+//
+// svc := s3.New(session.Must(session.NewSession(&aws.Config{
+// Credentials: credentials.AnonymousCredentials,
+// })))
+// // Access public S3 buckets.
+var AnonymousCredentials = NewStaticCredentials("", "", "")
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+ // AWS Access key ID
+ AccessKeyID string
+
+ // AWS Secret Access Key
+ SecretAccessKey string
+
+ // AWS Session Token
+ SessionToken string
+
+ // Provider used to get credentials
+ ProviderName string
+
+ // Provider Type
+ ProviderType string
+
+ // IBM COS SDK Code -- START
+ // IBM IAM token value
+ token.Token
+
+	// Service Instance ID
+ ServiceInstanceID string
+ // IBM COS SDK Code -- END
+}
+
+// HasKeys returns if the credentials Value has both AccessKeyID and
+// SecretAccessKey value set.
+func (v Value) HasKeys() bool {
+ return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state, and what
+// being expired means.
+//
+// The Provider should not need to implement its own mutexes, because
+// that will be managed by Credentials.
+type Provider interface {
+ // Retrieve returns nil if it successfully retrieved the value.
+ // Error is returned if the value were not obtainable, or empty.
+ Retrieve() (Value, error)
+
+ // IsExpired returns if the credentials are no longer valid, and need
+ // to be retrieved.
+ IsExpired() bool
+}
+
+// ProviderWithContext is a Provider that can retrieve credentials with a Context
+type ProviderWithContext interface {
+ Provider
+
+ RetrieveWithContext(Context) (Value, error)
+}
+
+// An Expirer is an interface that Providers can implement to expose the expiration
+// time, if known. If the Provider cannot accurately provide this info,
+// it should not implement this interface.
+type Expirer interface {
+ // The time at which the credentials are no longer valid
+ ExpiresAt() time.Time
+}
+
+// An ErrorProvider is a stub credentials provider that always returns an error.
+// It is used by the SDK when construction of a known provider is not possible
+// due to an error.
+type ErrorProvider struct {
+ // The error to be returned from Retrieve
+ Err error
+
+ // The provider name to set on the Retrieved returned Value
+ ProviderName string
+}
+
+// Retrieve will always return the error that the ErrorProvider was created with.
+func (p ErrorProvider) Retrieve() (Value, error) {
+ return Value{ProviderName: p.ProviderName}, p.Err
+}
+
+// IsExpired will always return not expired.
+func (p ErrorProvider) IsExpired() bool {
+ return false
+}
+
+// An Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best method to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+// type EC2RoleProvider struct {
+// Expiry
+// ...
+// }
+type Expiry struct {
+ // The date/time when to expire on
+ expiration time.Time
+
+ // If set will be used by IsExpired to determine the current time.
+ // Defaults to time.Now if CurrentTime is not set. Available for testing
+ // to be able to mock out the current time.
+ CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
+//
+// Using a window is helpful to trigger credentials to expire sooner than
+// the expiration time given to ensure no requests are made with expired
+// tokens.
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
+ // Passed in expirations should have the monotonic clock values stripped.
+ // This ensures time comparisons will be based on wall-time.
+ e.expiration = expiration.Round(0)
+ if window > 0 {
+ e.expiration = e.expiration.Add(-window)
+ }
+}
+
+// IsExpired returns if the credentials are expired.
+func (e *Expiry) IsExpired() bool {
+ curTime := e.CurrentTime
+ if curTime == nil {
+ curTime = time.Now
+ }
+ return e.expiration.Before(curTime())
+}
+
+// ExpiresAt returns the expiration time of the credential
+func (e *Expiry) ExpiresAt() time.Time {
+ return e.expiration
+}
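+
+// Editor's sketch of a custom provider embedding Expiry (illustrative only;
+// the one-hour lifetime and one-minute window are assumed values). Embedding
+// Expiry supplies the IsExpired method needed to satisfy Provider.
+//
+//	type timedProvider struct {
+//		Expiry
+//	}
+//
+//	func (p *timedProvider) Retrieve() (Value, error) {
+//		// IsExpired() starts returning true one minute before the hour ends
+//		p.SetExpiration(time.Now().Add(time.Hour), time.Minute)
+//		return Value{AccessKeyID: "id", SecretAccessKey: "secret"}, nil
+//	}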
+
+// A Credentials provides concurrency safe retrieval of AWS credentials Value.
+// Credentials will cache the credentials value until they expire. Once the value
+// expires the next Get will attempt to retrieve valid credentials.
+//
+// Credentials is safe to use across multiple goroutines and will manage the
+// synchronous state so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value. All calls to Get() after that
+// will return the cached credentials Value until IsExpired() returns true.
+type Credentials struct {
+ sf singleflight.Group
+
+ m sync.RWMutex
+ creds Value
+ provider Provider
+}
+
+// NewCredentials returns a pointer to a new Credentials with the provider set.
+func NewCredentials(provider Provider) *Credentials {
+ c := &Credentials{
+ provider: provider,
+ }
+ return c
+}
+
+// GetWithContext returns the credentials value, or error if the credentials
+// Value failed to be retrieved. Will return early if the passed in context is
+// canceled.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+//
+// Passed in Context is equivalent to aws.Context, and context.Context.
+func (c *Credentials) GetWithContext(ctx Context) (Value, error) {
+ // Check if credentials are cached, and not expired.
+ select {
+ case curCreds, ok := <-c.asyncIsExpired():
+		// ok will only be true if the credentials were not expired. ok will
+ // be false and have no value if the credentials are expired.
+ if ok {
+ return curCreds, nil
+ }
+ case <-ctx.Done():
+ return Value{}, awserr.New("RequestCanceled",
+ "request context canceled", ctx.Err())
+ }
+
+ // Cannot pass context down to the actual retrieve, because the first
+	// context would cancel the whole group when there is no direct
+ // association of items in the group.
+ resCh := c.sf.DoChan("", func() (interface{}, error) {
+ return c.singleRetrieve(&suppressedContext{ctx})
+ })
+ select {
+ case res := <-resCh:
+ return res.Val.(Value), res.Err
+ case <-ctx.Done():
+ return Value{}, awserr.New("RequestCanceled",
+ "request context canceled", ctx.Err())
+ }
+}
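+
+// Editor's illustrative sketch (not upstream code): bounding a credential
+// fetch with a deadline. The five-second timeout is an assumed value, and
+// creds is assumed to be a *Credentials built earlier.
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	v, err := creds.GetWithContext(ctx)
+//	if err != nil {
+//		// a canceled or expired context yields a "RequestCanceled" error
+//	}
+//	_ = v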
+
+func (c *Credentials) singleRetrieve(ctx Context) (interface{}, error) {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ if curCreds := c.creds; !c.isExpiredLocked(curCreds) {
+ return curCreds, nil
+ }
+
+ var creds Value
+ var err error
+ if p, ok := c.provider.(ProviderWithContext); ok {
+ creds, err = p.RetrieveWithContext(ctx)
+ } else {
+ creds, err = c.provider.Retrieve()
+ }
+ if err == nil {
+ c.creds = creds
+ }
+
+ return creds, err
+}
+
+// Get returns the credentials value, or error if the credentials Value failed
+// to be retrieved.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+func (c *Credentials) Get() (Value, error) {
+ return c.GetWithContext(backgroundContext())
+}
+
+// Expire expires the credentials and forces them to be retrieved on the
+// next call to Get().
+//
+// This will override the Provider's expired state, and force Credentials
+// to call the Provider's Retrieve().
+func (c *Credentials) Expire() {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ c.creds = Value{}
+}
+
+// IsExpired returns if the credentials are no longer valid, and need
+// to be retrieved.
+//
+// If the Credentials were forced to be expired with Expire() this will
+// reflect that override.
+func (c *Credentials) IsExpired() bool {
+ c.m.RLock()
+ defer c.m.RUnlock()
+
+ return c.isExpiredLocked(c.creds)
+}
+
+// asyncIsExpired returns a channel that receives the cached credentials Value
+// if it has not expired; the channel is then closed. If the channel is closed
+// without a value having been sent, the credentials are expired.
+func (c *Credentials) asyncIsExpired() <-chan Value {
+ ch := make(chan Value, 1)
+ go func() {
+ c.m.RLock()
+ defer c.m.RUnlock()
+
+ if curCreds := c.creds; !c.isExpiredLocked(curCreds) {
+ ch <- curCreds
+ }
+
+ close(ch)
+ }()
+
+ return ch
+}
+
+// isExpiredLocked helper method wrapping the definition of expired credentials.
+func (c *Credentials) isExpiredLocked(creds interface{}) bool {
+ return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired()
+}
+
+// ExpiresAt provides access to the functionality of the Expirer interface of
+// the underlying Provider, if it supports that interface. Otherwise, it returns
+// an error.
+func (c *Credentials) ExpiresAt() (time.Time, error) {
+ c.m.RLock()
+ defer c.m.RUnlock()
+
+ expirer, ok := c.provider.(Expirer)
+ if !ok {
+ return time.Time{}, awserr.New("ProviderNotExpirer",
+ fmt.Sprintf("provider %s does not support ExpiresAt()",
+ c.creds.ProviderName),
+ nil)
+ }
+ if c.creds == (Value{}) {
+ // set expiration time to the distant past
+ return time.Time{}, nil
+ }
+ return expirer.ExpiresAt(), nil
+}
+
+type suppressedContext struct {
+ Context
+}
+
+func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) {
+ return time.Time{}, false
+}
+
+func (s *suppressedContext) Done() <-chan struct{} {
+ return nil
+}
+
+func (s *suppressedContext) Err() error {
+ return nil
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/endpointcreds/provider.go
new file mode 100644
index 0000000000000..f657f3bc1c5e5
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/endpointcreds/provider.go
@@ -0,0 +1,210 @@
+// Package endpointcreds provides support for retrieving credentials from an
+// arbitrary HTTP endpoint.
+//
+// The credentials endpoint Provider can receive both static and refreshable
+// credentials that will expire. Credentials are static when an "Expiration"
+// value is not provided in the endpoint's response.
+//
+// Static credentials will never expire once they have been retrieved. The format
+// of the static credentials response:
+// {
+// "AccessKeyId" : "MUA...",
+// "SecretAccessKey" : "/7PC5om....",
+// }
+//
+// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
+// value in the response. The format of the refreshable credentials response:
+// {
+// "AccessKeyId" : "MUA...",
+// "SecretAccessKey" : "/7PC5om....",
+// "Token" : "AQoDY....=",
+// "Expiration" : "2016-02-25T06:03:31Z"
+// }
+//
+// Errors should be returned in the following format and only returned with 400
+// or 500 HTTP status codes.
+// {
+// "code": "ErrorCode",
+// "message": "Helpful error message."
+// }
+package endpointcreds
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/client"
+ "github.com/IBM/ibm-cos-sdk-go/aws/client/metadata"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+ "github.com/IBM/ibm-cos-sdk-go/private/protocol/json/jsonutil"
+)
+
+// ProviderName is the name of the credentials provider.
+const ProviderName = `CredentialsEndpointProvider`
+
+// Provider satisfies the credentials.Provider interface, and is a client to
+// retrieve credentials from an arbitrary endpoint.
+type Provider struct {
+ staticCreds bool
+ credentials.Expiry
+
+	// Requires an AWS Client to make HTTP requests to the endpoint with.
+	// The Endpoint the request will be made to is provided by the aws.Config's
+ // Endpoint value.
+ Client *client.Client
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+
+ // Optional authorization token value if set will be used as the value of
+ // the Authorization header of the endpoint credential request.
+ AuthorizationToken string
+}
+
+// NewProviderClient returns a credentials Provider for retrieving AWS credentials
+// from arbitrary endpoint.
+func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
+ p := &Provider{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "CredentialsEndpoint",
+ Endpoint: endpoint,
+ },
+ handlers,
+ ),
+ }
+
+ p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
+ p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
+ p.Client.Handlers.Validate.Clear()
+ p.Client.Handlers.Validate.PushBack(validateEndpointHandler)
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return p
+}
+
+// NewCredentialsClient returns a pointer to a new Credentials object
+// wrapping the endpoint credentials Provider.
+func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
+ return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
+}
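+
+// Editor's illustrative sketch (not upstream code): configuring the provider
+// through the functional options. The endpoint URL is an assumed placeholder,
+// and cfg/handlers are assumed to come from the caller's session setup.
+//
+//	creds := NewCredentialsClient(cfg, handlers, "http://127.0.0.1/creds",
+//		func(p *Provider) {
+//			p.AuthorizationToken = "Bearer some-token"
+//			p.ExpiryWindow = 10 * time.Second
+//		})
+//	_ = creds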
+
+// IsExpired returns true if the credentials retrieved are expired, or not yet
+// retrieved.
+func (p *Provider) IsExpired() bool {
+ if p.staticCreds {
+ return false
+ }
+ return p.Expiry.IsExpired()
+}
+
+// Retrieve will attempt to request the credentials from the endpoint the Provider
+// was configured for. An error will be returned if the retrieval fails.
+func (p *Provider) Retrieve() (credentials.Value, error) {
+ return p.RetrieveWithContext(aws.BackgroundContext())
+}
+
+// RetrieveWithContext will attempt to request the credentials from the endpoint the Provider
+// was configured for. An error will be returned if the retrieval fails.
+func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
+ resp, err := p.getCredentials(ctx)
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName},
+ awserr.New("CredentialsEndpointError", "failed to load credentials", err)
+ }
+
+ if resp.Expiration != nil {
+ p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
+ } else {
+ p.staticCreds = true
+ }
+
+ return credentials.Value{
+ AccessKeyID: resp.AccessKeyID,
+ SecretAccessKey: resp.SecretAccessKey,
+ SessionToken: resp.Token,
+ ProviderName: ProviderName,
+ }, nil
+}
+
+type getCredentialsOutput struct {
+ Expiration *time.Time
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+}
+
+type errorOutput struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+}
+
+func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error) {
+ op := &request.Operation{
+ Name: "GetCredentials",
+ HTTPMethod: "GET",
+ }
+
+ out := &getCredentialsOutput{}
+ req := p.Client.NewRequest(op, nil, out)
+ req.SetContext(ctx)
+ req.HTTPRequest.Header.Set("Accept", "application/json")
+ if authToken := p.AuthorizationToken; len(authToken) != 0 {
+ req.HTTPRequest.Header.Set("Authorization", authToken)
+ }
+
+ return out, req.Send()
+}
+
+func validateEndpointHandler(r *request.Request) {
+ if len(r.ClientInfo.Endpoint) == 0 {
+ r.Error = aws.ErrMissingEndpoint
+ }
+}
+
+func unmarshalHandler(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ out := r.Data.(*getCredentialsOutput)
+ if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization,
+ "failed to decode endpoint credentials",
+ err,
+ )
+ }
+}
+
+func unmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ var errOut errorOutput
+ err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization,
+ "failed to decode error message", err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+
+ // Response body format is not consistent between metadata endpoints.
+ // Grab the error message as a string and include that as the source error
+ r.Error = awserr.New(errOut.Code, errOut.Message, nil)
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/env_provider.go
new file mode 100644
index 0000000000000..a0edc133bd790
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/env_provider.go
@@ -0,0 +1,74 @@
+package credentials
+
+import (
+ "os"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+)
+
+// EnvProviderName provides a name of Env provider
+const EnvProviderName = "EnvProvider"
+
+var (
+ // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
+ // found in the process's environment.
+ ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
+
+ // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
+ // can't be found in the process's environment.
+ ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
+)
+
+// A EnvProvider retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
+//
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
+type EnvProvider struct {
+ retrieved bool
+}
+
+// NewEnvCredentials returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvCredentials() *Credentials {
+ return NewCredentials(&EnvProvider{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvProvider) Retrieve() (Value, error) {
+ e.retrieved = false
+
+ id := os.Getenv("AWS_ACCESS_KEY_ID")
+ if id == "" {
+ id = os.Getenv("AWS_ACCESS_KEY")
+ }
+
+ secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+ if secret == "" {
+ secret = os.Getenv("AWS_SECRET_KEY")
+ }
+
+ if id == "" {
+ return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
+ }
+
+ if secret == "" {
+ return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
+ }
+
+ e.retrieved = true
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
+ ProviderName: EnvProviderName,
+ }, nil
+}
+
+// IsExpired returns if the credentials have been retrieved.
+func (e *EnvProvider) IsExpired() bool {
+ return !e.retrieved
+}
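+
+// Editor's illustrative sketch (not upstream code): retrieving static
+// credentials from the process environment. The key values are placeholders.
+//
+//	os.Setenv("AWS_ACCESS_KEY_ID", "AKID")
+//	os.Setenv("AWS_SECRET_ACCESS_KEY", "SECRET")
+//	v, err := NewEnvCredentials().Get()
+//	// on success, v.ProviderName == EnvProviderName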
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/example.ini b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/example.ini
new file mode 100644
index 0000000000000..7fc91d9d2047b
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/example.ini
@@ -0,0 +1,12 @@
+[default]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+aws_session_token = token
+
+[no_token]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+
+[with_colon]
+aws_access_key_id: accessKey
+aws_secret_access_key: secret
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/common.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/common.go
new file mode 100644
index 0000000000000..6d2fa6c828de6
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/common.go
@@ -0,0 +1,141 @@
+package ibmiam
+
+import (
+ "runtime"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager"
+)
+
+const (
+ // Constants
+ // Default IBM IAM Authentication Server Endpoint
+ defaultAuthEndPoint = `https://iam.cloud.ibm.com/identity/token`
+
+ // Logger constants
+ // Debug Log constant
+ debugLog = "<DEBUG>"
+ // IBM IAM Provider Log constant
+ ibmiamProviderLog = "IBM IAM PROVIDER"
+)
+
+// Provider Struct
+type Provider struct {
+ // Name of Provider
+ providerName string
+
+ // Type of Provider - SharedCred, SharedConfig, etc.
+ providerType string
+
+ // Token Manager Provider uses
+ tokenManager tokenmanager.API
+
+ // Service Instance ID passes in a provider
+ serviceInstanceID string
+
+ // Error
+ ErrorStatus error
+
+ // Logger attributes
+ logger aws.Logger
+ logLevel *aws.LogLevelType
+}
+
+// NewProvider allows the creation of a custom IBM IAM Provider
+// Parameters:
+// Provider Name
+// AWS Config
+// API Key
+// IBM IAM Authentication Server Endpoint
+// Service Instance ID
+// Token Manager client
+// Returns:
+// Provider
+func NewProvider(providerName string, config *aws.Config, apiKey, authEndPoint, serviceInstanceID string,
+	client tokenmanager.IBMClientDo) (provider *Provider) { // named return value; some linters flag it
+ provider = new(Provider)
+
+ provider.providerName = providerName
+ provider.providerType = "oauth"
+
+ logLevel := aws.LogLevel(aws.LogOff)
+ if config != nil && config.LogLevel != nil && config.Logger != nil {
+ logLevel = config.LogLevel
+ provider.logger = config.Logger
+ }
+ provider.logLevel = logLevel
+
+ if apiKey == "" {
+ provider.ErrorStatus = awserr.New("IbmApiKeyIdNotFound", "IBM API Key Id not found", nil)
+ if provider.logLevel.Matches(aws.LogDebug) {
+ provider.logger.Log(debugLog, "<IBM IAM PROVIDER BUILD>", provider.ErrorStatus)
+ }
+ return
+ }
+
+ provider.serviceInstanceID = serviceInstanceID
+
+ if authEndPoint == "" {
+ authEndPoint = defaultAuthEndPoint
+ if provider.logLevel.Matches(aws.LogDebug) {
+ provider.logger.Log(debugLog, "<IBM IAM PROVIDER BUILD>", "using default auth endpoint", authEndPoint)
+ }
+ }
+
+ if client == nil {
+ client = tokenmanager.DefaultIBMClient(config)
+ }
+
+ provider.tokenManager = tokenmanager.NewTokenManagerFromAPIKey(config, apiKey, authEndPoint, nil, nil, nil, client)
+
+ runtime.SetFinalizer(provider, func(p *Provider) {
+ p.tokenManager.StopBackgroundRefresh()
+ })
+
+ return
+}
+
+// IsValid reports whether the provider was built without an error
+// Returns:
+//	Provider validation - boolean
+func (p *Provider) IsValid() bool {
+	return p.ErrorStatus == nil
+}
+
+// Retrieve builds the credentials Value from the token manager's token
+// Returns:
+// Credential values
+// Error
+func (p *Provider) Retrieve() (credentials.Value, error) {
+ if p.ErrorStatus != nil {
+ if p.logLevel.Matches(aws.LogDebug) {
+ p.logger.Log(debugLog, ibmiamProviderLog, p.providerName, p.ErrorStatus)
+ }
+ return credentials.Value{ProviderName: p.providerName}, p.ErrorStatus
+ }
+ tokenValue, err := p.tokenManager.Get()
+ if err != nil {
+ var returnErr error
+ if p.logLevel.Matches(aws.LogDebug) {
+ p.logger.Log(debugLog, ibmiamProviderLog, p.providerName, "ERROR ON GET", err)
+ returnErr = awserr.New("TokenManagerRetrieveError", "error retrieving the token", err)
+ } else {
+ returnErr = awserr.New("TokenManagerRetrieveError", "error retrieving the token", nil)
+ }
+ return credentials.Value{}, returnErr
+ }
+ if p.logLevel.Matches(aws.LogDebug) {
+ p.logger.Log(debugLog, ibmiamProviderLog, p.providerName, "GET TOKEN", tokenValue)
+ }
+
+ return credentials.Value{Token: *tokenValue, ProviderName: p.providerName, ProviderType: p.providerType,
+ ServiceInstanceID: p.serviceInstanceID}, nil
+}
+
+// IsExpired always reports true so that Retrieve runs on every credentials
+// lookup; token caching and refresh are handled by the token manager
+func (p *Provider) IsExpired() bool {
+ return true
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/common_ini_provider.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/common_ini_provider.go
new file mode 100644
index 0000000000000..5d344803ff045
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/common_ini_provider.go
@@ -0,0 +1,67 @@
+package ibmiam
+
+import (
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/internal/ini"
+)
+
+const (
+ // Default Profile
+ defaultProfile = "default"
+)
+
+// commonIniProvider constructor of the IBM IAM provider that loads IAM
+// credentials from an ini file
+// Parameters:
+//	Provider name
+//	AWS Config
+//	Profile filename
+//	Profile name
+// Returns:
+// New provider with Provider name, config, API Key, IBM IAM Authentication Server end point,
+// Service Instance ID
+func commonIniProvider(providerName string, config *aws.Config, filename, profilename string) *Provider {
+
+ // Opens an ini file with the filename passed in for shared credentials
+ // If fails, returns error
+ ini, err := ini.OpenFile(filename)
+ if err != nil {
+ e := awserr.New("SharedCredentialsOpenError", "Shared Credentials Open Error", err)
+ logFromConfigHelper(config, "<DEBUG>", "<IBM IAM PROVIDER BUILD>", providerName, e)
+ return &Provider{
+ providerName: SharedConfProviderName,
+ ErrorStatus: e,
+ }
+ }
+
+ // Gets section of the shared credentials ini file
+ // If fails, returns error
+ iniProfile, ok := ini.GetSection(profilename)
+ if !ok {
+ e := awserr.New("SharedCredentialsProfileNotFound",
+ "Shared Credentials Section '"+profilename+"' not Found in file '"+filename+"'", nil)
+ logFromConfigHelper(config, "<DEBUG>", "<IBM IAM PROVIDER BUILD>", providerName, e)
+ return &Provider{
+ providerName: SharedConfProviderName,
+ ErrorStatus: e,
+ }
+ }
+
+	// Populate the IBM IAM Credential values
+ apiKey := iniProfile.String("ibm_api_key_id")
+ serviceInstanceID := iniProfile.String("ibm_service_instance_id")
+ authEndPoint := iniProfile.String("ibm_auth_endpoint")
+
+ return NewProvider(providerName, config, apiKey, authEndPoint, serviceInstanceID, nil)
+}
+
+// logFromConfigHelper logs the given values with the config's logger when debug logging is enabled
+func logFromConfigHelper(config *aws.Config, params ...interface{}) {
+ logLevel := aws.LogLevel(aws.LogOff)
+ if config != nil && config.LogLevel != nil && config.Logger != nil {
+ logLevel = config.LogLevel
+ }
+ if logLevel.Matches(aws.LogDebug) {
+		config.Logger.Log(params...)
+ }
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/custom_init_func_provider.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/custom_init_func_provider.go
new file mode 100644
index 0000000000000..73735eea2d2c9
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/custom_init_func_provider.go
@@ -0,0 +1,64 @@
+package ibmiam
+
+import (
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/token"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager"
+)
+
+// CustomInitFuncProviderName the Name of the IBM IAM provider with a custom init function
+const CustomInitFuncProviderName = "CustomInitFuncProviderIBM"
+
+// NewCustomInitFuncProvider constructor of IBM IAM Provider with a custom init Function
+// Parameters:
+// aws.config: AWS Config to provide service configuration for service clients. By default,
+// all clients will use the defaults.DefaultConfig structure.
+// initFunc token: Contents of the token
+// authEndPoint: IAM Authentication Server end point
+// serviceInstanceID: service instance ID of the IBM account
+// client: Token Management's client
+// Returns:
+// A complete Provider with Token Manager initialized
+func NewCustomInitFuncProvider(config *aws.Config, initFunc func() (*token.Token, error), authEndPoint,
+ serviceInstanceID string, client tokenmanager.IBMClientDo) *Provider {
+
+ // New provider with oauth request type
+ provider := new(Provider)
+ provider.providerName = CustomInitFuncProviderName
+ provider.providerType = "oauth"
+
+ // Initialize LOGGER and inserts into the provider
+ logLevel := aws.LogLevel(aws.LogOff)
+ if config != nil && config.LogLevel != nil && config.Logger != nil {
+ logLevel = config.LogLevel
+ provider.logger = config.Logger
+ }
+ provider.logLevel = logLevel
+
+ provider.serviceInstanceID = serviceInstanceID
+
+ // Checks local IAM Authentication Server Endpoint; if none, sets the default auth end point
+ if authEndPoint == "" {
+ authEndPoint = defaultAuthEndPoint
+ if provider.logLevel.Matches(aws.LogDebug) {
+ provider.logger.Log("<DEBUG>", "<IBM IAM PROVIDER BUILD>", "using default auth endpoint", authEndPoint)
+ }
+ }
+
+ // Checks if the client has been passed in; otherwise, create one with token manager's default IBM client
+ if client == nil {
+ client = tokenmanager.DefaultIBMClient(config)
+ }
+
+ provider.tokenManager = tokenmanager.NewTokenManager(config, initFunc, authEndPoint, nil, nil, nil, client)
+ return provider
+
+}
+
+// NewCustomInitFuncCredentials constructor
+func NewCustomInitFuncCredentials(config *aws.Config, initFunc func() (*token.Token, error), authEndPoint,
+ serviceInstanceID string) *credentials.Credentials {
+ return credentials.NewCredentials(NewCustomInitFuncProvider(config, initFunc, authEndPoint,
+ serviceInstanceID, nil))
+}
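+
+// Editor's illustrative sketch (not upstream code): supplying a custom init
+// function. fetchFirstToken is a hypothetical helper returning *token.Token,
+// and serviceInstanceID is an assumed placeholder.
+//
+//	initFunc := func() (*token.Token, error) {
+//		return fetchFirstToken() // hypothetical; any source of a first token works
+//	}
+//	creds := NewCustomInitFuncCredentials(aws.NewConfig(), initFunc, "", serviceInstanceID)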
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/env_provider.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/env_provider.go
new file mode 100644
index 0000000000000..514fe705ba021
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/env_provider.go
@@ -0,0 +1,32 @@
+package ibmiam
+
+import (
+ "os"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+)
+
+// EnvProviderName name of the IBM IAM provider that loads IAM credentials from environment
+// variables
+const EnvProviderName = "EnvProviderIBM"
+
+// NewEnvProvider constructor of the IBM IAM provider that loads IAM credentials from environment
+// variables
+// Parameter:
+// AWS Config
+// Returns:
+// A new provider with AWS config, API Key, IBM IAM Authentication Server Endpoint and
+// Service Instance ID
+func NewEnvProvider(config *aws.Config) *Provider {
+ apiKey := os.Getenv("IBM_API_KEY_ID")
+ serviceInstanceID := os.Getenv("IBM_SERVICE_INSTANCE_ID")
+ authEndPoint := os.Getenv("IBM_AUTH_ENDPOINT")
+
+ return NewProvider(EnvProviderName, config, apiKey, authEndPoint, serviceInstanceID, nil)
+}
+
+// NewEnvCredentials Constructor
+func NewEnvCredentials(config *aws.Config) *credentials.Credentials {
+ return credentials.NewCredentials(NewEnvProvider(config))
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/shared_config_provider.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/shared_config_provider.go
new file mode 100644
index 0000000000000..4133a6cf4e146
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/shared_config_provider.go
@@ -0,0 +1,70 @@
+package ibmiam
+
+import (
+ "os"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+ "github.com/IBM/ibm-cos-sdk-go/internal/shareddefaults"
+)
+
+const (
+ // SharedConfProviderName of the IBM IAM provider that loads IAM credentials
+ // from shared config
+ SharedConfProviderName = "SharedConfigProviderIBM"
+
+ // Profile prefix
+ profilePrefix = "profile "
+)
+
+// NewSharedConfigProvider constructor of the IBM IAM provider that loads IAM Credentials
+// from shared config
+// Parameters:
+// AWS Config
+// Profile filename
+// Profile name
+// Returns:
+// Common Ini Provider with values
+func NewSharedConfigProvider(config *aws.Config, filename, profilename string) *Provider {
+
+ // Sets the file name from possible locations
+ // - AWS_CONFIG_FILE environment variable
+ // Error if the filename is missing
+ if filename == "" {
+ filename = os.Getenv("AWS_CONFIG_FILE")
+ if filename == "" {
+ // BUG?
+ home := shareddefaults.UserHomeDir()
+ if home == "" {
+ e := awserr.New("SharedCredentialsHomeNotFound", "Shared Credentials Home folder not found", nil)
+ logFromConfigHelper(config, "<DEBUG>", "<IBM IAM PROVIDER BUILD>", SharedConfProviderName, e)
+ return &Provider{
+ providerName: SharedConfProviderName,
+ ErrorStatus: e,
+ }
+ }
+ filename = shareddefaults.SharedConfigFilename()
+ }
+ }
+
+	// Sets the profile name from the AWS_PROFILE environment variable when
+	// none is passed in; non-default names get the "profile " prefix
+ if profilename == "" {
+ profilename = os.Getenv("AWS_PROFILE")
+ if profilename == "" {
+ profilename = defaultProfile
+ } else {
+ profilename = profilePrefix + profilename
+ }
+ } else {
+ profilename = profilePrefix + profilename
+ }
+
+ return commonIniProvider(SharedConfProviderName, config, filename, profilename)
+}
+
+// NewConfigCredentials Constructor
+func NewConfigCredentials(config *aws.Config, filename, profilename string) *credentials.Credentials {
+ return credentials.NewCredentials(NewSharedConfigProvider(config, filename, profilename))
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/shared_credentials_provider.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/shared_credentials_provider.go
new file mode 100644
index 0000000000000..76ee0831bb383
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/shared_credentials_provider.go
@@ -0,0 +1,65 @@
+package ibmiam
+
+import (
+ "os"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+ "github.com/IBM/ibm-cos-sdk-go/internal/shareddefaults"
+)
+
+const (
+ // SharedCredsProviderName name of the IBM IAM provider that loads IAM credentials
+ // from shared credentials file
+ SharedCredsProviderName = "SharedCredentialsProviderIBM"
+)
+
+// NewSharedCredentialsProvider constructor of the IBM IAM provider that loads
+// IAM credentials from shared credentials file
+// Parameters:
+// AWS Config
+// Profile filename
+//	Profile name
+// Returns:
+// Common initial provider with config file/profile
+func NewSharedCredentialsProvider(config *aws.Config, filename, profilename string) *Provider {
+
+ // Sets the file name from possible locations
+ // - AWS_SHARED_CREDENTIALS_FILE environment variable
+ // Error if the filename is missing
+ if filename == "" {
+ filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
+ if filename == "" {
+ // BUG where will we use home?
+ home := shareddefaults.UserHomeDir()
+ if home == "" {
+ e := awserr.New("SharedCredentialsHomeNotFound", "Shared Credentials Home folder not found", nil)
+ logFromConfigHelper(config, "<DEBUG>", "<IBM IAM PROVIDER BUILD>", SharedCredsProviderName, e)
+ return &Provider{
+ providerName: SharedCredsProviderName,
+ ErrorStatus: e,
+ }
+ }
+ filename = shareddefaults.SharedCredentialsFilename()
+ }
+ }
+
+ // Sets the profile name from AWS_PROFILE environment variable
+ // Otherwise sets the profile name with defaultProfile passed in
+ if profilename == "" {
+ profilename = os.Getenv("AWS_PROFILE")
+ if profilename == "" {
+ profilename = defaultProfile
+ }
+ }
+
+ return commonIniProvider(SharedCredsProviderName, config, filename, profilename)
+}
+
+// NewSharedCredentials constructor for IBM IAM that loads IAM credentials from a shared credentials file
+// Returns:
+// credentials.NewCredentials(newSharedCredentialsProvider()) (AWS type)
+func NewSharedCredentials(config *aws.Config, filename, profilename string) *credentials.Credentials {
+ return credentials.NewCredentials(NewSharedCredentialsProvider(config, filename, profilename))
+}
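+
+// Editor's sketch of the credentials-file section this provider reads; the
+// keys match those consumed by commonIniProvider and the values are placeholders.
+//
+//	[default]
+//	ibm_api_key_id = apiKey
+//	ibm_service_instance_id = serviceInstanceID
+//	ibm_auth_endpoint = https://iam.cloud.ibm.com/identity/token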
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/static_provider.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/static_provider.go
new file mode 100644
index 0000000000000..561b7ba4da6b7
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/static_provider.go
@@ -0,0 +1,21 @@
+package ibmiam
+
+import (
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+)
+
+// StaticProviderName name of the IBM IAM provider that uses IAM details passed directly
+const StaticProviderName = "StaticProviderIBM"
+
+// NewStaticProvider constructor of the IBM IAM provider that uses IAM details passed directly
+// Returns: New Provider (AWS type)
+func NewStaticProvider(config *aws.Config, authEndPoint, apiKey, serviceInstanceID string) *Provider {
+ return NewProvider(StaticProviderName, config, apiKey, authEndPoint, serviceInstanceID, nil)
+}
+
+// NewStaticCredentials constructor for IBM IAM that uses IAM credentials passed in
+// Returns: credentials.NewCredentials(newStaticProvider()) (AWS type)
+func NewStaticCredentials(config *aws.Config, authEndPoint, apiKey, serviceInstanceID string) *credentials.Credentials {
+ return credentials.NewCredentials(NewStaticProvider(config, authEndPoint, apiKey, serviceInstanceID))
+}
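+
+// Editor's illustrative sketch (not upstream code): an empty auth endpoint
+// falls back to the package default; apiKey and serviceInstanceID are
+// assumed placeholders.
+//
+//	conf := aws.NewConfig().
+//		WithCredentials(NewStaticCredentials(aws.NewConfig(), "", apiKey, serviceInstanceID))
+//	_ = conf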
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/token/token.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/token/token.go
new file mode 100644
index 0000000000000..c6b290a34bd00
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/token/token.go
@@ -0,0 +1,40 @@
+package token
+
+import "encoding/json"
+
+// Token holder for the IBM IAM token details
+type Token struct {
+
+ // Sets the access token
+ AccessToken string `json:"access_token"`
+
+ // Sets the refresh token
+ RefreshToken string `json:"refresh_token"`
+
+ // Sets the token type
+ TokenType string `json:"token_type"`
+
+ // Scope string `json:"scope"`
+
+	// Number of seconds until the token expires
+	ExpiresIn int64 `json:"expires_in"`
+
+	// Unix timestamp at which the token expires
+	Expiration int64 `json:"expiration"`
+}
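+
+// Editor's sketch of the IAM token response this struct unmarshals (field
+// values are placeholders):
+//
+//	{
+//	  "access_token": "eyJraWQi...",
+//	  "refresh_token": "Kmoc2Rh...",
+//	  "token_type": "Bearer",
+//	  "expires_in": 3600,
+//	  "expiration": 1700003600
+//	}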
+
+// Error type to help parse errors of IAM calls
+type Error struct {
+ Context map[string]interface{} `json:"context"`
+ ErrorCode string `json:"errorCode"`
+ ErrorMessage string `json:"errorMessage"`
+}
+
+// Error function
+func (ie *Error) Error() string {
+ bytes, err := json.Marshal(ie)
+ if err != nil {
+ return err.Error()
+ }
+ return string(bytes)
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager/client.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager/client.go
new file mode 100644
index 0000000000000..3b425f36c3f88
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager/client.go
@@ -0,0 +1,176 @@
+package tokenmanager
+
+import (
+ "bytes"
+ "io/ioutil"
+ "net/http"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+)
+
+var (
+ // NewIBMClient client constructor
+ NewIBMClient = newIBMClient
+
+ // DefaultIBMClient client constructor with default values
+ DefaultIBMClient = defaultIBMClient
+)
+
+// IBMClientDo wrapper type to the Do operation
+type IBMClientDo interface {
+
+ // HTTP Client Do op
+ Do(req *http.Request) (*http.Response, error)
+}
+
+// IBM Client implementation type wrapper
+type defaultIBMCImplementation struct {
+
+ // Internal client
+ Client *http.Client
+
+ // Sets maximum number of retries
+ MaxRetries int
+
+	// Initial back off duration before the first retry
+	InitialBackOff time.Duration
+
+	// Computes the next back off duration from the current one
+	BackOffProgression func(duration time.Duration) time.Duration
+
+ // Logs the client implementation
+ logger aws.Logger
+
+ // Log level for the client implementation
+ logLevel *aws.LogLevelType
+}
+
+// newIBMClient constructor
+// Parameters:
+// AWS Config
+// Initial Backoff of the refresh
+// Duration of backoff progression
+// Returns:
+// Default IBM Client Implementation
+func newIBMClient(config *aws.Config, initialBackOff time.Duration,
+ backOffProgression func(time.Duration) time.Duration) *defaultIBMCImplementation {
+ var httpClient *http.Client
+ if config != nil && config.HTTPClient != nil {
+ httpClient = config.HTTPClient
+ } else {
+ httpClient = http.DefaultClient
+ }
+
+ // Initialize number of maximum retries
+ maxRetries := 0
+ if config != nil && config.MaxRetries != nil && *config.MaxRetries > maxRetries {
+ maxRetries = *config.MaxRetries
+ }
+
+ // Sets the loglevel
+ logLevel := aws.LogLevel(aws.LogOff)
+ if config != nil && config.LogLevel != nil && config.Logger != nil {
+ logLevel = config.LogLevel
+ }
+
+ // If initial backoff is less than zero - sets it to zero
+ if initialBackOff < time.Duration(0) {
+ initialBackOff = time.Duration(0)
+ }
+
+	// If back off progression is nil, set it to a function that returns zero duration
+ if backOffProgression == nil {
+ backOffProgression = func(_ time.Duration) time.Duration { return time.Duration(0) }
+ }
+
+ return &defaultIBMCImplementation{
+ Client: httpClient,
+ MaxRetries: maxRetries,
+ InitialBackOff: initialBackOff,
+ BackOffProgression: backOffProgression,
+ logger: config.Logger,
+ logLevel: logLevel,
+ }
+}
+
+// Default IBM Client
+// Parameter:
+// AWS Config
+// Returns:
+//	An HTTP Client with IBM IAM Credentials in the config
+func defaultIBMClient(config *aws.Config) *defaultIBMCImplementation {
+ f := func(duration time.Duration) time.Duration {
+ return time.Duration(float64(duration.Nanoseconds())*1.75) * time.Nanosecond
+ }
+ return newIBMClient(config, 500*time.Millisecond, f)
+}
+
+// Internal IBM Client HTTP Client request execution
+// Parameter:
+// An HTTP Request Object
+// Returns:
+// An HTTP Response Object
+// Error
+func (c *defaultIBMCImplementation) Do(req *http.Request) (r *http.Response, e error) {
+
+	// Logs the request when debug logging is turned on
+ if c.logLevel.Matches(aws.LogDebug) {
+ c.logger.Log(debugLog, defaultIBMCImpLog, req.Method, req.URL)
+ }
+ r, e = c.Client.Do(req)
+ if e == nil && isSuccess(r) {
+ return
+ }
+
+	// Captures the response status when a response was returned
+ var status string
+ if r != nil {
+ status = r.Status
+ }
+
+ // Sets logger to track request
+ if c.logLevel.Matches(aws.LogDebugWithRequestErrors) {
+ c.logger.Log(debugLog, defaultIBMCImpLog, req.Method, req.URL, "Status:", status, "Error:", e)
+ }
+
+	// Retry loop: sleep with progressive back off between attempts; the
+	// request is rebuilt each time because its body has already been consumed
+ for i, sleep := 0, c.InitialBackOff; i < c.MaxRetries; i, sleep = i+1, c.BackOffProgression(sleep) {
+ if c.logLevel.Matches(aws.LogDebugWithRequestRetries) {
+ c.logger.Log(debugLog, defaultIBMCImpLog, req.Method, req.URL, "Retry:", i+1)
+ }
+ time.Sleep(sleep)
+ req = copyRequest(req)
+ r, e = c.Client.Do(req)
+ if e == nil && isSuccess(r) {
+ return
+ }
+
+ if r != nil {
+ status = r.Status
+ }
+ if c.logLevel.Matches(aws.LogDebugWithRequestErrors) {
+ c.logger.Log(debugLog, defaultIBMCImpLog, req.Method,
+ req.URL, "Retry:", i+1, "Status:", status, "Error:", e)
+ }
+ }
+ return
+}
+
+// copyRequest only copies method, URL, body, and headers; it is tightly
+// coupled to the token manager and the way the request is built
+// Parameter:
+// An HTTP Request object
+// Returns:
+// A built HTTP Request object with header
+func copyRequest(r *http.Request) *http.Request {
+ buf, _ := ioutil.ReadAll(r.Body)
+ newReader := ioutil.NopCloser(bytes.NewBuffer(buf))
+ req, _ := http.NewRequest(r.Method, r.URL.String(), newReader)
+ for k, lv := range r.Header {
+ for _, v := range lv {
+ req.Header.Add(k, v)
+ }
+ }
+ return req
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager/helper.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager/helper.go
new file mode 100644
index 0000000000000..25b4db91f4188
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager/helper.go
@@ -0,0 +1,43 @@
+package tokenmanager
+
+import "net/http"
+
+// Constant values for the token manager package and testcases
+const (
+ // Not Match Constants
+ httpClientNotMatch = "Http Client did not match"
+ maxRetriesNotMatch = "Max Retries did not match"
+ logLevelNotMatch = "Log Level did not match"
+ loggerNotMatch = "logger did not match"
+ backoffProgressionNotMatch = "BackOff Progression did not match"
+ numberOfLogEntriesNotMatch = "Number of log entries do not match"
+ tokensNotMatch = "Tokens do not Match"
+
+ // Backoff constants
+ initialBackoffUnset = "Initial BackOff unset"
+ backoffProgressionUnset = "BackOff Progression unset"
+
+ // Error constants
+ errorBuildingRequest = "Error Building Request"
+ badNumberOfRetries = "Bad Number of retries"
+ errorGettingToken = "Error getting token"
+
+ // Global LOGGER constant
+ debugLog = "<DEBUG>"
+
+ // LOGGER constant for IBM Client Implementation
+ defaultIBMCImpLog = "defaultIBMCImplementation"
+
+ // LOGGER constant for IBM Token Management Implementation
+ defaultTMImpLog = "defaultTMImplementation"
+ getOpsLog = "GET OPERATION"
+ backgroundRefreshLog = "BACKGROUND REFRESH"
+
+ // Global constants
+ endPoint = "EndPoint"
+)
+
+// isSuccess reports whether the response has a success status code (200 <= code < 300)
+func isSuccess(response *http.Response) bool {
+ return response.StatusCode >= 200 && response.StatusCode < 300
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager/token_manager.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager/token_manager.go
new file mode 100644
index 0000000000000..52ff662d99bd8
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager/token_manager.go
@@ -0,0 +1,551 @@
+package tokenmanager
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/token"
+)
+
+// Constants used to retrieve the initial token and to refresh tokens
+const (
+ iamClientID = "bx"
+ iamClientSecret = "bx"
+
+ grantAPIKey = "urn:ibm:params:oauth:grant-type:apikey"
+ grantRefreshToken = "refresh_token"
+)
+
+var (
+ // minimum time before expiration to refresh
+ minimumDelta = time.Duration(3) * time.Second
+
+ // minimum time between refresh daemons calls,
+ // to avoid background thread flooding and
+ // starvation of the Get when the token does not renew
+ minimumWait = time.Duration(1) * time.Second
+
+	// DefaultAdvisoryTimeoutFunc sets the advisory timeout to 25% of remaining time - usually 15 minutes on 1 hour expiry
+ DefaultAdvisoryTimeoutFunc = func(ttl time.Duration) time.Duration {
+ return time.Duration(float64(ttl.Nanoseconds())*0.25) * time.Nanosecond
+ }
+
+	// DefaultMandatoryTimeoutFunc sets the mandatory timeout to 17% of remaining time - usually 10 minutes on 1 hour expiry
+ DefaultMandatoryTimeoutFunc = func(ttl time.Duration) time.Duration {
+ return time.Duration(float64(ttl.Nanoseconds())*0.17) * time.Nanosecond
+ }
+
+ // ErrFetchingIAMTokenFn returns the error fetching token for Token Manager
+ ErrFetchingIAMTokenFn = func(err error) awserr.Error {
+ return awserr.New("ErrFetchingIAMToken", "error fetching token", err)
+ }
+)
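+
+// Editor's note (illustrative arithmetic): for a one-hour token the defaults
+// above mean a refresh becomes advisory once 15 minutes remain (25% of the
+// original TTL) and mandatory once roughly 10 minutes remain (17%).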
+
+type defaultTMImplementation struct {
+ // endpoint used to retrieve tokens
+ authEndPoint string
+ // client used to retrieve tokens, implements the Retry behaviour
+ client IBMClientDo
+
+ // timeout used by background thread
+ advisoryRefreshTimeout func(ttl time.Duration) time.Duration
+	// timeout used by Get to decide whether to block and refresh the token,
+	// and by background refresh when advisory is not set or smaller than mandatory
+ mandatoryRefreshTimeout func(ttl time.Duration) time.Duration
+ // time provider used to get current time
+ timeProvider func() time.Time
+ // original time to live of the token at the moment it was retrieved
+ tokenTTL time.Duration
+
+ // token value kept for its TTL
+ Cache *token.Token
+ // timer used to refresh the Token
+ timer *time.Timer
+ // nullable boolean used to enable disable the background refresh
+ enableBackgroundRefresh *bool
+ // read write mutex to sync access
+ mutex sync.RWMutex
+ // function used to retrieve initial token
+ initFunc func() (*token.Token, error)
+
+ // logger where the logging is sent
+ logger aws.Logger
+ // level of logging enabled
+ logLevel *aws.LogLevelType
+}
+
+// function to create a new token manager using an APIKey to retrieve the first token
+func newTokenManagerFromAPIKey(config *aws.Config, apiKey, authEndPoint string, advisoryRefreshTimeout,
+ mandatoryRefreshTimeout func(time.Duration) time.Duration, timeFunc func() time.Time,
+ client IBMClientDo) *defaultTMImplementation {
+ // when the client is nil creates a new one using the config passed as argument
+ if client == nil {
+ client = defaultIBMClient(config)
+ }
+
+ // set the function to get the initial token the defaultInit that uses the APIKey passed as argument
+ initFunc := defaultInit(apiKey, authEndPoint, client)
+ return newTokenManager(config, initFunc, authEndPoint, advisoryRefreshTimeout, mandatoryRefreshTimeout, timeFunc,
+ client)
+}
+
+// default init function,
+// uses the APIKey passed as argument to obtain the first token
+func defaultInit(apiKey string, authEndPoint string, client IBMClientDo) func() (*token.Token, error) {
+ return func() (*token.Token, error) {
+ data := url.Values{
+ "apikey": {apiKey},
+ }
+ // build the http request
+ req, err := buildRequest(authEndPoint, grantAPIKey, data)
+ // checks for errors
+ if err != nil {
+ return nil, ErrFetchingIAMTokenFn(err)
+ }
+ // calls the end point
+ response, err := client.Do(req)
+ // checks for errors
+ if err != nil {
+ return nil, ErrFetchingIAMTokenFn(err)
+ }
+ // parse the response
+ tokenValue, err := processResponse(response)
+ // checks for errors
+ if err != nil {
+ return nil, ErrFetchingIAMTokenFn(err)
+ }
+ // returns the token
+ return tokenValue, nil
+ }
+}
+
+// creates a token manager,
+// the initial token is obtained using a custom function
+func newTokenManager(config *aws.Config, initFunc func() (*token.Token, error), authEndPoint string,
+ advisoryRefreshTimeout, mandatoryRefreshTimeout func(time.Duration) time.Duration, timeFunc func() time.Time,
+ client IBMClientDo) *defaultTMImplementation {
+ // if no time function passed uses the time.Now
+ if timeFunc == nil {
+ timeFunc = time.Now
+ }
+
+ // if no value passed use the one stored as global
+ if advisoryRefreshTimeout == nil {
+ advisoryRefreshTimeout = DefaultAdvisoryTimeoutFunc
+ }
+
+ // if no value passed use the one stored as global
+ if mandatoryRefreshTimeout == nil {
+ mandatoryRefreshTimeout = DefaultMandatoryTimeoutFunc
+ }
+
+ // checks the logLevel and logger,
+	// only sets the logLevel when logLevel and logger are not ZERO values;
+	// helps reduce the logic since logLevel needs to be checked
+ logLevel := aws.LogLevel(aws.LogOff)
+ if config != nil && config.LogLevel != nil && config.Logger != nil {
+ logLevel = config.LogLevel
+ }
+
+ // builds a defaultTMImplementation using the provided parameters
+ tm := &defaultTMImplementation{
+ authEndPoint: authEndPoint,
+ client: client,
+ advisoryRefreshTimeout: advisoryRefreshTimeout,
+ mandatoryRefreshTimeout: mandatoryRefreshTimeout,
+ timeProvider: timeFunc,
+ initFunc: initFunc,
+
+ logLevel: logLevel,
+ logger: config.Logger,
+ }
+ return tm
+}
+
+// function to initialize the token manager in a concurrent safe way
+func (tm *defaultTMImplementation) init() (*token.Token, error) {
+ // checks logLevel and logs
+ if tm.logLevel.Matches(aws.LogDebug) {
+ tm.logger.Log(debugLog, defaultTMImpLog, "INIT")
+ }
+	// fetches the initial value using the init function
+ tokenValue, err := tm.initFunc()
+ if err != nil {
+ // checks logLevel and logs
+ if tm.logLevel.Matches(aws.LogDebug) {
+ tm.logger.Log(debugLog, defaultTMImpLog, "INIT FAILED", err)
+ }
+ return nil, err
+ }
+ // sets current cache value the value fetched by the init call
+ tm.Cache = tokenValue
+ result := *tm.Cache
+ // sets token time to live
+ tm.tokenTTL = getTTL(tokenValue.Expiration, tm.timeProvider)
+ // checks and sets if background thread is enabled
+ if tm.enableBackgroundRefresh == nil {
+ tm.enableBackgroundRefresh = aws.Bool(true)
+ }
+ // resets the time
+ tm.resetTimer()
+ // checks logLevel and logs
+ if tm.logLevel.Matches(aws.LogDebug) {
+ tm.logger.Log(debugLog, defaultTMImpLog, "INIT SUCCEEDED")
+ }
+ return &result, nil
+}
+
+// function to call the init operation in a concurrent safe way, managing the RWLock
+func retrieveInit(tm *defaultTMImplementation) (unlockOP func(), tk *token.Token, err error) {
+ // escalate the READ lock to a WRITE lock
+ now := time.Now()
+ tm.mutex.RUnlock()
+ tm.mutex.Lock()
+ // set unlock Operation to Write Unlock
+ unlockOP = tm.mutex.Unlock
+
+ // checks logLevel and logs
+ if tm.logLevel.Matches(aws.LogDebug) {
+ tm.logger.Log(debugLog, defaultTMImpLog, getOpsLog,
+ "TOKEN MANAGER NOT INITIALIZED - ACQUIRED FULL LOCK IN", time.Now().Sub(now))
+ }
+
+ // since another routine could be scheduled between the release of Read mutex and the acquire of Write mutex
+ // re-check the init is still required
+ if tm.Cache == nil {
+ tk, err = tm.init()
+ } else {
+ tk = retrieveCheckGet(tm)
+ }
+ return
+}
+
+// function to call the refresh operation in a concurrent safe way, managing the RWLock
+func retrieveFetch(tm *defaultTMImplementation) (unlockOP func(), tk *token.Token, err error) {
+
+ // escalate the READ lock to a WRITE lock
+ now := time.Now()
+ tm.mutex.RUnlock()
+ tm.mutex.Lock()
+ // set unlock Operation to Write Unlock
+ unlockOP = tm.mutex.Unlock
+
+ // checks logLevel and logs
+ if tm.logLevel.Matches(aws.LogDebug) {
+ tm.logger.Log(debugLog, defaultTMImpLog, getOpsLog,
+ "TOKEN REFRESH - ACQUIRED FULL LOCK IN", time.Now().Sub(now))
+ }
+
+ // since another routine could be scheduled between the release of Read mutex and the acquire of Write mutex
+ // re-check the refresh is still required
+ tk = retrieveCheckGet(tm)
+ for tk == nil {
+ err := tm.refresh()
+ if err != nil {
+ // checks logLevel and logs
+ if tm.logLevel.Matches(aws.LogDebug) {
+ tm.logger.Log(debugLog, defaultTMImpLog, getOpsLog, "REFRESH FAILED", err)
+ }
+ return unlockOP, nil, err
+ }
+ tk = retrieveCheckGet(tm)
+ }
+
+ return
+}
+
+// function to check the token in the cache and return it if still valid
+func retrieveCheckGet(tm *defaultTMImplementation) (tk *token.Token) {
+ // calculates the TTL of the token in the cache
+ tokenTTL := waitingTime(tm.tokenTTL, tm.Cache.Expiration, nil, tm.mandatoryRefreshTimeout, tm.timeProvider)
+ // check if token is valid
+ if tokenTTL == nil || *tokenTTL > minimumDelta {
+ // set result to be cache content
+ tk = tm.Cache
+ }
+ return
+}
+
+// Get retrieves the value of the auth token: if the cached token is still valid
+// it is returned directly, otherwise the cache is refreshed first and the new token is returned
+func (tm *defaultTMImplementation) Get() (tk *token.Token, err error) {
+
+ // holder for the func to be called in the defer
+ var unlockOP func()
+ // defer the call of the unlock operation
+ defer func() {
+ unlockOP()
+ }()
+
+ now := time.Now()
+
+ // acquire Read lock
+ tm.mutex.RLock()
+ // set unlock operation to ReadUnlock
+ unlockOP = tm.mutex.RUnlock
+
+ // checks logLevel and logs
+ if tm.logLevel.Matches(aws.LogDebug) {
+ tm.logger.Log(debugLog, defaultTMImpLog, getOpsLog, "ACQUIRED RLOCK IN", time.Now().Sub(now))
+ }
+
+	// check if the cache was initialized
+ if tm.Cache == nil {
+ // if cache not initialized, initialize it
+ unlockOP, tk, err = retrieveInit(tm)
+ return
+ }
+
+ // check and retrieves content of cache
+ tk = retrieveCheckGet(tm)
+ // check if the content of cache is valid
+ if tk == nil {
+ // content of the cache invalid
+ // refresh cache content
+ unlockOP, tk, err = retrieveFetch(tm)
+ }
+
+ return
+}
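
An aside on the locking pattern above: escalating from a read lock to a write lock is not atomic, which is why retrieveInit and retrieveFetch both re-check their condition after acquiring the write lock. A minimal self-contained sketch of that idiom (the names mu, cache and load are illustrative, not from this package):

package main

import (
	"fmt"
	"sync"
)

var (
	mu    sync.RWMutex
	cache *string
)

func get(load func() string) string {
	mu.RLock()
	if cache != nil {
		v := *cache
		mu.RUnlock()
		return v
	}
	// Escalate: RUnlock followed by Lock is NOT atomic; another
	// goroutine may run (and fill the cache) in between.
	mu.RUnlock()
	mu.Lock()
	defer mu.Unlock()
	// Re-check the condition under the write lock.
	if cache == nil {
		v := load()
		cache = &v
	}
	return *cache
}

func main() {
	fmt.Println(get(func() string { return "token" })) // token
}
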
+
+// function to do the refresh operation calls
+func (tm *defaultTMImplementation) refresh() error {
+ // stop the timer
+ tm.stopTimer()
+ // defer timer reset
+ defer tm.resetTimer()
+ // set the refresh token parameter of the request
+ data := url.Values{
+ "refresh_token": {tm.Cache.RefreshToken},
+ }
+ // build the request
+ req, err := buildRequest(tm.authEndPoint, grantRefreshToken, data)
+ if err != nil {
+ return ErrFetchingIAMTokenFn(err)
+ }
+ // call the endpoint
+ response, err := tm.client.Do(req)
+ if err != nil {
+ return ErrFetchingIAMTokenFn(err)
+ }
+ // parse the response
+ tokenValue, err := processResponse(response)
+ if err != nil {
+ if response.StatusCode == 400 {
+ // Initialize new token when REFRESH TOKEN got invalid
+ if tm.logLevel.Matches(aws.LogDebug) {
+ tm.logger.Log(debugLog, defaultTMImpLog, "REFRESH TOKEN INVALID. NEW TOKEN INITIALIZED", err, response.Header["Transaction-Id"], response.Body)
+ }
+ tm.init()
+ return nil
+ } else {
+ if tm.logLevel.Matches(aws.LogDebug) {
+ tm.logger.Log(debugLog, defaultTMImpLog, "REFRESH TOKEN EXCHANGE FAILED", err, response.Header["Transaction-Id"], response.Body)
+ }
+ return ErrFetchingIAMTokenFn(err)
+ }
+ }
+ // sets current token to the value fetched
+ tm.Cache = tokenValue
+ // sets TTL
+ tm.tokenTTL = getTTL(tokenValue.Expiration, tm.timeProvider)
+ return nil
+}
+
+// Refresh forces the refresh of the token in the cache in a concurrency-safe way
+func (tm *defaultTMImplementation) Refresh() error {
+ // acquire a Write lock
+ tm.mutex.Lock()
+ // defer the release of the write lock
+ defer tm.mutex.Unlock()
+ // checks logLevel and logs
+ if tm.logLevel.Matches(aws.LogDebug) {
+ tm.logger.Log(debugLog, defaultTMImpLog, "MANUAL TRIGGER BACKGROUND REFRESH")
+ }
+ return tm.refresh()
+}
+
+// callback function used by the timer to refresh tokens in the background
+func (tm *defaultTMImplementation) backgroundRefreshFunc() {
+ now := time.Now()
+ // acquire a Write lock
+ tm.mutex.Lock()
+ // defer the release of the write lock
+ defer tm.mutex.Unlock()
+
+ // checks logLevel and logs
+ if tm.logLevel.Matches(aws.LogDebug) {
+ tm.logger.Log(debugLog, defaultTMImpLog, backgroundRefreshLog,
+ "ACQUIRED FULL LOCK IN", time.Now().Sub(now))
+ }
+ wait := waitingTime(tm.tokenTTL, tm.Cache.Expiration, tm.advisoryRefreshTimeout,
+ tm.mandatoryRefreshTimeout, tm.timeProvider)
+ // checks logLevel and logs
+ if tm.logLevel.Matches(aws.LogDebug) {
+ tm.logger.Log(debugLog, defaultTMImpLog, backgroundRefreshLog, "TOKEN TTL", wait)
+ }
+ if wait != nil && *wait < minimumDelta {
+ // checks logLevel and logs
+ if tm.logLevel.Matches(aws.LogDebug) {
+ tm.logger.Log(debugLog, defaultTMImpLog, backgroundRefreshLog, "TOKEN NEED UPDATE")
+ }
+ tm.refresh()
+ } else {
+ // checks logLevel and logs
+ if tm.logLevel.Matches(aws.LogDebug) {
+ tm.logger.Log(debugLog, defaultTMImpLog, backgroundRefreshLog, "TOKEN UPDATE SKIPPED")
+ }
+ tm.resetTimer()
+ }
+}
+
+// StopBackgroundRefresh forces the background token refresh to stop in a concurrency-safe way
+func (tm *defaultTMImplementation) StopBackgroundRefresh() {
+ // acquire a Write lock
+ tm.mutex.Lock()
+ // defer the release of the write lock
+ defer tm.mutex.Unlock()
+ tm.stopTimer()
+ tm.enableBackgroundRefresh = aws.Bool(false)
+ // checks logLevel and logs
+ if tm.logLevel.Matches(aws.LogDebug) {
+ tm.logger.Log(debugLog, defaultTMImpLog, "STOP BACKGROUND REFRESH")
+ }
+}
+
+// StartBackgroundRefresh starts the background refresh thread in a concurrency-safe way
+func (tm *defaultTMImplementation) StartBackgroundRefresh() {
+ // acquire a Write lock
+ tm.mutex.Lock()
+ // defer the release of the write lock
+ defer tm.mutex.Unlock()
+ tm.enableBackgroundRefresh = aws.Bool(true)
+ tm.resetTimer()
+ // checks logLevel and logs
+ if tm.logLevel.Matches(aws.LogDebug) {
+ tm.logger.Log(debugLog, defaultTMImpLog, "START BACKGROUND REFRESH")
+ }
+}
+
+// helper function to stop the timer in the token manager used to trigger token background refresh
+func (tm *defaultTMImplementation) stopTimer() {
+ if tm.timer != nil {
+ tm.timer.Stop()
+ }
+}
+
+// helper function used to reset the timer
+func (tm *defaultTMImplementation) resetTimer() {
+ // checks if background refresh is enabled
+ if tm.enableBackgroundRefresh != nil && *tm.enableBackgroundRefresh {
+		// calculates how long to wait for the next refresh
+ refreshIn := waitingTime(tm.tokenTTL, tm.Cache.Expiration, tm.advisoryRefreshTimeout,
+ tm.mandatoryRefreshTimeout, tm.timeProvider)
+		// checks if the waiting time is not nil;
+		// a nil value means the token has no expiration and no refresh is needed
+ if refreshIn != nil {
+			// if a timer already exists,
+			// reset its remaining time
+ if tm.timer != nil {
+ if minimumWait > *refreshIn {
+ *refreshIn = minimumWait
+ }
+ tm.timer.Reset(*refreshIn)
+ } else {
+				// if no timer exists yet,
+				// create a new one
+ tm.timer = time.AfterFunc(*refreshIn, tm.backgroundRefreshFunc)
+ }
+ } else {
+ tm.timer = nil
+ }
+ }
+}
+
+// helper function used to build the http request used to retrieve initial and refresh tokens
+func buildRequest(endPoint string, grantType string, customValues url.Values) (*http.Request, error) {
+ data := url.Values{
+ "grant_type": {grantType},
+ "response_type": {"cloud_iam"},
+ }
+ for key, value := range customValues {
+ data[key] = value
+ }
+ req, err := http.NewRequest(http.MethodPost, endPoint, strings.NewReader(data.Encode()))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s",
+ iamClientID, iamClientSecret))))
+ req.Header.Set("accept", "application/json")
+ req.Header.Set("Cache-control", "no-Cache")
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ return req, nil
+}
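
For illustration, the refresh request assembled above is a form-encoded POST; assuming the unexported grantRefreshToken constant carries the standard OAuth2 value "refresh_token" (it is defined in a part of the file not shown here), the body comes out roughly as in this sketch:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Hypothetical values standing in for the package constants/fields.
	data := url.Values{
		"grant_type":    {"refresh_token"}, // assumed value of grantRefreshToken
		"response_type": {"cloud_iam"},
		"refresh_token": {"<opaque-refresh-token>"}, // placeholder
	}
	// This is the form-encoded body POSTed to the IAM auth endpoint
	// (url.Values.Encode sorts the keys alphabetically):
	fmt.Println(data.Encode())
	// grant_type=refresh_token&refresh_token=%3Copaque-refresh-token%3E&response_type=cloud_iam
}
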
+
+// helper function used to parse the http response into a Token struct
+func processResponse(response *http.Response) (*token.Token, error) {
+ bodyContent, err := ioutil.ReadAll(response.Body)
+ if err != nil {
+ return nil, err
+ }
+ err = response.Body.Close()
+ if err != nil {
+ return nil, err
+ }
+ if isSuccess(response) {
+ tokenValue := token.Token{}
+ err = json.Unmarshal(bodyContent, &tokenValue)
+ if err != nil {
+ return nil, err
+ }
+ return &tokenValue, nil
+ } else if response.StatusCode == 400 || response.StatusCode == 401 || response.StatusCode == 403 {
+ apiErr := token.Error{}
+ err = json.Unmarshal(bodyContent, &apiErr)
+ if err != nil {
+ return nil, err
+ }
+ return nil, &apiErr
+ } else {
+ return nil, fmt.Errorf("Response: Bad Status Code: %s", response.Status)
+ }
+}
+
+// helper function used to calculate the time before the token expires;
+// it takes the mandatory and advisory timeouts into consideration
+func waitingTime(ttl time.Duration, unixTime int64, advisoryRefreshTimeout,
+ mandatoryRefreshTimeout func(time.Duration) time.Duration, timeFunc func() time.Time) *time.Duration {
+ if unixTime == 0 {
+ return nil
+ }
+ timeoutAt := time.Unix(unixTime, 0)
+ result := timeoutAt.Sub(timeFunc())
+ delta := minimumDelta
+ if advisoryRefreshTimeout != nil && advisoryRefreshTimeout(ttl) > minimumDelta {
+ delta = advisoryRefreshTimeout(ttl)
+ }
+ if mandatoryRefreshTimeout != nil && mandatoryRefreshTimeout(ttl) > delta {
+ delta = mandatoryRefreshTimeout(ttl)
+ }
+ result -= delta
+ return &result
+}
+
+func getTTL(unixTime int64, timeFunc func() time.Time) time.Duration {
+ if unixTime > 0 {
+ timeoutAt := time.Unix(unixTime, 0)
+ return timeoutAt.Sub(timeFunc())
+ }
+ return 0
+}
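
A rough numeric walk-through of the waitingTime arithmetic above, using hypothetical refresh-timeout functions (the real ones are injected by the caller, and minimumDelta is a package constant not shown in this hunk):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical numbers: a token issued just now with a 3600s lifetime,
	// an advisory timeout of 25% of the TTL and a mandatory timeout of 10%.
	ttl := 3600 * time.Second
	advisory := ttl / 4   // 900s
	mandatory := ttl / 10 // 360s

	// waitingTime picks the largest applicable delta (here the advisory one,
	// since 900s > 360s and presumably > minimumDelta) and subtracts it from
	// the remaining lifetime:
	remaining := ttl // freshly issued, so expiration-now == ttl
	delta := advisory
	if mandatory > delta {
		delta = mandatory
	}
	refreshIn := remaining - delta
	fmt.Println(refreshIn) // 45m0s: the background timer fires 900s early
}
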
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager/token_manager_interface.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager/token_manager_interface.go
new file mode 100644
index 0000000000000..49dc7201f8e2f
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager/token_manager_interface.go
@@ -0,0 +1,45 @@
+package tokenmanager
+
+import (
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/token"
+)
+
+// API Token Manager interface
+type API interface {
+
+ // Token Management Get Function
+ Get() (*token.Token, error)
+
+	// Token Management Refresh Function
+ Refresh() error
+
+ // Token Management Stop Background Refresh
+ StopBackgroundRefresh()
+
+ // Token Management Start Background Refresh
+ StartBackgroundRefresh()
+}
+
+// default implementations,
+// wrapped in the API interface
+var (
+ // NewTokenManager token manager constructor using a custom initial function to retrieve first token
+ NewTokenManager = func(config *aws.Config, initFunc func() (*token.Token, error), authEndPoint string,
+ advisoryRefreshTimeout, mandatoryRefreshTimeout func(time.Duration) time.Duration, timeFunc func() time.Time,
+ client IBMClientDo) API {
+ return newTokenManager(config, initFunc, authEndPoint, advisoryRefreshTimeout, mandatoryRefreshTimeout,
+ timeFunc, client)
+
+ }
+
+ // NewTokenManagerFromAPIKey token manager constructor using api key to retrieve first token
+ NewTokenManagerFromAPIKey = func(config *aws.Config, apiKey, authEndPoint string, advisoryRefreshTimeout,
+ mandatoryRefreshTimeout func(time.Duration) time.Duration, timeFunc func() time.Time,
+ client IBMClientDo) API {
+ return newTokenManagerFromAPIKey(config, apiKey, authEndPoint, advisoryRefreshTimeout,
+ mandatoryRefreshTimeout, timeFunc, client)
+ }
+)
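
A hedged usage sketch of the API above: building a manager from an API key and fetching a token. The endpoint URL and key are placeholders, time.Now is passed explicitly rather than relying on a package default, and http.DefaultClient is assumed to satisfy IBMClientDo since it provides a Do method:

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/IBM/ibm-cos-sdk-go/aws"
	"github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager"
)

func main() {
	// nil refresh-timeout functions fall back to the package defaults,
	// per the newTokenManager constructor shown earlier.
	tm := tokenmanager.NewTokenManagerFromAPIKey(
		aws.NewConfig(), "my-api-key", "https://iam.example.com/identity/token",
		nil, nil, time.Now, http.DefaultClient)

	tok, err := tm.Get() // refreshes transparently when the cached token is stale
	if err != nil {
		fmt.Println("token fetch failed:", err)
		return
	}
	fmt.Println("access token acquired, expires at unix", tok.Expiration)
}
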
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/processcreds/provider.go
new file mode 100644
index 0000000000000..a81dda5f32dc7
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/processcreds/provider.go
@@ -0,0 +1,426 @@
+/*
+Package processcreds is a credential Provider to retrieve `credential_process`
+credentials.
+
+WARNING: The following describes a method of sourcing credentials from an external
+process. This can potentially be dangerous, so proceed with caution. Other
+credential providers should be preferred if at all possible. If using this
+option, you should make sure that the config file is as locked down as possible
+using security best practices for your operating system.
+
+You can use credentials from a `credential_process` in a variety of ways.
+
+One way is to setup your shared config file, located in the default
+location, with the `credential_process` key and the command you want to be
+called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable
+(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file.
+
+ [default]
+ credential_process = /command/to/call
+
+Creating a new session will use the credential process to retrieve credentials.
+NOTE: If there are credentials in the profile you are using, the credential
+process will not be used.
+
+ // Initialize a session to load credentials.
+ sess, _ := session.NewSession(&aws.Config{
+ Region: aws.String("us-east-1")},
+ )
+
+ // Create S3 service client to use the credentials.
+ svc := s3.New(sess)
+
+Another way to use the `credential_process` method is by using
+`credentials.NewCredentials()` and providing a command to be executed to
+retrieve credentials:
+
+ // Create credentials using the ProcessProvider.
+ creds := processcreds.NewCredentials("/path/to/command")
+
+ // Create service client value configured for credentials.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+You can set a non-default timeout for the `credential_process` with another
+constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To
+set a one minute timeout:
+
+ // Create credentials using the ProcessProvider.
+ creds := processcreds.NewCredentialsTimeout(
+ "/path/to/command",
+ time.Duration(500) * time.Millisecond)
+
+If you need more control, you can set any configurable options in the
+credentials using one or more option functions. For example, you can set a two
+minute timeout, a credential duration of 60 minutes, and a maximum stdout
+buffer size of 2k.
+
+ creds := processcreds.NewCredentials(
+ "/path/to/command",
+ func(opt *ProcessProvider) {
+ opt.Timeout = time.Duration(2) * time.Minute
+ opt.Duration = time.Duration(60) * time.Minute
+ opt.MaxBufSize = 2048
+ })
+
+You can also use your own `exec.Cmd`:
+
+ // Create an exec.Cmd
+ myCommand := exec.Command("/path/to/command")
+
+ // Create credentials using your exec.Cmd and custom timeout
+ creds := processcreds.NewCredentialsCommand(
+ myCommand,
+ func(opt *processcreds.ProcessProvider) {
+ opt.Timeout = time.Duration(1) * time.Second
+ })
+*/
+package processcreds
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+ "github.com/IBM/ibm-cos-sdk-go/internal/sdkio"
+)
+
+const (
+ // ProviderName is the name this credentials provider will label any
+ // returned credentials Value with.
+ ProviderName = `ProcessProvider`
+
+ // ErrCodeProcessProviderParse error parsing process output
+ ErrCodeProcessProviderParse = "ProcessProviderParseError"
+
+ // ErrCodeProcessProviderVersion version error in output
+ ErrCodeProcessProviderVersion = "ProcessProviderVersionError"
+
+ // ErrCodeProcessProviderRequired required attribute missing in output
+ ErrCodeProcessProviderRequired = "ProcessProviderRequiredError"
+
+ // ErrCodeProcessProviderExecution execution of command failed
+ ErrCodeProcessProviderExecution = "ProcessProviderExecutionError"
+
+ // errMsgProcessProviderTimeout process took longer than allowed
+ errMsgProcessProviderTimeout = "credential process timed out"
+
+ // errMsgProcessProviderProcess process error
+ errMsgProcessProviderProcess = "error in credential_process"
+
+ // errMsgProcessProviderParse problem parsing output
+ errMsgProcessProviderParse = "parse failed of credential_process output"
+
+ // errMsgProcessProviderVersion version error in output
+ errMsgProcessProviderVersion = "wrong version in process output (not 1)"
+
+ // errMsgProcessProviderMissKey missing access key id in output
+ errMsgProcessProviderMissKey = "missing AccessKeyId in process output"
+
+	// errMsgProcessProviderMissSecret missing secret access key in output
+ errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output"
+
+ // errMsgProcessProviderPrepareCmd prepare of command failed
+ errMsgProcessProviderPrepareCmd = "failed to prepare command"
+
+ // errMsgProcessProviderEmptyCmd command must not be empty
+ errMsgProcessProviderEmptyCmd = "command must not be empty"
+
+ // errMsgProcessProviderPipe failed to initialize pipe
+ errMsgProcessProviderPipe = "failed to initialize pipe"
+
+ // DefaultDuration is the default amount of time in minutes that the
+ // credentials will be valid for.
+ DefaultDuration = time.Duration(15) * time.Minute
+
+ // DefaultBufSize limits buffer size from growing to an enormous
+ // amount due to a faulty process.
+ DefaultBufSize = int(8 * sdkio.KibiByte)
+
+ // DefaultTimeout default limit on time a process can run.
+ DefaultTimeout = time.Duration(1) * time.Minute
+)
+
+// ProcessProvider satisfies the credentials.Provider interface, and is a
+// client to retrieve credentials from a process.
+type ProcessProvider struct {
+ staticCreds bool
+ credentials.Expiry
+ originalCommand []string
+
+ // Expiry duration of the credentials. Defaults to 15 minutes if not set.
+ Duration time.Duration
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+ // with expiring credentials do not cause request to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+ // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+
+	// The os command to be executed, which should write JSON with
+	// credential information to stdout.
+ command *exec.Cmd
+
+ // MaxBufSize limits memory usage from growing to an enormous
+ // amount due to a faulty process.
+ MaxBufSize int
+
+ // Timeout limits the time a process can run.
+ Timeout time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// ProcessProvider. The credentials will expire every 15 minutes by default.
+func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials {
+ p := &ProcessProvider{
+ command: exec.Command(command),
+ Duration: DefaultDuration,
+ Timeout: DefaultTimeout,
+ MaxBufSize: DefaultBufSize,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// NewCredentialsTimeout returns a pointer to a new Credentials object with
+// the specified command and timeout, and default duration and max buffer size.
+func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials {
+ p := NewCredentials(command, func(opt *ProcessProvider) {
+ opt.Timeout = timeout
+ })
+
+ return p
+}
+
+// NewCredentialsCommand returns a pointer to a new Credentials object with
+// the specified command, and default timeout, duration and max buffer size.
+func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials {
+ p := &ProcessProvider{
+ command: command,
+ Duration: DefaultDuration,
+ Timeout: DefaultTimeout,
+ MaxBufSize: DefaultBufSize,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+type credentialProcessResponse struct {
+ Version int
+ AccessKeyID string `json:"AccessKeyId"`
+ SecretAccessKey string
+ SessionToken string
+ Expiration *time.Time
+}
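
Concretely, the struct above implies that a conforming credential_process must print JSON of the following shape to stdout (all values are placeholders; as Retrieve below shows, omitting Expiration marks the credentials as static):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Placeholder output of a hypothetical credential_process script.
	out := []byte(`{
		"Version": 1,
		"AccessKeyId": "AKID-EXAMPLE",
		"SecretAccessKey": "SECRET-EXAMPLE",
		"SessionToken": "TOKEN-EXAMPLE",
		"Expiration": "2030-01-01T00:00:00Z"
	}`)

	var resp struct {
		Version         int
		AccessKeyID     string `json:"AccessKeyId"`
		SecretAccessKey string
		SessionToken    string
	}
	if err := json.Unmarshal(out, &resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.Version, resp.AccessKeyID) // 1 AKID-EXAMPLE
}
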
+
+// Retrieve executes the 'credential_process' and returns the credentials.
+func (p *ProcessProvider) Retrieve() (credentials.Value, error) {
+ out, err := p.executeCredentialProcess()
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ // Serialize and validate response
+ resp := &credentialProcessResponse{}
+ if err = json.Unmarshal(out, resp); err != nil {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New(
+ ErrCodeProcessProviderParse,
+ fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)),
+ err)
+ }
+
+ if resp.Version != 1 {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New(
+ ErrCodeProcessProviderVersion,
+ errMsgProcessProviderVersion,
+ nil)
+ }
+
+ if len(resp.AccessKeyID) == 0 {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New(
+ ErrCodeProcessProviderRequired,
+ errMsgProcessProviderMissKey,
+ nil)
+ }
+
+ if len(resp.SecretAccessKey) == 0 {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New(
+ ErrCodeProcessProviderRequired,
+ errMsgProcessProviderMissSecret,
+ nil)
+ }
+
+ // Handle expiration
+ p.staticCreds = resp.Expiration == nil
+ if resp.Expiration != nil {
+ p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
+ }
+
+ return credentials.Value{
+ ProviderName: ProviderName,
+ AccessKeyID: resp.AccessKeyID,
+ SecretAccessKey: resp.SecretAccessKey,
+ SessionToken: resp.SessionToken,
+ }, nil
+}
+
+// IsExpired returns true if the credentials retrieved are expired, or not yet
+// retrieved.
+func (p *ProcessProvider) IsExpired() bool {
+ if p.staticCreds {
+ return false
+ }
+ return p.Expiry.IsExpired()
+}
+
+// prepareCommand prepares the command to be executed.
+func (p *ProcessProvider) prepareCommand() error {
+
+ var cmdArgs []string
+ if runtime.GOOS == "windows" {
+ cmdArgs = []string{"cmd.exe", "/C"}
+ } else {
+ cmdArgs = []string{"sh", "-c"}
+ }
+
+ if len(p.originalCommand) == 0 {
+ p.originalCommand = make([]string, len(p.command.Args))
+ copy(p.originalCommand, p.command.Args)
+
+		// check for an empty command explicitly, because executing one would otherwise succeed silently
+ if len(strings.TrimSpace(p.originalCommand[0])) < 1 {
+ return awserr.New(
+ ErrCodeProcessProviderExecution,
+ fmt.Sprintf(
+ "%s: %s",
+ errMsgProcessProviderPrepareCmd,
+ errMsgProcessProviderEmptyCmd),
+ nil)
+ }
+ }
+
+ cmdArgs = append(cmdArgs, p.originalCommand...)
+ p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...)
+ p.command.Env = os.Environ()
+
+ return nil
+}
+
+// executeCredentialProcess starts the credential process on the OS and
+// returns the results or an error.
+func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) {
+
+ if err := p.prepareCommand(); err != nil {
+ return nil, err
+ }
+
+ // Setup the pipes
+ outReadPipe, outWritePipe, err := os.Pipe()
+ if err != nil {
+ return nil, awserr.New(
+ ErrCodeProcessProviderExecution,
+ errMsgProcessProviderPipe,
+ err)
+ }
+
+ p.command.Stderr = os.Stderr // display stderr on console for MFA
+ p.command.Stdout = outWritePipe // get creds json on process's stdout
+ p.command.Stdin = os.Stdin // enable stdin for MFA
+
+ output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize))
+
+ stdoutCh := make(chan error, 1)
+ go readInput(
+ io.LimitReader(outReadPipe, int64(p.MaxBufSize)),
+ output,
+ stdoutCh)
+
+ execCh := make(chan error, 1)
+ go executeCommand(*p.command, execCh)
+
+ finished := false
+ var errors []error
+ for !finished {
+ select {
+ case readError := <-stdoutCh:
+ errors = appendError(errors, readError)
+ finished = true
+ case execError := <-execCh:
+ err := outWritePipe.Close()
+ errors = appendError(errors, err)
+ errors = appendError(errors, execError)
+ if errors != nil {
+ return output.Bytes(), awserr.NewBatchError(
+ ErrCodeProcessProviderExecution,
+ errMsgProcessProviderProcess,
+ errors)
+ }
+ case <-time.After(p.Timeout):
+ finished = true
+ return output.Bytes(), awserr.NewBatchError(
+ ErrCodeProcessProviderExecution,
+ errMsgProcessProviderTimeout,
+ errors) // errors can be nil
+ }
+ }
+
+ out := output.Bytes()
+
+ if runtime.GOOS == "windows" {
+ // windows adds slashes to quotes
+ out = []byte(strings.Replace(string(out), `\"`, `"`, -1))
+ }
+
+ return out, nil
+}
+
+// appendError conveniently checks for nil before appending to the slice
+func appendError(errors []error, err error) []error {
+ if err != nil {
+ return append(errors, err)
+ }
+ return errors
+}
+
+func executeCommand(cmd exec.Cmd, exec chan error) {
+ // Start the command
+ err := cmd.Start()
+ if err == nil {
+ err = cmd.Wait()
+ }
+
+ exec <- err
+}
+
+func readInput(r io.Reader, w io.Writer, read chan error) {
+ tee := io.TeeReader(r, w)
+
+ _, err := ioutil.ReadAll(tee)
+
+ if err == io.EOF {
+ err = nil
+ }
+
+ read <- err // will only arrive here when write end of pipe is closed
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/shared_credentials_provider.go
new file mode 100644
index 0000000000000..5328e0a6c10d2
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/shared_credentials_provider.go
@@ -0,0 +1,151 @@
+package credentials
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/internal/ini"
+ "github.com/IBM/ibm-cos-sdk-go/internal/shareddefaults"
+)
+
+// SharedCredsProviderName provides a name of SharedCreds provider
+const SharedCredsProviderName = "SharedCredentialsProvider"
+
+var (
+ // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
+ ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
+)
+
+// A SharedCredentialsProvider retrieves access key pair (access key ID,
+// secret access key, and session token if present) credentials from the current
+// user's home directory, and keeps track if those credentials are expired.
+//
+// Profile ini file example: $HOME/.aws/credentials
+type SharedCredentialsProvider struct {
+ // Path to the shared credentials file.
+ //
+ // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
+ // env value is empty will default to current user's home directory.
+ // Linux/OSX: "$HOME/.aws/credentials"
+ // Windows: "%USERPROFILE%\.aws\credentials"
+ Filename string
+
+ // AWS Profile to extract credentials from the shared credentials file. If empty
+ // will default to environment variable "AWS_PROFILE" or "default" if
+ // environment variable is also not set.
+ Profile string
+
+ // retrieved states if the credentials have been successfully retrieved.
+ retrieved bool
+}
+
+// NewSharedCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewSharedCredentials(filename, profile string) *Credentials {
+ return NewCredentials(&SharedCredentialsProvider{
+ Filename: filename,
+ Profile: profile,
+ })
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
+func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
+ p.retrieved = false
+
+ filename, err := p.filename()
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, err
+ }
+
+ creds, err := loadProfile(filename, p.profile())
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, err
+ }
+
+ p.retrieved = true
+ return creds, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *SharedCredentialsProvider) IsExpired() bool {
+ return !p.retrieved
+}
+
+// loadProfile loads from the file pointed to by shared credentials filename for profile.
+// The credentials retrieved from the profile will be returned, or an error will be
+// returned if the file cannot be read or the data is invalid.
+func loadProfile(filename, profile string) (Value, error) {
+ config, err := ini.OpenFile(filename)
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
+ }
+
+ iniProfile, ok := config.GetSection(profile)
+ if !ok {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil)
+ }
+
+ id := iniProfile.String("aws_access_key_id")
+ if len(id) == 0 {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
+ fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
+ nil)
+ }
+
+ secret := iniProfile.String("aws_secret_access_key")
+ if len(secret) == 0 {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
+ fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
+ nil)
+ }
+
+ // Default to empty string if not found
+ token := iniProfile.String("aws_session_token")
+
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: token,
+ ProviderName: SharedCredsProviderName,
+ }, nil
+}
+
+// filename returns the filename to use to read AWS shared credentials.
+//
+// Will return an error if the user's home directory path cannot be found.
+func (p *SharedCredentialsProvider) filename() (string, error) {
+ if len(p.Filename) != 0 {
+ return p.Filename, nil
+ }
+
+ if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 {
+ return p.Filename, nil
+ }
+
+ if home := shareddefaults.UserHomeDir(); len(home) == 0 {
+		// Backwards compatibility of the home directory not found error being returned.
+		// This error is too verbose; a failure when opening the file would have been
+		// a better error to return.
+ return "", ErrSharedCredentialsHomeNotFound
+ }
+
+ p.Filename = shareddefaults.SharedCredentialsFilename()
+
+ return p.Filename, nil
+}
+
+// profile returns the AWS shared credentials profile. If empty will read
+// environment variable "AWS_PROFILE". If that is not set profile will
+// return "default".
+func (p *SharedCredentialsProvider) profile() string {
+ if p.Profile == "" {
+ p.Profile = os.Getenv("AWS_PROFILE")
+ }
+ if p.Profile == "" {
+ p.Profile = "default"
+ }
+
+ return p.Profile
+}
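
For reference, a minimal sketch of driving this provider; the file path and profile name are placeholders, and Get is assumed to be the standard method on the enclosing Credentials type (it is defined outside this hunk):

package main

import (
	"fmt"

	"github.com/IBM/ibm-cos-sdk-go/aws/credentials"
)

func main() {
	// Expects an INI file such as:
	//
	//   [dev]
	//   aws_access_key_id     = AKID-EXAMPLE
	//   aws_secret_access_key = SECRET-EXAMPLE
	//   aws_session_token     = TOKEN-EXAMPLE   (optional)
	//
	// Empty filename/profile arguments fall back to the env-var/default
	// resolution implemented by filename() and profile() above.
	creds := credentials.NewSharedCredentials("/home/me/.aws/credentials", "dev")

	v, err := creds.Get()
	if err != nil {
		fmt.Println("load failed:", err)
		return
	}
	fmt.Println("loaded keys via", v.ProviderName) // SharedCredentialsProvider
}
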
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/static_provider.go
new file mode 100644
index 0000000000000..12e10f0963997
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/credentials/static_provider.go
@@ -0,0 +1,57 @@
+package credentials
+
+import (
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+)
+
+// StaticProviderName provides a name of Static provider
+const StaticProviderName = "StaticProvider"
+
+var (
+ // ErrStaticCredentialsEmpty is emitted when static credentials are empty.
+ ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
+)
+
+// A StaticProvider is a set of credentials which are set programmatically,
+// and will never expire.
+type StaticProvider struct {
+ Value
+}
+
+// NewStaticCredentials returns a pointer to a new Credentials object
+// wrapping a static credentials value provider. Token is only required
+// for temporary security credentials retrieved via STS, otherwise an empty
+// string can be passed for this parameter.
+func NewStaticCredentials(id, secret, token string) *Credentials {
+ return NewCredentials(&StaticProvider{Value: Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: token,
+ }})
+}
+
+// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
+// wrapping the static credentials value provider. Same as NewStaticCredentials
+// but takes the creds Value instead of individual fields
+func NewStaticCredentialsFromCreds(creds Value) *Credentials {
+ return NewCredentials(&StaticProvider{Value: creds})
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+ if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+ return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
+ }
+
+ if len(s.Value.ProviderName) == 0 {
+ s.Value.ProviderName = StaticProviderName
+ }
+ return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For StaticProvider, the credentials never expire.
+func (s *StaticProvider) IsExpired() bool {
+ return false
+}
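
A short usage sketch of the static provider; the key values are placeholders, and the empty third argument is the session token, which is only needed for temporary STS-style credentials:

package main

import (
	"fmt"

	"github.com/IBM/ibm-cos-sdk-go/aws/credentials"
)

func main() {
	creds := credentials.NewStaticCredentials("AKID-EXAMPLE", "SECRET-EXAMPLE", "")

	v, err := creds.Get()
	if err != nil {
		fmt.Println(err) // EmptyStaticCreds if either key is blank
		return
	}
	fmt.Println(v.ProviderName) // StaticProvider
}
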
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/defaults/defaults.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/defaults/defaults.go
new file mode 100644
index 0000000000000..f6cbb4481cabf
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/defaults/defaults.go
@@ -0,0 +1,181 @@
+// Package defaults is a collection of helpers to retrieve the SDK's default
+// configuration and handlers.
+//
+// Generally this package shouldn't be used directly; use session.Session
+// instead. This package is useful when you need to reset the defaults
+// of a session or service client to the SDK defaults before setting
+// additional parameters.
+package defaults
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/corehandlers"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials/endpointcreds"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam"
+ "github.com/IBM/ibm-cos-sdk-go/aws/endpoints"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// A Defaults provides a collection of default values for SDK clients.
+type Defaults struct {
+ Config *aws.Config
+ Handlers request.Handlers
+}
+
+// Get returns the SDK's default values with Config and handlers pre-configured.
+func Get() Defaults {
+ cfg := Config()
+ handlers := Handlers()
+ cfg.Credentials = CredChain(cfg, handlers)
+
+ return Defaults{
+ Config: cfg,
+ Handlers: handlers,
+ }
+}
+
+// Config returns the default configuration without credentials.
+// To retrieve a config with credentials also included use
+// `defaults.Get().Config` instead.
+//
+// Generally you shouldn't need to use this method directly, but it
+// is available if you need to reset the configuration of an
+// existing service client or session.
+func Config() *aws.Config {
+ return aws.NewConfig().
+ WithCredentials(credentials.AnonymousCredentials).
+ WithRegion(os.Getenv("AWS_REGION")).
+ WithHTTPClient(http.DefaultClient).
+ WithMaxRetries(aws.UseServiceDefaultRetries).
+ WithLogger(aws.NewDefaultLogger()).
+ WithLogLevel(aws.LogOff).
+ WithEndpointResolver(endpoints.DefaultResolver())
+}
+
+// Handlers returns the default request handlers.
+//
+// Generally you shouldn't need to use this method directly, but it
+// is available if you need to reset the request handlers of an
+// existing service client or session.
+func Handlers() request.Handlers {
+ var handlers request.Handlers
+
+ handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+ handlers.Validate.AfterEachFn = request.HandlerListStopOnError
+ handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+ handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
+ handlers.Build.AfterEachFn = request.HandlerListStopOnError
+ handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+ handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
+ handlers.Send.PushBackNamed(corehandlers.SendHandler)
+ handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
+ handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
+
+ return handlers
+}
+
+// CredChain returns the default credential chain.
+//
+// Generally you shouldn't need to use this method directly, but it
+// is available if you need to reset the credentials of an
+// existing service client or session's Config.
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
+ return credentials.NewCredentials(&credentials.ChainProvider{
+ VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+ Providers: CredProviders(cfg, handlers),
+ })
+}
+
+// CredProviders returns the slice of providers used in
+// the default credential chain.
+//
+// This is useful for applications that need to use some other provider (for
+// example, different environment variables for legacy reasons) but still fall
+// back on the default chain of providers; it allows that default chain to be
+// updated automatically.
+// IBM COS SDK Code -- START
+func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
+ return []credentials.Provider{
+ ibmiam.NewEnvProvider(cfg),
+ ibmiam.NewSharedCredentialsProvider(cfg, "", ""),
+ ibmiam.NewSharedConfigProvider(cfg, "", ""),
+ &credentials.EnvProvider{},
+ &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+ }
+}
+
+// IBM COS SDK Code -- END
+
+const (
+ httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
+)
+
+var lookupHostFn = net.LookupHost
+
+func isLoopbackHost(host string) (bool, error) {
+ ip := net.ParseIP(host)
+ if ip != nil {
+ return ip.IsLoopback(), nil
+ }
+
+ // Host is not an ip, perform lookup
+ addrs, err := lookupHostFn(host)
+ if err != nil {
+ return false, err
+ }
+ for _, addr := range addrs {
+ if !net.ParseIP(addr).IsLoopback() {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+ var errMsg string
+
+ parsed, err := url.Parse(u)
+ if err != nil {
+ errMsg = fmt.Sprintf("invalid URL, %v", err)
+ } else {
+ host := aws.URLHostname(parsed)
+ if len(host) == 0 {
+ errMsg = "unable to parse host from local HTTP cred provider URL"
+ } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil {
+ errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr)
+ } else if !isLoopback {
+ errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host)
+ }
+ }
+
+ if len(errMsg) > 0 {
+ if cfg.Logger != nil {
+ cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err)
+ }
+ return credentials.ErrorProvider{
+ Err: awserr.New("CredentialsEndpointError", errMsg, err),
+ ProviderName: endpointcreds.ProviderName,
+ }
+ }
+
+ return httpCredProvider(cfg, handlers, u)
+}
+
+func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+ return endpointcreds.NewProviderClient(cfg, handlers, u,
+ func(p *endpointcreds.Provider) {
+ p.ExpiryWindow = 5 * time.Minute
+ p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar)
+ },
+ )
+}
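
A sketch of consuming these defaults, e.g. to see which provider in the chain ends up supplying credentials. Whether Retrieve succeeds depends entirely on the environment, and Get on the Credentials type is assumed from outside this hunk:

package main

import (
	"fmt"

	"github.com/IBM/ibm-cos-sdk-go/aws/defaults"
)

func main() {
	d := defaults.Get() // Config + Handlers, with CredChain wired in

	// Per CredProviders above, the chain tries the IBM IAM providers first,
	// then the classic AWS env/shared-credentials providers.
	v, err := d.Config.Credentials.Get()
	if err != nil {
		fmt.Println("no credentials resolved:", err)
		return
	}
	fmt.Println("credentials from", v.ProviderName)
}
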
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/defaults/shared_config.go
new file mode 100644
index 0000000000000..618e39af1aab6
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/defaults/shared_config.go
@@ -0,0 +1,27 @@
+package defaults
+
+import (
+ "github.com/IBM/ibm-cos-sdk-go/internal/shareddefaults"
+)
+
+// SharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared credentials file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/credentials
+// - Windows: %USERPROFILE%\.aws\credentials
+func SharedCredentialsFilename() string {
+ return shareddefaults.SharedCredentialsFilename()
+}
+
+// SharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+ return shareddefaults.SharedConfigFilename()
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/doc.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/doc.go
new file mode 100644
index 0000000000000..4fcb6161848e8
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/doc.go
@@ -0,0 +1,56 @@
+// Package aws provides the core SDK's utilities and shared types. Use this package's
+// utilities to simplify setting and reading API operations parameters.
+//
+// Value and Pointer Conversion Utilities
+//
+// This package includes a helper conversion utility for each scalar type the SDK's
+// API use. These utilities make getting a pointer of the scalar, and dereferencing
+// a pointer easier.
+//
+// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
+// The Pointer to value will safely dereference the pointer and return its value.
+// If the pointer was nil, the scalar's zero value will be returned.
+//
+// The value to pointer functions will be named after the scalar type. So to get
+// a *string from a string value, use the "String" function. This makes it easy
+// to get a pointer to a literal string value, because getting the address of a
+// literal requires assigning the value to a variable first.
+//
+// var strPtr *string
+//
+// // Without the SDK's conversion functions
+// str := "my string"
+// strPtr = &str
+//
+// // With the SDK's conversion functions
+// strPtr = aws.String("my string")
+//
+// // Convert *string to string value
+// str = aws.StringValue(strPtr)
+//
+// In addition to scalars, the aws package also includes conversion utilities for
+// map and slice types commonly used in API parameters. The map and slice
+// conversion functions use a similar naming pattern to the scalar conversion
+// functions.
+//
+// var strPtrs []*string
+// var strs []string = []string{"Go", "Gophers", "Go"}
+//
+// // Convert []string to []*string
+// strPtrs = aws.StringSlice(strs)
+//
+// // Convert []*string to []string
+// strs = aws.StringValueSlice(strPtrs)
+//
+// SDK Default HTTP Client
+//
+// The SDK will use the http.DefaultClient if an HTTP client is not provided to
+// the SDK's Session, or service client constructor. This means that if the
+// http.DefaultClient is modified by other components of your application the
+// modifications will be picked up by the SDK as well.
+//
+// In some cases this might be intended, but it is a better practice to create
+// a custom HTTP Client to share explicitly through your application. You can
+// configure the SDK to use the custom HTTP Client by setting the HTTPClient
+// value of the SDK's Config type when creating a Session or service client.
+package aws
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/decode.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/decode.go
new file mode 100644
index 0000000000000..89b9117ff52b1
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/decode.go
@@ -0,0 +1,193 @@
+package endpoints
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+)
+
+type modelDefinition map[string]json.RawMessage
+
+// A DecodeModelOptions provides the options for how the endpoints model
+// definition is decoded.
+type DecodeModelOptions struct {
+ SkipCustomizations bool
+}
+
+// Set combines all of the option functions together.
+func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) {
+ for _, fn := range optFns {
+ fn(d)
+ }
+}
+
+// DecodeModel unmarshals a Regions and Endpoint model definition file into
+// a endpoint Resolver. If the file format is not supported, or an error occurs
+// when unmarshaling the model an error will be returned.
+//
+// Casting the return value of this func to a EnumPartitions will
+// allow you to get a list of the partitions in the order the endpoints
+// will be resolved in.
+//
+// resolver, err := endpoints.DecodeModel(reader)
+//
+// partitions := resolver.(endpoints.EnumPartitions).Partitions()
+// for _, p := range partitions {
+// // ... inspect partitions
+// }
+func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) {
+ var opts DecodeModelOptions
+ opts.Set(optFns...)
+
+ // Get the version of the partition file to determine what
+ // unmarshaling model to use.
+ modelDef := modelDefinition{}
+ if err := json.NewDecoder(r).Decode(&modelDef); err != nil {
+ return nil, newDecodeModelError("failed to decode endpoints model", err)
+ }
+
+ var version string
+ if b, ok := modelDef["version"]; ok {
+ version = string(b)
+ } else {
+ return nil, newDecodeModelError("endpoints version not found in model", nil)
+ }
+
+ if version == "3" {
+ return decodeV3Endpoints(modelDef, opts)
+ }
+
+ return nil, newDecodeModelError(
+ fmt.Sprintf("endpoints version %s, not supported", version), nil)
+}
+
+func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) {
+ b, ok := modelDef["partitions"]
+ if !ok {
+ return nil, newDecodeModelError("endpoints model missing partitions", nil)
+ }
+
+ ps := partitions{}
+ if err := json.Unmarshal(b, &ps); err != nil {
+ return nil, newDecodeModelError("failed to decode endpoints model", err)
+ }
+
+ if opts.SkipCustomizations {
+ return ps, nil
+ }
+
+ // Customization
+ for i := 0; i < len(ps); i++ {
+ p := &ps[i]
+ custRegionalS3(p)
+ custRmIotDataService(p)
+ custFixAppAutoscalingChina(p)
+ custFixAppAutoscalingUsGov(p)
+ }
+
+ return ps, nil
+}
+
+func custRegionalS3(p *partition) {
+ if p.ID != "aws" {
+ return
+ }
+
+ service, ok := p.Services["s3"]
+ if !ok {
+ return
+ }
+
+ const awsGlobal = "aws-global"
+ const usEast1 = "us-east-1"
+
+ // If global endpoint already exists no customization needed.
+ if _, ok := service.Endpoints[endpointKey{Region: awsGlobal}]; ok {
+ return
+ }
+
+ service.PartitionEndpoint = awsGlobal
+ if _, ok := service.Endpoints[endpointKey{Region: usEast1}]; !ok {
+ service.Endpoints[endpointKey{Region: usEast1}] = endpoint{}
+ }
+ service.Endpoints[endpointKey{Region: awsGlobal}] = endpoint{
+ Hostname: "s3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: usEast1,
+ },
+ }
+
+ p.Services["s3"] = service
+}
+
+func custRmIotDataService(p *partition) {
+ delete(p.Services, "data.iot")
+}
+
+func custFixAppAutoscalingChina(p *partition) {
+ if p.ID != "aws-cn" {
+ return
+ }
+
+ const serviceName = "application-autoscaling"
+ s, ok := p.Services[serviceName]
+ if !ok {
+ return
+ }
+
+ const expectHostname = `autoscaling.{region}.amazonaws.com`
+ serviceDefault := s.Defaults[defaultKey{}]
+ if e, a := expectHostname, serviceDefault.Hostname; e != a {
+ fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a)
+ return
+ }
+ serviceDefault.Hostname = expectHostname + ".cn"
+ s.Defaults[defaultKey{}] = serviceDefault
+ p.Services[serviceName] = s
+}
+
+func custFixAppAutoscalingUsGov(p *partition) {
+ if p.ID != "aws-us-gov" {
+ return
+ }
+
+ const serviceName = "application-autoscaling"
+ s, ok := p.Services[serviceName]
+ if !ok {
+ return
+ }
+
+ serviceDefault := s.Defaults[defaultKey{}]
+ if a := serviceDefault.CredentialScope.Service; a != "" {
+ fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a)
+ return
+ }
+
+ if a := serviceDefault.Hostname; a != "" {
+ fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a)
+ return
+ }
+
+ serviceDefault.CredentialScope.Service = "application-autoscaling"
+ serviceDefault.Hostname = "autoscaling.{region}.amazonaws.com"
+
+ if s.Defaults == nil {
+ s.Defaults = make(endpointDefaults)
+ }
+
+ s.Defaults[defaultKey{}] = serviceDefault
+
+ p.Services[serviceName] = s
+}
+
+type decodeModelError struct {
+ awsError
+}
+
+func newDecodeModelError(msg string, err error) decodeModelError {
+ return decodeModelError{
+ awsError: awserr.New("DecodeEndpointsModelError", msg, err),
+ }
+}
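
To make the decoder above concrete, the smallest model it accepts is sketched below. Note that the version must be the JSON number 3, since DecodeModel compares the raw bytes against the string "3"; an empty partitions list decodes fine but resolves nothing:

package main

import (
	"fmt"
	"strings"

	"github.com/IBM/ibm-cos-sdk-go/aws/endpoints"
)

func main() {
	// A quoted "3" would be rejected, since the raw JSON bytes would then
	// be `"3"` rather than `3`.
	model := `{"version": 3, "partitions": []}`

	resolver, err := endpoints.DecodeModel(strings.NewReader(model))
	if err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	// The EnumPartitions cast follows the doc comment on DecodeModel above.
	parts := resolver.(endpoints.EnumPartitions).Partitions()
	fmt.Println("decoded", len(parts), "partitions") // decoded 0 partitions
}
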
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/defaults.go
new file mode 100644
index 0000000000000..3bb705161a027
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/defaults.go
@@ -0,0 +1,17983 @@
+// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
+
+package endpoints
+
+import (
+ "regexp"
+)
+
+// Partition identifiers
+const (
+ AwsPartitionID = "aws" // AWS Standard partition.
+ AwsCnPartitionID = "aws-cn" // AWS China partition.
+ AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition.
+ AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition.
+ AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition.
+)
+
+// AWS Standard partition's regions.
+const (
+ AfSouth1RegionID = "af-south-1" // Africa (Cape Town).
+ ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong).
+ ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo).
+ ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul).
+ ApNortheast3RegionID = "ap-northeast-3" // Asia Pacific (Osaka).
+ ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai).
+ ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore).
+ ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney).
+ CaCentral1RegionID = "ca-central-1" // Canada (Central).
+ EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt).
+ EuNorth1RegionID = "eu-north-1" // Europe (Stockholm).
+ EuSouth1RegionID = "eu-south-1" // Europe (Milan).
+ EuWest1RegionID = "eu-west-1" // Europe (Ireland).
+ EuWest2RegionID = "eu-west-2" // Europe (London).
+ EuWest3RegionID = "eu-west-3" // Europe (Paris).
+ MeSouth1RegionID = "me-south-1" // Middle East (Bahrain).
+ SaEast1RegionID = "sa-east-1" // South America (Sao Paulo).
+ UsEast1RegionID = "us-east-1" // US East (N. Virginia).
+ UsEast2RegionID = "us-east-2" // US East (Ohio).
+ UsWest1RegionID = "us-west-1" // US West (N. California).
+ UsWest2RegionID = "us-west-2" // US West (Oregon).
+)
+
+// AWS China partition's regions.
+const (
+ CnNorth1RegionID = "cn-north-1" // China (Beijing).
+ CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia).
+)
+
+// AWS GovCloud (US) partition's regions.
+const (
+ UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East).
+ UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US-West).
+)
+
+// AWS ISO (US) partition's regions.
+const (
+ UsIsoEast1RegionID = "us-iso-east-1" // US ISO East.
+)
+
+// AWS ISOB (US) partition's regions.
+const (
+ UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio).
+)
+
+// DefaultResolver returns an Endpoint resolver that will be able
+// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US).
+//
+// Use DefaultPartitions() to get the list of the default partitions.
+func DefaultResolver() Resolver {
+ return defaultPartitions
+}
+
+// DefaultPartitions returns a list of the partitions the SDK is bundled
+// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US).
+//
+// partitions := endpoints.DefaultPartitions
+// for _, p := range partitions {
+// // ... inspect partitions
+// }
+func DefaultPartitions() []Partition {
+ return defaultPartitions.Partitions()
+}
+
+var defaultPartitions = partitions{
+ awsPartition,
+ awscnPartition,
+ awsusgovPartition,
+ awsisoPartition,
+ awsisobPartition,
+}
+
+// AwsPartition returns the Resolver for AWS Standard.
+func AwsPartition() Partition {
+ return awsPartition.Partition()
+}
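
A resolution sketch against the bundled data; EndpointFor is assumed to follow the usual aws-sdk-go Resolver signature (it is defined elsewhere in this package), and the exact host returned depends on the generated tables below:

package main

import (
	"fmt"

	"github.com/IBM/ibm-cos-sdk-go/aws/endpoints"
)

func main() {
	r := endpoints.DefaultResolver()

	// Resolve the s3 endpoint for us-east-1 from the bundled partition data.
	ep, err := r.EndpointFor("s3", endpoints.UsEast1RegionID)
	if err != nil {
		fmt.Println("resolve failed:", err)
		return
	}
	fmt.Println(ep.URL) // an s3.*.amazonaws.com URL; exact host depends on the tables
}
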
+
+var awsPartition = partition{
+ ID: "aws",
+ Name: "AWS Standard",
+ DNSSuffix: "amazonaws.com",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ Regions: regions{
+ "af-south-1": region{
+ Description: "Africa (Cape Town)",
+ },
+ "ap-east-1": region{
+ Description: "Asia Pacific (Hong Kong)",
+ },
+ "ap-northeast-1": region{
+ Description: "Asia Pacific (Tokyo)",
+ },
+ "ap-northeast-2": region{
+ Description: "Asia Pacific (Seoul)",
+ },
+ "ap-northeast-3": region{
+ Description: "Asia Pacific (Osaka)",
+ },
+ "ap-south-1": region{
+ Description: "Asia Pacific (Mumbai)",
+ },
+ "ap-southeast-1": region{
+ Description: "Asia Pacific (Singapore)",
+ },
+ "ap-southeast-2": region{
+ Description: "Asia Pacific (Sydney)",
+ },
+ "ca-central-1": region{
+ Description: "Canada (Central)",
+ },
+ "eu-central-1": region{
+ Description: "Europe (Frankfurt)",
+ },
+ "eu-north-1": region{
+ Description: "Europe (Stockholm)",
+ },
+ "eu-south-1": region{
+ Description: "Europe (Milan)",
+ },
+ "eu-west-1": region{
+ Description: "Europe (Ireland)",
+ },
+ "eu-west-2": region{
+ Description: "Europe (London)",
+ },
+ "eu-west-3": region{
+ Description: "Europe (Paris)",
+ },
+ "me-south-1": region{
+ Description: "Middle East (Bahrain)",
+ },
+ "sa-east-1": region{
+ Description: "South America (Sao Paulo)",
+ },
+ "us-east-1": region{
+ Description: "US East (N. Virginia)",
+ },
+ "us-east-2": region{
+ Description: "US East (Ohio)",
+ },
+ "us-west-1": region{
+ Description: "US West (N. California)",
+ },
+ "us-west-2": region{
+ Description: "US West (Oregon)",
+ },
+ },
+ Services: services{
+ "a4b": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ },
+ },
+ "access-analyzer": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "access-analyzer-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "access-analyzer-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "access-analyzer-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "access-analyzer-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "acm": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1-fips",
+ }: endpoint{
+ Hostname: "acm-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "acm-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "acm-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "acm-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "acm-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "acm-pca": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "acm-pca-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "acm-pca-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "acm-pca-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "acm-pca-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "acm-pca-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "airflow": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "amplifybackend": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "api.detective": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "api.detective-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "api.detective-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "api.detective-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "api.detective-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "api.ecr": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{
+ Hostname: "api.ecr.af-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "af-south-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{
+ Hostname: "api.ecr.ap-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{
+ Hostname: "api.ecr.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{
+ Hostname: "api.ecr.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{
+ Hostname: "api.ecr.ap-northeast-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-3",
+ },
+ },
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{
+ Hostname: "api.ecr.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{
+ Hostname: "api.ecr.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{
+ Hostname: "api.ecr.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{
+ Hostname: "api.ecr.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{
+ Hostname: "api.ecr.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{
+ Hostname: "api.ecr.eu-north-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{
+ Hostname: "api.ecr.eu-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{
+ Hostname: "api.ecr.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{
+ Hostname: "api.ecr.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{
+ Hostname: "api.ecr.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ endpointKey{
+ Region: "fips-dkr-us-east-1",
+ }: endpoint{
+ Hostname: "ecr-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-dkr-us-east-2",
+ }: endpoint{
+ Hostname: "ecr-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-dkr-us-west-1",
+ }: endpoint{
+ Hostname: "ecr-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-dkr-us-west-2",
+ }: endpoint{
+ Hostname: "ecr-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "ecr-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "ecr-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "ecr-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "ecr-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{
+ Hostname: "api.ecr.me-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-south-1",
+ },
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{
+ Hostname: "api.ecr.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "api.ecr.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{
+ Hostname: "api.ecr.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{
+ Hostname: "api.ecr.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Hostname: "api.ecr.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "api.elastic-inference": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{
+ Hostname: "api.elastic-inference.ap-northeast-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{
+ Hostname: "api.elastic-inference.ap-northeast-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{
+ Hostname: "api.elastic-inference.eu-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "api.elastic-inference.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{
+ Hostname: "api.elastic-inference.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Hostname: "api.elastic-inference.us-west-2.amazonaws.com",
+ },
+ },
+ },
+ "api.fleethub.iot": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ },
+ },
+ "api.mediatailor": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "api.pricing": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "pricing",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ },
+ },
+ "api.sagemaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "apigateway": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "app-integrations": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "appflow": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "application-autoscaling": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "appmesh": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "appstream2": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ CredentialScope: credentialScope{
+ Service: "appstream",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips",
+ }: endpoint{
+ Hostname: "appstream2-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "appsync": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "athena": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "athena-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "athena-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "athena-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "athena-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "autoscaling": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "autoscaling-plans": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "backup": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "batch": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "fips.batch.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "fips.batch.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "fips.batch.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "fips.batch.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "budgets": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "budgets.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "ce": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "ce.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "chime": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "chime.us-east-1.amazonaws.com",
+ Protocols: []string{"https"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "cloud9": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "clouddirectory": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "cloudformation": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "cloudformation-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "cloudformation-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "cloudformation-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "cloudformation-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "cloudfront": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "cloudfront.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "cloudhsm": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "cloudhsmv2": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "cloudhsm",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "cloudsearch": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "cloudtrail": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "cloudtrail-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "cloudtrail-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "cloudtrail-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "cloudtrail-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "codeartifact": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "codebuild": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "codebuild-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "codebuild-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "codebuild-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "codebuild-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "codecommit": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips",
+ }: endpoint{
+ Hostname: "codecommit-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "codedeploy": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "codedeploy-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "codedeploy-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "codedeploy-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "codedeploy-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "codeguru-reviewer": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "codepipeline": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "codepipeline-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "codepipeline-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "codepipeline-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "codepipeline-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "codepipeline-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "codestar": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "codestar-connections": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "cognito-identity": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "cognito-identity-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "cognito-identity-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "cognito-identity-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "cognito-idp": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "cognito-idp-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "cognito-idp-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "cognito-idp-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "cognito-idp-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "cognito-sync": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "comprehend": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "comprehend-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "comprehend-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "comprehend-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "comprehendmedical": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "config": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "config-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "config-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "config-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "config-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "connect": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "contact-lens": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "cur": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ },
+ },
+ "data.mediastore": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "dataexchange": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "datapipeline": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "datasync": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "datasync-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "datasync-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "datasync-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "datasync-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "datasync-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "dax": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "devicefarm": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "directconnect": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "directconnect-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "directconnect-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "directconnect-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "directconnect-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "discovery": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "dms": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "dms-fips",
+ }: endpoint{
+ Hostname: "dms-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "docdb": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{
+ Hostname: "rds.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{
+ Hostname: "rds.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{
+ Hostname: "rds.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{
+ Hostname: "rds.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{
+ Hostname: "rds.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{
+ Hostname: "rds.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{
+ Hostname: "rds.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{
+ Hostname: "rds.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{
+ Hostname: "rds.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{
+ Hostname: "rds.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{
+ Hostname: "rds.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "rds.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{
+ Hostname: "rds.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Hostname: "rds.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "ds": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "ds-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "ds-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "ds-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "ds-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "ds-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "dynamodb": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1-fips",
+ }: endpoint{
+ Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "local",
+ }: endpoint{
+ Hostname: "localhost:8000",
+ Protocols: []string{"http"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "ebs": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "ebs-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "ebs-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "ebs-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "ebs-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "ebs-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "ec2": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "ec2-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "ec2-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "ec2-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "ec2-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "ec2-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "ecs": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "ecs-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "ecs-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "ecs-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "ecs-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "eks": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "fips.eks.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "fips.eks.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "fips.eks.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "fips.eks.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "elasticache": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips",
+ }: endpoint{
+ Hostname: "elasticache-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "elasticbeanstalk": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "elasticfilesystem": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-af-south-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "af-south-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-ap-east-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-ap-northeast-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-ap-northeast-2",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-ap-northeast-3",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-3",
+ },
+ },
+ endpointKey{
+ Region: "fips-ap-south-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-ap-southeast-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-ap-southeast-2",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-eu-central-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-eu-north-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-eu-south-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-eu-west-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-eu-west-2",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-eu-west-3",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ endpointKey{
+ Region: "fips-me-south-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-south-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-sa-east-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "elasticmapreduce": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ SSLCommonName: "{region}.{service}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{
+ SSLCommonName: "{service}.{region}.{dnsSuffix}",
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ SSLCommonName: "{service}.{region}.{dnsSuffix}",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "elastictranscoder": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "email": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "emr-containers": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "entitlement.marketplace": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "aws-marketplace",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ },
+ },
+ "es": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips",
+ }: endpoint{
+ Hostname: "es-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "events": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "events-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "events-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "events-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "events-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "firehose": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "firehose-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "firehose-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "firehose-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "firehose-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "fms": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-af-south-1",
+ }: endpoint{
+ Hostname: "fms-fips.af-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "af-south-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-ap-east-1",
+ }: endpoint{
+ Hostname: "fms-fips.ap-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-ap-northeast-1",
+ }: endpoint{
+ Hostname: "fms-fips.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-ap-northeast-2",
+ }: endpoint{
+ Hostname: "fms-fips.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-ap-south-1",
+ }: endpoint{
+ Hostname: "fms-fips.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-ap-southeast-1",
+ }: endpoint{
+ Hostname: "fms-fips.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-ap-southeast-2",
+ }: endpoint{
+ Hostname: "fms-fips.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "fms-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-eu-central-1",
+ }: endpoint{
+ Hostname: "fms-fips.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-eu-south-1",
+ }: endpoint{
+ Hostname: "fms-fips.eu-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-eu-west-1",
+ }: endpoint{
+ Hostname: "fms-fips.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-eu-west-2",
+ }: endpoint{
+ Hostname: "fms-fips.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-eu-west-3",
+ }: endpoint{
+ Hostname: "fms-fips.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ endpointKey{
+ Region: "fips-me-south-1",
+ }: endpoint{
+ Hostname: "fms-fips.me-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-south-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-sa-east-1",
+ }: endpoint{
+ Hostname: "fms-fips.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "fms-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "fms-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "fms-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "fms-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "forecast": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "forecastquery": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "fsx": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-prod-ca-central-1",
+ }: endpoint{
+ Hostname: "fsx-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-prod-us-east-1",
+ }: endpoint{
+ Hostname: "fsx-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-prod-us-east-2",
+ }: endpoint{
+ Hostname: "fsx-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-prod-us-west-1",
+ }: endpoint{
+ Hostname: "fsx-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-prod-us-west-2",
+ }: endpoint{
+ Hostname: "fsx-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "gamelift": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "glacier": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "glacier-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "glacier-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "glacier-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "glacier-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "glacier-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "glue": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "glue-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "glue-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "glue-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "glue-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "greengrass": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "groundstation": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "groundstation-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "groundstation-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "groundstation-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "guardduty": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "guardduty-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "guardduty-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "guardduty-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "guardduty-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "health": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "health-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ },
+ },
+ "healthlake": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ },
+ },
+ "honeycode": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "iam.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "iam-fips",
+ }: endpoint{
+ Hostname: "iam-fips.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "identitystore": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "importexport": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "importexport.amazonaws.com",
+ SignatureVersions: []string{"v2", "v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ Service: "IngestionService",
+ },
+ },
+ },
+ },
+ "inspector": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "inspector-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "inspector-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "inspector-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "inspector-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "iot": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "iotanalytics": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "iotevents": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "ioteventsdata": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{
+ Hostname: "data.iotevents.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{
+ Hostname: "data.iotevents.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{
+ Hostname: "data.iotevents.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{
+ Hostname: "data.iotevents.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{
+ Hostname: "data.iotevents.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{
+ Hostname: "data.iotevents.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{
+ Hostname: "data.iotevents.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "data.iotevents.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{
+ Hostname: "data.iotevents.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Hostname: "data.iotevents.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "iotsecuredtunneling": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "iotthingsgraph": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "iotthingsgraph",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "iotwireless": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{
+ Hostname: "api.iotwireless.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "api.iotwireless.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "kafka": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "kinesis": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "kinesis-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "kinesis-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "kinesis-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "kinesis-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "kinesisanalytics": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "kinesisvideo": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "kms": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "lakeformation": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "lakeformation-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "lakeformation-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "lakeformation-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "lakeformation-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "lambda": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "lambda-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "lambda-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "lambda-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "lambda-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "license-manager": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "license-manager-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "license-manager-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "license-manager-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "license-manager-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "lightsail": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "logs": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "logs-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "logs-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "logs-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "logs-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "lookoutequipment": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ },
+ },
+ "lookoutvision": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "machinelearning": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ },
+ },
+ "macie": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "macie-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "macie-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "macie2": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "macie2-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "macie2-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "macie2-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "macie2-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "managedblockchain": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ },
+ },
+ "marketplacecommerceanalytics": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ },
+ },
+ "mediaconnect": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "mediaconvert": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "mediaconvert-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "mediaconvert-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "mediaconvert-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "mediaconvert-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "medialive": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "medialive-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "medialive-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "medialive-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "mediapackage": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "mediastore": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "metering.marketplace": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "aws-marketplace",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "mgh": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "mobileanalytics": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ },
+ },
+ "models.lex": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "lex",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "models-fips.lex.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "models-fips.lex.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "monitoring": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "monitoring-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "monitoring-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "monitoring-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "monitoring-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "mq": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "mq-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "mq-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "mq-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "mq-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "mturk-requester": service{
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "sandbox",
+ }: endpoint{
+ Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ },
+ },
+ "neptune": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{
+ Hostname: "rds.ap-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{
+ Hostname: "rds.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{
+ Hostname: "rds.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{
+ Hostname: "rds.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{
+ Hostname: "rds.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{
+ Hostname: "rds.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{
+ Hostname: "rds.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{
+ Hostname: "rds.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{
+ Hostname: "rds.eu-north-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{
+ Hostname: "rds.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{
+ Hostname: "rds.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{
+ Hostname: "rds.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{
+ Hostname: "rds.me-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-south-1",
+ },
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{
+ Hostname: "rds.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "rds.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{
+ Hostname: "rds.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{
+ Hostname: "rds.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Hostname: "rds.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "oidc": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{
+ Hostname: "oidc.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{
+ Hostname: "oidc.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{
+ Hostname: "oidc.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{
+ Hostname: "oidc.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{
+ Hostname: "oidc.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{
+ Hostname: "oidc.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{
+ Hostname: "oidc.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{
+ Hostname: "oidc.eu-north-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{
+ Hostname: "oidc.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{
+ Hostname: "oidc.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "oidc.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{
+ Hostname: "oidc.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Hostname: "oidc.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "opsworks": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "opsworks-cm": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "organizations": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "organizations.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-aws-global",
+ }: endpoint{
+ Hostname: "organizations-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "outposts": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "outposts-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "outposts-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "outposts-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "outposts-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "outposts-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "personalize": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "pinpoint": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "mobiletargeting",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "pinpoint-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "pinpoint-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "pinpoint.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Hostname: "pinpoint.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "polly": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "polly-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "polly-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "polly-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "polly-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "portal.sso": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{
+ Hostname: "portal.sso.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{
+ Hostname: "portal.sso.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{
+ Hostname: "portal.sso.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{
+ Hostname: "portal.sso.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{
+ Hostname: "portal.sso.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{
+ Hostname: "portal.sso.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "portal.sso.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{
+ Hostname: "portal.sso.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Hostname: "portal.sso.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "profile": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "projects.iot1click": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "qldb": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "ram": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "ram-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "ram-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "ram-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "ram-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "ram-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "rds": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "rds-fips.ca-central-1",
+ }: endpoint{
+ Hostname: "rds-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "rds-fips.us-east-1",
+ }: endpoint{
+ Hostname: "rds-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "rds-fips.us-east-2",
+ }: endpoint{
+ Hostname: "rds-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "rds-fips.us-west-1",
+ }: endpoint{
+ Hostname: "rds-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "rds-fips.us-west-2",
+ }: endpoint{
+ Hostname: "rds-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ SSLCommonName: "{service}.{dnsSuffix}",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "redshift": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "redshift-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "redshift-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "redshift-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "redshift-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "redshift-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "rekognition": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "rekognition-fips.ca-central-1",
+ }: endpoint{
+ Hostname: "rekognition-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "rekognition-fips.us-east-1",
+ }: endpoint{
+ Hostname: "rekognition-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "rekognition-fips.us-east-2",
+ }: endpoint{
+ Hostname: "rekognition-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "rekognition-fips.us-west-1",
+ }: endpoint{
+ Hostname: "rekognition-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "rekognition-fips.us-west-2",
+ }: endpoint{
+ Hostname: "rekognition-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "resource-groups": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "resource-groups-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "resource-groups-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "resource-groups-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "resource-groups-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "robomaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "route53": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "route53.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-aws-global",
+ }: endpoint{
+ Hostname: "route53-fips.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "route53domains": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ },
+ },
+ "route53resolver": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "runtime.lex": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "lex",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "runtime-fips.lex.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "runtime-fips.lex.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "runtime.sagemaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "s3": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedTrue,
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "accesspoint-af-south-1",
+ }: endpoint{
+ Hostname: "s3-accesspoint.af-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "accesspoint-ap-east-1",
+ }: endpoint{
+ Hostname: "s3-accesspoint.ap-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "accesspoint-ap-northeast-1",
+ }: endpoint{
+ Hostname: "s3-accesspoint.ap-northeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "accesspoint-ap-northeast-2",
+ }: endpoint{
+ Hostname: "s3-accesspoint.ap-northeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "accesspoint-ap-northeast-3",
+ }: endpoint{
+ Hostname: "s3-accesspoint.ap-northeast-3.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "accesspoint-ap-south-1",
+ }: endpoint{
+ Hostname: "s3-accesspoint.ap-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "accesspoint-ap-southeast-1",
+ }: endpoint{
+ Hostname: "s3-accesspoint.ap-southeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "accesspoint-ap-southeast-2",
+ }: endpoint{
+ Hostname: "s3-accesspoint.ap-southeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "accesspoint-ca-central-1",
+ }: endpoint{
+ Hostname: "s3-accesspoint.ca-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "accesspoint-eu-central-1",
+ }: endpoint{
+ Hostname: "s3-accesspoint.eu-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "accesspoint-eu-north-1",
+ }: endpoint{
+ Hostname: "s3-accesspoint.eu-north-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "accesspoint-eu-south-1",
+ }: endpoint{
+ Hostname: "s3-accesspoint.eu-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "accesspoint-eu-west-1",
+ }: endpoint{
+ Hostname: "s3-accesspoint.eu-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "accesspoint-eu-west-2",
+ }: endpoint{
+ Hostname: "s3-accesspoint.eu-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "accesspoint-eu-west-3",
+ }: endpoint{
+ Hostname: "s3-accesspoint.eu-west-3.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "accesspoint-me-south-1",
+ }: endpoint{
+ Hostname: "s3-accesspoint.me-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "accesspoint-sa-east-1",
+ }: endpoint{
+ Hostname: "s3-accesspoint.sa-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "accesspoint-us-east-1",
+ }: endpoint{
+ Hostname: "s3-accesspoint.us-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "accesspoint-us-east-2",
+ }: endpoint{
+ Hostname: "s3-accesspoint.us-east-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "accesspoint-us-west-1",
+ }: endpoint{
+ Hostname: "s3-accesspoint.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "accesspoint-us-west-2",
+ }: endpoint{
+ Hostname: "s3-accesspoint.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{
+ Hostname: "s3.ap-northeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{
+ Hostname: "s3.ap-southeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{
+ Hostname: "s3.ap-southeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpointKey{
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "s3.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{
+ Hostname: "s3.eu-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-accesspoint-ca-central-1",
+ }: endpoint{
+ Hostname: "s3-accesspoint-fips.ca-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "fips-accesspoint-us-east-1",
+ }: endpoint{
+ Hostname: "s3-accesspoint-fips.us-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "fips-accesspoint-us-east-2",
+ }: endpoint{
+ Hostname: "s3-accesspoint-fips.us-east-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "fips-accesspoint-us-west-1",
+ }: endpoint{
+ Hostname: "s3-accesspoint-fips.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "fips-accesspoint-us-west-2",
+ }: endpoint{
+ Hostname: "s3-accesspoint-fips.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "s3-external-1",
+ }: endpoint{
+ Hostname: "s3-external-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{
+ Hostname: "s3.sa-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "s3.us-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{
+ Hostname: "s3.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Hostname: "s3.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ },
+ },
+ "s3-control": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{
+ Hostname: "s3-control.ap-northeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{
+ Hostname: "s3-control.ap-northeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{
+ Hostname: "s3-control.ap-northeast-3.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-3",
+ },
+ },
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{
+ Hostname: "s3-control.ap-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{
+ Hostname: "s3-control.ap-southeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{
+ Hostname: "s3-control.ap-southeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{
+ Hostname: "s3-control.ca-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "ca-central-1-fips",
+ }: endpoint{
+ Hostname: "s3-control-fips.ca-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{
+ Hostname: "s3-control.eu-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{
+ Hostname: "s3-control.eu-north-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{
+ Hostname: "s3-control.eu-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{
+ Hostname: "s3-control.eu-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{
+ Hostname: "s3-control.eu-west-3.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{
+ Hostname: "s3-control.sa-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "s3-control.us-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "s3-control-fips.us-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{
+ Hostname: "s3-control.us-east-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "s3-control-fips.us-east-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{
+ Hostname: "s3-control.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "s3-control-fips.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Hostname: "s3-control.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "s3-control-fips.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "savingsplans": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "savingsplans.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "schemas": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "sdb": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"v2"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "sdb.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "secretsmanager": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "secretsmanager-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "secretsmanager-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "secretsmanager-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "secretsmanager-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "securityhub": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "securityhub-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "securityhub-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "securityhub-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "securityhub-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "serverlessrepo": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ },
+ "servicecatalog": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "servicecatalog-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "servicecatalog-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "servicecatalog-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "servicecatalog-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "servicediscovery": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "servicediscovery-fips",
+ }: endpoint{
+ Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "servicequotas": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "session.qldb": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "shield": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ SSLCommonName: "shield.us-east-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "shield.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-aws-global",
+ }: endpoint{
+ Hostname: "shield-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "sms": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "sms-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "sms-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "sms-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "sms-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "snowball": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ap-northeast-1",
+ }: endpoint{
+ Hostname: "snowball-fips.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-ap-northeast-2",
+ }: endpoint{
+ Hostname: "snowball-fips.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-ap-northeast-3",
+ }: endpoint{
+ Hostname: "snowball-fips.ap-northeast-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-3",
+ },
+ },
+ endpointKey{
+ Region: "fips-ap-south-1",
+ }: endpoint{
+ Hostname: "snowball-fips.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-ap-southeast-1",
+ }: endpoint{
+ Hostname: "snowball-fips.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-ap-southeast-2",
+ }: endpoint{
+ Hostname: "snowball-fips.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "snowball-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-eu-central-1",
+ }: endpoint{
+ Hostname: "snowball-fips.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-eu-west-1",
+ }: endpoint{
+ Hostname: "snowball-fips.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-eu-west-2",
+ }: endpoint{
+ Hostname: "snowball-fips.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-eu-west-3",
+ }: endpoint{
+ Hostname: "snowball-fips.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ endpointKey{
+ Region: "fips-sa-east-1",
+ }: endpoint{
+ Hostname: "snowball-fips.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "snowball-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "snowball-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "snowball-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "snowball-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "sns": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "sns-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "sns-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "sns-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "sns-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "sqs": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "sqs-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "sqs-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "sqs-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "sqs-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ SSLCommonName: "queue.{dnsSuffix}",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "ssm": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "ssm-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "ssm-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "ssm-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "ssm-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "ssm-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "states": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "states-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "states-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "states-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "states-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "storagegateway": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips",
+ }: endpoint{
+ Hostname: "storagegateway-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1-fips",
+ }: endpoint{
+ Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "local",
+ }: endpoint{
+ Hostname: "localhost:8000",
+ Protocols: []string{"http"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "sts": service{
+ PartitionEndpoint: "aws-global",
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "sts.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "sts-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "sts-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "sts-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "sts-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "support": service{
+ PartitionEndpoint: "aws-global",
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "support.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "swf": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "swf-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "swf-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "swf-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "swf-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "tagging": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "transcribe": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "fips.transcribe.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "fips.transcribe.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "fips.transcribe.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "fips.transcribe.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "transcribestreaming": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "transfer": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "transfer-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "transfer-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "transfer-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "transfer-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "transfer-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "translate": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "translate-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "translate-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "translate-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "waf": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-fips",
+ }: endpoint{
+ Hostname: "waf-fips.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "waf.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "waf-regional": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{
+ Hostname: "waf-regional.af-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "af-south-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{
+ Hostname: "waf-regional.ap-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{
+ Hostname: "waf-regional.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{
+ Hostname: "waf-regional.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{
+ Hostname: "waf-regional.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{
+ Hostname: "waf-regional.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{
+ Hostname: "waf-regional.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{
+ Hostname: "waf-regional.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{
+ Hostname: "waf-regional.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{
+ Hostname: "waf-regional.eu-north-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{
+ Hostname: "waf-regional.eu-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{
+ Hostname: "waf-regional.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{
+ Hostname: "waf-regional.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{
+ Hostname: "waf-regional.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ endpointKey{
+ Region: "fips-af-south-1",
+ }: endpoint{
+ Hostname: "waf-regional-fips.af-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "af-south-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-ap-east-1",
+ }: endpoint{
+ Hostname: "waf-regional-fips.ap-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-ap-northeast-1",
+ }: endpoint{
+ Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-ap-northeast-2",
+ }: endpoint{
+ Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-ap-south-1",
+ }: endpoint{
+ Hostname: "waf-regional-fips.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-ap-southeast-1",
+ }: endpoint{
+ Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-ap-southeast-2",
+ }: endpoint{
+ Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "waf-regional-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-eu-central-1",
+ }: endpoint{
+ Hostname: "waf-regional-fips.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-eu-north-1",
+ }: endpoint{
+ Hostname: "waf-regional-fips.eu-north-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-eu-south-1",
+ }: endpoint{
+ Hostname: "waf-regional-fips.eu-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-eu-west-1",
+ }: endpoint{
+ Hostname: "waf-regional-fips.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-eu-west-2",
+ }: endpoint{
+ Hostname: "waf-regional-fips.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-eu-west-3",
+ }: endpoint{
+ Hostname: "waf-regional-fips.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ endpointKey{
+ Region: "fips-me-south-1",
+ }: endpoint{
+ Hostname: "waf-regional-fips.me-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-south-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-sa-east-1",
+ }: endpoint{
+ Hostname: "waf-regional-fips.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "waf-regional-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "waf-regional-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "waf-regional-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "waf-regional-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{
+ Hostname: "waf-regional.me-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-south-1",
+ },
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{
+ Hostname: "waf-regional.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "waf-regional.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{
+ Hostname: "waf-regional.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{
+ Hostname: "waf-regional.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Hostname: "waf-regional.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "workdocs": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "workdocs-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "workdocs-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "workmail": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "workspaces": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "workspaces-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "workspaces-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "xray": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "xray-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "xray-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "xray-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "xray-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ },
+}
+
+// AwsCnPartition returns the Resolver for AWS China.
+func AwsCnPartition() Partition {
+ return awscnPartition.Partition()
+}
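+
+// A minimal usage sketch (illustrative only, not part of the generated
+// data): EndpointFor and ResolvedEndpoint.URL are this endpoints package's
+// resolver API, while the service/region values chosen below are
+// assumptions for the example.
+//
+//	p := AwsCnPartition()
+//	resolved, err := p.EndpointFor("s3", "cn-north-1")
+//	if err == nil {
+//		fmt.Println(resolved.URL) // resolved from the aws-cn tables below
+//	}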
+
+var awscnPartition = partition{
+ ID: "aws-cn",
+ Name: "AWS China",
+ DNSSuffix: "amazonaws.com.cn",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ Regions: regions{
+ "cn-north-1": region{
+ Description: "China (Beijing)",
+ },
+ "cn-northwest-1": region{
+ Description: "China (Ningxia)",
+ },
+ },
+ Services: services{
+ "access-analyzer": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "acm": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "api.ecr": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{
+ Hostname: "api.ecr.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "api.sagemaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "apigateway": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "application-autoscaling": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "appsync": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "athena": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "autoscaling": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "autoscaling-plans": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "backup": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "batch": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "budgets": service{
+ PartitionEndpoint: "aws-cn-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-cn-global",
+ }: endpoint{
+ Hostname: "budgets.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "ce": service{
+ PartitionEndpoint: "aws-cn-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-cn-global",
+ }: endpoint{
+ Hostname: "ce.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "cloudformation": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "cloudfront": service{
+ PartitionEndpoint: "aws-cn-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-cn-global",
+ }: endpoint{
+ Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "cloudtrail": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "codebuild": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "codecommit": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "codedeploy": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "cognito-identity": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ },
+ },
+ "config": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "cur": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "dax": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "directconnect": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "dms": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "docdb": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Hostname: "rds.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "ds": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "dynamodb": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "ebs": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "ec2": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "ecs": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "eks": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "elasticache": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "elasticbeanstalk": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "elasticfilesystem": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-cn-north-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-cn-northwest-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "elasticloadbalancing": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "elasticmapreduce": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "es": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "events": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "firehose": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "fsx": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "gamelift": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ },
+ },
+ "glacier": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "glue": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "greengrass": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ },
+ },
+ "guardduty": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "health": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-cn-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-cn-global",
+ }: endpoint{
+ Hostname: "iam.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ },
+ },
+ "iot": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "iotanalytics": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ },
+ },
+ "iotevents": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ },
+ },
+ "ioteventsdata": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{
+ Hostname: "data.iotevents.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ },
+ },
+ "iotsecuredtunneling": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "kafka": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "kinesis": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "kinesisanalytics": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "kms": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "lakeformation": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "lambda": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "license-manager": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "logs": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "mediaconvert": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "monitoring": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "mq": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "neptune": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Hostname: "rds.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "organizations": service{
+ PartitionEndpoint: "aws-cn-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-cn-global",
+ }: endpoint{
+ Hostname: "organizations.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "personalize": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ },
+ },
+ "polly": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "ram": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "rds": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "redshift": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "resource-groups": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "route53": service{
+ PartitionEndpoint: "aws-cn-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-cn-global",
+ }: endpoint{
+ Hostname: "route53.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "route53resolver": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ },
+ },
+ "runtime.sagemaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "s3": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "accesspoint-cn-north-1",
+ }: endpoint{
+ Hostname: "s3-accesspoint.cn-north-1.amazonaws.com.cn",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "accesspoint-cn-northwest-1",
+ }: endpoint{
+ Hostname: "s3-accesspoint.cn-northwest-1.amazonaws.com.cn",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "s3-control": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{
+ Hostname: "s3-control.cn-north-1.amazonaws.com.cn",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "secretsmanager": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "securityhub": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "serverlessrepo": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ },
+ "servicediscovery": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "sms": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "snowball": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-cn-north-1",
+ }: endpoint{
+ Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-cn-northwest-1",
+ }: endpoint{
+ Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "sns": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "sqs": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "ssm": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "states": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "storagegateway": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "sts": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "support": service{
+ PartitionEndpoint: "aws-cn-global",
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-cn-global",
+ }: endpoint{
+ Hostname: "support.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ },
+ },
+ "swf": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "tagging": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "transcribe": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{
+ Hostname: "cn.transcribe.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "workspaces": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "xray": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ },
+}
+
+// AwsUsGovPartition returns the Resolver for AWS GovCloud (US).
+func AwsUsGovPartition() Partition {
+ return awsusgovPartition.Partition()
+}
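+
+// A minimal usage sketch (illustrative only; it assumes the exported
+// Partition API of this package, notably Partition.EndpointFor and the
+// ResolvedEndpoint.URL field):
+//
+//	p := AwsUsGovPartition()
+//	ep, err := p.EndpointFor("s3", "us-gov-west-1")
+//	if err == nil {
+//		fmt.Println(ep.URL) // e.g. https://s3.us-gov-west-1.amazonaws.com
+//	}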
+
+var awsusgovPartition = partition{
+ ID: "aws-us-gov",
+ Name: "AWS GovCloud (US)",
+ DNSSuffix: "amazonaws.com",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
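+	// The regex above gates partition matching: it accepts region IDs such
+	// as "us-gov-west-1" or "us-gov-east-1" and rejects IDs from other
+	// partitions, e.g. "us-east-1" or "cn-north-1".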
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
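+	// Unless a service entry below supplies an explicit Hostname, this
+	// default template is expanded per lookup; e.g. "athena" in
+	// "us-gov-east-1" resolves to athena.us-gov-east-1.amazonaws.com
+	// over HTTPS.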
+ Regions: regions{
+ "us-gov-east-1": region{
+ Description: "AWS GovCloud (US-East)",
+ },
+ "us-gov-west-1": region{
+ Description: "AWS GovCloud (US-West)",
+ },
+ },
+ Services: services{
+ "access-analyzer": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "access-analyzer.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "access-analyzer.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "acm": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "acm.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "acm.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "acm-pca": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "acm-pca.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "acm-pca.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "api.detective": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "api.detective-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "api.detective-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "api.ecr": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-dkr-us-gov-east-1",
+ }: endpoint{
+ Hostname: "ecr-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-dkr-us-gov-west-1",
+ }: endpoint{
+ Hostname: "ecr-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "ecr-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "ecr-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "api.ecr.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "api.ecr.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "api.sagemaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips-secondary",
+ }: endpoint{
+ Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "apigateway": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "application-autoscaling": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "application-autoscaling",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "appstream2": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ CredentialScope: credentialScope{
+ Service: "appstream",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips",
+ }: endpoint{
+ Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "athena": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "athena-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "athena-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "autoscaling": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "autoscaling-plans": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "backup": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "batch": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "batch.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "batch.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "clouddirectory": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "cloudformation": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "cloudformation.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "cloudformation.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "cloudhsm": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "cloudhsmv2": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "cloudhsm",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "cloudtrail": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "cloudtrail.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "cloudtrail.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "codebuild": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "codecommit": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips",
+ }: endpoint{
+ Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "codedeploy": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "codepipeline": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "cognito-identity": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "cognito-identity-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "cognito-idp": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "comprehend": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "comprehendmedical": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "config": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "config.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "config.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "connect": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "datasync": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "datasync-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "datasync-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "directconnect": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "directconnect.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "directconnect.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "dms": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "dms-fips",
+ }: endpoint{
+ Hostname: "dms.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "docdb": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "rds.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "ds": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "ds-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "ds-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "dynamodb": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "dynamodb.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "dynamodb.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "ebs": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "ec2": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "ec2.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "ec2.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "ecs": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "ecs-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "ecs-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "eks": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "eks.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "eks.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "elasticache": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips",
+ }: endpoint{
+ Hostname: "elasticache.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "elasticbeanstalk": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "elasticfilesystem": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "elasticloadbalancing.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "elasticloadbalancing.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "elasticmapreduce": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ },
+ "email": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "email-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "es": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips",
+ }: endpoint{
+ Hostname: "es-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "events": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "events.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "events.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "firehose": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "firehose-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "firehose-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "fms": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "fms-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "fms-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "fsx": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-prod-us-gov-east-1",
+ }: endpoint{
+ Hostname: "fsx-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-prod-us-gov-west-1",
+ }: endpoint{
+ Hostname: "fsx-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "glacier": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "glacier.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "glacier.us-gov-west-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "glue": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "glue-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "glue-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "greengrass": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "dataplane-us-gov-east-1",
+ }: endpoint{
+ Hostname: "greengrass-ats.iot.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "dataplane-us-gov-west-1",
+ }: endpoint{
+ Hostname: "greengrass-ats.iot.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "greengrass-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "greengrass.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "greengrass.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "guardduty": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "guardduty.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "guardduty.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "health": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "health-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-us-gov-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-us-gov-global",
+ }: endpoint{
+ Hostname: "iam.us-gov.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "iam-govcloud-fips",
+ }: endpoint{
+ Hostname: "iam.us-gov.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "inspector": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "inspector-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "inspector-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "iot": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "iotsecuredtunneling": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "kafka": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "kinesis": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "kinesis.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "kinesis.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "kinesisanalytics": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "kms": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ProdFips",
+ }: endpoint{
+ Hostname: "kms-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "lakeformation": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "lambda": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "lambda-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "lambda-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "license-manager": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "logs": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "logs.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "logs.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "mediaconvert": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "mediaconvert.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "metering.marketplace": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "aws-marketplace",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "models.lex": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "lex",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "models-fips.lex.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "monitoring": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "monitoring.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "monitoring.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "neptune": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "rds.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "rds.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "organizations": service{
+ PartitionEndpoint: "aws-us-gov-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-us-gov-global",
+ }: endpoint{
+ Hostname: "organizations.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-aws-us-gov-global",
+ }: endpoint{
+ Hostname: "organizations.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "outposts": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "outposts.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "outposts.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "pinpoint": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "mobiletargeting",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "pinpoint-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "pinpoint.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "polly": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "polly-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "ram": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "ram.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "ram.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "rds": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "rds.us-gov-east-1",
+ }: endpoint{
+ Hostname: "rds.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "rds.us-gov-west-1",
+ }: endpoint{
+ Hostname: "rds.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "redshift": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "redshift.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "redshift.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "rekognition": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "rekognition-fips.us-gov-west-1",
+ }: endpoint{
+ Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "resource-groups": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "resource-groups.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "resource-groups.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "route53": service{
+ PartitionEndpoint: "aws-us-gov-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-us-gov-global",
+ }: endpoint{
+ Hostname: "route53.us-gov.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-aws-us-gov-global",
+ }: endpoint{
+ Hostname: "route53.us-gov.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "route53resolver": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "runtime.lex": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "lex",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "runtime-fips.lex.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "runtime.sagemaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "runtime.sagemaker.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "s3": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "accesspoint-us-gov-east-1",
+ }: endpoint{
+ Hostname: "s3-accesspoint.us-gov-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "accesspoint-us-gov-west-1",
+ }: endpoint{
+ Hostname: "s3-accesspoint.us-gov-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "fips-accesspoint-us-gov-east-1",
+ }: endpoint{
+ Hostname: "s3-accesspoint-fips.us-gov-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "fips-accesspoint-us-gov-west-1",
+ }: endpoint{
+ Hostname: "s3-accesspoint-fips.us-gov-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "s3-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "s3.us-gov-east-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "s3.us-gov-west-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "s3-control": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "s3-control.us-gov-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "s3-control.us-gov-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "secretsmanager": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "securityhub": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "serverlessrepo": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com",
+ Protocols: []string{"https"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com",
+ Protocols: []string{"https"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "servicecatalog": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "servicequotas": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "servicequotas.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "servicequotas.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "sms": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "sms-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "sms-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "snowball": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "snowball-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "snowball-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "sns": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "sns.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "sns.us-gov-west-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "sqs": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "sqs.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "sqs.us-gov-west-1.amazonaws.com",
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "ssm": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "ssm.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "ssm.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "states": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "states-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "states.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "storagegateway": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips",
+ }: endpoint{
+ Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "dynamodb.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "dynamodb.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "sts": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "sts.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "sts.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "support": service{
+ PartitionEndpoint: "aws-us-gov-global",
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-us-gov-global",
+ }: endpoint{
+ Hostname: "support.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "support.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "swf": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "swf.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "swf.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "tagging": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "transcribe": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "transfer": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "transfer-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "transfer-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "translate": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "translate-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "waf-regional": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "waf-regional.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "waf-regional.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "workspaces": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "xray": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "xray-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "xray-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ },
+}
+
+// AwsIsoPartition returns the Resolver for AWS ISO (US).
+func AwsIsoPartition() Partition {
+ return awsisoPartition.Partition()
+}
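+
+// A minimal sketch for enumerating this partition's regions (illustrative
+// only; it assumes Partition.Regions and the Region accessors exported by
+// this package):
+//
+//	for id, r := range AwsIsoPartition().Regions() {
+//		fmt.Println(id, r.Description()) // e.g. "us-iso-east-1 US ISO East"
+//	}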
+
+var awsisoPartition = partition{
+ ID: "aws-iso",
+ Name: "AWS ISO (US)",
+ DNSSuffix: "c2s.ic.gov",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ Regions: regions{
+ "us-iso-east-1": region{
+ Description: "US ISO East",
+ },
+ },
+ Services: services{
+ "api.ecr": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{
+ Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
+ },
+ },
+ "api.sagemaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "apigateway": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "application-autoscaling": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "autoscaling": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "cloudformation": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "cloudtrail": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "codedeploy": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "comprehend": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "config": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "datapipeline": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "directconnect": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "dms": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "dms-fips",
+ }: endpoint{
+ Hostname: "dms.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "ds": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "dynamodb": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "ec2": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "ecs": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "elasticache": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "elasticmapreduce": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ },
+ "es": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "events": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "firehose": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "glacier": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "health": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-iso-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-iso-global",
+ }: endpoint{
+ Hostname: "iam.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
+ },
+ },
+ "kinesis": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "kms": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ProdFips",
+ }: endpoint{
+ Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "lambda": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "logs": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "medialive": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "mediapackage": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "monitoring": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "outposts": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "rds": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "redshift": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "route53": service{
+ PartitionEndpoint: "aws-iso-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-iso-global",
+ }: endpoint{
+ Hostname: "route53.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
+ },
+ },
+ "runtime.sagemaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "s3": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ SignatureVersions: []string{"s3v4"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ },
+ },
+ "secretsmanager": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "snowball": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "sns": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "sqs": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "ssm": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "states": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "sts": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "support": service{
+ PartitionEndpoint: "aws-iso-global",
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-iso-global",
+ }: endpoint{
+ Hostname: "support.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
+ },
+ },
+ "swf": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "transcribe": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "transcribestreaming": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "translate": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "workspaces": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ },
+}
+
+// AwsIsoBPartition returns the Resolver for AWS ISOB (US).
+func AwsIsoBPartition() Partition {
+ return awsisobPartition.Partition()
+}
+
+var awsisobPartition = partition{
+ ID: "aws-iso-b",
+ Name: "AWS ISOB (US)",
+ DNSSuffix: "sc2s.sgov.gov",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ Regions: regions{
+ "us-isob-east-1": region{
+ Description: "US ISOB East (Ohio)",
+ },
+ },
+ Services: services{
+ "api.ecr": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{
+ Hostname: "api.ecr.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
+ },
+ },
+ "application-autoscaling": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "autoscaling": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "cloudformation": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "cloudtrail": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "codedeploy": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "config": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "directconnect": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "dms": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "dms-fips",
+ }: endpoint{
+ Hostname: "dms.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "dynamodb": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "ec2": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "ecs": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "elasticache": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ },
+ "elasticmapreduce": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "es": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "events": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "glacier": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "health": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-iso-b-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-iso-b-global",
+ }: endpoint{
+ Hostname: "iam.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
+ },
+ },
+ "kinesis": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "kms": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ProdFips",
+ }: endpoint{
+ Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "lambda": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "license-manager": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "logs": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "monitoring": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "rds": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "redshift": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "route53": service{
+ PartitionEndpoint: "aws-iso-b-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-iso-b-global",
+ }: endpoint{
+ Hostname: "route53.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
+ },
+ },
+ "s3": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "snowball": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "sns": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "sqs": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "ssm": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "states": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "sts": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "support": service{
+ PartitionEndpoint: "aws-iso-b-global",
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-iso-b-global",
+ }: endpoint{
+ Hostname: "support.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
+ },
+ },
+ "swf": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ },
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/dep_service_ids.go
new file mode 100644
index 0000000000000..ca8fc828e1598
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/dep_service_ids.go
@@ -0,0 +1,141 @@
+package endpoints
+
+// Service identifiers
+//
+// Deprecated: Use client package's EndpointsID value instead of these
+// ServiceIDs. These IDs are not maintained, and are out of date.
+const (
+ A4bServiceID = "a4b" // A4b.
+ AcmServiceID = "acm" // Acm.
+ AcmPcaServiceID = "acm-pca" // AcmPca.
+ ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor.
+ ApiPricingServiceID = "api.pricing" // ApiPricing.
+ ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker.
+ ApigatewayServiceID = "apigateway" // Apigateway.
+ ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling.
+ Appstream2ServiceID = "appstream2" // Appstream2.
+ AppsyncServiceID = "appsync" // Appsync.
+ AthenaServiceID = "athena" // Athena.
+ AutoscalingServiceID = "autoscaling" // Autoscaling.
+ AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans.
+ BatchServiceID = "batch" // Batch.
+ BudgetsServiceID = "budgets" // Budgets.
+ CeServiceID = "ce" // Ce.
+ ChimeServiceID = "chime" // Chime.
+ Cloud9ServiceID = "cloud9" // Cloud9.
+ ClouddirectoryServiceID = "clouddirectory" // Clouddirectory.
+ CloudformationServiceID = "cloudformation" // Cloudformation.
+ CloudfrontServiceID = "cloudfront" // Cloudfront.
+ CloudhsmServiceID = "cloudhsm" // Cloudhsm.
+ Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2.
+ CloudsearchServiceID = "cloudsearch" // Cloudsearch.
+ CloudtrailServiceID = "cloudtrail" // Cloudtrail.
+ CodebuildServiceID = "codebuild" // Codebuild.
+ CodecommitServiceID = "codecommit" // Codecommit.
+ CodedeployServiceID = "codedeploy" // Codedeploy.
+ CodepipelineServiceID = "codepipeline" // Codepipeline.
+ CodestarServiceID = "codestar" // Codestar.
+ CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity.
+ CognitoIdpServiceID = "cognito-idp" // CognitoIdp.
+ CognitoSyncServiceID = "cognito-sync" // CognitoSync.
+ ComprehendServiceID = "comprehend" // Comprehend.
+ ConfigServiceID = "config" // Config.
+ CurServiceID = "cur" // Cur.
+ DatapipelineServiceID = "datapipeline" // Datapipeline.
+ DaxServiceID = "dax" // Dax.
+ DevicefarmServiceID = "devicefarm" // Devicefarm.
+ DirectconnectServiceID = "directconnect" // Directconnect.
+ DiscoveryServiceID = "discovery" // Discovery.
+ DmsServiceID = "dms" // Dms.
+ DsServiceID = "ds" // Ds.
+ DynamodbServiceID = "dynamodb" // Dynamodb.
+ Ec2ServiceID = "ec2" // Ec2.
+ Ec2metadataServiceID = "ec2metadata" // Ec2metadata.
+ EcrServiceID = "ecr" // Ecr.
+ EcsServiceID = "ecs" // Ecs.
+ ElasticacheServiceID = "elasticache" // Elasticache.
+ ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk.
+ ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem.
+ ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing.
+ ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce.
+ ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder.
+ EmailServiceID = "email" // Email.
+ EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace.
+ EsServiceID = "es" // Es.
+ EventsServiceID = "events" // Events.
+ FirehoseServiceID = "firehose" // Firehose.
+ FmsServiceID = "fms" // Fms.
+ GameliftServiceID = "gamelift" // Gamelift.
+ GlacierServiceID = "glacier" // Glacier.
+ GlueServiceID = "glue" // Glue.
+ GreengrassServiceID = "greengrass" // Greengrass.
+ GuarddutyServiceID = "guardduty" // Guardduty.
+ HealthServiceID = "health" // Health.
+ IamServiceID = "iam" // Iam.
+ ImportexportServiceID = "importexport" // Importexport.
+ InspectorServiceID = "inspector" // Inspector.
+ IotServiceID = "iot" // Iot.
+ IotanalyticsServiceID = "iotanalytics" // Iotanalytics.
+ KinesisServiceID = "kinesis" // Kinesis.
+ KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics.
+ KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo.
+ KmsServiceID = "kms" // Kms.
+ LambdaServiceID = "lambda" // Lambda.
+ LightsailServiceID = "lightsail" // Lightsail.
+ LogsServiceID = "logs" // Logs.
+ MachinelearningServiceID = "machinelearning" // Machinelearning.
+ MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics.
+ MediaconvertServiceID = "mediaconvert" // Mediaconvert.
+ MedialiveServiceID = "medialive" // Medialive.
+ MediapackageServiceID = "mediapackage" // Mediapackage.
+ MediastoreServiceID = "mediastore" // Mediastore.
+ MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace.
+ MghServiceID = "mgh" // Mgh.
+ MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics.
+ ModelsLexServiceID = "models.lex" // ModelsLex.
+ MonitoringServiceID = "monitoring" // Monitoring.
+ MturkRequesterServiceID = "mturk-requester" // MturkRequester.
+ NeptuneServiceID = "neptune" // Neptune.
+ OpsworksServiceID = "opsworks" // Opsworks.
+ OpsworksCmServiceID = "opsworks-cm" // OpsworksCm.
+ OrganizationsServiceID = "organizations" // Organizations.
+ PinpointServiceID = "pinpoint" // Pinpoint.
+ PollyServiceID = "polly" // Polly.
+ RdsServiceID = "rds" // Rds.
+ RedshiftServiceID = "redshift" // Redshift.
+ RekognitionServiceID = "rekognition" // Rekognition.
+ ResourceGroupsServiceID = "resource-groups" // ResourceGroups.
+ Route53ServiceID = "route53" // Route53.
+ Route53domainsServiceID = "route53domains" // Route53domains.
+ RuntimeLexServiceID = "runtime.lex" // RuntimeLex.
+ RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker.
+ S3ServiceID = "s3" // S3.
+ S3ControlServiceID = "s3-control" // S3Control.
+ SagemakerServiceID = "api.sagemaker" // Sagemaker.
+ SdbServiceID = "sdb" // Sdb.
+ SecretsmanagerServiceID = "secretsmanager" // Secretsmanager.
+ ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo.
+ ServicecatalogServiceID = "servicecatalog" // Servicecatalog.
+ ServicediscoveryServiceID = "servicediscovery" // Servicediscovery.
+ ShieldServiceID = "shield" // Shield.
+ SmsServiceID = "sms" // Sms.
+ SnowballServiceID = "snowball" // Snowball.
+ SnsServiceID = "sns" // Sns.
+ SqsServiceID = "sqs" // Sqs.
+ SsmServiceID = "ssm" // Ssm.
+ StatesServiceID = "states" // States.
+ StoragegatewayServiceID = "storagegateway" // Storagegateway.
+ StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb.
+ StsServiceID = "sts" // Sts.
+ SupportServiceID = "support" // Support.
+ SwfServiceID = "swf" // Swf.
+ TaggingServiceID = "tagging" // Tagging.
+ TransferServiceID = "transfer" // Transfer.
+ TranslateServiceID = "translate" // Translate.
+ WafServiceID = "waf" // Waf.
+ WafRegionalServiceID = "waf-regional" // WafRegional.
+ WorkdocsServiceID = "workdocs" // Workdocs.
+ WorkmailServiceID = "workmail" // Workmail.
+ WorkspacesServiceID = "workspaces" // Workspaces.
+ XrayServiceID = "xray" // Xray.
+)
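+
+// As a usage sketch, these identifiers can still be passed to this package's
+// enumeration helpers, e.g.:
+//
+//	rs, ok := RegionsForService(DefaultPartitions(), AwsPartitionID, DynamodbServiceID)
+//
+// though the client packages' EndpointsID values are preferred.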
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/doc.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/doc.go
new file mode 100644
index 0000000000000..84316b92c0532
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/doc.go
@@ -0,0 +1,66 @@
+// Package endpoints provides the types and functionality for defining regions
+// and endpoints, as well as querying those definitions.
+//
+// The SDK's Regions and Endpoints metadata is code generated into the endpoints
+// package, and is accessible via the DefaultResolver function. This function
+// returns an endpoint Resolver that will search the metadata and build an
+// associated endpoint if one is found. The default resolver will search all
+// partitions known by the SDK, e.g. AWS Standard (aws), AWS China (aws-cn),
+// and AWS GovCloud (US) (aws-us-gov).
+//
+// Enumerating Regions and Endpoint Metadata
+//
+// Casting the Resolver returned by DefaultResolver to an EnumPartitions interface
+// will allow you to get access to the list of underlying Partitions with the
+// Partitions method. This is helpful if you want to limit the SDK's endpoint
+// resolving to a single partition, or enumerate regions, services, and endpoints
+// in the partition.
+//
+// resolver := endpoints.DefaultResolver()
+// partitions := resolver.(endpoints.EnumPartitions).Partitions()
+//
+// for _, p := range partitions {
+// fmt.Println("Regions for", p.ID())
+//	for id := range p.Regions() {
+// fmt.Println("*", id)
+// }
+//
+// fmt.Println("Services for", p.ID())
+//	for id := range p.Services() {
+// fmt.Println("*", id)
+// }
+// }
+//
+// Using Custom Endpoints
+//
+// The endpoints package also gives you the ability to use your own logic for
+// how endpoints are resolved. This is a great way to define a custom endpoint
+// for select services, without passing that logic down through your code.
+//
+// If a type implements the Resolver interface it can be used to resolve
+// endpoints. To use this with the SDK's Session and Config set the value
+// of the type to the EndpointsResolver field of aws.Config when initializing
+// the session, or service client.
+//
+// In addition the ResolverFunc is a wrapper for a func matching the signature
+// of Resolver.EndpointFor, converting it to a type that satisfies the
+// Resolver interface.
+//
+// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
+// if service == endpoints.S3ServiceID {
+// return endpoints.ResolvedEndpoint{
+// URL: "s3.custom.endpoint.com",
+// SigningRegion: "custom-signing-region",
+// }, nil
+// }
+//
+// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
+// }
+//
+// sess := session.Must(session.NewSession(&aws.Config{
+// Region: aws.String("us-west-2"),
+// EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
+// }))
+package endpoints
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/endpoints.go
new file mode 100644
index 0000000000000..9c07056c39cf2
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/endpoints.go
@@ -0,0 +1,594 @@
+package endpoints
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+)
+
+// A Logger is a minimalistic interface for the SDK to log messages to.
+type Logger interface {
+ Log(...interface{})
+}
+
+// DualStackEndpointState is a constant to describe the dual-stack endpoint resolution
+// behavior.
+type DualStackEndpointState uint
+
+const (
+ // DualStackEndpointStateUnset is the default value behavior for dual-stack endpoint
+ // resolution.
+ DualStackEndpointStateUnset DualStackEndpointState = iota
+
+	// DualStackEndpointStateEnabled enables dual-stack endpoint resolution for endpoints.
+ DualStackEndpointStateEnabled
+
+ // DualStackEndpointStateDisabled disables dual-stack endpoint resolution for endpoints.
+ DualStackEndpointStateDisabled
+)
+
+// FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior.
+// IBM Unsupported
+// type FIPSEndpointState uint
+
+//const (
+// FIPSEndpointStateUnset is the default value behavior for FIPS endpoint resolution.
+// FIPSEndpointStateUnset FIPSEndpointState = iota
+
+// FIPSEndpointStateEnabled enables FIPS endpoint resolution for service endpoints.
+// FIPSEndpointStateEnabled
+
+// FIPSEndpointStateDisabled disables FIPS endpoint resolution for endpoints.
+// FIPSEndpointStateDisabled
+//)
+
+// Options provide the configuration needed to direct how the
+// endpoints will be resolved.
+type Options struct {
+	// DisableSSL forces the endpoint to be resolved as HTTP
+	// instead of HTTPS if the service supports it.
+ DisableSSL bool
+
+ // Sets the resolver to resolve the endpoint as a dualstack endpoint
+ // for the service. If dualstack support for a service is not known and
+	// StrictMatching is not enabled, a dualstack endpoint for the service will
+ // be returned. This endpoint may not be valid. If StrictMatching is
+ // enabled only services that are known to support dualstack will return
+ // dualstack endpoints.
+ //
+ // Deprecated: This option will continue to function for S3 and S3 Control for backwards compatibility.
+ // UseDualStackEndpoint should be used to enable usage of a service's dual-stack endpoint for all service clients
+ // moving forward. For S3 and S3 Control, when UseDualStackEndpoint is set to a non-zero value it takes higher
+	// precedence than this option.
+ UseDualStack bool
+
+ // Sets the resolver to resolve a dual-stack endpoint for the service.
+ UseDualStackEndpoint DualStackEndpointState
+
+ // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
+ // UseFIPSEndpoint FIPSEndpointState
+
+	// Enables strict matching of services and regions when resolving endpoints.
+ // If the partition doesn't enumerate the exact service and region an
+ // error will be returned. This option will prevent returning endpoints
+ // that look valid, but may not resolve to any real endpoint.
+ StrictMatching bool
+
+ // Enables resolving a service endpoint based on the region provided if the
+ // service does not exist. The service endpoint ID will be used as the service
+ // domain name prefix. By default the endpoint resolver requires the service
+ // to be known when resolving endpoints.
+ //
+	// If resolving an endpoint on the partition list, the provided region will
+	// be used to determine which partition's domain name pattern to combine the
+	// service endpoint ID with. If both the service and region are unknown when
+	// resolving the endpoint on the partition list, an UnknownEndpointError will
+	// be returned.
+ //
+	// If resolving an endpoint on a partition-specific resolver, that partition's
+ // domain name pattern will be used with the service endpoint ID. If both
+ // region and service do not exist when resolving an endpoint on a specific
+ // partition the partition's domain pattern will be used to combine the
+ // endpoint and region together.
+ //
+ // This option is ignored if StrictMatching is enabled.
+ ResolveUnknownService bool
+
+	// S3 Regional Endpoint flag helps with resolving the S3 endpoint
+	S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint
+
+	// ResolvedRegion is the resolved region string. If provided (non-zero length) it takes priority
+	// over the region name passed to the ResolveEndpoint call.
+	ResolvedRegion string
+
+	// Logger is the logger that will be used to log messages.
+	Logger Logger
+
+ // Determines whether logging of deprecated endpoints usage is enabled.
+ LogDeprecated bool
+}
+
+func (o Options) getEndpointVariant(service string) (v endpointVariant) {
+ const s3 = "s3"
+ const s3Control = "s3-control"
+
+ if (o.UseDualStackEndpoint == DualStackEndpointStateEnabled) ||
+ ((service == s3 || service == s3Control) && (o.UseDualStackEndpoint == DualStackEndpointStateUnset && o.UseDualStack)) {
+ v |= dualStackVariant
+ }
+ //IBM Unsupported
+ /* if o.UseFIPSEndpoint == FIPSEndpointStateEnabled {
+ v |= fipsVariant
+ }*/
+ return v
+}
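+
+// For example, Options{UseDualStack: true}.getEndpointVariant("s3") yields
+// dualStackVariant, while the same options for any other service yield 0:
+// the legacy UseDualStack flag is honored only for s3 and s3-control.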
+
+// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1
+// Regional Endpoint options.
+type S3UsEast1RegionalEndpoint int
+
+func (e S3UsEast1RegionalEndpoint) String() string {
+ switch e {
+ case LegacyS3UsEast1Endpoint:
+ return "legacy"
+ case RegionalS3UsEast1Endpoint:
+ return "regional"
+ case UnsetS3UsEast1Endpoint:
+ return ""
+ default:
+ return "unknown"
+ }
+}
+
+const (
+
+	// UnsetS3UsEast1Endpoint represents that the S3 Regional Endpoint flag is not
+ // specified.
+ UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota
+
+	// LegacyS3UsEast1Endpoint represents when the S3 Regional Endpoint flag is
+ // specified to use legacy endpoints.
+ LegacyS3UsEast1Endpoint
+
+	// RegionalS3UsEast1Endpoint represents when the S3 Regional Endpoint flag is
+ // specified to use regional endpoints.
+ RegionalS3UsEast1Endpoint
+)
+
+// GetS3UsEast1RegionalEndpoint returns the S3UsEast1RegionalEndpoint flag based
+// on the input string provided in the env config or shared config by the user.
+//
+// `legacy` and `regional` are the only valid, case-insensitive strings for
+// resolving the S3 regional endpoint flag.
+func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) {
+ switch {
+ case strings.EqualFold(s, "legacy"):
+ return LegacyS3UsEast1Endpoint, nil
+ case strings.EqualFold(s, "regional"):
+ return RegionalS3UsEast1Endpoint, nil
+ default:
+ return UnsetS3UsEast1Endpoint,
+ fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s)
+ }
+}
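+
+// For example, GetS3UsEast1RegionalEndpoint("Regional") returns
+// RegionalS3UsEast1Endpoint (matching is case-insensitive), while any
+// unrecognized value returns UnsetS3UsEast1Endpoint and a non-nil error.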
+
+// Set combines all of the option functions together.
+func (o *Options) Set(optFns ...func(*Options)) {
+ for _, fn := range optFns {
+ fn(o)
+ }
+}
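+
+// As a minimal sketch of combining the functional options defined below (the
+// particular options chosen are illustrative):
+//
+//	var o Options
+//	o.Set(DisableSSLOption, StrictMatchingOption)
+//	// o.DisableSSL and o.StrictMatching are now both true.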
+
+// DisableSSLOption sets the DisableSSL options. Can be used as a functional
+// option when resolving endpoints.
+func DisableSSLOption(o *Options) {
+ o.DisableSSL = true
+}
+
+// UseDualStackOption sets the UseDualStack option. Can be used as a functional
+// option when resolving endpoints.
+//
+// Deprecated: UseDualStackEndpointOption should be used to enable usage of a service's dual-stack endpoint.
+// When DualStackEndpointState is set to a non-zero value it takes higher precedence than this option.
+func UseDualStackOption(o *Options) {
+ o.UseDualStack = true
+}
+
+// UseDualStackEndpointOption sets the UseDualStackEndpoint option to enabled. Can be used as a functional
+// option when resolving endpoints.
+func UseDualStackEndpointOption(o *Options) {
+ o.UseDualStackEndpoint = DualStackEndpointStateEnabled
+}
+
+// StrictMatchingOption sets the StrictMatching option. Can be used as a functional
+// option when resolving endpoints.
+func StrictMatchingOption(o *Options) {
+ o.StrictMatching = true
+}
+
+// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used
+// as a functional option when resolving endpoints.
+func ResolveUnknownServiceOption(o *Options) {
+ o.ResolveUnknownService = true
+}
+
+// A Resolver provides the interface for functionality to resolve endpoints.
+// The built-in Partition and DefaultResolver return values satisfy this interface.
+type Resolver interface {
+ EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+}
+
+// ResolverFunc is a helper utility that wraps a function so it satisfies the
+// Resolver interface. This is useful when you want to add additional endpoint
+// resolving logic, or stub out specific endpoints with custom values.
+type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+
+// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface.
+func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return fn(service, region, opts...)
+}
+
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// AddScheme adds the HTTP or HTTPS scheme to an endpoint URL if it has no
+// scheme already. If disableSSL is true, HTTP is used instead of the default
+// HTTPS.
+//
+// If disableSSL is set, it only affects the scheme chosen when the URL does
+// not already contain one.
+func AddScheme(endpoint string, disableSSL bool) string {
+ if !schemeRE.MatchString(endpoint) {
+ scheme := "https"
+ if disableSSL {
+ scheme = "http"
+ }
+ endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
+ }
+
+ return endpoint
+}
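+
+// For example (hostnames are illustrative):
+//
+//	AddScheme("example.com", false)        // "https://example.com"
+//	AddScheme("example.com", true)         // "http://example.com"
+//	AddScheme("http://example.com", false) // unchanged, scheme already present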
+
+// EnumPartitions provides a way to retrieve the underlying partitions that
+// make up the SDK's default Resolver, or any resolver decoded from a model
+// file.
+//
+// Use this interface with DefaultResolver and DecodeModels to get the list of
+// Partitions.
+type EnumPartitions interface {
+ Partitions() []Partition
+}
+
+// RegionsForService returns a map of regions for the partition and service.
+// If either the partition or service does not exist, false will be returned
+// as the second return value.
+//
+// This example shows how to get the regions for DynamoDB in the AWS partition.
+// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
+//
+// This is equivalent to using the partition directly.
+// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
+func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
+ for _, p := range ps {
+ if p.ID() != partitionID {
+ continue
+ }
+ if _, ok := p.p.Services[serviceID]; !ok {
+ break
+ }
+
+ s := Service{
+ id: serviceID,
+ p: p.p,
+ }
+ return s.Regions(), true
+ }
+
+ return map[string]Region{}, false
+}
+
+// PartitionForRegion returns the first partition which includes the region
+// passed in. This includes both known regions and regions which match
+// a pattern supported by the partition which may include regions that are
+// not explicitly known by the partition. Use the Regions method of the
+// returned Partition if explicit support is needed.
+func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) {
+ for _, p := range ps {
+ if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) {
+ return p, true
+ }
+ }
+
+ return Partition{}, false
+}
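+
+// A usage sketch, mirroring the RegionsForService example above; the region
+// value is illustrative:
+//
+//	p, ok := PartitionForRegion(DefaultPartitions(), "us-gov-west-1")
+//	if ok {
+//		fmt.Println(p.ID()) // "aws-us-gov"
+//	}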
+
+// A Partition provides the ability to enumerate the partition's regions
+// and services.
+type Partition struct {
+ id, dnsSuffix string
+ p *partition
+}
+
+// DNSSuffix returns the base domain name of the partition.
+func (p Partition) DNSSuffix() string { return p.dnsSuffix }
+
+// ID returns the identifier of the partition.
+func (p Partition) ID() string { return p.id }
+
+// EndpointFor attempts to resolve the endpoint based on service and region.
+// See Options for information on configuring how the endpoint is resolved.
+//
+// If the service cannot be found in the metadata the UnknownServiceError
+// error will be returned. This validation occurs regardless of whether
+// StrictMatching is enabled. To enable resolving unknown services, set the
+// "ResolveUnknownService" option to true. When StrictMatching is disabled
+// this option allows the partition resolver to resolve an endpoint based on
+// the service endpoint ID provided.
+//
+// When resolving endpoints you can choose to enable StrictMatching. This will
+// require the provided service and region to be known by the partition.
+// If the endpoint cannot be strictly resolved an error will be returned. This
+// mode is useful to ensure the endpoint resolved is valid. Without
+// StrictMatching enabled the endpoint returned may look valid but may not work.
+// StrictMatching requires the SDK to be updated if you want to take advantage
+// of new regions and services expansions.
+//
+// Errors that can be returned.
+// * UnknownServiceError
+// * UnknownEndpointError
+func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return p.p.EndpointFor(service, region, opts...)
+}
+
+// Regions returns a map of Regions indexed by their ID. This is useful for
+// enumerating over the regions in a partition.
+func (p Partition) Regions() map[string]Region {
+ rs := make(map[string]Region, len(p.p.Regions))
+ for id, r := range p.p.Regions {
+ rs[id] = Region{
+ id: id,
+ desc: r.Description,
+ p: p.p,
+ }
+ }
+
+ return rs
+}
+
+// Services returns a map of Service indexed by their ID. This is useful for
+// enumerating over the services in a partition.
+func (p Partition) Services() map[string]Service {
+ ss := make(map[string]Service, len(p.p.Services))
+ for id := range p.p.Services {
+ ss[id] = Service{
+ id: id,
+ p: p.p,
+ }
+ }
+
+ return ss
+}
+
+// A Region provides information about a region, and ability to resolve an
+// endpoint from the context of a region, given a service.
+type Region struct {
+ id, desc string
+ p *partition
+}
+
+// ID returns the region's identifier.
+func (r Region) ID() string { return r.id }
+
+// Description returns the region's description. The region description
+// is free text; it can be empty, and it may change between SDK releases.
+func (r Region) Description() string { return r.desc }
+
+// ResolveEndpoint resolves an endpoint from the context of the region given
+// a service. See Partition.EndpointFor for usage and errors that can be returned.
+func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return r.p.EndpointFor(service, r.id, opts...)
+}
+
+// Services returns a list of all services that are known to be in this region.
+func (r Region) Services() map[string]Service {
+ ss := map[string]Service{}
+ for id, s := range r.p.Services {
+ if _, ok := s.Endpoints[endpointKey{Region: r.id}]; ok {
+ ss[id] = Service{
+ id: id,
+ p: r.p,
+ }
+ }
+ }
+
+ return ss
+}
+
+// A Service provides information about a service, and ability to resolve an
+// endpoint from the context of a service, given a region.
+type Service struct {
+ id string
+ p *partition
+}
+
+// ID returns the identifier for the service.
+func (s Service) ID() string { return s.id }
+
+// ResolveEndpoint resolves an endpoint from the context of a service given
+// a region. See Partition.EndpointFor for usage and errors that can be returned.
+func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return s.p.EndpointFor(s.id, region, opts...)
+}
+
+// Regions returns a map of Regions that the service is present in.
+//
+// A region is the AWS region the service exists in, whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Regions() map[string]Region {
+ rs := map[string]Region{}
+ service := s.p.Services[s.id]
+ for id := range service.Endpoints {
+ if id.Variant != 0 {
+ continue
+ }
+ if r, ok := s.p.Regions[id.Region]; ok {
+ rs[id.Region] = Region{
+ id: id.Region,
+ desc: r.Description,
+ p: s.p,
+ }
+ }
+ }
+
+ return rs
+}
+
+// Endpoints returns a map of Endpoints indexed by their ID for all known
+// endpoints for a service.
+//
+// A region is the AWS region the service exists in, whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Endpoints() map[string]Endpoint {
+ es := make(map[string]Endpoint, len(s.p.Services[s.id].Endpoints))
+ for id := range s.p.Services[s.id].Endpoints {
+ if id.Variant != 0 {
+ continue
+ }
+ es[id.Region] = Endpoint{
+ id: id.Region,
+ serviceID: s.id,
+ p: s.p,
+ }
+ }
+
+ return es
+}
+
+// An Endpoint provides information about an endpoint, and the ability
+// to resolve that endpoint for the service and the region the endpoint
+// represents.
+type Endpoint struct {
+ id string
+ serviceID string
+ p *partition
+}
+
+// ID returns the identifier for an endpoint.
+func (e Endpoint) ID() string { return e.id }
+
+// ServiceID returns the identifier the endpoint belongs to.
+func (e Endpoint) ServiceID() string { return e.serviceID }
+
+// ResolveEndpoint resolves an endpoint from the context of a service and
+// region the endpoint represents. See Partition.EndpointFor for usage and
+// errors that can be returned.
+func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return e.p.EndpointFor(e.serviceID, e.id, opts...)
+}
+
+// A ResolvedEndpoint is an endpoint that has been resolved based on a
+// partition, service, and region.
+type ResolvedEndpoint struct {
+ // The endpoint URL
+ URL string
+
+ // The endpoint partition
+ PartitionID string
+
+ // The region that should be used for signing requests.
+ SigningRegion string
+
+ // The service name that should be used for signing requests.
+ SigningName string
+
+ // States that the signing name for this endpoint was derived from metadata
+ // passed in, but was not explicitly modeled.
+ SigningNameDerived bool
+
+ // The signing method that should be used for signing requests.
+ SigningMethod string
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the error structs below and not conflict with the error.Error() method.
+type awsError awserr.Error
+
+// An EndpointNotFoundError is returned when in StrictMatching mode, and the
+// endpoint for the service and region cannot be found in any of the partitions.
+type EndpointNotFoundError struct {
+ awsError
+ Partition string
+ Service string
+ Region string
+}
+
+// An UnknownServiceError is returned when the service does not resolve to an
+// endpoint. Includes a list of all known services for the partition. Returned
+// when a partition does not support the service.
+type UnknownServiceError struct {
+ awsError
+ Partition string
+ Service string
+ Known []string
+}
+
+// NewUnknownServiceError builds and returns UnknownServiceError.
+func NewUnknownServiceError(p, s string, known []string) UnknownServiceError {
+ return UnknownServiceError{
+ awsError: awserr.New("UnknownServiceError",
+ "could not resolve endpoint for unknown service", nil),
+ Partition: p,
+ Service: s,
+ Known: known,
+ }
+}
+
+// Error returns the string representation of the error.
+func (e UnknownServiceError) Error() string {
+ extra := fmt.Sprintf("partition: %q, service: %q",
+ e.Partition, e.Service)
+ if len(e.Known) > 0 {
+ extra += fmt.Sprintf(", known: %v", e.Known)
+ }
+ return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownServiceError) String() string {
+ return e.Error()
+}
+
+// An UnknownEndpointError is returned when in StrictMatching mode and the
+// service is valid, but the region does not resolve to an endpoint. Includes
+// a list of all known endpoints for the service.
+type UnknownEndpointError struct {
+ awsError
+ Partition string
+ Service string
+ Region string
+ Known []string
+}
+
+// NewUnknownEndpointError builds and returns UnknownEndpointError.
+func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError {
+ return UnknownEndpointError{
+ awsError: awserr.New("UnknownEndpointError",
+ "could not resolve endpoint", nil),
+ Partition: p,
+ Service: s,
+ Region: r,
+ Known: known,
+ }
+}
+
+// Error returns the string representation of the error.
+func (e UnknownEndpointError) Error() string {
+ extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
+ e.Partition, e.Service, e.Region)
+ if len(e.Known) > 0 {
+ extra += fmt.Sprintf(", known: %v", e.Known)
+ }
+ return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownEndpointError) String() string {
+ return e.Error()
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/legacy_regions.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/legacy_regions.go
new file mode 100644
index 0000000000000..28a7556cca36f
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/legacy_regions.go
@@ -0,0 +1,7 @@
+package endpoints
+
+var legacyGlobalRegions = map[string]map[string]struct{}{
+ "s3": {
+ "us-east-1": {},
+ },
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/v3model.go
new file mode 100644
index 0000000000000..1ff0e9e19c2a4
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/v3model.go
@@ -0,0 +1,559 @@
+package endpoints
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+const dnsSuffixTemplateKey = "{dnsSuffix}"
+
+// defaultKey is a compound map key of a variant and other values.
+type defaultKey struct {
+ Variant endpointVariant
+ ServiceVariant serviceVariant
+}
+
+// endpointKey is a compound map key of a region and associated variant value.
+type endpointKey struct {
+ Region string
+ Variant endpointVariant
+}
+
+// endpointVariant is a bit field to describe the endpoint's attributes.
+type endpointVariant uint64
+
+// serviceVariant is a bit field to describe the service endpoint attributes.
+type serviceVariant uint64
+
+const (
+ // fipsVariant indicates that the endpoint is FIPS capable.
+ fipsVariant endpointVariant = 1 << (64 - 1 - iota)
+
+ // dualStackVariant indicates that the endpoint is DualStack capable.
+ dualStackVariant
+)
+
+var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`)
+
+type partitions []partition
+
+func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ var opt Options
+ opt.Set(opts...)
+
+ if len(opt.ResolvedRegion) > 0 {
+ region = opt.ResolvedRegion
+ }
+
+ for i := 0; i < len(ps); i++ {
+ if !ps[i].canResolveEndpoint(service, region, opt) {
+ continue
+ }
+
+ return ps[i].EndpointFor(service, region, opts...)
+ }
+
+	// If loose matching is allowed, fall back to the first partition's format
+	// when resolving the endpoint.
+ if !opt.StrictMatching && len(ps) > 0 {
+ return ps[0].EndpointFor(service, region, opts...)
+ }
+
+ return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{})
+}
+
+// Partitions satisfies the EnumPartitions interface and returns a list
+// of Partitions, one for each partition represented in the SDK's
+// endpoints model.
+func (ps partitions) Partitions() []Partition {
+ parts := make([]Partition, 0, len(ps))
+ for i := 0; i < len(ps); i++ {
+ parts = append(parts, ps[i].Partition())
+ }
+
+ return parts
+}
+
+type endpointWithVariants struct {
+ endpoint
+ Variants []endpointWithTags `json:"variants"`
+}
+
+type endpointWithTags struct {
+ endpoint
+ Tags []string `json:"tags"`
+}
+
+type endpointDefaults map[defaultKey]endpoint
+
+func (p *endpointDefaults) UnmarshalJSON(data []byte) error {
+ if *p == nil {
+ *p = make(endpointDefaults)
+ }
+
+ var e endpointWithVariants
+ if err := json.Unmarshal(data, &e); err != nil {
+ return err
+ }
+
+ (*p)[defaultKey{Variant: 0}] = e.endpoint
+
+ e.Hostname = ""
+ e.DNSSuffix = ""
+
+ for _, variant := range e.Variants {
+ endpointVariant, unknown := parseVariantTags(variant.Tags)
+ if unknown {
+ continue
+ }
+
+ var ve endpoint
+ ve.mergeIn(e.endpoint)
+ ve.mergeIn(variant.endpoint)
+
+ (*p)[defaultKey{Variant: endpointVariant}] = ve
+ }
+
+ return nil
+}
+
+func parseVariantTags(tags []string) (ev endpointVariant, unknown bool) {
+ if len(tags) == 0 {
+ unknown = true
+ return
+ }
+
+ for _, tag := range tags {
+ switch {
+ //IBM UNSUPPORTED
+ /*case strings.EqualFold("fips", tag):
+ ev |= fipsVariant*/
+ case strings.EqualFold("dualstack", tag):
+ ev |= dualStackVariant
+ default:
+ unknown = true
+ }
+ }
+ return ev, unknown
+}
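+
+// For example, parseVariantTags([]string{"dualstack"}) yields
+// (dualStackVariant, false), while a tag this fork does not recognize,
+// such as "fips", yields unknown == true.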
+
+type partition struct {
+ ID string `json:"partition"`
+ Name string `json:"partitionName"`
+ DNSSuffix string `json:"dnsSuffix"`
+ RegionRegex regionRegex `json:"regionRegex"`
+ Defaults endpointDefaults `json:"defaults"`
+ Regions regions `json:"regions"`
+ Services services `json:"services"`
+}
+
+func (p partition) Partition() Partition {
+ return Partition{
+ dnsSuffix: p.DNSSuffix,
+ id: p.ID,
+ p: &p,
+ }
+}
+
+func (p partition) canResolveEndpoint(service, region string, options Options) bool {
+ s, hasService := p.Services[service]
+ _, hasEndpoint := s.Endpoints[endpointKey{
+ Region: region,
+ Variant: options.getEndpointVariant(service),
+ }]
+
+ if hasEndpoint && hasService {
+ return true
+ }
+
+ if options.StrictMatching {
+ return false
+ }
+
+ return p.RegionRegex.MatchString(region)
+}
+
+func allowLegacyEmptyRegion(service string) bool {
+ legacy := map[string]struct{}{
+ "budgets": {},
+ "ce": {},
+ "chime": {},
+ "cloudfront": {},
+ "ec2metadata": {},
+ "iam": {},
+ "importexport": {},
+ "organizations": {},
+ "route53": {},
+ "sts": {},
+ "support": {},
+ "waf": {},
+ }
+
+ _, allowed := legacy[service]
+ return allowed
+}
+
+func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) {
+ var opt Options
+ opt.Set(opts...)
+
+ if len(opt.ResolvedRegion) > 0 {
+ region = opt.ResolvedRegion
+ }
+
+ s, hasService := p.Services[service]
+ if len(service) == 0 || !(hasService || opt.ResolveUnknownService) {
+		// Only return an error if the resolver will not fall back to creating
+		// an endpoint based on the service endpoint ID passed in.
+ return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services))
+ }
+
+ if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 {
+ region = s.PartitionEndpoint
+ }
+
+ if service == "s3" && opt.S3UsEast1RegionalEndpoint != RegionalS3UsEast1Endpoint {
+ if r, ok := isLegacyGlobalRegion(service, region, opt); ok {
+ region = r
+ }
+ }
+ variant := opt.getEndpointVariant(service)
+
+ endpoints := s.Endpoints
+
+ serviceDefaults, hasServiceDefault := s.Defaults[defaultKey{Variant: variant}]
+ // If we searched for a variant which may have no explicit service defaults,
+	// then we need to inherit the standard service defaults except the hostname and dnsSuffix.
+ if variant != 0 && !hasServiceDefault {
+ serviceDefaults = s.Defaults[defaultKey{}]
+ serviceDefaults.Hostname = ""
+ serviceDefaults.DNSSuffix = ""
+ }
+
+ partitionDefaults, hasPartitionDefault := p.Defaults[defaultKey{Variant: variant}]
+
+ var dnsSuffix string
+ if len(serviceDefaults.DNSSuffix) > 0 {
+ dnsSuffix = serviceDefaults.DNSSuffix
+ } else if variant == 0 {
+ // For legacy reasons the partition dnsSuffix is not in the defaults, so if we looked for
+ // a non-variant endpoint then we need to set the dnsSuffix.
+ dnsSuffix = p.DNSSuffix
+ }
+
+ noDefaults := !hasServiceDefault && !hasPartitionDefault
+
+ e, hasEndpoint := s.endpointForRegion(region, endpoints, variant)
+ if len(region) == 0 || (!hasEndpoint && (opt.StrictMatching || noDefaults)) {
+ return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(endpoints, variant))
+ }
+
+ defs := []endpoint{partitionDefaults, serviceDefaults}
+
+ return e.resolve(service, p.ID, region, dnsSuffixTemplateKey, dnsSuffix, defs, opt)
+}
+
+func isLegacyGlobalRegion(service string, region string, opt Options) (string, bool) {
+ if opt.getEndpointVariant(service) != 0 {
+ return "", false
+ }
+
+ const (
+ sts = "sts"
+ s3 = "s3"
+ awsGlobal = "aws-global"
+ )
+
+ switch {
+ case service == sts:
+ return region, false
+ case service == s3 && opt.S3UsEast1RegionalEndpoint == RegionalS3UsEast1Endpoint:
+ return region, false
+ default:
+ if _, ok := legacyGlobalRegions[service][region]; ok {
+ return awsGlobal, true
+ }
+ }
+
+ return region, false
+}
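+
+// For example, with zero-value Options, isLegacyGlobalRegion("s3", "us-east-1", Options{})
+// returns ("aws-global", true), because s3/us-east-1 appears in
+// legacyGlobalRegions; any other service/region pair is returned unchanged.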
+
+func serviceList(ss services) []string {
+ list := make([]string, 0, len(ss))
+ for k := range ss {
+ list = append(list, k)
+ }
+ return list
+}
+
+func endpointList(es serviceEndpoints, variant endpointVariant) []string {
+ list := make([]string, 0, len(es))
+ for k := range es {
+ if k.Variant != variant {
+ continue
+ }
+ list = append(list, k.Region)
+ }
+ return list
+}
+
+type regionRegex struct {
+ *regexp.Regexp
+}
+
+func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) {
+ // Strip leading and trailing quotes
+ regex, err := strconv.Unquote(string(b))
+ if err != nil {
+ return fmt.Errorf("unable to strip quotes from regex, %v", err)
+ }
+
+ rr.Regexp, err = regexp.Compile(regex)
+ if err != nil {
+ return fmt.Errorf("unable to unmarshal region regex, %v", err)
+ }
+ return nil
+}
+
+type regions map[string]region
+
+type region struct {
+ Description string `json:"description"`
+}
+
+type services map[string]service
+
+type service struct {
+ PartitionEndpoint string `json:"partitionEndpoint"`
+ IsRegionalized boxedBool `json:"isRegionalized,omitempty"`
+ Defaults endpointDefaults `json:"defaults"`
+ Endpoints serviceEndpoints `json:"endpoints"`
+}
+
+func (s *service) endpointForRegion(region string, endpoints serviceEndpoints, variant endpointVariant) (endpoint, bool) {
+ if e, ok := endpoints[endpointKey{Region: region, Variant: variant}]; ok {
+ return e, true
+ }
+
+ if s.IsRegionalized == boxedFalse {
+ return endpoints[endpointKey{Region: s.PartitionEndpoint, Variant: variant}], region == s.PartitionEndpoint
+ }
+
+	// Unable to find any matching endpoint; return a
+	// blank endpoint that will be used for generic endpoint creation.
+ return endpoint{}, false
+}
+
+type serviceEndpoints map[endpointKey]endpoint
+
+func (s *serviceEndpoints) UnmarshalJSON(data []byte) error {
+ if *s == nil {
+ *s = make(serviceEndpoints)
+ }
+
+ var regionToEndpoint map[string]endpointWithVariants
+
+ if err := json.Unmarshal(data, ®ionToEndpoint); err != nil {
+ return err
+ }
+
+ for region, e := range regionToEndpoint {
+ (*s)[endpointKey{Region: region}] = e.endpoint
+
+ e.Hostname = ""
+ e.DNSSuffix = ""
+
+ for _, variant := range e.Variants {
+ endpointVariant, unknown := parseVariantTags(variant.Tags)
+ if unknown {
+ continue
+ }
+
+ var ve endpoint
+ ve.mergeIn(e.endpoint)
+ ve.mergeIn(variant.endpoint)
+
+ (*s)[endpointKey{Region: region, Variant: endpointVariant}] = ve
+ }
+ }
+
+ return nil
+}
+
+type endpoint struct {
+ Hostname string `json:"hostname"`
+ Protocols []string `json:"protocols"`
+ CredentialScope credentialScope `json:"credentialScope"`
+
+ DNSSuffix string `json:"dnsSuffix"`
+
+ // Signature Version not used
+ SignatureVersions []string `json:"signatureVersions"`
+
+ // SSLCommonName not used.
+ SSLCommonName string `json:"sslCommonName"`
+
+ Deprecated boxedBool `json:"deprecated"`
+}
+
+// isZero returns whether the endpoint structure is an empty (zero) value.
+func (e endpoint) isZero() bool {
+ switch {
+ case len(e.Hostname) != 0:
+ return false
+ case len(e.Protocols) != 0:
+ return false
+ case e.CredentialScope != (credentialScope{}):
+ return false
+ case len(e.SignatureVersions) != 0:
+ return false
+ case len(e.SSLCommonName) != 0:
+ return false
+ }
+ return true
+}
+
+const (
+ defaultProtocol = "https"
+ defaultSigner = "v4"
+)
+
+var (
+ protocolPriority = []string{"https", "http"}
+ signerPriority = []string{"v4", "v2"}
+)
+
+func getByPriority(s []string, p []string, def string) string {
+ if len(s) == 0 {
+ return def
+ }
+
+ for i := 0; i < len(p); i++ {
+ for j := 0; j < len(s); j++ {
+ if s[j] == p[i] {
+ return s[j]
+ }
+ }
+ }
+
+ return s[0]
+}
+
+func (e endpoint) resolve(service, partitionID, region, dnsSuffixTemplateVariable, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) {
+ var merged endpoint
+ for _, def := range defs {
+ merged.mergeIn(def)
+ }
+ merged.mergeIn(e)
+ e = merged
+
+ signingRegion := e.CredentialScope.Region
+ if len(signingRegion) == 0 {
+ signingRegion = region
+ }
+
+ signingName := e.CredentialScope.Service
+ var signingNameDerived bool
+ if len(signingName) == 0 {
+ signingName = service
+ signingNameDerived = true
+ }
+
+ hostname := e.Hostname
+
+ if !validateInputRegion(region) {
+ return ResolvedEndpoint{}, fmt.Errorf("invalid region identifier format provided")
+ }
+
+ if len(merged.DNSSuffix) > 0 {
+ dnsSuffix = merged.DNSSuffix
+ }
+
+ u := strings.Replace(hostname, "{service}", service, 1)
+ u = strings.Replace(u, "{region}", region, 1)
+ u = strings.Replace(u, dnsSuffixTemplateVariable, dnsSuffix, 1)
+
+ scheme := getEndpointScheme(e.Protocols, opts.DisableSSL)
+ u = fmt.Sprintf("%s://%s", scheme, u)
+
+ if e.Deprecated == boxedTrue && opts.LogDeprecated && opts.Logger != nil {
+ opts.Logger.Log(fmt.Sprintf("endpoint identifier %q, url %q marked as deprecated", region, u))
+ }
+
+ return ResolvedEndpoint{
+ URL: u,
+ PartitionID: partitionID,
+ SigningRegion: signingRegion,
+ SigningName: signingName,
+ SigningNameDerived: signingNameDerived,
+ SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
+ }, nil
+}
+
+func getEndpointScheme(protocols []string, disableSSL bool) string {
+ if disableSSL {
+ return "http"
+ }
+
+ return getByPriority(protocols, protocolPriority, defaultProtocol)
+}
+
+func (e *endpoint) mergeIn(other endpoint) {
+ if len(other.Hostname) > 0 {
+ e.Hostname = other.Hostname
+ }
+ if len(other.Protocols) > 0 {
+ e.Protocols = other.Protocols
+ }
+ if len(other.SignatureVersions) > 0 {
+ e.SignatureVersions = other.SignatureVersions
+ }
+ if len(other.CredentialScope.Region) > 0 {
+ e.CredentialScope.Region = other.CredentialScope.Region
+ }
+ if len(other.CredentialScope.Service) > 0 {
+ e.CredentialScope.Service = other.CredentialScope.Service
+ }
+ if len(other.SSLCommonName) > 0 {
+ e.SSLCommonName = other.SSLCommonName
+ }
+ if len(other.DNSSuffix) > 0 {
+ e.DNSSuffix = other.DNSSuffix
+ }
+ if other.Deprecated != boxedBoolUnset {
+ e.Deprecated = other.Deprecated
+ }
+}
+
+type credentialScope struct {
+ Region string `json:"region"`
+ Service string `json:"service"`
+}
+
+type boxedBool int
+
+func (b *boxedBool) UnmarshalJSON(buf []byte) error {
+ v, err := strconv.ParseBool(string(buf))
+ if err != nil {
+ return err
+ }
+
+ if v {
+ *b = boxedTrue
+ } else {
+ *b = boxedFalse
+ }
+
+ return nil
+}
+
+const (
+ boxedBoolUnset boxedBool = iota
+ boxedFalse
+ boxedTrue
+)
+
+func validateInputRegion(region string) bool {
+ return regionValidationRegex.MatchString(region)
+}
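
The hostname templating in `resolve` above is plain one-shot string substitution rather than `text/template`. A minimal standalone sketch of the same idea (the `{dnsSuffix}` key and the example values are illustrative; the real key arrives as the `dnsSuffixTemplateVariable` argument):

```go
package main

import (
	"fmt"
	"strings"
)

// resolveHostname mirrors the substitution performed in endpoint.resolve:
// each template variable is replaced at most once, then a scheme is prefixed.
func resolveHostname(tmpl, service, region, dnsSuffix string) string {
	u := strings.Replace(tmpl, "{service}", service, 1)
	u = strings.Replace(u, "{region}", region, 1)
	u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1)
	return "https://" + u
}

func main() {
	// Prints: https://s3.us-south.cloud-object-storage.appdomain.cloud
	fmt.Println(resolveHostname(
		"{service}.{region}.{dnsSuffix}",
		"s3", "us-south", "cloud-object-storage.appdomain.cloud"))
}
```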
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/v3model_codegen.go
new file mode 100644
index 0000000000000..99284996f169b
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/endpoints/v3model_codegen.go
@@ -0,0 +1,413 @@
+//go:build codegen
+// +build codegen
+
+package endpoints
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "text/template"
+ "unicode"
+)
+
+// A CodeGenOptions are the options for code generating the endpoints into
+// Go code from the endpoints model definition.
+type CodeGenOptions struct {
+ // Options for how the model will be decoded.
+ DecodeModelOptions DecodeModelOptions
+
+ // Disables code generation of the service endpoint prefix IDs defined in
+ // the model.
+ DisableGenerateServiceIDs bool
+}
+
+// Set combines all of the option functions together
+func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) {
+ for _, fn := range optFns {
+ fn(d)
+ }
+}
+
+// CodeGenModel, given an endpoints model file, will decode it and attempt to
+// generate Go code from the model definition. An error will be returned if
+// the model cannot be decoded or the code cannot be generated.
+func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error {
+ var opts CodeGenOptions
+ opts.Set(optFns...)
+
+ resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) {
+ *d = opts.DecodeModelOptions
+ })
+ if err != nil {
+ return err
+ }
+
+ v := struct {
+ Resolver
+ CodeGenOptions
+ }{
+ Resolver: resolver,
+ CodeGenOptions: opts,
+ }
+
+ tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl))
+ if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil {
+ return fmt.Errorf("failed to execute template, %v", err)
+ }
+
+ return nil
+}
+
+func toSymbol(v string) string {
+ out := []rune{}
+ for _, c := range strings.Title(v) {
+ if !(unicode.IsNumber(c) || unicode.IsLetter(c)) {
+ continue
+ }
+
+ out = append(out, c)
+ }
+
+ return string(out)
+}
+
+func quoteString(v string) string {
+ return fmt.Sprintf("%q", v)
+}
+
+func regionConstName(p, r string) string {
+ return toSymbol(p) + toSymbol(r)
+}
+
+func partitionGetter(id string) string {
+ return fmt.Sprintf("%sPartition", toSymbol(id))
+}
+
+func partitionVarName(id string) string {
+ return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id)))
+}
+
+func listPartitionNames(ps partitions) string {
+ names := []string{}
+ switch len(ps) {
+ case 1:
+ return ps[0].Name
+ case 2:
+ return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name)
+ default:
+ for i, p := range ps {
+ if i == len(ps)-1 {
+ names = append(names, "and "+p.Name)
+ } else {
+ names = append(names, p.Name)
+ }
+ }
+ return strings.Join(names, ", ")
+ }
+}
+
+func boxedBoolIfSet(msg string, v boxedBool) string {
+ switch v {
+ case boxedTrue:
+ return fmt.Sprintf(msg, "boxedTrue")
+ case boxedFalse:
+ return fmt.Sprintf(msg, "boxedFalse")
+ default:
+ return ""
+ }
+}
+
+func stringIfSet(msg, v string) string {
+ if len(v) == 0 {
+ return ""
+ }
+
+ return fmt.Sprintf(msg, v)
+}
+
+func stringSliceIfSet(msg string, vs []string) string {
+ if len(vs) == 0 {
+ return ""
+ }
+
+ names := []string{}
+ for _, v := range vs {
+ names = append(names, `"`+v+`"`)
+ }
+
+ return fmt.Sprintf(msg, strings.Join(names, ","))
+}
+
+func endpointIsSet(v endpoint) bool {
+ return !reflect.DeepEqual(v, endpoint{})
+}
+
+func serviceSet(ps partitions) map[string]struct{} {
+ set := map[string]struct{}{}
+ for _, p := range ps {
+ for id := range p.Services {
+ set[id] = struct{}{}
+ }
+ }
+
+ return set
+}
+
+func endpointVariantSetter(variant endpointVariant) (string, error) {
+ if variant == 0 {
+ return "0", nil
+ }
+
+ if variant > (dualStackVariant) {
+ return "", fmt.Errorf("unknown endpoint variant")
+ }
+
+ var symbols []string
+ //IBM UNSUPPORTED
+ /*if variant&fipsVariant != 0 {
+ symbols = append(symbols, "fipsVariant")
+ }*/
+ if variant&dualStackVariant != 0 {
+ symbols = append(symbols, "dualStackVariant")
+ }
+ v := strings.Join(symbols, "|")
+
+ return v, nil
+}
+
+func endpointKeySetter(e endpointKey) (string, error) {
+ var sb strings.Builder
+ sb.WriteString("endpointKey{\n")
+ sb.WriteString(fmt.Sprintf("Region: %q,\n", e.Region))
+ if e.Variant != 0 {
+ variantSetter, err := endpointVariantSetter(e.Variant)
+ if err != nil {
+ return "", err
+ }
+ sb.WriteString(fmt.Sprintf("Variant: %s,\n", variantSetter))
+ }
+ sb.WriteString("}")
+ return sb.String(), nil
+}
+
+func defaultKeySetter(e defaultKey) (string, error) {
+ var sb strings.Builder
+ sb.WriteString("defaultKey{\n")
+ if e.Variant != 0 {
+ variantSetter, err := endpointVariantSetter(e.Variant)
+ if err != nil {
+ return "", err
+ }
+ sb.WriteString(fmt.Sprintf("Variant: %s,\n", variantSetter))
+ }
+ sb.WriteString("}")
+ return sb.String(), nil
+}
+
+var funcMap = template.FuncMap{
+ "ToSymbol": toSymbol,
+ "QuoteString": quoteString,
+ "RegionConst": regionConstName,
+ "PartitionGetter": partitionGetter,
+ "PartitionVarName": partitionVarName,
+ "ListPartitionNames": listPartitionNames,
+ "BoxedBoolIfSet": boxedBoolIfSet,
+ "StringIfSet": stringIfSet,
+ "StringSliceIfSet": stringSliceIfSet,
+ "EndpointIsSet": endpointIsSet,
+ "ServicesSet": serviceSet,
+ "EndpointVariantSetter": endpointVariantSetter,
+ "EndpointKeySetter": endpointKeySetter,
+ "DefaultKeySetter": defaultKeySetter,
+}
+
+const v3Tmpl = `
+{{ define "defaults" -}}
+// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
+
+package endpoints
+
+import (
+ "regexp"
+)
+
+ {{ template "partition consts" $.Resolver }}
+
+ {{ range $_, $partition := $.Resolver }}
+ {{ template "partition region consts" $partition }}
+ {{ end }}
+
+ {{ if not $.DisableGenerateServiceIDs -}}
+ {{ template "service consts" $.Resolver }}
+ {{- end }}
+
+ {{ template "endpoint resolvers" $.Resolver }}
+{{- end }}
+
+{{ define "partition consts" }}
+ // Partition identifiers
+ const (
+ {{ range $_, $p := . -}}
+ {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition.
+ {{ end -}}
+ )
+{{- end }}
+
+{{ define "partition region consts" }}
+ // {{ .Name }} partition's regions.
+ const (
+ {{ range $id, $region := .Regions -}}
+ {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}.
+ {{ end -}}
+ )
+{{- end }}
+
+{{ define "service consts" }}
+ // Service identifiers
+ const (
+ {{ $serviceSet := ServicesSet . -}}
+ {{ range $id, $_ := $serviceSet -}}
+ {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}.
+ {{ end -}}
+ )
+{{- end }}
+
+{{ define "endpoint resolvers" }}
+ // DefaultResolver returns an Endpoint resolver that will be able
+ // to resolve endpoints for: {{ ListPartitionNames . }}.
+ //
+ // Use DefaultPartitions() to get the list of the default partitions.
+ func DefaultResolver() Resolver {
+ return defaultPartitions
+ }
+
+ // DefaultPartitions returns a list of the partitions the SDK is bundled
+ // with. The available partitions are: {{ ListPartitionNames . }}.
+ //
+ // partitions := endpoints.DefaultPartitions
+ // for _, p := range partitions {
+ // // ... inspect partitions
+ // }
+ func DefaultPartitions() []Partition {
+ return defaultPartitions.Partitions()
+ }
+
+ var defaultPartitions = partitions{
+ {{ range $_, $partition := . -}}
+ {{ PartitionVarName $partition.ID }},
+ {{ end }}
+ }
+
+ {{ range $_, $partition := . -}}
+ {{ $name := PartitionGetter $partition.ID -}}
+ // {{ $name }} returns the Resolver for {{ $partition.Name }}.
+ func {{ $name }}() Partition {
+ return {{ PartitionVarName $partition.ID }}.Partition()
+ }
+ var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }}
+ {{ end }}
+{{ end }}
+
+{{ define "default partitions" }}
+ func DefaultPartitions() []Partition {
+ return []partition{
+ {{ range $_, $partition := . -}}
+ // {{ ToSymbol $partition.ID}}Partition(),
+ {{ end }}
+ }
+ }
+{{ end }}
+
+{{ define "gocode Partition" -}}
+partition{
+ {{ StringIfSet "ID: %q,\n" .ID -}}
+ {{ StringIfSet "Name: %q,\n" .Name -}}
+ {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}}
+ RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }},
+ {{ if (gt (len .Defaults) 0) -}}
+ Defaults: {{ template "gocode Defaults" .Defaults -}},
+ {{ end -}}
+ Regions: {{ template "gocode Regions" .Regions }},
+ Services: {{ template "gocode Services" .Services }},
+}
+{{- end }}
+
+{{ define "gocode RegionRegex" -}}
+regionRegex{
+ Regexp: func() *regexp.Regexp{
+ reg, _ := regexp.Compile({{ QuoteString .Regexp.String }})
+ return reg
+ }(),
+}
+{{- end }}
+
+{{ define "gocode Regions" -}}
+regions{
+ {{ range $id, $region := . -}}
+ "{{ $id }}": {{ template "gocode Region" $region }},
+ {{ end -}}
+}
+{{- end }}
+
+{{ define "gocode Region" -}}
+region{
+ {{ StringIfSet "Description: %q,\n" .Description -}}
+}
+{{- end }}
+
+{{ define "gocode Services" -}}
+services{
+ {{ range $id, $service := . -}}
+ "{{ $id }}": {{ template "gocode Service" $service }},
+ {{ end }}
+}
+{{- end }}
+
+{{ define "gocode Service" -}}
+service{
+ {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}}
+ {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}}
+ {{ if (gt (len .Defaults) 0) -}}
+ Defaults: {{ template "gocode Defaults" .Defaults -}},
+ {{ end -}}
+ {{ if .Endpoints -}}
+ Endpoints: {{ template "gocode Endpoints" .Endpoints }},
+ {{- end }}
+}
+{{- end }}
+
+{{ define "gocode Defaults" -}}
+endpointDefaults{
+ {{ range $id, $endpoint := . -}}
+ {{ DefaultKeySetter $id }}: {{ template "gocode Endpoint" $endpoint }},
+ {{ end }}
+}
+{{- end }}
+
+{{ define "gocode Endpoints" -}}
+serviceEndpoints{
+ {{ range $id, $endpoint := . -}}
+ {{ EndpointKeySetter $id }}: {{ template "gocode Endpoint" $endpoint }},
+ {{ end }}
+}
+{{- end }}
+
+{{ define "gocode Endpoint" -}}
+endpoint{
+ {{ StringIfSet "Hostname: %q,\n" .Hostname -}}
+ {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}}
+ {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}}
+ {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}}
+ {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}}
+ {{ if or .CredentialScope.Region .CredentialScope.Service -}}
+ CredentialScope: credentialScope{
+ {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}}
+ {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}}
+ },
+ {{- end }}
+ {{ BoxedBoolIfSet "Deprecated: %s,\n" .Deprecated -}}
+}
+{{- end }}
+`
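
The generator above is a conventional `text/template` pipeline: a `FuncMap` exposes helpers such as `ToSymbol`, and nested `define` blocks render each model type into Go source. A minimal sketch of the same pattern, reduced to one helper and one template (the template text here is hypothetical, not part of the vendored generator):

```go
package main

import (
	"os"
	"strings"
	"text/template"
	"unicode"
)

// toSymbol keeps only letters and digits after title-casing, the same
// normalization v3model_codegen.go applies to partition and region IDs.
func toSymbol(v string) string {
	out := []rune{}
	for _, c := range strings.Title(v) {
		if unicode.IsNumber(c) || unicode.IsLetter(c) {
			out = append(out, c)
		}
	}
	return string(out)
}

func main() {
	funcs := template.FuncMap{"ToSymbol": toSymbol}
	tmpl := template.Must(template.New("consts").Funcs(funcs).Parse(
		`const {{ ToSymbol .ID }}PartitionID = "{{ .ID }}"` + "\n"))

	// Prints: const AwsCnPartitionID = "aws-cn"
	_ = tmpl.Execute(os.Stdout, struct{ ID string }{ID: "aws-cn"})
}
```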
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/errors.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/errors.go
new file mode 100644
index 0000000000000..012fb487adb66
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/errors.go
@@ -0,0 +1,13 @@
+package aws
+
+import "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+
+var (
+ // ErrMissingRegion is an error that is returned if region configuration is
+ // not found.
+ ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)
+
+ // ErrMissingEndpoint is an error that is returned if an endpoint cannot be
+ // resolved for a service.
+ ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
+)
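
Both sentinels are `awserr.Error` values, so callers usually match on the error code rather than comparing against the variables directly. A small sketch of that pattern:

```go
package main

import (
	"fmt"

	"github.com/IBM/ibm-cos-sdk-go/aws"
	"github.com/IBM/ibm-cos-sdk-go/aws/awserr"
)

// codeOf extracts the awserr code when err is an awserr.Error.
func codeOf(err error) string {
	if aerr, ok := err.(awserr.Error); ok {
		return aerr.Code()
	}
	return ""
}

func main() {
	// Prints: MissingRegion
	fmt.Println(codeOf(aws.ErrMissingRegion))
}
```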
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/jsonvalue.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/jsonvalue.go
new file mode 100644
index 0000000000000..91a6f277a7eb2
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/jsonvalue.go
@@ -0,0 +1,12 @@
+package aws
+
+// JSONValue is a representation of a grab bag type that will be marshaled
+// into a json string. This type can be used just like any other map.
+//
+// Example:
+//
+// values := aws.JSONValue{
+// "Foo": "Bar",
+// }
+// values["Baz"] = "Qux"
+type JSONValue map[string]interface{}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/logger.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/logger.go
new file mode 100644
index 0000000000000..49674cc79ebd8
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/logger.go
@@ -0,0 +1,121 @@
+package aws
+
+import (
+ "log"
+ "os"
+)
+
+// A LogLevelType defines the level logging should be performed at. Used to instruct
+// the SDK which statements should be logged.
+type LogLevelType uint
+
+// LogLevel returns the pointer to a LogLevel. Should be used as a workaround
+// for not being able to take the address of a non-composite literal.
+func LogLevel(l LogLevelType) *LogLevelType {
+ return &l
+}
+
+// Value returns the LogLevel value or the default value LogOff if the LogLevel
+// is nil. Safe to use on nil value LogLevelTypes.
+func (l *LogLevelType) Value() LogLevelType {
+ if l != nil {
+ return *l
+ }
+ return LogOff
+}
+
+// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
+// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
+// LogLevel is nil, will default to LogOff comparison.
+func (l *LogLevelType) Matches(v LogLevelType) bool {
+ c := l.Value()
+ return c&v == v
+}
+
+// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
+// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
+// to LogOff comparison.
+func (l *LogLevelType) AtLeast(v LogLevelType) bool {
+ c := l.Value()
+ return c >= v
+}
+
+const (
+ // LogOff states that no logging should be performed by the SDK. This is the
+ // default state of the SDK, and should be used to disable all logging.
+ LogOff LogLevelType = iota * 0x1000
+
+ // LogDebug states that debug output should be logged by the SDK. This should
+ // be used to inspect requests made and responses received.
+ LogDebug
+)
+
+// Debug Logging Sub Levels
+const (
+ // LogDebugWithSigning states that the SDK should log request signing and
+ // presigning events. This should be used to log the signing details of
+ // requests for debugging. Will also enable LogDebug.
+ LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
+
+ // LogDebugWithHTTPBody states the SDK should log HTTP request and response
+ // bodies in addition to the headers and path. This should be used to
+ // see the body content of requests and responses made while using the SDK.
+ // Will also enable LogDebug.
+ LogDebugWithHTTPBody
+
+ // LogDebugWithRequestRetries states the SDK should log when service requests will
+ // be retried. This should be used when you want to log when service
+ // requests are being retried. Will also enable LogDebug.
+ LogDebugWithRequestRetries
+
+ // LogDebugWithRequestErrors states the SDK should log when service requests fail
+ // to build, send, validate, or unmarshal.
+ LogDebugWithRequestErrors
+
+ // LogDebugWithEventStreamBody states the SDK should log EventStream
+ // request and response bodies. This should be used to log the EventStream
+ // wire unmarshaled message content of requests and responses made while
+ // using the SDK. Will also enable LogDebug.
+ LogDebugWithEventStreamBody
+
+ // LogDebugWithDeprecated states the SDK should log details about deprecated functionality.
+ LogDebugWithDeprecated
+)
+
+// A Logger is a minimalistic interface for the SDK to log messages to. Should
+// be used to provide custom logging writers for the SDK to use.
+type Logger interface {
+ Log(...interface{})
+}
+
+// A LoggerFunc is a convenience type to wrap a function taking a variadic
+// list of arguments so that the Logger interface can be used.
+//
+// Example:
+// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
+// fmt.Fprintln(os.Stdout, args...)
+// })})
+type LoggerFunc func(...interface{})
+
+// Log calls the wrapped function with the arguments provided
+func (f LoggerFunc) Log(args ...interface{}) {
+ f(args...)
+}
+
+// NewDefaultLogger returns a Logger which will write log messages to stdout,
+// and use the same formatting runes as the stdlib log.Logger.
+func NewDefaultLogger() Logger {
+ return &defaultLogger{
+ logger: log.New(os.Stdout, "", log.LstdFlags),
+ }
+}
+
+// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
+type defaultLogger struct {
+ logger *log.Logger
+}
+
+// Log logs the parameters to the stdlib logger. See log.Println.
+func (l defaultLogger) Log(args ...interface{}) {
+ l.logger.Println(args...)
+}
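
The debug sub-levels are bit flags OR'd together with `LogDebug`, which is why `Matches` tests `c&v == v` rather than simple equality. A short sketch of how a combined level behaves:

```go
package main

import (
	"fmt"

	"github.com/IBM/ibm-cos-sdk-go/aws"
)

func main() {
	// Enable debug logging plus HTTP body logging in one value. Each
	// sub-level already carries the LogDebug bit, so OR-ing is idempotent.
	lvl := aws.LogDebug | aws.LogDebugWithHTTPBody

	fmt.Println(lvl.Matches(aws.LogDebug))             // true
	fmt.Println(lvl.Matches(aws.LogDebugWithHTTPBody)) // true
	fmt.Println(lvl.Matches(aws.LogDebugWithSigning))  // false
}
```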
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/connection_reset_error.go
new file mode 100644
index 0000000000000..2ba3c56c11fb9
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/connection_reset_error.go
@@ -0,0 +1,19 @@
+package request
+
+import (
+ "strings"
+)
+
+func isErrConnectionReset(err error) bool {
+ if strings.Contains(err.Error(), "read: connection reset") {
+ return false
+ }
+
+ if strings.Contains(err.Error(), "use of closed network connection") ||
+ strings.Contains(err.Error(), "connection reset") ||
+ strings.Contains(err.Error(), "broken pipe") {
+ return true
+ }
+
+ return false
+}
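
The early `return false` is the subtle part: `"connection reset"` is a substring of `"read: connection reset"`, so without the guard, errors surfaced while reading a response would also be classified as resets. A small sketch of the resulting classification (the helper is unexported, so it is mirrored here purely for illustration):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// isReset mirrors isErrConnectionReset for demonstration only.
func isReset(err error) bool {
	if strings.Contains(err.Error(), "read: connection reset") {
		return false
	}
	return strings.Contains(err.Error(), "use of closed network connection") ||
		strings.Contains(err.Error(), "connection reset") ||
		strings.Contains(err.Error(), "broken pipe")
}

func main() {
	fmt.Println(isReset(errors.New("write tcp: broken pipe")))         // true
	fmt.Println(isReset(errors.New("connection reset by peer")))       // true
	fmt.Println(isReset(errors.New("read: connection reset by peer"))) // false
}
```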
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/handlers.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/handlers.go
new file mode 100644
index 0000000000000..9556332b65e77
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/handlers.go
@@ -0,0 +1,346 @@
+package request
+
+import (
+ "fmt"
+ "strings"
+)
+
+// A Handlers provides a collection of request handlers for various
+// stages of handling requests.
+type Handlers struct {
+ Validate HandlerList
+ Build HandlerList
+ BuildStream HandlerList
+ Sign HandlerList
+ Send HandlerList
+ ValidateResponse HandlerList
+ Unmarshal HandlerList
+ UnmarshalStream HandlerList
+ UnmarshalMeta HandlerList
+ UnmarshalError HandlerList
+ Retry HandlerList
+ AfterRetry HandlerList
+ CompleteAttempt HandlerList
+ Complete HandlerList
+}
+
+// Copy returns a copy of this handler's lists.
+func (h *Handlers) Copy() Handlers {
+ return Handlers{
+ Validate: h.Validate.copy(),
+ Build: h.Build.copy(),
+ BuildStream: h.BuildStream.copy(),
+ Sign: h.Sign.copy(),
+ Send: h.Send.copy(),
+ ValidateResponse: h.ValidateResponse.copy(),
+ Unmarshal: h.Unmarshal.copy(),
+ UnmarshalStream: h.UnmarshalStream.copy(),
+ UnmarshalError: h.UnmarshalError.copy(),
+ UnmarshalMeta: h.UnmarshalMeta.copy(),
+ Retry: h.Retry.copy(),
+ AfterRetry: h.AfterRetry.copy(),
+ CompleteAttempt: h.CompleteAttempt.copy(),
+ Complete: h.Complete.copy(),
+ }
+}
+
+// Clear removes callback functions for all handlers.
+func (h *Handlers) Clear() {
+ h.Validate.Clear()
+ h.Build.Clear()
+ h.BuildStream.Clear()
+ h.Send.Clear()
+ h.Sign.Clear()
+ h.Unmarshal.Clear()
+ h.UnmarshalStream.Clear()
+ h.UnmarshalMeta.Clear()
+ h.UnmarshalError.Clear()
+ h.ValidateResponse.Clear()
+ h.Retry.Clear()
+ h.AfterRetry.Clear()
+ h.CompleteAttempt.Clear()
+ h.Complete.Clear()
+}
+
+// IsEmpty returns whether there are no handlers in any of the handler lists.
+func (h *Handlers) IsEmpty() bool {
+ if h.Validate.Len() != 0 {
+ return false
+ }
+ if h.Build.Len() != 0 {
+ return false
+ }
+ if h.BuildStream.Len() != 0 {
+ return false
+ }
+ if h.Send.Len() != 0 {
+ return false
+ }
+ if h.Sign.Len() != 0 {
+ return false
+ }
+ if h.Unmarshal.Len() != 0 {
+ return false
+ }
+ if h.UnmarshalStream.Len() != 0 {
+ return false
+ }
+ if h.UnmarshalMeta.Len() != 0 {
+ return false
+ }
+ if h.UnmarshalError.Len() != 0 {
+ return false
+ }
+ if h.ValidateResponse.Len() != 0 {
+ return false
+ }
+ if h.Retry.Len() != 0 {
+ return false
+ }
+ if h.AfterRetry.Len() != 0 {
+ return false
+ }
+ if h.CompleteAttempt.Len() != 0 {
+ return false
+ }
+ if h.Complete.Len() != 0 {
+ return false
+ }
+
+ return true
+}
+
+// A HandlerListRunItem represents an entry in the HandlerList which
+// is being run.
+type HandlerListRunItem struct {
+ Index int
+ Handler NamedHandler
+ Request *Request
+}
+
+// A HandlerList manages zero or more handlers in a list.
+type HandlerList struct {
+ list []NamedHandler
+
+ // Called after each request handler in the list is called. If set
+ // and the func returns true the HandlerList will continue to iterate
+ // over the request handlers. If false is returned the HandlerList
+ // will stop iterating.
+ //
+ // Should be used if extra logic is to be performed between each handler
+ // in the list. This can be used to terminate a list's iteration
+ // based on a condition, such as an error, like HandlerListStopOnError,
+ // or for logging, like HandlerListLogItem.
+ AfterEachFn func(item HandlerListRunItem) bool
+}
+
+// A NamedHandler is a struct that contains a name and function callback.
+type NamedHandler struct {
+ Name string
+ Fn func(*Request)
+}
+
+// copy creates a copy of the handler list.
+func (l *HandlerList) copy() HandlerList {
+ n := HandlerList{
+ AfterEachFn: l.AfterEachFn,
+ }
+ if len(l.list) == 0 {
+ return n
+ }
+
+ n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...)
+ return n
+}
+
+// Clear clears the handler list.
+func (l *HandlerList) Clear() {
+ l.list = l.list[0:0]
+}
+
+// Len returns the number of handlers in the list.
+func (l *HandlerList) Len() int {
+ return len(l.list)
+}
+
+// PushBack pushes handler f to the back of the handler list.
+func (l *HandlerList) PushBack(f func(*Request)) {
+ l.PushBackNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushBackNamed pushes the named handler n to the back of the handler list.
+func (l *HandlerList) PushBackNamed(n NamedHandler) {
+ if cap(l.list) == 0 {
+ l.list = make([]NamedHandler, 0, 5)
+ }
+ l.list = append(l.list, n)
+}
+
+// PushFront pushes handler f to the front of the handler list.
+func (l *HandlerList) PushFront(f func(*Request)) {
+ l.PushFrontNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushFrontNamed pushes the named handler n to the front of the handler list.
+func (l *HandlerList) PushFrontNamed(n NamedHandler) {
+ if cap(l.list) == len(l.list) {
+ // Allocating new list required
+ l.list = append([]NamedHandler{n}, l.list...)
+ } else {
+ // Enough room to prepend into list.
+ l.list = append(l.list, NamedHandler{})
+ copy(l.list[1:], l.list)
+ l.list[0] = n
+ }
+}
+
+// Remove removes a NamedHandler n
+func (l *HandlerList) Remove(n NamedHandler) {
+ l.RemoveByName(n.Name)
+}
+
+// RemoveByName removes a NamedHandler by name.
+func (l *HandlerList) RemoveByName(name string) {
+ for i := 0; i < len(l.list); i++ {
+ m := l.list[i]
+ if m.Name == name {
+ // Shift array preventing creating new arrays
+ copy(l.list[i:], l.list[i+1:])
+ l.list[len(l.list)-1] = NamedHandler{}
+ l.list = l.list[:len(l.list)-1]
+
+ // decrement list so next check to length is correct
+ i--
+ }
+ }
+}
+
+// SwapNamed will swap out any existing handlers with the same name as the
+// passed in NamedHandler, returning true if handlers were swapped. False is
+// returned otherwise.
+func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) {
+ for i := 0; i < len(l.list); i++ {
+ if l.list[i].Name == n.Name {
+ l.list[i].Fn = n.Fn
+ swapped = true
+ }
+ }
+
+ return swapped
+}
+
+// Swap will swap out all handlers matching the name passed in, replacing
+// them with the given handler. True is returned if any handlers were swapped.
+func (l *HandlerList) Swap(name string, replace NamedHandler) bool {
+ var swapped bool
+
+ for i := 0; i < len(l.list); i++ {
+ if l.list[i].Name == name {
+ l.list[i] = replace
+ swapped = true
+ }
+ }
+
+ return swapped
+}
+
+// SetBackNamed will replace the named handler if it exists in the handler list.
+// If the handler does not exist the handler will be added to the end of the list.
+func (l *HandlerList) SetBackNamed(n NamedHandler) {
+ if !l.SwapNamed(n) {
+ l.PushBackNamed(n)
+ }
+}
+
+// SetFrontNamed will replace the named handler if it exists in the handler list.
+// If the handler does not exist the handler will be added to the beginning of
+// the list.
+func (l *HandlerList) SetFrontNamed(n NamedHandler) {
+ if !l.SwapNamed(n) {
+ l.PushFrontNamed(n)
+ }
+}
+
+// Run executes all handlers in the list with a given request object.
+func (l *HandlerList) Run(r *Request) {
+ for i, h := range l.list {
+ h.Fn(r)
+ item := HandlerListRunItem{
+ Index: i, Handler: h, Request: r,
+ }
+ if l.AfterEachFn != nil && !l.AfterEachFn(item) {
+ return
+ }
+ }
+}
+
+// HandlerListLogItem logs the request handler and the state of the
+// request's Error value. Always returns true to continue iterating
+// request handlers in a HandlerList.
+func HandlerListLogItem(item HandlerListRunItem) bool {
+ if item.Request.Config.Logger == nil {
+ return true
+ }
+ item.Request.Config.Logger.Log("DEBUG: RequestHandler",
+ item.Index, item.Handler.Name, item.Request.Error)
+
+ return true
+}
+
+// HandlerListStopOnError returns false to stop the HandlerList iterating
+// over request handlers if Request.Error is not nil. True otherwise
+// to continue iterating.
+func HandlerListStopOnError(item HandlerListRunItem) bool {
+ return item.Request.Error == nil
+}
+
+// WithAppendUserAgent will add a string to the user agent prefixed with a
+// single white space.
+func WithAppendUserAgent(s string) Option {
+ return func(r *Request) {
+ r.Handlers.Build.PushBack(func(r2 *Request) {
+ AddToUserAgent(r, s)
+ })
+ }
+}
+
+// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
+// header. If the extra parameters are provided, they will be added as metadata
+// to the name/version pair, resulting in the following format:
+// "name/version (extra0; extra1; ...)"
+// The user agent part will be concatenated with this current request's user agent string.
+func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
+ ua := fmt.Sprintf("%s/%s", name, version)
+ if len(extra) > 0 {
+ ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
+ }
+ return func(r *Request) {
+ AddToUserAgent(r, ua)
+ }
+}
+
+// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
+// The input string will be concatenated with the current request's user agent string.
+func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
+ return func(r *Request) {
+ AddToUserAgent(r, s)
+ }
+}
+
+// WithSetRequestHeaders updates the operation request's HTTP header to contain
+// the header key value pairs provided. If the header key already exists in the
+// request's HTTP header set, the existing value(s) will be replaced.
+//
+// Header keys added will be added in canonical format with title casing
+// applied via the http.Header.Set method.
+func WithSetRequestHeaders(h map[string]string) Option {
+ return withRequestHeader(h).SetRequestHeaders
+}
+
+type withRequestHeader map[string]string
+
+func (h withRequestHeader) SetRequestHeaders(r *Request) {
+ for k, v := range h {
+ r.HTTPRequest.Header.Set(k, v)
+ }
+}
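
A short sketch of the handler-list mechanics: named handlers run in registration order, and an `AfterEachFn` such as `HandlerListStopOnError` can halt iteration once a handler records an error. The handler names and error are hypothetical:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/IBM/ibm-cos-sdk-go/aws/request"
)

func main() {
	var l request.HandlerList
	l.AfterEachFn = request.HandlerListStopOnError

	l.PushBackNamed(request.NamedHandler{Name: "first", Fn: func(r *request.Request) {
		fmt.Println("first ran")
		r.Error = errors.New("boom") // AfterEachFn sees this and stops iteration
	}})
	l.PushBackNamed(request.NamedHandler{Name: "second", Fn: func(r *request.Request) {
		fmt.Println("second ran") // never printed
	}})

	l.Run(&request.Request{})
}
```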
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/http_request.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/http_request.go
new file mode 100644
index 0000000000000..79f79602b03f8
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/http_request.go
@@ -0,0 +1,24 @@
+package request
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+)
+
+func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
+ req := new(http.Request)
+ *req = *r
+ req.URL = &url.URL{}
+ *req.URL = *r.URL
+ req.Body = body
+
+ req.Header = http.Header{}
+ for k, v := range r.Header {
+ for _, vv := range v {
+ req.Header.Add(k, vv)
+ }
+ }
+
+ return req
+}
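
The explicit copies of `URL` and `Header` matter because the retry path mutates the copied request; a plain struct copy would share those pointer-backed fields with the original. The same idea with only the standard library (`copyHTTPRequest` itself is unexported):

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func copyReq(r *http.Request) *http.Request {
	req := new(http.Request)
	*req = *r              // shallow copy of all fields
	req.URL = new(url.URL) // then deep-copy the mutable ones
	*req.URL = *r.URL
	req.Header = r.Header.Clone()
	return req
}

func main() {
	orig, _ := http.NewRequest("GET", "https://example.com", nil)
	orig.Header.Set("X-Attempt", "1")

	cp := copyReq(orig)
	cp.Header.Set("X-Attempt", "2")

	fmt.Println(orig.Header.Get("X-Attempt")) // still "1"
}
```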
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/offset_reader.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/offset_reader.go
new file mode 100644
index 0000000000000..d6c80a4291b95
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/offset_reader.go
@@ -0,0 +1,65 @@
+package request
+
+import (
+ "io"
+ "sync"
+
+ "github.com/IBM/ibm-cos-sdk-go/internal/sdkio"
+)
+
+// offsetReader is a thread-safe io.ReadCloser to prevent racing
+// with retrying requests
+type offsetReader struct {
+ buf io.ReadSeeker
+ lock sync.Mutex
+ closed bool
+}
+
+func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) {
+ reader := &offsetReader{}
+ _, err := buf.Seek(offset, sdkio.SeekStart)
+ if err != nil {
+ return nil, err
+ }
+
+ reader.buf = buf
+ return reader, nil
+}
+
+// Close will close the offset reader's access to the underlying
+// io.ReadSeeker.
+func (o *offsetReader) Close() error {
+ o.lock.Lock()
+ defer o.lock.Unlock()
+ o.closed = true
+ return nil
+}
+
+// Read is a thread-safe read of the underlying io.ReadSeeker
+func (o *offsetReader) Read(p []byte) (int, error) {
+ o.lock.Lock()
+ defer o.lock.Unlock()
+
+ if o.closed {
+ return 0, io.EOF
+ }
+
+ return o.buf.Read(p)
+}
+
+// Seek is a thread-safe seeking operation.
+func (o *offsetReader) Seek(offset int64, whence int) (int64, error) {
+ o.lock.Lock()
+ defer o.lock.Unlock()
+
+ return o.buf.Seek(offset, whence)
+}
+
+// CloseAndCopy will close the current offset reader and return a new
+// offsetReader over the same underlying buffer, seeked to the given offset.
+func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) {
+ if err := o.Close(); err != nil {
+ return nil, err
+ }
+ return newOffsetReader(o.buf, offset)
+}
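
The retry machinery depends on being able to seek the shared `io.ReadSeeker` back to the recorded start offset before each attempt, while `Close` makes any lingering reader from a previous attempt observe EOF instead of racing the new one. The seek-and-retry essence, standard library only:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	body := bytes.NewReader([]byte("payload"))

	// Attempt 1 consumes the body.
	io.Copy(io.Discard, body)

	// Before a retry, rewind to the original start offset (0 here),
	// which is what newOffsetReader does with the recorded BodyStart.
	body.Seek(0, io.SeekStart)

	b, _ := io.ReadAll(body)
	fmt.Println(string(b)) // payload
}
```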
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/request.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/request.go
new file mode 100644
index 0000000000000..bef2b9541f801
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/request.go
@@ -0,0 +1,722 @@
+package request
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/client/metadata"
+ "github.com/IBM/ibm-cos-sdk-go/internal/sdkio"
+)
+
+const (
+ // ErrCodeSerialization is the serialization error code that is received
+ // during protocol unmarshaling.
+ ErrCodeSerialization = "SerializationError"
+
+ // ErrCodeRead is an error that is returned during HTTP reads.
+ ErrCodeRead = "ReadError"
+
+ // ErrCodeResponseTimeout is the connection timeout error that is received
+ // during body reads.
+ ErrCodeResponseTimeout = "ResponseTimeout"
+
+ // ErrCodeInvalidPresignExpire is returned when the expire time provided to
+ // presign is invalid
+ ErrCodeInvalidPresignExpire = "InvalidPresignExpireError"
+
+ // CanceledErrorCode is the error code that will be returned by an
+ // API request that was canceled. Requests given a aws.Context may
+ // return this error when canceled.
+ CanceledErrorCode = "RequestCanceled"
+
+ // ErrCodeRequestError is an error preventing the SDK from continuing to
+ // process the request.
+ ErrCodeRequestError = "RequestError"
+)
+
+// A Request is the service request to be made.
+type Request struct {
+ Config aws.Config
+ ClientInfo metadata.ClientInfo
+ Handlers Handlers
+
+ Retryer
+ AttemptTime time.Time
+ Time time.Time
+ Operation *Operation
+ HTTPRequest *http.Request
+ HTTPResponse *http.Response
+ Body io.ReadSeeker
+ streamingBody io.ReadCloser
+ BodyStart int64 // offset from beginning of Body that the request body starts
+ Params interface{}
+ Error error
+ Data interface{}
+ RequestID string
+ RetryCount int
+ Retryable *bool
+ RetryDelay time.Duration
+ NotHoist bool
+ SignedHeaderVals http.Header
+ LastSignedAt time.Time
+ DisableFollowRedirects bool
+
+ // Additional API error codes that should be retried. IsErrorRetryable
+ // will consider these codes in addition to its built in cases.
+ RetryErrorCodes []string
+
+ // Additional API error codes that should be retried with throttle backoff
+ // delay. IsErrorThrottle will consider these codes in addition to its
+ // built in cases.
+ ThrottleErrorCodes []string
+
+ // A value greater than 0 instructs the request to be signed as Presigned URL
+ // You should not set this field directly. Instead use Request's
+ // Presign or PresignRequest methods.
+ ExpireTime time.Duration
+
+ context aws.Context
+
+ built bool
+
+ // Need to persist an intermediate body between the input Body and HTTP
+ // request body because the HTTP Client's transport can maintain a reference
+ // to the HTTP request's body after the client has returned. This value is
+ // safe to use concurrently and wrap the input Body for each HTTP request.
+ safeBody *offsetReader
+}
+
+// An Operation is the service API operation to be made.
+type Operation struct {
+ Name string
+ HTTPMethod string
+ HTTPPath string
+ *Paginator
+
+ BeforePresignFn func(r *Request) error
+}
+
+// New returns a new Request pointer for the service API operation and
+// parameters.
+//
+// A Retryer should be provided to direct how the request is retried. If
+// Retryer is nil, a default no retry value will be used. You can use
+// NoOpRetryer in the Client package to disable retry behavior directly.
+//
+// Params is any value of input parameters to be the request payload.
+// Data is pointer value to an object which the request's response
+// payload will be deserialized to.
+func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
+ retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+
+ if retryer == nil {
+ retryer = noOpRetryer{}
+ }
+
+ method := operation.HTTPMethod
+ if method == "" {
+ method = "POST"
+ }
+
+ httpReq, _ := http.NewRequest(method, "", nil)
+
+ var err error
+ httpReq.URL, err = url.Parse(clientInfo.Endpoint)
+ if err != nil {
+ httpReq.URL = &url.URL{}
+ err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
+ }
+
+ if len(operation.HTTPPath) != 0 {
+ opHTTPPath := operation.HTTPPath
+ var opQueryString string
+ if idx := strings.Index(opHTTPPath, "?"); idx >= 0 {
+ opQueryString = opHTTPPath[idx+1:]
+ opHTTPPath = opHTTPPath[:idx]
+ }
+
+ if strings.HasSuffix(httpReq.URL.Path, "/") && strings.HasPrefix(opHTTPPath, "/") {
+ opHTTPPath = opHTTPPath[1:]
+ }
+ httpReq.URL.Path += opHTTPPath
+ httpReq.URL.RawQuery = opQueryString
+ }
+
+ r := &Request{
+ Config: cfg,
+ ClientInfo: clientInfo,
+ Handlers: handlers.Copy(),
+
+ Retryer: retryer,
+ Time: time.Now(),
+ ExpireTime: 0,
+ Operation: operation,
+ HTTPRequest: httpReq,
+ Body: nil,
+ Params: params,
+ Error: err,
+ Data: data,
+ }
+ r.SetBufferBody([]byte{})
+
+ return r
+}
+
+// A Option is a functional option that can augment or modify a request when
+// using a WithContext API operation method.
+type Option func(*Request)
+
+// WithGetResponseHeader builds a request Option which will retrieve a single
+// header value from the HTTP Response. If there are multiple values for the
+// header key use WithGetResponseHeaders instead to access the http.Header
+// map directly. The passed in val pointer must be non-nil.
+//
+// This Option can be used multiple times with a single API operation.
+//
+// var id2, versionID string
+// svc.PutObjectWithContext(ctx, params,
+// request.WithGetResponseHeader("x-amz-id-2", &id2),
+// request.WithGetResponseHeader("x-amz-version-id", &versionID),
+// )
+func WithGetResponseHeader(key string, val *string) Option {
+ return func(r *Request) {
+ r.Handlers.Complete.PushBack(func(req *Request) {
+ *val = req.HTTPResponse.Header.Get(key)
+ })
+ }
+}
+
+// WithGetResponseHeaders builds a request Option which will retrieve the
+// headers from the HTTP response and assign them to the passed in headers
+// variable. The passed in headers pointer must be non-nil.
+//
+// var headers http.Header
+// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers))
+func WithGetResponseHeaders(headers *http.Header) Option {
+ return func(r *Request) {
+ r.Handlers.Complete.PushBack(func(req *Request) {
+ *headers = req.HTTPResponse.Header
+ })
+ }
+}
+
+// WithLogLevel is a request option that will set the request to use a specific
+// log level when the request is made.
+//
+// svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody)
+func WithLogLevel(l aws.LogLevelType) Option {
+ return func(r *Request) {
+ r.Config.LogLevel = aws.LogLevel(l)
+ }
+}
+
+// ApplyOptions will apply each option to the request, calling them in the
+// order they were provided.
+func (r *Request) ApplyOptions(opts ...Option) {
+ for _, opt := range opts {
+ opt(r)
+ }
+}
+
+// Context will always return a non-nil context. If Request does not have a
+// context, aws.BackgroundContext will be returned.
+func (r *Request) Context() aws.Context {
+ if r.context != nil {
+ return r.context
+ }
+ return aws.BackgroundContext()
+}
+
+// SetContext adds a Context to the current request that can be used to cancel
+// an in-flight request. The Context value must not be nil, or this method will
+// panic.
+//
+// Unlike http.Request.WithContext, SetContext does not return a copy of the
+// Request. It is not safe to use a single Request value for multiple
+// requests. A new Request should be created for each API operation request.
+//
+// Go 1.6 and below:
+// The http.Request's Cancel field will be set to the Done() value of
+// the context. This will overwrite the Cancel field's value.
+//
+// Go 1.7 and above:
+// The http.Request.WithContext will be used to set the context on the underlying
+// http.Request. This will create a shallow copy of the http.Request. The SDK
+// may create sub contexts in the future for nested requests such as retries.
+func (r *Request) SetContext(ctx aws.Context) {
+ if ctx == nil {
+ panic("context cannot be nil")
+ }
+ setRequestContext(r, ctx)
+}
+
+// WillRetry returns whether the request can be retried.
+func (r *Request) WillRetry() bool {
+ if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody {
+ return false
+ }
+ return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
+}
+
+func fmtAttemptCount(retryCount, maxRetries int) string {
+ return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries)
+}
+
+// ParamsFilled returns whether the request's parameters have been populated
+// and are valid. False is returned if no parameters are provided or they
+// are invalid.
+func (r *Request) ParamsFilled() bool {
+ return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
+// DataFilled returns true if the request's data for response deserialization
+// target has been set and is valid. False is returned if data is not
+// set, or is invalid.
+func (r *Request) DataFilled() bool {
+ return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+ r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+ r.SetReaderBody(strings.NewReader(s))
+}
+
+// SetReaderBody will set the request's body reader.
+func (r *Request) SetReaderBody(reader io.ReadSeeker) {
+ r.Body = reader
+
+ if aws.IsReaderSeekable(reader) {
+ var err error
+ // Get the Body's current offset so retries will start from the same
+ // initial position.
+ r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent)
+ if err != nil {
+ r.Error = awserr.New(ErrCodeSerialization,
+ "failed to determine start of request body", err)
+ return
+ }
+ }
+ r.ResetBody()
+}
+
+// SetStreamingBody set the reader to be used for the request that will stream
+// bytes to the server. Request's Body must not be set to any reader.
+func (r *Request) SetStreamingBody(reader io.ReadCloser) {
+ r.streamingBody = reader
+ r.SetReaderBody(aws.ReadSeekCloser(reader))
+}
+
+// Presign returns the request's signed URL. Error will be returned
+// if the signing fails. The expire parameter is only used for presigned Amazon
+// S3 API requests. All other AWS services will use a fixed expiration
+// time of 15 minutes.
+//
+// It is invalid to create a presigned URL with an expire duration of 0 or
+// less. An error is returned if the expire duration is 0 or less.
+func (r *Request) Presign(expire time.Duration) (string, error) {
+ r = r.copy()
+
+ // Presign requires all headers be hoisted. There is no way to retrieve
+ // the signed headers not hoisted without this, making the presigned URL
+ // useless.
+ r.NotHoist = false
+
+ u, _, err := getPresignedURL(r, expire)
+ return u, err
+}
+
+// PresignRequest behaves just like Presign, with the addition of returning a
+// set of headers that were signed. The expire parameter is only used for
+// presigned Amazon S3 API requests. All other AWS services will use a fixed
+// expiration time of 15 minutes.
+//
+// It is invalid to create a presigned URL with an expire duration of 0 or
+// less. An error is returned if the expire duration is 0 or less.
+//
+// Returns the URL string for the API operation with signature in the query string,
+// and the HTTP headers that were included in the signature. These headers must
+// be included in any HTTP request made with the presigned URL.
+//
+// To prevent hoisting any headers to the query string set NotHoist to true on
+// this Request value prior to calling PresignRequest.
+func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) {
+ r = r.copy()
+ return getPresignedURL(r, expire)
+}
+
+// IsPresigned returns true if the request represents a presigned API url.
+func (r *Request) IsPresigned() bool {
+ return r.ExpireTime != 0
+}
+
+func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) {
+ if expire <= 0 {
+ return "", nil, awserr.New(
+ ErrCodeInvalidPresignExpire,
+ "presigned URL requires an expire duration greater than 0",
+ nil,
+ )
+ }
+
+ r.ExpireTime = expire
+
+ if r.Operation.BeforePresignFn != nil {
+ if err := r.Operation.BeforePresignFn(r); err != nil {
+ return "", nil, err
+ }
+ }
+
+ if err := r.Sign(); err != nil {
+ return "", nil, err
+ }
+
+ return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
+}
+
+const (
+ notRetrying = "not retrying"
+)
+
+func debugLogReqError(r *Request, stage, retryStr string, err error) {
+ if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
+ return
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
+ stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
+}
+
+// Build will build the request's object so it can be signed and sent
+// to the service. Build will also validate all the request's parameters.
+// Any additional build Handlers set on this request will be run
+// in the order they were set.
+//
+// The request will only be built once. Multiple calls to build will have
+// no effect.
+//
+// If any Validate or Build errors occur the build will stop and the error
+// which occurred will be returned.
+func (r *Request) Build() error {
+ if !r.built {
+ r.Handlers.Validate.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Validate Request", notRetrying, r.Error)
+ return r.Error
+ }
+ r.Handlers.Build.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Build Request", notRetrying, r.Error)
+ return r.Error
+ }
+ r.built = true
+ }
+
+ return r.Error
+}
+
+// Sign will sign the request, returning error if errors are encountered.
+//
+// Sign will build the request prior to signing. All Sign Handlers will
+// be executed in the order they were set.
+func (r *Request) Sign() error {
+ r.Build()
+ if r.Error != nil {
+ debugLogReqError(r, "Build Request", notRetrying, r.Error)
+ return r.Error
+ }
+
+ SanitizeHostForHeader(r.HTTPRequest)
+
+ r.Handlers.Sign.Run(r)
+ return r.Error
+}
+
+func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) {
+ if r.streamingBody != nil {
+ return r.streamingBody, nil
+ }
+
+ if r.safeBody != nil {
+ r.safeBody.Close()
+ }
+
+ r.safeBody, err = newOffsetReader(r.Body, r.BodyStart)
+ if err != nil {
+ return nil, awserr.New(ErrCodeSerialization,
+ "failed to get next request body reader", err)
+ }
+
+ // Go 1.8 tightened and clarified the rules code needs to use when building
+ // requests with the http package. Go 1.8 removed the automatic detection
+ // of if the Request.Body was empty, or actually had bytes in it. The SDK
+ // always sets the Request.Body even if it is empty and should not actually
+ // be sent. This is incorrect.
+ //
+ // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http
+ // client that the request really should be sent without a body. The
+ // Request.Body cannot be set to nil, which is preferable, because the
+ // field is exported and could introduce nil pointer dereferences for users
+ // of the SDK if they used that field.
+ //
+ // Related golang/go#18257
+ l, err := aws.SeekerLen(r.Body)
+ if err != nil {
+ return nil, awserr.New(ErrCodeSerialization,
+ "failed to compute request body size", err)
+ }
+
+ if l == 0 {
+ body = NoBody
+ } else if l > 0 {
+ body = r.safeBody
+ } else {
+ // Hack to prevent sending bodies for methods where the body
+ // should be ignored by the server. Sending bodies on these
+ // methods without an associated ContentLength will cause the
+ // request to socket timeout because the server does not handle
+ // Transfer-Encoding: chunked bodies for these methods.
+ //
+ // This would only happen if an aws.ReaderSeekerCloser was used with
+ // an io.Reader that was not also an io.Seeker, or did not implement
+ // the Len() method.
+ switch r.Operation.HTTPMethod {
+ case "GET", "HEAD", "DELETE":
+ body = NoBody
+ default:
+ body = r.safeBody
+ }
+ }
+
+ return body, nil
+}
+
+// GetBody will return an io.ReadSeeker of the Request's underlying
+// input body with a concurrency safe wrapper.
+func (r *Request) GetBody() io.ReadSeeker {
+ return r.safeBody
+}
+
+// Send will send the request, returning error if errors are encountered.
+//
+// Send will sign the request prior to sending. All Send Handlers will
+// be executed in the order they were set.
+//
+// Canceling a request is non-deterministic. If a request has been canceled,
+// then the transport will choose, randomly, one of the state channels during
+// reads or getting the connection.
+//
+// readLoop() and getConn(req *Request, cm connectMethod)
+// https://github.com/golang/go/blob/master/src/net/http/transport.go
+//
+// Send will not close the request.Request's body.
+func (r *Request) Send() error {
+ defer func() {
+ // Ensure a non-nil HTTPResponse parameter is set to ensure handlers
+ // checking for HTTPResponse values, don't fail.
+ if r.HTTPResponse == nil {
+ r.HTTPResponse = &http.Response{
+ Header: http.Header{},
+ Body: ioutil.NopCloser(&bytes.Buffer{}),
+ }
+ }
+ // Regardless of success or failure of the request trigger the Complete
+ // request handlers.
+ r.Handlers.Complete.Run(r)
+ }()
+
+ if err := r.Error; err != nil {
+ return err
+ }
+
+ for {
+ r.Error = nil
+ r.AttemptTime = time.Now()
+
+ if err := r.Sign(); err != nil {
+ debugLogReqError(r, "Sign Request", notRetrying, err)
+ return err
+ }
+
+ if err := r.sendRequest(); err == nil {
+ return nil
+ }
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+
+ if r.Error != nil || !aws.BoolValue(r.Retryable) {
+ return r.Error
+ }
+
+ if err := r.prepareRetry(); err != nil {
+ r.Error = err
+ return err
+ }
+ }
+}
+
+func (r *Request) prepareRetry() error {
+ if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
+ r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
+ r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
+ }
+
+ // The previous http.Request will have a reference to the r.Body
+ // and the HTTP Client's Transport may still be reading from
+ // the request's body even though the Client's Do returned.
+ r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
+ r.ResetBody()
+ if err := r.Error; err != nil {
+ return awserr.New(ErrCodeSerialization,
+ "failed to prepare body for retry", err)
+
+ }
+
+ // Closing response body to ensure that no response body is leaked
+ // between retry attempts.
+ if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
+ r.HTTPResponse.Body.Close()
+ }
+
+ return nil
+}
+
+func (r *Request) sendRequest() (sendErr error) {
+ defer r.Handlers.CompleteAttempt.Run(r)
+
+ r.Retryable = nil
+ r.Handlers.Send.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Send Request",
+ fmtAttemptCount(r.RetryCount, r.MaxRetries()),
+ r.Error)
+ return r.Error
+ }
+
+ r.Handlers.UnmarshalMeta.Run(r)
+ r.Handlers.ValidateResponse.Run(r)
+ if r.Error != nil {
+ r.Handlers.UnmarshalError.Run(r)
+ debugLogReqError(r, "Validate Response",
+ fmtAttemptCount(r.RetryCount, r.MaxRetries()),
+ r.Error)
+ return r.Error
+ }
+
+ r.Handlers.Unmarshal.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Unmarshal Response",
+ fmtAttemptCount(r.RetryCount, r.MaxRetries()),
+ r.Error)
+ return r.Error
+ }
+
+ return nil
+}
+
+// copy will copy a request which will allow for local manipulation of the
+// request.
+func (r *Request) copy() *Request {
+ req := &Request{}
+ *req = *r
+ req.Handlers = r.Handlers.Copy()
+ op := *r.Operation
+ req.Operation = &op
+ return req
+}
+
+// AddToUserAgent adds the string to the end of the request's current user agent.
+func AddToUserAgent(r *Request, s string) {
+ curUA := r.HTTPRequest.Header.Get("User-Agent")
+ if len(curUA) > 0 {
+ s = curUA + " " + s
+ }
+ r.HTTPRequest.Header.Set("User-Agent", s)
+}
+
+// SanitizeHostForHeader removes the default port from the host and updates request.Host.
+func SanitizeHostForHeader(r *http.Request) {
+ host := getHost(r)
+ port := portOnly(host)
+ if port != "" && isDefaultPort(r.URL.Scheme, port) {
+ r.Host = stripPort(host)
+ }
+}
+
+// Returns host from request
+func getHost(r *http.Request) string {
+ if r.Host != "" {
+ return r.Host
+ }
+
+ if r.URL == nil {
+ return ""
+ }
+
+ return r.URL.Host
+}
+
+// Hostname returns u.Host, without any port number.
+//
+// If Host is an IPv6 literal with a port number, Hostname returns the
+// IPv6 literal without the square brackets. IPv6 literals may include
+// a zone identifier.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func stripPort(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return hostport
+ }
+ if i := strings.IndexByte(hostport, ']'); i != -1 {
+ return strings.TrimPrefix(hostport[:i], "[")
+ }
+ return hostport[:colon]
+}
+
+// Port returns the port part of u.Host, without the leading colon.
+// If u.Host doesn't contain a port, Port returns an empty string.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func portOnly(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return ""
+ }
+ if i := strings.Index(hostport, "]:"); i != -1 {
+ return hostport[i+len("]:"):]
+ }
+ if strings.Contains(hostport, "]") {
+ return ""
+ }
+ return hostport[colon+len(":"):]
+}
+
+// Returns true if the specified URI is using the standard port
+// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs)
+func isDefaultPort(scheme, port string) bool {
+ if port == "" {
+ return true
+ }
+
+ lowerCaseScheme := strings.ToLower(scheme)
+ if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") {
+ return true
+ }
+
+ return false
+}
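
Most callers never touch this machinery directly; it surfaces through the generated service clients. A hedged sketch of presigning an object download with the COS S3 client (endpoint, region, bucket, and key are placeholders, and credential configuration is elided, so `Presign` may return an error without credentials):

```go
package main

import (
	"fmt"
	"time"

	"github.com/IBM/ibm-cos-sdk-go/aws"
	"github.com/IBM/ibm-cos-sdk-go/aws/session"
	"github.com/IBM/ibm-cos-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{
		Region:   aws.String("us-south"),                                           // placeholder
		Endpoint: aws.String("https://s3.us-south.cloud-object-storage.appdomain.cloud"), // placeholder
	}))
	svc := s3.New(sess)

	// GetObjectRequest builds a *request.Request without sending it;
	// Presign then signs it into a time-limited URL.
	req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	})
	url, err := req.Presign(15 * time.Minute)
	if err != nil {
		fmt.Println("presign failed:", err)
		return
	}
	fmt.Println(url)
}
```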
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/request_1_8.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/request_1_8.go
new file mode 100644
index 0000000000000..b14a6f0c86a08
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/request_1_8.go
@@ -0,0 +1,37 @@
+//go:build go1.8
+// +build go1.8
+
+package request
+
+import (
+ "net/http"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+)
+
+// NoBody is an http.NoBody reader instructing the Go HTTP client to not
+// include any body in the HTTP request.
+var NoBody = http.NoBody
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+//
+// ResetBody will automatically be called by the SDK's build handler, but if
+// the request is being used directly ResetBody must be called before the request
+// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
+// call ResetBody.
+//
+// Will also set the Go 1.8's http.Request.GetBody member to allow retrying
+// PUT/POST redirects.
+func (r *Request) ResetBody() {
+ body, err := r.getNextRequestBody()
+ if err != nil {
+ r.Error = awserr.New(ErrCodeSerialization,
+ "failed to reset request body", err)
+ return
+ }
+
+ r.HTTPRequest.Body = body
+ r.HTTPRequest.GetBody = r.getNextRequestBody
+}
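
`http.NoBody` is the standard library's sentinel for "non-nil ReadCloser, but genuinely no body", which is what lets `ResetBody` mark empty bodies without assigning nil to an exported field. A tiny standard-library sketch:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// http.NoBody reads as immediately-EOF...
	b, _ := io.ReadAll(http.NoBody)
	fmt.Println(len(b)) // 0

	// ...while remaining a non-nil ReadCloser, so it can be assigned to
	// Request.Body without risking nil dereferences, which is how
	// ResetBody handles empty request bodies.
	req, _ := http.NewRequest("GET", "https://example.com", nil)
	req.Body = http.NoBody
	fmt.Println(req.Body != nil) // true
}
```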
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/request_context.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/request_context.go
new file mode 100644
index 0000000000000..17a661a05a99e
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/request_context.go
@@ -0,0 +1,15 @@
+//go:build go1.7
+// +build go1.7
+
+package request
+
+import "github.com/IBM/ibm-cos-sdk-go/aws"
+
+// setContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+//
+// Creates shallow copy of the http.Request with the WithContext method.
+func setRequestContext(r *Request, ctx aws.Context) {
+ r.context = ctx
+ r.HTTPRequest = r.HTTPRequest.WithContext(ctx)
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/request_pagination.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/request_pagination.go
new file mode 100644
index 0000000000000..cbe280a0cd101
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/request_pagination.go
@@ -0,0 +1,266 @@
+package request
+
+import (
+ "reflect"
+ "sync/atomic"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awsutil"
+)
+
+// A Pagination provides pagination of SDK API operations which are paginatable.
+// Generally you should not use this type directly, but use the "Pages" API
+// operations methods to automatically perform pagination for you, such as the
+// "S3.ListObjectsPages" and "S3.ListObjectsPagesWithContext" methods.
+//
+// Pagination differs from a Paginator type in that pagination is the type that
+// does the pagination between API operations, and Paginator defines the
+// configuration that will be used per page request.
+//
+// for p.Next() {
+// data := p.Page().(*s3.ListObjectsOutput)
+// // process the page's data
+// // ...
+// // break out of loop to stop fetching additional pages
+// }
+//
+// return p.Err()
+//
+// See service client API operation Pages methods for examples of how the SDK will
+// use the Pagination type.
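+//
+// A hedged construction sketch; the service client, operation, and input type
+// below are hypothetical placeholders, not real SDK symbols:
+//
+//	p := request.Pagination{
+//		NewRequest: func() (*request.Request, error) {
+//			req, _ := svc.ListThingsRequest(&ListThingsInput{})
+//			return req, nil
+//		},
+//	}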
+type Pagination struct {
+ // Function to return a Request value for each pagination request.
+ // Any configuration or handlers that need to be applied to the request
+ // prior to getting the next page should be done here before the request
+ // returned.
+ //
+ // NewRequest should always be built from the same API operations. It is
+ // undefined if different API operations are returned on subsequent calls.
+ NewRequest func() (*Request, error)
+ // EndPageOnSameToken, when enabled, will allow the paginator to stop on
+	// tokens that are the same as its previous tokens.
+ EndPageOnSameToken bool
+
+ started bool
+ prevTokens []interface{}
+ nextTokens []interface{}
+
+ err error
+ curPage interface{}
+}
+
+// HasNextPage will return true if Pagination is able to determine that the API
+// operation has additional pages. False will be returned if there are no more
+// pages remaining.
+//
+// Will always return true if Next has not been called yet.
+func (p *Pagination) HasNextPage() bool {
+ if !p.started {
+ return true
+ }
+
+ hasNextPage := len(p.nextTokens) != 0
+ if p.EndPageOnSameToken {
+ return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens)
+ }
+ return hasNextPage
+}
+
+// Err returns the error Pagination encountered when retrieving the next page.
+func (p *Pagination) Err() error {
+ return p.err
+}
+
+// Page returns the current page. Page should only be called after a successful
+// call to Next. It is undefined what Page will return if Page is called after
+// Next returns false.
+func (p *Pagination) Page() interface{} {
+ return p.curPage
+}
+
+// Next will attempt to retrieve the next page for the API operation. When a
+// page is retrieved, true will be returned. If the page cannot be retrieved,
+// or there are no more pages, false will be returned.
+//
+// Use the Page method to retrieve the current page data. The data will need
+// to be cast to the API operation's output type.
+//
+// Use the Err method to determine if an error occurred if Page returns false.
+func (p *Pagination) Next() bool {
+ if !p.HasNextPage() {
+ return false
+ }
+
+ req, err := p.NewRequest()
+ if err != nil {
+ p.err = err
+ return false
+ }
+
+ if p.started {
+ for i, intok := range req.Operation.InputTokens {
+ awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i])
+ }
+ }
+ p.started = true
+
+ err = req.Send()
+ if err != nil {
+ p.err = err
+ return false
+ }
+
+ p.prevTokens = p.nextTokens
+ p.nextTokens = req.nextPageTokens()
+ p.curPage = req.Data
+
+ return true
+}
+
+// A Paginator is the configuration data that defines how an API operation
+// should be paginated. This type is used by the API service models to define
+// the generated pagination config for service APIs.
+//
+// The Pagination type is what provides iteration between pages of an API. It
+// is only used to store the token metadata the SDK should use for performing
+// pagination.
+type Paginator struct {
+ InputTokens []string
+ OutputTokens []string
+ LimitToken string
+ TruncationToken string
+}
+
+// nextPageTokens returns the tokens to use when asking for the next page of data.
+func (r *Request) nextPageTokens() []interface{} {
+ if r.Operation.Paginator == nil {
+ return nil
+ }
+ if r.Operation.TruncationToken != "" {
+ tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
+ if len(tr) == 0 {
+ return nil
+ }
+
+ switch v := tr[0].(type) {
+ case *bool:
+ if !aws.BoolValue(v) {
+ return nil
+ }
+ case bool:
+ if !v {
+ return nil
+ }
+ }
+ }
+
+ tokens := []interface{}{}
+ tokenAdded := false
+ for _, outToken := range r.Operation.OutputTokens {
+ vs, _ := awsutil.ValuesAtPath(r.Data, outToken)
+ if len(vs) == 0 {
+ tokens = append(tokens, nil)
+ continue
+ }
+ v := vs[0]
+
+ switch tv := v.(type) {
+ case *string:
+ if len(aws.StringValue(tv)) == 0 {
+ tokens = append(tokens, nil)
+ continue
+ }
+ case string:
+ if len(tv) == 0 {
+ tokens = append(tokens, nil)
+ continue
+ }
+ }
+
+ tokenAdded = true
+ tokens = append(tokens, v)
+ }
+ if !tokenAdded {
+ return nil
+ }
+
+ return tokens
+}
+
+// Ensure a deprecated item is only logged once instead of each time it's used.
+func logDeprecatedf(logger aws.Logger, flag *int32, msg string) {
+ if logger == nil {
+ return
+ }
+ if atomic.CompareAndSwapInt32(flag, 0, 1) {
+ logger.Log(msg)
+ }
+}
+
+var (
+ logDeprecatedHasNextPage int32
+ logDeprecatedNextPage int32
+ logDeprecatedEachPage int32
+)
+
+// HasNextPage returns true if this request has more pages of data available.
+//
+// Deprecated: Use the Pagination type for configurable pagination of API operations.
+func (r *Request) HasNextPage() bool {
+ logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage,
+ "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+ return len(r.nextPageTokens()) > 0
+}
+
+// NextPage returns a new Request that can be executed to return the next
+// page of result data. Call .Send() on this request to execute it.
+//
+// Deprecated: Use the Pagination type for configurable pagination of API operations.
+func (r *Request) NextPage() *Request {
+ logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage,
+ "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+ tokens := r.nextPageTokens()
+ if len(tokens) == 0 {
+ return nil
+ }
+
+ data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
+ nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
+ for i, intok := range nr.Operation.InputTokens {
+ awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
+ }
+ return nr
+}
+
+// EachPage iterates over each page of a paginated request object. The fn
+// parameter should be a function with the following sample signature:
+//
+// func(page *T, lastPage bool) bool {
+// return true // return false to stop iterating
+// }
+//
+// Where "T" is the structure type matching the output structure of the given
+// operation. For example, a request object generated by
+// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
+// as the structure "T". The lastPage value represents whether the page is
+// the last page of data or not. The return value of this function should
+// return true to keep iterating or false to stop.
+//
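+// A hedged usage sketch; the output type below is a hypothetical placeholder:
+//
+//	err := req.EachPage(func(data interface{}, lastPage bool) bool {
+//		page := data.(*ListThingsOutput)
+//		_ = page // process the page
+//		return true // return false to stop iterating
+//	})
+//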
+// Deprecated: Use the Pagination type for configurable pagination of API operations.
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
+ logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage,
+ "Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+ for page := r; page != nil; page = page.NextPage() {
+ if err := page.Send(); err != nil {
+ return err
+ }
+ if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
+ return page.Error
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/retryer.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/retryer.go
new file mode 100644
index 0000000000000..4ca08cd43e6f1
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/retryer.go
@@ -0,0 +1,309 @@
+package request
+
+import (
+ "net"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+)
+
+// Retryer provides the interface to drive the SDK's request retry behavior. The
+// Retryer implementation is responsible for implementing exponential backoff,
+// and determine if a request API error should be retried.
+//
+// client.DefaultRetryer is the SDK's default implementation of the Retryer. It
+// uses the Request.IsErrorRetryable and Request.IsErrorThrottle methods to
+// determine if the request is retried.
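+//
+// A minimal custom Retryer sketch (illustrative only, assuming a fixed delay
+// is acceptable for the caller):
+//
+//	type fixedRetryer struct{}
+//
+//	func (fixedRetryer) RetryRules(*request.Request) time.Duration { return 100 * time.Millisecond }
+//	func (fixedRetryer) ShouldRetry(r *request.Request) bool       { return r.IsErrorRetryable() }
+//	func (fixedRetryer) MaxRetries() int                           { return 3 }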
+type Retryer interface {
+ // RetryRules return the retry delay that should be used by the SDK before
+ // making another request attempt for the failed request.
+ RetryRules(*Request) time.Duration
+
+ // ShouldRetry returns if the failed request is retryable.
+ //
+ // Implementations may consider request attempt count when determining if a
+ // request is retryable, but the SDK will use MaxRetries to limit the
+	// number of attempts made for a request.
+ ShouldRetry(*Request) bool
+
+ // MaxRetries is the number of times a request may be retried before
+ // failing.
+ MaxRetries() int
+}
+
+// WithRetryer sets a Retryer value to the given Config returning the Config
+// value for chaining. The value must not be nil.
+func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
+ if retryer == nil {
+ if cfg.Logger != nil {
+ cfg.Logger.Log("ERROR: Request.WithRetryer called with nil retryer. Replacing with retry disabled Retryer.")
+ }
+ retryer = noOpRetryer{}
+ }
+ cfg.Retryer = retryer
+ return cfg
+
+}
+// noOpRetryer is an internal no-op retryer used when a request is created
+// without a retryer.
+//
+// Provides a retryer that performs no retries.
+// It should be used when we do not want retries to be performed.
+type noOpRetryer struct{}
+
+// MaxRetries returns the maximum number of retries the service will make for
+// an individual API request; for noOpRetryer, MaxRetries will always be zero.
+func (d noOpRetryer) MaxRetries() int {
+ return 0
+}
+
+// ShouldRetry will always return false for NoOpRetryer, as it should never retry.
+func (d noOpRetryer) ShouldRetry(_ *Request) bool {
+ return false
+}
+
+// RetryRules returns the delay duration before retrying this request again;
+// since NoOpRetryer does not retry, RetryRules always returns 0.
+func (d noOpRetryer) RetryRules(_ *Request) time.Duration {
+ return 0
+}
+
+// retryableCodes is a collection of service response codes which are retry-able
+// without any further action.
+var retryableCodes = map[string]struct{}{
+ ErrCodeRequestError: {},
+ "RequestTimeout": {},
+ ErrCodeResponseTimeout: {},
+ "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout
+}
+
+var throttleCodes = map[string]struct{}{
+ "ProvisionedThroughputExceededException": {},
+ "ThrottledException": {}, // SNS, XRay, ResourceGroupsTagging API
+ "Throttling": {},
+ "ThrottlingException": {},
+ "RequestLimitExceeded": {},
+ "RequestThrottled": {},
+ "RequestThrottledException": {},
+ "TooManyRequestsException": {}, // Lambda functions
+ "PriorRequestNotComplete": {}, // Route53
+ "TransactionInProgressException": {},
+ "EC2ThrottledException": {}, // EC2
+}
+
+// credsExpiredCodes is a collection of error codes which signify the credentials
+// need to be refreshed. Expired tokens require refreshing of credentials, and
+// resigning before the request can be retried.
+var credsExpiredCodes = map[string]struct{}{
+ "ExpiredToken": {},
+ "ExpiredTokenException": {},
+ "RequestExpired": {}, // EC2 Only
+}
+
+func isCodeThrottle(code string) bool {
+ _, ok := throttleCodes[code]
+ return ok
+}
+
+func isCodeRetryable(code string) bool {
+ if _, ok := retryableCodes[code]; ok {
+ return true
+ }
+
+ return isCodeExpiredCreds(code)
+}
+
+func isCodeExpiredCreds(code string) bool {
+ _, ok := credsExpiredCodes[code]
+ return ok
+}
+
+var validParentCodes = map[string]struct{}{
+ ErrCodeSerialization: {},
+ ErrCodeRead: {},
+}
+
+func isNestedErrorRetryable(parentErr awserr.Error) bool {
+ if parentErr == nil {
+ return false
+ }
+
+ if _, ok := validParentCodes[parentErr.Code()]; !ok {
+ return false
+ }
+
+ err := parentErr.OrigErr()
+ if err == nil {
+ return false
+ }
+
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeRetryable(aerr.Code())
+ }
+
+ if t, ok := err.(temporary); ok {
+ return t.Temporary() || isErrConnectionReset(err)
+ }
+
+ return isErrConnectionReset(err)
+}
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if error is nil.
+func IsErrorRetryable(err error) bool {
+ if err == nil {
+ return false
+ }
+ return shouldRetryError(err)
+}
+
+type temporary interface {
+ Temporary() bool
+}
+
+func shouldRetryError(origErr error) bool {
+ switch err := origErr.(type) {
+ case awserr.Error:
+ if err.Code() == CanceledErrorCode {
+ return false
+ }
+ if isNestedErrorRetryable(err) {
+ return true
+ }
+
+ origErr := err.OrigErr()
+ var shouldRetry bool
+ if origErr != nil {
+ shouldRetry = shouldRetryError(origErr)
+ if err.Code() == ErrCodeRequestError && !shouldRetry {
+ return false
+ }
+ }
+ if isCodeRetryable(err.Code()) {
+ return true
+ }
+ return shouldRetry
+
+ case *url.Error:
+ if strings.Contains(err.Error(), "connection refused") {
+ // Refused connections should be retried as the service may not yet
+ // be running on the port. Go TCP dial considers refused
+ // connections as not temporary.
+ return true
+ }
+ // *url.Error only implements Temporary after golang 1.6 but since
+ // url.Error only wraps the error:
+ return shouldRetryError(err.Err)
+
+ case temporary:
+ if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" {
+ return true
+ }
+ // If the error is temporary, we want to allow continuation of the
+ // retry process
+ return err.Temporary() || isErrConnectionReset(origErr)
+
+ case nil:
+ // `awserr.Error.OrigErr()` can be nil, meaning there was an error but
+ // because we don't know the cause, it is marked as retryable. See
+ // TestRequest4xxUnretryable for an example.
+ return true
+
+ default:
+ switch err.Error() {
+ case "net/http: request canceled",
+ "net/http: request canceled while waiting for connection":
+ // known 1.5 error case when an http request is cancelled
+ return false
+ }
+ // here we don't know the error; so we allow a retry.
+ return true
+ }
+}
+
+// IsErrorThrottle returns whether the error is to be throttled based on its code.
+// Returns false if error is nil.
+func IsErrorThrottle(err error) bool {
+ if aerr, ok := err.(awserr.Error); ok && aerr != nil {
+ return isCodeThrottle(aerr.Code())
+ }
+ return false
+}
+
+// IsErrorExpiredCreds returns whether the error code is a credential expiry
+// error. Returns false if error is nil.
+func IsErrorExpiredCreds(err error) bool {
+ if aerr, ok := err.(awserr.Error); ok && aerr != nil {
+ return isCodeExpiredCreds(aerr.Code())
+ }
+ return false
+}
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorRetryable
+func (r *Request) IsErrorRetryable() bool {
+ if isErrCode(r.Error, r.RetryErrorCodes) {
+ return true
+ }
+
+ // HTTP response status code 501 should not be retried.
+ // 501 represents Not Implemented which means the request method is not
+ // supported by the server and cannot be handled.
+ if r.HTTPResponse != nil {
+ // HTTP response status code 500 represents internal server error and
+ // should be retried without any throttle.
+ if r.HTTPResponse.StatusCode == 500 {
+ return true
+ }
+ }
+ return IsErrorRetryable(r.Error)
+}
+
+// IsErrorThrottle returns whether the error is to be throttled based on its
+// code. Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorThrottle
+func (r *Request) IsErrorThrottle() bool {
+ if isErrCode(r.Error, r.ThrottleErrorCodes) {
+ return true
+ }
+
+ if r.HTTPResponse != nil {
+ switch r.HTTPResponse.StatusCode {
+ case
+ 429, // error caused due to too many requests
+ 502, // Bad Gateway error should be throttled
+ 503, // caused when service is unavailable
+ 504: // error occurred due to gateway timeout
+ return true
+ }
+ }
+
+ return IsErrorThrottle(r.Error)
+}
+
+func isErrCode(err error, codes []string) bool {
+ if aerr, ok := err.(awserr.Error); ok && aerr != nil {
+ for _, code := range codes {
+ if code == aerr.Code() {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+// IsErrorExpired returns whether the error code is a credential expiry error.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorExpiredCreds
+func (r *Request) IsErrorExpired() bool {
+ return IsErrorExpiredCreds(r.Error)
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/timeout_read_closer.go
new file mode 100644
index 0000000000000..87a37232b9c6a
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/timeout_read_closer.go
@@ -0,0 +1,94 @@
+package request
+
+import (
+ "io"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+)
+
+var timeoutErr = awserr.New(
+ ErrCodeResponseTimeout,
+ "read on body has reached the timeout limit",
+ nil,
+)
+
+type readResult struct {
+ n int
+ err error
+}
+
+// timeoutReadCloser will handle body reads that take too long.
+// We will return an ErrCodeResponseTimeout error if a timeout occurs.
+type timeoutReadCloser struct {
+ reader io.ReadCloser
+ duration time.Duration
+}
+
+// Read will spin off a goroutine to call the reader's Read method. We will
+// select on the timer's channel or the read's channel. Whoever completes first
+// will be returned.
+func (r *timeoutReadCloser) Read(b []byte) (int, error) {
+ timer := time.NewTimer(r.duration)
+ c := make(chan readResult, 1)
+
+ go func() {
+ n, err := r.reader.Read(b)
+ timer.Stop()
+ c <- readResult{n: n, err: err}
+ }()
+
+ select {
+ case data := <-c:
+ return data.n, data.err
+ case <-timer.C:
+ return 0, timeoutErr
+ }
+}
+
+func (r *timeoutReadCloser) Close() error {
+ return r.reader.Close()
+}
+
+const (
+ // HandlerResponseTimeout is what we use to signify the name of the
+ // response timeout handler.
+ HandlerResponseTimeout = "ResponseTimeoutHandler"
+)
+
+// adaptToResponseTimeoutError is a handler that will replace any top-level
+// error with an ErrCodeResponseTimeout error, if that is its underlying cause.
+func adaptToResponseTimeoutError(req *Request) {
+ if err, ok := req.Error.(awserr.Error); ok {
+ aerr, ok := err.OrigErr().(awserr.Error)
+ if ok && aerr.Code() == ErrCodeResponseTimeout {
+ req.Error = aerr
+ }
+ }
+}
+
+// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer.
+// This will allow for per read timeouts. If a timeout occurred, we will return the
+// ErrCodeResponseTimeout.
+//
+// svc.PutObjectWithContext(ctx, params, request.WithResponseReadTimeout(30 * time.Second))
+func WithResponseReadTimeout(duration time.Duration) Option {
+ return func(r *Request) {
+
+ var timeoutHandler = NamedHandler{
+ HandlerResponseTimeout,
+ func(req *Request) {
+ req.HTTPResponse.Body = &timeoutReadCloser{
+ reader: req.HTTPResponse.Body,
+ duration: duration,
+ }
+ }}
+
+ // remove the handler so we are not stomping over any new durations.
+ r.Handlers.Send.RemoveByName(HandlerResponseTimeout)
+ r.Handlers.Send.PushBackNamed(timeoutHandler)
+
+ r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError)
+ r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError)
+ }
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/validation.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/validation.go
new file mode 100644
index 0000000000000..3ffdf5915b90c
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/validation.go
@@ -0,0 +1,286 @@
+package request
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+)
+
+const (
+ // InvalidParameterErrCode is the error code for invalid parameters errors
+ InvalidParameterErrCode = "InvalidParameter"
+ // ParamRequiredErrCode is the error code for required parameter errors
+ ParamRequiredErrCode = "ParamRequiredError"
+ // ParamMinValueErrCode is the error code for fields with too low of a
+ // number value.
+ ParamMinValueErrCode = "ParamMinValueError"
+ // ParamMinLenErrCode is the error code for fields without enough elements.
+ ParamMinLenErrCode = "ParamMinLenError"
+ // ParamMaxLenErrCode is the error code for value being too long.
+ ParamMaxLenErrCode = "ParamMaxLenError"
+
+ // ParamFormatErrCode is the error code for a field with invalid
+ // format or characters.
+ ParamFormatErrCode = "ParamFormatInvalidError"
+)
+
+// Validator provides a way for types to perform validation logic on their
+// input values that external code can use to determine if a type's values
+// are valid.
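+//
+// A hedged sketch of a type satisfying Validator; MyInput and its Name field
+// are hypothetical:
+//
+//	func (i *MyInput) Validate() error {
+//		params := request.ErrInvalidParams{Context: "MyInput"}
+//		if i.Name == nil {
+//			params.Add(request.NewErrParamRequired("Name"))
+//		}
+//		if params.Len() > 0 {
+//			return params
+//		}
+//		return nil
+//	}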
+type Validator interface {
+ Validate() error
+}
+
+// An ErrInvalidParams provides wrapping of invalid parameter errors found when
+// validating API operation input parameters.
+type ErrInvalidParams struct {
+ // Context is the base context of the invalid parameter group.
+ Context string
+ errs []ErrInvalidParam
+}
+
+// Add adds a new invalid parameter error to the collection of invalid
+// parameters. The context of the invalid parameter will be updated to reflect
+// this collection.
+func (e *ErrInvalidParams) Add(err ErrInvalidParam) {
+ err.SetContext(e.Context)
+ e.errs = append(e.errs, err)
+}
+
+// AddNested adds the invalid parameter errors from another ErrInvalidParams
+// value into this collection. The nested errors will have their nested context
+// updated and base context to reflect the merging.
+//
+// Use for nested validations errors.
+func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
+ for _, err := range nested.errs {
+ err.SetContext(e.Context)
+ err.AddNestedContext(nestedCtx)
+ e.errs = append(e.errs, err)
+ }
+}
+
+// Len returns the number of invalid parameter errors
+func (e ErrInvalidParams) Len() int {
+ return len(e.errs)
+}
+
+// Code returns the code of the error
+func (e ErrInvalidParams) Code() string {
+ return InvalidParameterErrCode
+}
+
+// Message returns the message of the error
+func (e ErrInvalidParams) Message() string {
+ return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
+}
+
+// Error returns the string formatted form of the invalid parameters.
+func (e ErrInvalidParams) Error() string {
+ w := &bytes.Buffer{}
+ fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
+
+ for _, err := range e.errs {
+ fmt.Fprintf(w, "- %s\n", err.Message())
+ }
+
+ return w.String()
+}
+
+// OrigErr returns the invalid parameters as a awserr.BatchedErrors value
+func (e ErrInvalidParams) OrigErr() error {
+ return awserr.NewBatchError(
+ InvalidParameterErrCode, e.Message(), e.OrigErrs())
+}
+
+// OrigErrs returns a slice of the invalid parameters
+func (e ErrInvalidParams) OrigErrs() []error {
+ errs := make([]error, len(e.errs))
+ for i := 0; i < len(errs); i++ {
+ errs[i] = e.errs[i]
+ }
+
+ return errs
+}
+
+// An ErrInvalidParam represents an invalid parameter error type.
+type ErrInvalidParam interface {
+ awserr.Error
+
+ // Field name the error occurred on.
+ Field() string
+
+ // SetContext updates the context of the error.
+ SetContext(string)
+
+ // AddNestedContext updates the error's context to include a nested level.
+ AddNestedContext(string)
+}
+
+type errInvalidParam struct {
+ context string
+ nestedContext string
+ field string
+ code string
+ msg string
+}
+
+// Code returns the error code for the type of invalid parameter.
+func (e *errInvalidParam) Code() string {
+ return e.code
+}
+
+// Message returns the reason the parameter was invalid, and its context.
+func (e *errInvalidParam) Message() string {
+ return fmt.Sprintf("%s, %s.", e.msg, e.Field())
+}
+
+// Error returns the string version of the invalid parameter error.
+func (e *errInvalidParam) Error() string {
+ return fmt.Sprintf("%s: %s", e.code, e.Message())
+}
+
+// OrigErr returns nil. Implemented for the awserr.Error interface.
+func (e *errInvalidParam) OrigErr() error {
+ return nil
+}
+
+// Field returns the field and the context in which the error occurred.
+func (e *errInvalidParam) Field() string {
+ field := e.context
+ if len(field) > 0 {
+ field += "."
+ }
+ if len(e.nestedContext) > 0 {
+ field += fmt.Sprintf("%s.", e.nestedContext)
+ }
+ field += e.field
+
+ return field
+}
+
+// SetContext updates the base context of the error.
+func (e *errInvalidParam) SetContext(ctx string) {
+ e.context = ctx
+}
+
+// AddNestedContext prepends a context to the field's path.
+func (e *errInvalidParam) AddNestedContext(ctx string) {
+ if len(e.nestedContext) == 0 {
+ e.nestedContext = ctx
+ } else {
+ e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+ }
+}
+
+// An ErrParamRequired represents a required parameter error.
+type ErrParamRequired struct {
+ errInvalidParam
+}
+
+// NewErrParamRequired creates a new required parameter error.
+func NewErrParamRequired(field string) *ErrParamRequired {
+ return &ErrParamRequired{
+ errInvalidParam{
+ code: ParamRequiredErrCode,
+ field: field,
+			msg:   "missing required field",
+ },
+ }
+}
+
+// An ErrParamMinValue represents a minimum value parameter error.
+type ErrParamMinValue struct {
+ errInvalidParam
+ min float64
+}
+
+// NewErrParamMinValue creates a new minimum value parameter error.
+func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
+ return &ErrParamMinValue{
+ errInvalidParam: errInvalidParam{
+ code: ParamMinValueErrCode,
+ field: field,
+ msg: fmt.Sprintf("minimum field value of %v", min),
+ },
+ min: min,
+ }
+}
+
+// MinValue returns the field's required minimum value.
+//
+// float64 is returned for both int and float min values.
+func (e *ErrParamMinValue) MinValue() float64 {
+ return e.min
+}
+
+// An ErrParamMinLen represents a minimum length parameter error.
+type ErrParamMinLen struct {
+ errInvalidParam
+ min int
+}
+
+// NewErrParamMinLen creates a new minimum length parameter error.
+func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
+ return &ErrParamMinLen{
+ errInvalidParam: errInvalidParam{
+ code: ParamMinLenErrCode,
+ field: field,
+ msg: fmt.Sprintf("minimum field size of %v", min),
+ },
+ min: min,
+ }
+}
+
+// MinLen returns the field's required minimum length.
+func (e *ErrParamMinLen) MinLen() int {
+ return e.min
+}
+
+// An ErrParamMaxLen represents a maximum length parameter error.
+type ErrParamMaxLen struct {
+ errInvalidParam
+ max int
+}
+
+// NewErrParamMaxLen creates a new maximum length parameter error.
+func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen {
+ return &ErrParamMaxLen{
+ errInvalidParam: errInvalidParam{
+ code: ParamMaxLenErrCode,
+ field: field,
+ msg: fmt.Sprintf("maximum size of %v, %v", max, value),
+ },
+ max: max,
+ }
+}
+
+// MaxLen returns the field's required maximum length.
+func (e *ErrParamMaxLen) MaxLen() int {
+ return e.max
+}
+
+// An ErrParamFormat represents an invalid format parameter error.
+type ErrParamFormat struct {
+ errInvalidParam
+ format string
+}
+
+// NewErrParamFormat creates a new invalid format parameter error.
+func NewErrParamFormat(field string, format, value string) *ErrParamFormat {
+ return &ErrParamFormat{
+ errInvalidParam: errInvalidParam{
+ code: ParamFormatErrCode,
+ field: field,
+ msg: fmt.Sprintf("format %v, %v", format, value),
+ },
+ format: format,
+ }
+}
+
+// Format returns the field's required format.
+func (e *ErrParamFormat) Format() string {
+ return e.format
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/waiter.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/waiter.go
new file mode 100644
index 0000000000000..e3d0f16b335f6
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/request/waiter.go
@@ -0,0 +1,295 @@
+package request
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awsutil"
+)
+
+// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when
+// the waiter's max attempts have been exhausted.
+const WaiterResourceNotReadyErrorCode = "ResourceNotReady"
+
+// A WaiterOption is a function that will update the Waiter value's fields to
+// configure the waiter.
+type WaiterOption func(*Waiter)
+
+// WithWaiterMaxAttempts returns a waiter option which sets the maximum number
+// of times the waiter should attempt to check the resource for the target
+// state.
+func WithWaiterMaxAttempts(max int) WaiterOption {
+ return func(w *Waiter) {
+ w.MaxAttempts = max
+ }
+}
+
+// WaiterDelay will return a delay the waiter should pause between attempts to
+// check the resource state.
+//
+// Attempt is the number of attempts the Waiter has made checking the resource
+// state.
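+//
+// For example, a hedged sketch of a simple linear backoff delayer that could
+// be passed to WithWaiterDelay:
+//
+//	delayer := request.WaiterDelay(func(attempt int) time.Duration {
+//		return time.Duration(attempt) * time.Second
+//	})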
+type WaiterDelay func(attempt int) time.Duration
+
+// ConstantWaiterDelay returns a WaiterDelay that will always return a constant
+// delay the waiter should use between attempts. It ignores the number of
+// attempts made.
+func ConstantWaiterDelay(delay time.Duration) WaiterDelay {
+ return func(attempt int) time.Duration {
+ return delay
+ }
+}
+
+// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in.
+func WithWaiterDelay(delayer WaiterDelay) WaiterOption {
+ return func(w *Waiter) {
+ w.Delay = delayer
+ }
+}
+
+// WithWaiterLogger returns a waiter option to set the logger a waiter
+// should use to log warnings and errors to.
+func WithWaiterLogger(logger aws.Logger) WaiterOption {
+ return func(w *Waiter) {
+ w.Logger = logger
+ }
+}
+
+// WithWaiterRequestOptions returns a waiter option setting the request
+// options for each request the waiter makes. Appends to waiter's request
+// options already set.
+func WithWaiterRequestOptions(opts ...Option) WaiterOption {
+ return func(w *Waiter) {
+ w.RequestOptions = append(w.RequestOptions, opts...)
+ }
+}
+
+// A Waiter provides the functionality to perform a blocking call which will
+// wait for a resource state to be satisfied by a service.
+//
+// This type should not be used directly. The API operations provided in the
+// service packages prefixed with "WaitUntil" should be used instead.
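+//
+// For illustration, a hedged sketch of configuring a "WaitUntil" style
+// operation with waiter options; the service call below is a hypothetical
+// placeholder:
+//
+//	err := svc.WaitUntilThingExistsWithContext(ctx, input,
+//		request.WithWaiterMaxAttempts(10),
+//		request.WithWaiterDelay(request.ConstantWaiterDelay(5*time.Second)),
+//	)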
+type Waiter struct {
+ Name string
+ Acceptors []WaiterAcceptor
+ Logger aws.Logger
+
+ MaxAttempts int
+ Delay WaiterDelay
+
+ RequestOptions []Option
+ NewRequest func([]Option) (*Request, error)
+ SleepWithContext func(aws.Context, time.Duration) error
+}
+
+// ApplyOptions updates the waiter with the list of waiter options provided.
+func (w *Waiter) ApplyOptions(opts ...WaiterOption) {
+ for _, fn := range opts {
+ fn(w)
+ }
+}
+
+// WaiterState are states the waiter uses based on WaiterAcceptor definitions
+// to identify if the resource state the waiter is waiting on has occurred.
+type WaiterState int
+
+// String returns the string representation of the waiter state.
+func (s WaiterState) String() string {
+ switch s {
+ case SuccessWaiterState:
+ return "success"
+ case FailureWaiterState:
+ return "failure"
+ case RetryWaiterState:
+ return "retry"
+ default:
+ return "unknown waiter state"
+ }
+}
+
+// States the waiter acceptors will use to identify target resource states.
+const (
+ SuccessWaiterState WaiterState = iota // waiter successful
+ FailureWaiterState // waiter failed
+ RetryWaiterState // waiter needs to be retried
+)
+
+// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor
+// definition's Expected attribute.
+type WaiterMatchMode int
+
+// Modes the waiter will use when inspecting API response to identify target
+// resource states.
+const (
+ PathAllWaiterMatch WaiterMatchMode = iota // match on all paths
+ PathWaiterMatch // match on specific path
+ PathAnyWaiterMatch // match on any path
+ PathListWaiterMatch // match on list of paths
+ StatusWaiterMatch // match on status code
+ ErrorWaiterMatch // match on error
+)
+
+// String returns the string representation of the waiter match mode.
+func (m WaiterMatchMode) String() string {
+ switch m {
+ case PathAllWaiterMatch:
+ return "pathAll"
+ case PathWaiterMatch:
+ return "path"
+ case PathAnyWaiterMatch:
+ return "pathAny"
+ case PathListWaiterMatch:
+ return "pathList"
+ case StatusWaiterMatch:
+ return "status"
+ case ErrorWaiterMatch:
+ return "error"
+ default:
+ return "unknown waiter match mode"
+ }
+}
+
+// WaitWithContext will make requests for the API operation using NewRequest to
+// build API requests. The request's response will be compared against the
+// Waiter's Acceptors to determine the successful state of the resource the
+// waiter is inspecting.
+//
+// The passed in context must not be nil. If it is nil a panic will occur. The
+// Context will be used to cancel the waiter's pending requests and retry delays.
+// Use aws.BackgroundContext if no context is available.
+//
+// The waiter will continue until the target state defined by the Acceptors,
+// or the max attempts expires.
+//
+// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's
+// retryer ShouldRetry returns false. This normally will happen when the max
+// wait attempts expires.
+func (w Waiter) WaitWithContext(ctx aws.Context) error {
+ for attempt := 1; ; attempt++ {
+ req, err := w.NewRequest(w.RequestOptions)
+ if err != nil {
+ waiterLogf(w.Logger, "unable to create request %v", err)
+ return err
+ }
+ req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter"))
+ err = req.Send()
+
+ // See if any of the acceptors match the request's response, or error
+ for _, a := range w.Acceptors {
+ if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched {
+ return matchErr
+ }
+ }
+
+		// The Waiter should only check the resource state MaxAttempts times.
+		// This is here instead of in the for loop above to prevent delaying
+		// unnecessarily when the waiter will not retry.
+ if attempt == w.MaxAttempts {
+ break
+ }
+
+ // Delay to wait before inspecting the resource again
+ delay := w.Delay(attempt)
+ if sleepFn := req.Config.SleepDelay; sleepFn != nil {
+ // Support SleepDelay for backwards compatibility and testing
+ sleepFn(delay)
+ } else {
+ sleepCtxFn := w.SleepWithContext
+ if sleepCtxFn == nil {
+ sleepCtxFn = aws.SleepWithContext
+ }
+
+ if err := sleepCtxFn(ctx, delay); err != nil {
+ return awserr.New(CanceledErrorCode, "waiter context canceled", err)
+ }
+ }
+ }
+
+ return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil)
+}
+
+// A WaiterAcceptor provides the information needed to wait for an API operation
+// to complete.
+type WaiterAcceptor struct {
+ State WaiterState
+ Matcher WaiterMatchMode
+ Argument string
+ Expected interface{}
+}
+
+// match returns if the acceptor found a match with the passed in request
+// or error. True is returned if the acceptor made a match, error is returned
+// if there was an error attempting to perform the match.
+func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) {
+ result := false
+ var vals []interface{}
+
+ switch a.Matcher {
+ case PathAllWaiterMatch, PathWaiterMatch:
+ // Require all matches to be equal for result to match
+ vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+ if len(vals) == 0 {
+ break
+ }
+ result = true
+ for _, val := range vals {
+ if !awsutil.DeepEqual(val, a.Expected) {
+ result = false
+ break
+ }
+ }
+ case PathAnyWaiterMatch:
+ // Only a single match needs to equal for the result to match
+ vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+ for _, val := range vals {
+ if awsutil.DeepEqual(val, a.Expected) {
+ result = true
+ break
+ }
+ }
+ case PathListWaiterMatch:
+ // ignored matcher
+ case StatusWaiterMatch:
+ s := a.Expected.(int)
+ result = s == req.HTTPResponse.StatusCode
+ case ErrorWaiterMatch:
+ if aerr, ok := err.(awserr.Error); ok {
+ result = aerr.Code() == a.Expected.(string)
+ }
+ default:
+ waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s",
+ name, a.Matcher)
+ }
+
+ if !result {
+ // If there was no matching result found there is nothing more to do
+ // for this response, retry the request.
+ return false, nil
+ }
+
+ switch a.State {
+ case SuccessWaiterState:
+ // waiter completed
+ return true, nil
+ case FailureWaiterState:
+ // Waiter failure state triggered
+ return true, awserr.New(WaiterResourceNotReadyErrorCode,
+ "failed waiting for successful resource state", err)
+ case RetryWaiterState:
+ // clear the error and retry the operation
+ return false, nil
+ default:
+ waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s",
+ name, a.State)
+ return false, nil
+ }
+}
+
+func waiterLogf(logger aws.Logger, msg string, args ...interface{}) {
+ if logger != nil {
+ logger.Log(fmt.Sprintf(msg, args...))
+ }
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/credentials.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/credentials.go
new file mode 100644
index 0000000000000..917d95156e671
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/credentials.go
@@ -0,0 +1,123 @@
+package session
+
+import (
+ "fmt"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials/processcreds"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+func resolveCredentials(cfg *aws.Config,
+ envCfg envConfig, sharedCfg sharedConfig,
+ handlers request.Handlers,
+ sessOpts Options,
+) (*credentials.Credentials, error) {
+
+ switch {
+ case len(sessOpts.Profile) != 0:
+		// User explicitly provided a Profile in the session's configuration
+ // so load that profile from shared config first.
+ // Github(aws/aws-sdk-go#2727)
+ return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts)
+
+ case envCfg.Creds.HasKeys():
+ // Environment credentials
+ return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil
+
+ default:
+ // Fallback to the "default" credential resolution chain.
+ return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts)
+ }
+}
+
+func resolveCredsFromProfile(cfg *aws.Config,
+ envCfg envConfig, sharedCfg sharedConfig,
+ handlers request.Handlers,
+ sessOpts Options,
+) (creds *credentials.Credentials, err error) {
+
+ switch {
+ case sharedCfg.SourceProfile != nil:
+ // Assume IAM role with credentials source from a different profile.
+ creds, err = resolveCredsFromProfile(cfg, envCfg,
+ *sharedCfg.SourceProfile, handlers, sessOpts,
+ )
+
+ case sharedCfg.Creds.HasKeys():
+ // Static Credentials from Shared Config/Credentials file.
+ creds = credentials.NewStaticCredentialsFromCreds(
+ sharedCfg.Creds,
+ )
+
+ case len(sharedCfg.CredentialProcess) != 0:
+ // Get credentials from CredentialProcess
+ creds = processcreds.NewCredentials(sharedCfg.CredentialProcess)
+
+ case len(sharedCfg.CredentialSource) != 0:
+ creds, err = resolveCredsFromSource(cfg, envCfg,
+ sharedCfg, handlers, sessOpts,
+ )
+
+ default:
+ // Fallback to default credentials provider, include mock errors for
+ // the credential chain so user can identify why credentials failed to
+ // be retrieved.
+ creds = credentials.NewCredentials(&credentials.ChainProvider{
+ VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+ Providers: []credentials.Provider{
+ &credProviderError{
+ Err: awserr.New("EnvAccessKeyNotFound",
+ "failed to find credentials in the environment.", nil),
+ },
+ &credProviderError{
+ Err: awserr.New("SharedCredsLoad",
+ fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil),
+ },
+ //defaults.RemoteCredProvider(*cfg, handlers), IBM
+ },
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return creds, nil
+}
+
+// valid credential source values
+const (
+ credSourceEnvironment = "Environment"
+)
+
+func resolveCredsFromSource(cfg *aws.Config,
+ envCfg envConfig, sharedCfg sharedConfig,
+ handlers request.Handlers,
+ sessOpts Options,
+) (creds *credentials.Credentials, err error) {
+
+ switch sharedCfg.CredentialSource {
+
+ case credSourceEnvironment:
+ creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds)
+
+ default:
+ return nil, ErrSharedConfigInvalidCredSource
+ }
+
+ return creds, nil
+}
+
+type credProviderError struct {
+ Err error
+}
+
+func (c credProviderError) Retrieve() (credentials.Value, error) {
+ return credentials.Value{}, c.Err
+}
+func (c credProviderError) IsExpired() bool {
+ return true
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/custom_transport.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/custom_transport.go
new file mode 100644
index 0000000000000..4390ad52f495f
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/custom_transport.go
@@ -0,0 +1,28 @@
+//go:build go1.13
+// +build go1.13
+
+package session
+
+import (
+ "net"
+ "net/http"
+ "time"
+)
+
+// Transport that should be used when a custom CA bundle is specified with the
+// SDK.
+func getCustomTransport() *http.Transport {
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }).DialContext,
+ ForceAttemptHTTP2: true,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ }
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/custom_transport_go1.12.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/custom_transport_go1.12.go
new file mode 100644
index 0000000000000..668565bea0c20
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/custom_transport_go1.12.go
@@ -0,0 +1,27 @@
+//go:build !go1.13 && go1.7
+// +build !go1.13,go1.7
+
+package session
+
+import (
+ "net"
+ "net/http"
+ "time"
+)
+
+// Transport that should be used when a custom CA bundle is specified with the
+// SDK.
+func getCustomTransport() *http.Transport {
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }).DialContext,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ }
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/custom_transport_go1.5.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/custom_transport_go1.5.go
new file mode 100644
index 0000000000000..e101aa6b6c06a
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/custom_transport_go1.5.go
@@ -0,0 +1,23 @@
+//go:build !go1.6 && go1.5
+// +build !go1.6,go1.5
+
+package session
+
+import (
+ "net"
+ "net/http"
+ "time"
+)
+
+// Transport that should be used when a custom CA bundle is specified with the
+// SDK.
+func getCustomTransport() *http.Transport {
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ TLSHandshakeTimeout: 10 * time.Second,
+ }
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/custom_transport_go1.6.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/custom_transport_go1.6.go
new file mode 100644
index 0000000000000..b5fcbe0d1e079
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/custom_transport_go1.6.go
@@ -0,0 +1,24 @@
+//go:build !go1.7 && go1.6
+// +build !go1.7,go1.6
+
+package session
+
+import (
+ "net"
+ "net/http"
+ "time"
+)
+
+// Transport that should be used when a custom CA bundle is specified with the
+// SDK.
+func getCustomTransport() *http.Transport {
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ }
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/doc.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/doc.go
new file mode 100644
index 0000000000000..2fda523f993d6
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/doc.go
@@ -0,0 +1,261 @@
+/*
+Package session provides configuration for the SDK's service clients. Sessions
+can be shared across service clients that share the same base configuration.
+
+Sessions are safe to use concurrently as long as the Session is not being
+modified. Sessions should be cached when possible, because creating a new
+Session will load all configuration values from the environment, and config
+files each time the Session is created. Sharing the Session value across all of
+your service clients will ensure the configuration is loaded the fewest number
+of times possible.
+
+Sessions options from Shared Config
+
+By default NewSession will only load credentials from the shared credentials
+file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is
+set to a truthy value the Session will be created from the configuration
+values from the shared config (~/.aws/config) and shared credentials
+(~/.aws/credentials) files. Using the NewSessionWithOptions with
+SharedConfigState set to SharedConfigEnable will create the session as if the
+AWS_SDK_LOAD_CONFIG environment variable was set.
+
+Credential and config loading order
+
+The Session will attempt to load configuration and credentials from the
+environment, configuration files, and other credential sources. The order
+configuration is loaded in is:
+
+ * Environment Variables
+ * Shared Credentials file
+ * Shared Configuration file (if SharedConfig is enabled)
+
+The environment variables for credentials will have precedence over shared
+config even if SharedConfig is enabled. To override this behavior and use
+shared config credentials instead, specify the session.Options.Profile (e.g.
+when using credential_source=Environment to assume a role).
+
+ sess, err := session.NewSessionWithOptions(session.Options{
+ Profile: "myProfile",
+ })
+
+Creating Sessions
+
+Creating a Session without additional options will load the credentials,
+region, and profile from the environment and shared config automatically. See
+the "Environment Variables" section for information on environment variables
+used by Session.
+
+ // Create Session
+ sess, err := session.NewSession()
+
+
+When creating Sessions optional aws.Config values can be passed in that will
+override the default, or loaded, config values the Session is being created
+with. This allows you to provide additional, or case based, configuration
+as needed.
+
+ // Create a Session with a custom region
+ sess, err := session.NewSession(&aws.Config{
+ Region: aws.String("us-west-2"),
+ })
+
+Use NewSessionWithOptions to provide additional configuration driving how the
+Session's configuration will be loaded, such as specifying the shared config
+profile, or overriding the shared config state (AWS_SDK_LOAD_CONFIG).
+
+ // Equivalent to session.NewSession()
+ sess, err := session.NewSessionWithOptions(session.Options{
+ // Options
+ })
+
+ sess, err := session.NewSessionWithOptions(session.Options{
+ // Specify profile to load for the session's config
+ Profile: "profile_name",
+
+ // Provide SDK Config options, such as Region.
+ Config: aws.Config{
+ Region: aws.String("us-west-2"),
+ },
+
+ // Force enable Shared Config support
+ SharedConfigState: session.SharedConfigEnable,
+ })
+
+Adding Handlers
+
+You can add handlers to a session to decorate API operations (e.g. adding HTTP
+headers). All clients that use the Session receive a copy of the Session's
+handlers. For example, the following request handler added to the Session logs
+every request made.
+
+ // Create a session, and add additional handlers for all service
+ // clients created with the Session to inherit. Adds logging handler.
+ sess := session.Must(session.NewSession())
+
+ sess.Handlers.Send.PushFront(func(r *request.Request) {
+ // Log every request made and its payload
+ logger.Printf("Request: %s/%s, Params: %s",
+ r.ClientInfo.ServiceName, r.Operation, r.Params)
+ })
+
+Shared Config Fields
+
+By default the SDK will only load the shared credentials file's
+(~/.aws/credentials) credentials values, and all other config is provided by
+the environment variables, SDK defaults, and user provided aws.Config values.
+
+If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable
+option is used to create the Session the full shared config values will be
+loaded. This includes credentials, region, and support for assume role. In
+addition the Session will load its configuration from both the shared config
+file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
+files have the same format.
+
+If both config files are present the configuration from both files will be
+read. The Session will be created from configuration values from the shared
+credentials file (~/.aws/credentials) over those in the shared config file
+(~/.aws/config).
+
+Credentials are the values the SDK uses to authenticate requests with AWS
+Services. When specified in a file, both aws_access_key_id and
+aws_secret_access_key must be provided together in the same file to be
+considered valid. They will be ignored if both are not present.
+aws_session_token is an optional field that can be provided in addition to the
+other two fields.
+
+ aws_access_key_id = AKID
+ aws_secret_access_key = SECRET
+ aws_session_token = TOKEN
+
+ ; region only supported if SharedConfigEnabled.
+ region = us-east-1
+
+Environment Variables
+
+When a Session is created several environment variables can be set to adjust
+how the SDK functions, and what configuration data it loads when creating
+Sessions. All environment values are optional, but some values like credentials
+require multiple values to be set or the partial values will be ignored.
+All environment variable values are strings unless otherwise noted.
+
+Environment configuration values. If set, both Access Key ID and Secret Access
+Key must be provided. A Session Token can optionally also be provided, but is
+not required.
+
+ # Access Key ID
+ AWS_ACCESS_KEY_ID=AKID
+ AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+
+ # Secret Access Key
+ AWS_SECRET_ACCESS_KEY=SECRET
+	AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+
+ # Session Token
+ AWS_SESSION_TOKEN=TOKEN
+
+Region value will instruct the SDK where to make service API requests to. If it
+is not provided in the environment, the region must be provided before a
+service client request is made.
+
+ AWS_REGION=us-east-1
+
+ # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+ # and AWS_REGION is not also set.
+ AWS_DEFAULT_REGION=us-east-1
+
+Profile name the SDK should use when loading shared config from the
+configuration files. If not provided, "default" will be used as the profile
+name.
+
+ AWS_PROFILE=my_profile
+
+ # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+ # and AWS_PROFILE is not also set.
+ AWS_DEFAULT_PROFILE=my_profile
+
+SDK load config instructs the SDK to load the shared config in addition to
+shared credentials. This also expands the configuration loaded so the shared
+credentials will have parity with the shared config file. This also enables
+Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+env values as well.
+
+ AWS_SDK_LOAD_CONFIG=1
+
+Custom Shared Config and Credential Files
+
+Shared credentials file path can be set to instruct the SDK to use an alternative
+file for the shared credentials. If not set the file will be loaded from
+$HOME/.aws/credentials on Linux/Unix based systems, and
+%USERPROFILE%\.aws\credentials on Windows.
+
+ AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+
+Shared config file path can be set to instruct the SDK to use an alternative
+file for the shared config. If not set the file will be loaded from
+$HOME/.aws/config on Linux/Unix based systems, and
+%USERPROFILE%\.aws\config on Windows.
+
+ AWS_CONFIG_FILE=$HOME/my_shared_config
+
+Custom CA Bundle
+
+Path to a custom Certificate Authority (CA) bundle PEM file that the SDK
+will use instead of the default system's root CA bundle. Use this only
+if you want to replace the CA bundle the SDK uses for TLS requests.
+
+ AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+
+Enabling this option will attempt to merge the Transport into the SDK's HTTP
+client. If the client's Transport is not a http.Transport an error will be
+returned. If the Transport's TLS config is set this option will cause the SDK
+to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file
+contains multiple certificates all of them will be loaded.
+
+The Session option CustomCABundle is also available when creating sessions
+to enable this feature. The CustomCABundle session option field has priority
+over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
+
+Setting a custom HTTPClient in the aws.Config options will override this setting.
+To use this option and custom HTTP client, the HTTP client needs to be provided
+when creating the session, not the service client.
+
+Custom Client TLS Certificate
+
+The SDK supports the environment and session option being configured with
+Client TLS certificates that are sent as a part of the client's TLS handshake
+for client authentication. If used, both Cert and Key values are required. If
+one is missing, or either fails to load, an error will be returned.
+
+HTTP Client's Transport concrete implementation must be an http.Transport
+or creating the session will fail.
+
+ AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+ AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+
+This can also be configured via the session.Options ClientTLSCert and ClientTLSKey.
+
+ sess, err := session.NewSessionWithOptions(session.Options{
+ ClientTLSCert: myCertFile,
+ ClientTLSKey: myKeyFile,
+ })
+
+Custom EC2 IMDS Endpoint
+
+The endpoint of the EC2 IMDS client can be configured via the environment
+variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a
+Session. See Options.EC2IMDSEndpoint for more details.
+
+ AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254
+
+If using a URL with an IPv6 address literal, the IPv6 address
+component must be enclosed in square brackets.
+
+ AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
+
+The custom EC2 IMDS endpoint can also be specified via the Session options.
+
+ sess, err := session.NewSessionWithOptions(session.Options{
+ EC2IMDSEndpoint: "http://[::1]",
+ })
+*/
+package session
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/env_config.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/env_config.go
new file mode 100644
index 0000000000000..8c438f93385fd
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/env_config.go
@@ -0,0 +1,299 @@
+package session
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+ "github.com/IBM/ibm-cos-sdk-go/aws/defaults"
+ "github.com/IBM/ibm-cos-sdk-go/aws/endpoints"
+)
+
+// EnvProviderName provides a name of the provider when config is loaded from environment.
+const EnvProviderName = "EnvConfigCredentials"
+
+// envConfig is a collection of environment values the SDK will read
+// setup config from. All environment values are optional, but some values
+// such as credentials require multiple values to be complete or the values
+// will be ignored.
+type envConfig struct {
+	// Environment configuration values. If set, both Access Key ID and Secret Access
+	// Key must be provided. A Session Token can optionally also be provided, but is
+	// not required.
+ //
+ // # Access Key ID
+ // AWS_ACCESS_KEY_ID=AKID
+ // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+ //
+ // # Secret Access Key
+ // AWS_SECRET_ACCESS_KEY=SECRET
+ // AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+ //
+ // # Session Token
+ // AWS_SESSION_TOKEN=TOKEN
+ Creds credentials.Value
+
+ // Region value will instruct the SDK where to make service API requests to. If is
+ // not provided in the environment the region must be provided before a service
+ // client request is made.
+ //
+ // AWS_REGION=us-east-1
+ //
+ // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+ // # and AWS_REGION is not also set.
+ // AWS_DEFAULT_REGION=us-east-1
+ Region string
+
+	// Profile name the SDK should use when loading shared configuration from the
+ // shared configuration files. If not provided "default" will be used as the
+ // profile name.
+ //
+ // AWS_PROFILE=my_profile
+ //
+ // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+ // # and AWS_PROFILE is not also set.
+ // AWS_DEFAULT_PROFILE=my_profile
+ Profile string
+
+ // SDK load config instructs the SDK to load the shared config in addition to
+ // shared credentials. This also expands the configuration loaded from the shared
+ // credentials to have parity with the shared config file. This also enables
+ // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+ // env values as well.
+ //
+ // AWS_SDK_LOAD_CONFIG=1
+ EnableSharedConfig bool
+
+ // Shared credentials file path can be set to instruct the SDK to use an alternate
+ // file for the shared credentials. If not set the file will be loaded from
+ // $HOME/.aws/credentials on Linux/Unix based systems, and
+ // %USERPROFILE%\.aws\credentials on Windows.
+ //
+ // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+ SharedCredentialsFile string
+
+ // Shared config file path can be set to instruct the SDK to use an alternate
+ // file for the shared config. If not set the file will be loaded from
+ // $HOME/.aws/config on Linux/Unix based systems, and
+ // %USERPROFILE%\.aws\config on Windows.
+ //
+ // AWS_CONFIG_FILE=$HOME/my_shared_config
+ SharedConfigFile string
+
+	// Sets the path to a custom Certificate Authority (CA) Bundle PEM file
+ // that the SDK will use instead of the system's root CA bundle.
+ // Only use this if you want to configure the SDK to use a custom set
+ // of CAs.
+ //
+ // Enabling this option will attempt to merge the Transport
+ // into the SDK's HTTP client. If the client's Transport is
+ // not a http.Transport an error will be returned. If the
+ // Transport's TLS config is set this option will cause the
+ // SDK to overwrite the Transport's TLS config's RootCAs value.
+ //
+ // Setting a custom HTTPClient in the aws.Config options will override this setting.
+	// To use this option with a custom HTTP client, the HTTP client needs to be provided
+	// when creating the session, not when creating the service client.
+ //
+ // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+ CustomCABundle string
+
+	// Sets the TLS client certificate that should be used by the SDK's HTTP transport
+ // when making requests. The certificate must be paired with a TLS client key file.
+ //
+ // AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+ ClientTLSCert string
+
+	// Sets the TLS client key that should be used by the SDK's HTTP transport
+ // when making requests. The key must be paired with a TLS client certificate file.
+ //
+ // AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+ ClientTLSKey string
+
+ // Enables endpoint discovery via environment variables.
+ //
+ // AWS_ENABLE_ENDPOINT_DISCOVERY=true
+ EnableEndpointDiscovery *bool
+ enableEndpointDiscovery string
+
+ // Specifies the S3 Regional Endpoint flag for the SDK to resolve the
+ // endpoint for a service.
+ //
+ // AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional
+	// This can take the value `regional` or `legacy`
+ S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint
+
+ // Specifies if the S3 service should allow ARNs to direct the region
+ // the client's requests are sent to.
+ //
+ // AWS_S3_USE_ARN_REGION=true
+ S3UseARNRegion bool
+ // AWS_USE_DUALSTACK_ENDPOINT=true
+ UseDualStackEndpoint endpoints.DualStackEndpointState
+}
+
+var (
+ credAccessEnvKey = []string{
+ "AWS_ACCESS_KEY_ID",
+ "AWS_ACCESS_KEY",
+ }
+ credSecretEnvKey = []string{
+ "AWS_SECRET_ACCESS_KEY",
+ "AWS_SECRET_KEY",
+ }
+ credSessionEnvKey = []string{
+ "AWS_SESSION_TOKEN",
+ }
+
+ enableEndpointDiscoveryEnvKey = []string{
+ "AWS_ENABLE_ENDPOINT_DISCOVERY",
+ }
+
+ regionEnvKeys = []string{
+ "AWS_REGION",
+ "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set
+ }
+ profileEnvKeys = []string{
+ "AWS_PROFILE",
+ "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set
+ }
+ sharedCredsFileEnvKey = []string{
+ "AWS_SHARED_CREDENTIALS_FILE",
+ }
+ sharedConfigFileEnvKey = []string{
+ "AWS_CONFIG_FILE",
+ }
+ s3UsEast1RegionalEndpoint = []string{
+ "AWS_S3_US_EAST_1_REGIONAL_ENDPOINT",
+ }
+ s3UseARNRegionEnvKey = []string{
+ "AWS_S3_USE_ARN_REGION",
+ }
+ useCABundleKey = []string{
+ "AWS_CA_BUNDLE",
+ }
+ useClientTLSCert = []string{
+ "AWS_SDK_GO_CLIENT_TLS_CERT",
+ }
+ useClientTLSKey = []string{
+ "AWS_SDK_GO_CLIENT_TLS_KEY",
+ }
+ awsUseDualStackEndpoint = []string{
+ "AWS_USE_DUALSTACK_ENDPOINT",
+ }
+)
+
+// loadEnvConfig retrieves the SDK's environment configuration.
+// See `envConfig` for the values that will be retrieved.
+//
+// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
+// the shared SDK config will be loaded in addition to the SDK's specific
+// configuration values.
+func loadEnvConfig() (envConfig, error) {
+ enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
+ return envConfigLoad(enableSharedConfig)
+}
+
+// loadEnvSharedConfig retrieves the SDK's environment configuration, and the
+// SDK shared config. See `envConfig` for the values that will be retrieved.
+//
+// Loads the shared configuration in addition to the SDK's specific configuration.
+// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
+// environment variable is set.
+func loadSharedEnvConfig() (envConfig, error) {
+ return envConfigLoad(true)
+}
+
+func envConfigLoad(enableSharedConfig bool) (envConfig, error) {
+ cfg := envConfig{}
+
+ cfg.EnableSharedConfig = enableSharedConfig
+
+ // Static environment credentials
+ var creds credentials.Value
+ setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey)
+ setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey)
+ setFromEnvVal(&creds.SessionToken, credSessionEnvKey)
+ if creds.HasKeys() {
+ // Require logical grouping of credentials
+ creds.ProviderName = EnvProviderName
+ cfg.Creds = creds
+ }
+
+ // Require logical grouping of credentials
+ if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 {
+ cfg.Creds = credentials.Value{}
+ } else {
+ cfg.Creds.ProviderName = EnvProviderName
+ }
+
+ regionKeys := regionEnvKeys
+ profileKeys := profileEnvKeys
+ if !cfg.EnableSharedConfig {
+ regionKeys = regionKeys[:1]
+ profileKeys = profileKeys[:1]
+ }
+
+ setFromEnvVal(&cfg.Region, regionKeys)
+ setFromEnvVal(&cfg.Profile, profileKeys)
+
+	// Endpoint discovery is read as a string; any value other than "false" enables it.
+ setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey)
+ if len(cfg.enableEndpointDiscovery) > 0 {
+ cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false")
+ }
+
+ setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey)
+ setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey)
+
+ if len(cfg.SharedCredentialsFile) == 0 {
+ cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename()
+ }
+ if len(cfg.SharedConfigFile) == 0 {
+ cfg.SharedConfigFile = defaults.SharedConfigFilename()
+ }
+
+ setFromEnvVal(&cfg.CustomCABundle, useCABundleKey)
+ setFromEnvVal(&cfg.ClientTLSCert, useClientTLSCert)
+ setFromEnvVal(&cfg.ClientTLSKey, useClientTLSKey)
+
+ var err error
+ // S3 Regional Endpoint variable
+ for _, k := range s3UsEast1RegionalEndpoint {
+ if v := os.Getenv(k); len(v) != 0 {
+ cfg.S3UsEast1RegionalEndpoint, err = endpoints.GetS3UsEast1RegionalEndpoint(v)
+ if err != nil {
+ return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err)
+ }
+ }
+ }
+
+ var s3UseARNRegion string
+ setFromEnvVal(&s3UseARNRegion, s3UseARNRegionEnvKey)
+ if len(s3UseARNRegion) != 0 {
+ switch {
+ case strings.EqualFold(s3UseARNRegion, "false"):
+ cfg.S3UseARNRegion = false
+ case strings.EqualFold(s3UseARNRegion, "true"):
+ cfg.S3UseARNRegion = true
+ default:
+ return envConfig{}, fmt.Errorf(
+ "invalid value for environment variable, %s=%s, need true or false",
+ s3UseARNRegionEnvKey[0], s3UseARNRegion)
+ }
+ }
+
+ return cfg, nil
+}
+
+func setFromEnvVal(dst *string, keys []string) {
+ for _, k := range keys {
+ if v := os.Getenv(k); len(v) != 0 {
+ *dst = v
+ break
+ }
+ }
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/session.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/session.go
new file mode 100644
index 0000000000000..53a60837aadd9
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/session.go
@@ -0,0 +1,769 @@
+package session
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/client"
+ "github.com/IBM/ibm-cos-sdk-go/aws/corehandlers"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam"
+ "github.com/IBM/ibm-cos-sdk-go/aws/defaults"
+ "github.com/IBM/ibm-cos-sdk-go/aws/endpoints"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+const (
+ // ErrCodeSharedConfig represents an error that occurs in the shared
+ // configuration logic
+ ErrCodeSharedConfig = "SharedConfigErr"
+
+ // ErrCodeLoadCustomCABundle error code for unable to load custom CA bundle.
+ ErrCodeLoadCustomCABundle = "LoadCustomCABundleError"
+
+ // ErrCodeLoadClientTLSCert error code for unable to load client TLS
+ // certificate or key
+ ErrCodeLoadClientTLSCert = "LoadClientTLSCertError"
+)
+
+// ErrSharedConfigSourceCollision will be returned if a section contains both
+// source_profile and credential_source
+var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token, or sso", nil)
+
+// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment
+// variables are empty and Environment was set as the credential source
+var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil)
+
+// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided
+var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil)
+
+// A Session provides a central location to create service clients from and
+// store configurations and request handlers for those services.
+//
+// Sessions are safe to create service clients concurrently, but it is not safe
+// to mutate the Session concurrently.
+//
+// The Session satisfies the service client's client.ConfigProvider.
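+//
+// A typical usage sketch (s3.New is assumed to come from a service package
+// outside this file):
+//
+//	sess := session.Must(session.NewSession())
+//	svc := s3.New(sess)
+//	// sess may be shared across goroutines to create more clients.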
+type Session struct {
+ Config *aws.Config
+ Handlers request.Handlers
+
+ options Options
+}
+
+// New creates a new instance of the handlers merging in the provided configs
+// on top of the SDK's default configurations. Once the Session is created it
+// can be mutated to modify the Config or Handlers. The Session is safe to be
+// read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New
+// method could now encounter an error when loading the configuration. When
+// The environment variable is set, and an error occurs, New will return a
+// session that will fail all requests reporting the error that occurred while
+// loading the session. Use NewSession to get the error when creating the
+// session.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded, in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file.
+//
+// Deprecated: Use NewSession functions to create sessions instead. NewSession
+// has the same functionality as New except an error can be returned when the
+// func is called instead of waiting to receive an error until a request is made.
+// IBM removed
+
+// NewSession returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. Once the Session is created
+// it can be mutated to modify the Config or Handlers. The Session is safe to
+// be read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to be built with retrieving credentials with AssumeRole set in the config.
+//
+// See the NewSessionWithOptions func for information on how to override or
+// control through code how the Session will be created, such as specifying the
+// config profile, and controlling if shared config is enabled or not.
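+//
+// A minimal sketch (the region value here is illustrative):
+//
+//	sess, err := session.NewSession(&aws.Config{
+//		Region: aws.String("us-east-1"),
+//	})
+//	if err != nil {
+//		// handle the session creation error
+//	}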
+func NewSession(cfgs ...*aws.Config) (*Session, error) {
+ opts := Options{}
+ opts.Config.MergeIn(cfgs...)
+
+ return NewSessionWithOptions(opts)
+}
+
+// SharedConfigState provides the ability to optionally override the state
+// of the session's creation based on the shared config being enabled or
+// disabled.
+type SharedConfigState int
+
+const (
+ // SharedConfigStateFromEnv does not override any state of the
+ // AWS_SDK_LOAD_CONFIG env var. It is the default value of the
+ // SharedConfigState type.
+ SharedConfigStateFromEnv SharedConfigState = iota
+
+ // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value
+ // and disables the shared config functionality.
+ SharedConfigDisable
+
+ // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value
+ // and enables the shared config functionality.
+ SharedConfigEnable
+)
+
+// Options provides the means to control how a Session is created and what
+// configuration values will be loaded.
+//
+type Options struct {
+ // Provides config values for the SDK to use when creating service clients
+ // and making API requests to services. Any value set in with this field
+ // will override the associated value provided by the SDK defaults,
+ // environment or config files where relevant.
+ //
+	// If not set, configuration values from SDK defaults, environment,
+ // config will be used.
+ Config aws.Config
+
+ // Overrides the config profile the Session should be created from. If not
+ // set the value of the environment variable will be loaded (AWS_PROFILE,
+ // or AWS_DEFAULT_PROFILE if the Shared Config is enabled).
+ //
+ // If not set and environment variables are not set the "default"
+ // (DefaultSharedConfigProfile) will be used as the profile to load the
+ // session config from.
+ Profile string
+
+ // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
+ // environment variable. By default a Session will be created using the
+ // value provided by the AWS_SDK_LOAD_CONFIG environment variable.
+ //
+ // Setting this value to SharedConfigEnable or SharedConfigDisable
+ // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
+ // and enable or disable the shared config functionality.
+ SharedConfigState SharedConfigState
+
+ // Ordered list of files the session will load configuration from.
+	// It overrides the AWS_SHARED_CREDENTIALS_FILE and AWS_CONFIG_FILE environment variables.
+ SharedConfigFiles []string
+
+ // When the SDK's shared config is configured to assume a role with MFA
+ // this option is required in order to provide the mechanism that will
+ // retrieve the MFA token. There is no default value for this field. If
+ // it is not set an error will be returned when creating the session.
+ //
+	// This token provider will be called whenever the assumed role's
+ // credentials need to be refreshed. Within the context of service clients
+ // all sharing the same session the SDK will ensure calls to the token
+ // provider are atomic. When sharing a token provider across multiple
+ // sessions additional synchronization logic is needed to ensure the
+	// token providers do not introduce race conditions. It is recommended to
+ // share the session where possible.
+ //
+ // stscreds.StdinTokenProvider is a basic implementation that will prompt
+ // from stdin for the MFA token code.
+ //
+ // This field is only used if the shared configuration is enabled, and
+	// the config enables assume role with MFA via the mfa_serial field.
+ AssumeRoleTokenProvider func() (string, error)
+
+ // When the SDK's shared config is configured to assume a role this option
+ // may be provided to set the expiry duration of the STS credentials.
+ // Defaults to 15 minutes if not set as documented in the
+ // stscreds.AssumeRoleProvider.
+ AssumeRoleDuration time.Duration
+
+	// Reader for a custom Certificate Authority (CA) bundle in PEM format that
+ // the SDK will use instead of the default system's root CA bundle. Use this
+ // only if you want to replace the CA bundle the SDK uses for TLS requests.
+ //
+ // HTTP Client's Transport concrete implementation must be a http.Transport
+ // or creating the session will fail.
+ //
+ // If the Transport's TLS config is set this option will cause the SDK
+ // to overwrite the Transport's TLS config's RootCAs value. If the CA
+ // bundle reader contains multiple certificates all of them will be loaded.
+ //
+ // Can also be specified via the environment variable:
+ //
+ // AWS_CA_BUNDLE=$HOME/ca_bundle
+ //
+ // Can also be specified via the shared config field:
+ //
+ // ca_bundle = $HOME/ca_bundle
+ CustomCABundle io.Reader
+
+	// Reader for the TLS client certificate that should be used by the SDK's
+ // HTTP transport when making requests. The certificate must be paired with
+ // a TLS client key file. Will be ignored if both are not provided.
+ //
+ // HTTP Client's Transport concrete implementation must be a http.Transport
+ // or creating the session will fail.
+ //
+ // Can also be specified via the environment variable:
+ //
+ // AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+ ClientTLSCert io.Reader
+
+	// Reader for the TLS client key that should be used by the SDK's HTTP
+ // transport when making requests. The key must be paired with a TLS client
+ // certificate file. Will be ignored if both are not provided.
+ //
+ // HTTP Client's Transport concrete implementation must be a http.Transport
+ // or creating the session will fail.
+ //
+ // Can also be specified via the environment variable:
+ //
+ // AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+ ClientTLSKey io.Reader
+
+ // The handlers that the session and all API clients will be created with.
+ // This must be a complete set of handlers. Use the defaults.Handlers()
+ // function to initialize this value before changing the handlers to be
+ // used by the SDK.
+ Handlers request.Handlers
+}
+
+// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. This func uses the Options
+// values to configure how the Session is created.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to be built with retrieving credentials with AssumeRole set in the config.
+//
+// // Equivalent to session.New
+// sess := session.Must(session.NewSessionWithOptions(session.Options{}))
+//
+// // Specify profile to load for the session's config
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// Profile: "profile_name",
+// }))
+//
+// // Specify profile for config and region for requests
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// Config: aws.Config{Region: aws.String("us-east-1")},
+// Profile: "profile_name",
+// }))
+//
+// // Force enable Shared Config support
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// SharedConfigState: session.SharedConfigEnable,
+// }))
+func NewSessionWithOptions(opts Options) (*Session, error) {
+ var envCfg envConfig
+ var err error
+ if opts.SharedConfigState == SharedConfigEnable {
+ envCfg, err = loadSharedEnvConfig()
+ if err != nil {
+ return nil, fmt.Errorf("failed to load shared config, %v", err)
+ }
+ } else {
+ envCfg, err = loadEnvConfig()
+ if err != nil {
+ return nil, fmt.Errorf("failed to load environment config, %v", err)
+ }
+ }
+
+ if len(opts.Profile) != 0 {
+ envCfg.Profile = opts.Profile
+ }
+
+ switch opts.SharedConfigState {
+ case SharedConfigDisable:
+ envCfg.EnableSharedConfig = false
+ case SharedConfigEnable:
+ envCfg.EnableSharedConfig = true
+ }
+
+ return newSession(opts, envCfg, &opts.Config)
+}
+
+// Must is a helper function to ensure the Session is valid and there was no
+// error when calling a NewSession function.
+//
+// This helper is intended to be used in variable initialization to load the
+// Session and configuration at startup. Such as:
+//
+// var sess = session.Must(session.NewSession())
+func Must(sess *Session, err error) *Session {
+ if err != nil {
+ panic(err)
+ }
+
+ return sess
+}
+
+func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
+ cfg := defaults.Config()
+
+ handlers := opts.Handlers
+ if handlers.IsEmpty() {
+ handlers = defaults.Handlers()
+ }
+
+	// Get a merged version of the user provided config to determine if
+	// credentials were set.
+ userCfg := &aws.Config{}
+ userCfg.MergeIn(cfgs...)
+ cfg.MergeIn(userCfg)
+
+ // Ordered config files will be loaded in with later files overwriting
+ // previous config file values.
+ var cfgFiles []string
+ if opts.SharedConfigFiles != nil {
+ cfgFiles = opts.SharedConfigFiles
+ } else {
+ cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
+ if !envCfg.EnableSharedConfig {
+ // The shared config file (~/.aws/config) is only loaded if instructed
+ // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
+ cfgFiles = cfgFiles[1:]
+ }
+ }
+
+ // Load additional config from file(s)
+ sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig)
+ if err != nil {
+ if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) {
+ // Special case where the user has not explicitly specified an AWS_PROFILE,
+ // or session.Options.profile, shared config is not enabled, and the
+ // environment has credentials, allow the shared config file to fail to
+ // load since the user has already provided credentials, and nothing else
+			// is required to be read from file. Github(aws/aws-sdk-go#2455)
+ } else if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
+ return nil, err
+ }
+ }
+
+ if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil {
+ return nil, err
+ }
+
+ if err := setTLSOptions(&opts, cfg, envCfg, sharedCfg); err != nil {
+ return nil, err
+ }
+
+ s := &Session{
+ Config: cfg,
+ Handlers: handlers,
+ options: opts,
+ }
+
+ initHandlers(s)
+
+ return s, nil
+}
+
+func setTLSOptions(opts *Options, cfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig) error {
+	// CA Bundle can be specified in both the environment variable and the shared config file.
+ var caBundleFilename = envCfg.CustomCABundle
+ if len(caBundleFilename) == 0 {
+ caBundleFilename = sharedCfg.CustomCABundle
+ }
+
+ // Only use environment value if session option is not provided.
+ customTLSOptions := map[string]struct {
+ filename string
+ field *io.Reader
+ errCode string
+ }{
+ "custom CA bundle PEM": {filename: caBundleFilename, field: &opts.CustomCABundle, errCode: ErrCodeLoadCustomCABundle},
+ "custom client TLS cert": {filename: envCfg.ClientTLSCert, field: &opts.ClientTLSCert, errCode: ErrCodeLoadClientTLSCert},
+ "custom client TLS key": {filename: envCfg.ClientTLSKey, field: &opts.ClientTLSKey, errCode: ErrCodeLoadClientTLSCert},
+ }
+ for name, v := range customTLSOptions {
+ if len(v.filename) != 0 && *v.field == nil {
+ f, err := os.Open(v.filename)
+ if err != nil {
+ return awserr.New(v.errCode, fmt.Sprintf("failed to open %s file", name), err)
+ }
+ defer f.Close()
+ *v.field = f
+ }
+ }
+
+ // Setup HTTP client with custom cert bundle if enabled
+ if opts.CustomCABundle != nil {
+ if err := loadCustomCABundle(cfg.HTTPClient, opts.CustomCABundle); err != nil {
+ return err
+ }
+ }
+
+ // Setup HTTP client TLS certificate and key for client TLS authentication.
+ if opts.ClientTLSCert != nil && opts.ClientTLSKey != nil {
+ if err := loadClientTLSCert(cfg.HTTPClient, opts.ClientTLSCert, opts.ClientTLSKey); err != nil {
+ return err
+ }
+ } else if opts.ClientTLSCert == nil && opts.ClientTLSKey == nil {
+		// Do nothing if neither value is available.
+
+ } else {
+ return awserr.New(ErrCodeLoadClientTLSCert,
+ fmt.Sprintf("client TLS cert(%t) and key(%t) must both be provided",
+ opts.ClientTLSCert != nil, opts.ClientTLSKey != nil), nil)
+ }
+
+ return nil
+}
+
+func getHTTPTransport(client *http.Client) (*http.Transport, error) {
+ var t *http.Transport
+ switch v := client.Transport.(type) {
+ case *http.Transport:
+ t = v
+ default:
+ if client.Transport != nil {
+ return nil, fmt.Errorf("unsupported transport, %T", client.Transport)
+ }
+ }
+ if t == nil {
+ // Nil transport implies `http.DefaultTransport` should be used. Since
+		// the SDK cannot modify, nor copy the `DefaultTransport`, specifying
+		// equivalent values is the next closest behavior.
+ t = getCustomTransport()
+ }
+
+ return t, nil
+}
+
+func loadCustomCABundle(client *http.Client, bundle io.Reader) error {
+ t, err := getHTTPTransport(client)
+ if err != nil {
+ return awserr.New(ErrCodeLoadCustomCABundle,
+ "unable to load custom CA bundle, HTTPClient's transport unsupported type", err)
+ }
+
+ p, err := loadCertPool(bundle)
+ if err != nil {
+ return err
+ }
+ if t.TLSClientConfig == nil {
+ t.TLSClientConfig = &tls.Config{}
+ }
+ t.TLSClientConfig.RootCAs = p
+
+ client.Transport = t
+
+ return nil
+}
+
+func loadCertPool(r io.Reader) (*x509.CertPool, error) {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, awserr.New(ErrCodeLoadCustomCABundle,
+ "failed to read custom CA bundle PEM file", err)
+ }
+
+ p := x509.NewCertPool()
+ if !p.AppendCertsFromPEM(b) {
+ return nil, awserr.New(ErrCodeLoadCustomCABundle,
+ "failed to load custom CA bundle PEM file", err)
+ }
+
+ return p, nil
+}
+
+func loadClientTLSCert(client *http.Client, certFile, keyFile io.Reader) error {
+ t, err := getHTTPTransport(client)
+ if err != nil {
+ return awserr.New(ErrCodeLoadClientTLSCert,
+ "unable to get usable HTTP transport from client", err)
+ }
+
+ cert, err := ioutil.ReadAll(certFile)
+ if err != nil {
+ return awserr.New(ErrCodeLoadClientTLSCert,
+			"unable to read client TLS cert file", err)
+ }
+
+ key, err := ioutil.ReadAll(keyFile)
+ if err != nil {
+ return awserr.New(ErrCodeLoadClientTLSCert,
+			"unable to read client TLS key file", err)
+ }
+
+ clientCert, err := tls.X509KeyPair(cert, key)
+ if err != nil {
+ return awserr.New(ErrCodeLoadClientTLSCert,
+ "unable to load x509 key pair from client cert", err)
+ }
+
+ tlsCfg := t.TLSClientConfig
+ if tlsCfg == nil {
+ tlsCfg = &tls.Config{}
+ }
+
+ tlsCfg.Certificates = append(tlsCfg.Certificates, clientCert)
+
+ t.TLSClientConfig = tlsCfg
+ client.Transport = t
+
+ return nil
+}
+
+func mergeConfigSrcs(cfg, userCfg *aws.Config,
+ envCfg envConfig, sharedCfg sharedConfig,
+ handlers request.Handlers,
+ sessOpts Options,
+) error {
+ // Merge in user provided configuration
+ cfg.MergeIn(userCfg)
+
+ // Region if not already set by user
+ if len(aws.StringValue(cfg.Region)) == 0 {
+ if len(envCfg.Region) > 0 {
+ cfg.WithRegion(envCfg.Region)
+ } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 {
+ cfg.WithRegion(sharedCfg.Region)
+ }
+ }
+
+ if cfg.EnableEndpointDiscovery == nil {
+ if envCfg.EnableEndpointDiscovery != nil {
+ cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery)
+ } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil {
+ cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery)
+ }
+ }
+
+ // Regional Endpoint flag for S3 endpoint resolving
+ mergeS3UsEast1RegionalEndpointConfig(cfg, []endpoints.S3UsEast1RegionalEndpoint{
+ userCfg.S3UsEast1RegionalEndpoint,
+ envCfg.S3UsEast1RegionalEndpoint,
+ sharedCfg.S3UsEast1RegionalEndpoint,
+ endpoints.LegacyS3UsEast1Endpoint,
+ })
+
+ // Configure credentials if not already set by the user when creating the
+ // Session.
+ if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
+ // IBM COS SDK Code -- START
+ if iBmIamCreds := getIBMIAMCredentials(userCfg); iBmIamCreds != nil {
+ cfg.Credentials = iBmIamCreds
+ } else {
+ creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts)
+ if err != nil {
+ return err
+ }
+ cfg.Credentials = creds
+ }
+ // IBM COS SDK Code -- END
+ }
+
+ cfg.S3UseARNRegion = userCfg.S3UseARNRegion
+ if cfg.S3UseARNRegion == nil {
+ cfg.S3UseARNRegion = &envCfg.S3UseARNRegion
+ }
+ if cfg.S3UseARNRegion == nil {
+ cfg.S3UseARNRegion = &sharedCfg.S3UseARNRegion
+ }
+
+ for _, v := range []endpoints.DualStackEndpointState{userCfg.UseDualStackEndpoint, envCfg.UseDualStackEndpoint, sharedCfg.UseDualStackEndpoint} {
+ if v != endpoints.DualStackEndpointStateUnset {
+ cfg.UseDualStackEndpoint = v
+ break
+ }
+ }
+
+ return nil
+}
+
+func mergeS3UsEast1RegionalEndpointConfig(cfg *aws.Config, values []endpoints.S3UsEast1RegionalEndpoint) {
+ for _, v := range values {
+ if v != endpoints.UnsetS3UsEast1Endpoint {
+ cfg.S3UsEast1RegionalEndpoint = v
+ break
+ }
+ }
+}
+
+// IBM COS SDK Code -- START
+// getIBMIAMCredentials retrieves token manager creds or IBM-based credentials
+func getIBMIAMCredentials(config *aws.Config) *credentials.Credentials {
+
+ if provider := ibmiam.NewEnvProvider(config); provider.IsValid() {
+ return credentials.NewCredentials(provider)
+ }
+
+ if provider := ibmiam.NewSharedCredentialsProvider(config, "", ""); provider.IsValid() {
+ return credentials.NewCredentials(provider)
+ }
+
+ if provider := ibmiam.NewSharedConfigProvider(config, "", ""); provider.IsValid() {
+ return credentials.NewCredentials(provider)
+ }
+
+ return nil
+}
+
+// IBM COS SDK Code -- END
+
+func initHandlers(s *Session) {
+ // Add the Validate parameter handler if it is not disabled.
+ s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
+ if !aws.BoolValue(s.Config.DisableParamValidation) {
+ s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
+ }
+}
+
+// Copy creates and returns a copy of the current Session, copying the config
+// and handlers. If any additional configs are provided they will be merged
+// on top of the Session's copied config.
+//
+// // Create a copy of the current Session, configured for the us-west-2 region.
+// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
+func (s *Session) Copy(cfgs ...*aws.Config) *Session {
+ newSession := &Session{
+ Config: s.Config.Copy(cfgs...),
+ Handlers: s.Handlers.Copy(),
+ options: s.options,
+ }
+
+ initHandlers(newSession)
+
+ return newSession
+}
+
+// ClientConfig satisfies the client.ConfigProvider interface and is used to
+// configure the service client instances. Passing the Session to the service
+// client's constructor (New) will use this method to configure the client.
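+//
+// For illustration (the "s3" service identifier is an assumption about the
+// caller's service):
+//
+//	cc := sess.ClientConfig("s3", &aws.Config{Region: aws.String("us-east-1")})
+//	_ = cc.Endpoint // the resolved endpoint URL for the service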
+func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config {
+ s = s.Copy(cfgs...)
+
+ resolvedRegion := normalizeRegion(s.Config)
+
+ region := aws.StringValue(s.Config.Region)
+ resolved, err := s.resolveEndpoint(service, region, resolvedRegion, s.Config)
+ if err != nil {
+ s.Handlers.Validate.PushBack(func(r *request.Request) {
+ if len(r.ClientInfo.Endpoint) != 0 {
+ // Error occurred while resolving endpoint, but the request
+ // being invoked has had an endpoint specified after the client
+ // was created.
+ return
+ }
+ r.Error = err
+ })
+ }
+
+ return client.Config{
+ Config: s.Config,
+ Handlers: s.Handlers,
+ PartitionID: resolved.PartitionID,
+ Endpoint: resolved.URL,
+ SigningRegion: resolved.SigningRegion,
+ SigningNameDerived: resolved.SigningNameDerived,
+ SigningName: resolved.SigningName,
+ ResolvedRegion: resolvedRegion,
+ }
+}
+
+func (s *Session) resolveEndpoint(service, region, resolvedRegion string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) {
+
+ if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 {
+ return endpoints.ResolvedEndpoint{
+ URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)),
+ SigningRegion: region,
+ }, nil
+ }
+
+ resolved, err := cfg.EndpointResolver.EndpointFor(service, region,
+ func(opt *endpoints.Options) {
+ opt.DisableSSL = aws.BoolValue(cfg.DisableSSL)
+ opt.UseDualStack = aws.BoolValue(cfg.UseDualStack)
+ opt.UseDualStackEndpoint = cfg.UseDualStackEndpoint
+
+ // Support for S3UsEast1RegionalEndpoint where the S3UsEast1RegionalEndpoint is
+ // provided in envConfig or sharedConfig with envConfig getting
+ // precedence.
+ opt.S3UsEast1RegionalEndpoint = cfg.S3UsEast1RegionalEndpoint
+
+ // Support the condition where the service is modeled but its
+ // endpoint metadata is not available.
+ opt.ResolveUnknownService = true
+
+ opt.ResolvedRegion = resolvedRegion
+
+ opt.Logger = cfg.Logger
+ opt.LogDeprecated = cfg.LogLevel.Matches(aws.LogDebugWithDeprecated)
+ },
+ )
+ if err != nil {
+ return endpoints.ResolvedEndpoint{}, err
+ }
+
+ return resolved, nil
+}
+
+// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception
+// that the EndpointResolver will not be used to resolve the endpoint. The only
+// endpoint set must come from the aws.Config.Endpoint field.
+func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config {
+ s = s.Copy(cfgs...)
+
+ resolvedRegion := normalizeRegion(s.Config)
+
+ var resolved endpoints.ResolvedEndpoint
+ if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 {
+ resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL))
+ resolved.SigningRegion = aws.StringValue(s.Config.Region)
+ }
+
+ return client.Config{
+ Config: s.Config,
+ Handlers: s.Handlers,
+ Endpoint: resolved.URL,
+ SigningRegion: resolved.SigningRegion,
+ SigningNameDerived: resolved.SigningNameDerived,
+ SigningName: resolved.SigningName,
+ ResolvedRegion: resolvedRegion,
+ }
+}
+
+// logDeprecatedNewSessionError logs the session creation error and installs a
+// handler that fails all requests made with the session.
+func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) {
+ // Session creation failed, need to report the error and prevent
+ // any requests from succeeding.
+ s.Config.MergeIn(cfgs...)
+ s.Config.Logger.Log("ERROR:", msg, "Error:", err)
+ s.Handlers.Validate.PushBack(func(r *request.Request) {
+ r.Error = err
+ })
+}
+
+// normalizeRegion resolves / normalizes the configured region (converts pseudo
+// fips regions) and returns the resolved region name.
+func normalizeRegion(cfg *aws.Config) (resolved string) {
+	const fipsInfix = "-fips-"
+	const fipsPrefix = "fips-"
+	const fipsSuffix = "-fips"
+
+ region := aws.StringValue(cfg.Region)
+
+ if strings.Contains(region, fipsInfix) ||
+ strings.Contains(region, fipsPrefix) ||
+ strings.Contains(region, fipsSuffix) {
+		resolved = strings.Replace(strings.Replace(strings.Replace(
+			region, fipsInfix, "-", -1), fipsSuffix, "", -1), fipsPrefix, "", -1)
+ // cfg.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
+ }
+
+ return resolved
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/shared_config.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/shared_config.go
new file mode 100644
index 0000000000000..2cdf97ae9788c
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/session/shared_config.go
@@ -0,0 +1,447 @@
+package session
+
+import (
+ "fmt"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+ "github.com/IBM/ibm-cos-sdk-go/aws/endpoints"
+ "github.com/IBM/ibm-cos-sdk-go/internal/ini"
+)
+
+const (
+ // Static Credentials group
+ accessKeyIDKey = `aws_access_key_id` // group required
+ secretAccessKey = `aws_secret_access_key` // group required
+ sessionTokenKey = `aws_session_token` // optional
+
+ // Assume Role Credentials group
+ roleArnKey = `role_arn` // group required
+ sourceProfileKey = `source_profile` // group required (or credential_source)
+ credentialSourceKey = `credential_source` // group required (or source_profile)
+ externalIDKey = `external_id` // optional
+ mfaSerialKey = `mfa_serial` // optional
+ roleSessionNameKey = `role_session_name` // optional
+ roleDurationSecondsKey = "duration_seconds" // optional
+
+ // Additional Config fields
+ regionKey = `region`
+
+ // custom CA Bundle filename
+ customCABundleKey = `ca_bundle`
+
+ // endpoint discovery group
+ enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional
+
+ // External Credential Process
+ credentialProcessKey = `credential_process` // optional
+
+ // Additional config fields for regional or legacy endpoints
+ stsRegionalEndpointSharedKey = `sts_regional_endpoints`
+
+ // Additional config fields for regional or legacy endpoints
+ s3UsEast1RegionalSharedKey = `s3_us_east_1_regional_endpoint`
+
+ // DefaultSharedConfigProfile is the default profile to be used when
+ // loading configuration from the config files if another profile name
+ // is not provided.
+ DefaultSharedConfigProfile = `default`
+
+ // S3 ARN Region Usage
+ s3UseARNRegionKey = "s3_use_arn_region"
+ // Use DualStack Endpoint Resolution
+ useDualStackEndpoint = "use_dualstack_endpoint"
+)
+
+// sharedConfig represents the configuration fields of the SDK config files.
+type sharedConfig struct {
+ Profile string
+
+ // Credentials values from the config file. Both aws_access_key_id and
+ // aws_secret_access_key must be provided together in the same file to be
+ // considered valid. The values will be ignored if not a complete group.
+ // aws_session_token is an optional field that can be provided if both of
+ // the other two fields are also provided.
+ //
+ // aws_access_key_id
+ // aws_secret_access_key
+ // aws_session_token
+ Creds credentials.Value
+
+ CredentialSource string
+ CredentialProcess string
+
+ RoleARN string
+ RoleSessionName string
+ ExternalID string
+ MFASerial string
+
+ SourceProfileName string
+ SourceProfile *sharedConfig
+
+ // Region is the region the SDK should use for looking up AWS service
+ // endpoints and signing requests.
+ //
+ // region
+ Region string
+
+ // CustomCABundle is the file path to a PEM file the SDK will read and
+ // use to configure the HTTP transport with additional CA certs that are
+ // not present in the platforms default CA store.
+ //
+ // This value will be ignored if the file does not exist.
+ //
+ // ca_bundle
+ CustomCABundle string
+
+ // EnableEndpointDiscovery can be enabled in the shared config by setting
+ // endpoint_discovery_enabled to true
+ //
+ // endpoint_discovery_enabled = true
+ EnableEndpointDiscovery *bool
+
+ // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service
+ //
+ // s3_us_east_1_regional_endpoint = regional
+	// This can take the value `LegacyS3UsEast1Endpoint` or `RegionalS3UsEast1Endpoint`
+ S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint
+
+ // Specifies if the S3 service should allow ARNs to direct the region
+ // the client's requests are sent to.
+ //
+ // s3_use_arn_region=true
+ S3UseARNRegion bool
+ // use_dualstack_endpoint=true
+ UseDualStackEndpoint endpoints.DualStackEndpointState
+}
+
+type sharedConfigFile struct {
+ Filename string
+ IniData ini.Sections
+}
+
+// loadSharedConfig retrieves the configuration from the list of files using
+// the profile provided. The order the files are listed will determine
+// precedence. Values in subsequent files will overwrite values defined in
+// earlier files.
+//
+// For example, given two files A and B. Both define credentials. If the order
+// of the files are A then B, B's credential values will be used instead of
+// A's.
+//
+// See sharedConfig.setFromFile for information how the config files
+// will be loaded.
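+//
+// For illustration, a sketch of loading the default profile from the standard
+// files (the defaults package provides the file names, as used in session.go):
+//
+//	cfg, err := loadSharedConfig("default", []string{
+//		defaults.SharedConfigFilename(),
+//		defaults.SharedCredentialsFilename(),
+//	}, true)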
+func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) {
+ if len(profile) == 0 {
+ profile = DefaultSharedConfigProfile
+ }
+
+ files, err := loadSharedConfigIniFiles(filenames)
+ if err != nil {
+ return sharedConfig{}, err
+ }
+
+ cfg := sharedConfig{}
+ profiles := map[string]struct{}{}
+ if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil {
+ return sharedConfig{}, err
+ }
+
+ return cfg, nil
+}
+
+func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
+ files := make([]sharedConfigFile, 0, len(filenames))
+
+ for _, filename := range filenames {
+ sections, err := ini.OpenFile(filename)
+ if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile {
+ // Skip files which can't be opened and read for whatever reason
+ continue
+ } else if err != nil {
+ return nil, SharedConfigLoadError{Filename: filename, Err: err}
+ }
+
+ files = append(files, sharedConfigFile{
+ Filename: filename, IniData: sections,
+ })
+ }
+
+ return files, nil
+}
+
+func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error {
+ cfg.Profile = profile
+
+	// Apply config from each file, skipping files where the profile is not found.
+ var skippedFiles int
+ var profileNotFoundErr error
+ for _, f := range files {
+ if err := cfg.setFromIniFile(profile, f, exOpts); err != nil {
+ if _, ok := err.(SharedConfigProfileNotExistsError); ok {
+ // Ignore profiles not defined in individual files.
+ profileNotFoundErr = err
+ skippedFiles++
+ continue
+ }
+ return err
+ }
+ }
+ if skippedFiles == len(files) {
+ // If all files were skipped because the profile is not found, return
+ // the original profile not found error.
+ return profileNotFoundErr
+ }
+
+ profiles[profile] = struct{}{}
+
+ if err := cfg.validateCredentialType(); err != nil {
+ return err
+ }
+
+ // Link source profiles for assume roles
+ if len(cfg.SourceProfileName) != 0 {
+ // Linked profile via source_profile ignore credential provider
+ // options, the source profile must provide the credentials.
+ cfg.clearCredentialOptions()
+
+ srcCfg := &sharedConfig{}
+ err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts)
+ if err != nil {
+ return err
+ }
+
+ cfg.SourceProfile = srcCfg
+ }
+
+ return nil
+}
+
+// setFromFile loads the configuration from the file using the profile
+// provided. A sharedConfig pointer type value is used so that multiple config
+// file loadings can be chained.
+//
+// Only loads complete logically grouped values, and will not set fields in cfg
+// for incomplete grouped values in the config. Such as credentials. For
+// example if a config file only includes aws_access_key_id but no
+// aws_secret_access_key the aws_access_key_id will be ignored.
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error {
+ section, ok := file.IniData.GetSection(profile)
+ if !ok {
+		// Fall back to the alternate profile name: profile <name>
+ section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
+ if !ok {
+ return SharedConfigProfileNotExistsError{Profile: profile, Err: nil}
+ }
+ }
+
+ if exOpts {
+ // Assume Role Parameters
+ updateString(&cfg.RoleARN, section, roleArnKey)
+ updateString(&cfg.ExternalID, section, externalIDKey)
+ updateString(&cfg.MFASerial, section, mfaSerialKey)
+ updateString(&cfg.RoleSessionName, section, roleSessionNameKey)
+ updateString(&cfg.SourceProfileName, section, sourceProfileKey)
+ updateString(&cfg.CredentialSource, section, credentialSourceKey)
+ updateString(&cfg.Region, section, regionKey)
+ updateString(&cfg.CustomCABundle, section, customCABundleKey)
+
+ if v := section.String(s3UsEast1RegionalSharedKey); len(v) != 0 {
+ sre, err := endpoints.GetS3UsEast1RegionalEndpoint(v)
+ if err != nil {
+ return fmt.Errorf("failed to load %s from shared config, %s, %v",
+ s3UsEast1RegionalSharedKey, file.Filename, err)
+ }
+ cfg.S3UsEast1RegionalEndpoint = sre
+ }
+ }
+
+ updateString(&cfg.CredentialProcess, section, credentialProcessKey)
+
+ // Shared Credentials
+ creds := credentials.Value{
+ AccessKeyID: section.String(accessKeyIDKey),
+ SecretAccessKey: section.String(secretAccessKey),
+ SessionToken: section.String(sessionTokenKey),
+ ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
+ }
+ if creds.HasKeys() {
+ cfg.Creds = creds
+ }
+
+ // `credential_process`
+ if credProc := section.String(credentialProcessKey); len(credProc) > 0 {
+ cfg.CredentialProcess = credProc
+ }
+
+ // Region
+ if v := section.String(regionKey); len(v) > 0 {
+ cfg.Region = v
+ }
+
+ // Endpoint discovery
+ updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey)
+
+ updateBool(&cfg.S3UseARNRegion, section, s3UseARNRegionKey)
+
+ return nil
+}
+
+func (cfg *sharedConfig) validateCredentialType() error {
+ // Only one or no credential type can be defined.
+ if !oneOrNone(
+ len(cfg.SourceProfileName) != 0,
+ len(cfg.CredentialSource) != 0,
+ len(cfg.CredentialProcess) != 0,
+ ) {
+ return ErrSharedConfigSourceCollision
+ }
+
+ return nil
+}
+
+func (cfg *sharedConfig) hasCredentials() bool {
+ switch {
+ case len(cfg.SourceProfileName) != 0:
+ case len(cfg.CredentialSource) != 0:
+ case len(cfg.CredentialProcess) != 0:
+ case cfg.Creds.HasKeys():
+ default:
+ return false
+ }
+
+ return true
+}
+
+func (cfg *sharedConfig) clearCredentialOptions() {
+ cfg.CredentialSource = ""
+ cfg.CredentialProcess = ""
+ cfg.Creds = credentials.Value{}
+}
+
+func oneOrNone(bs ...bool) bool {
+ var count int
+
+ for _, b := range bs {
+ if b {
+ count++
+ if count > 1 {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// updateString will only update the dst with the value in the section key, if
+// the key is present in the section.
+func updateString(dst *string, section ini.Section, key string) {
+ if !section.Has(key) {
+ return
+ }
+ *dst = section.String(key)
+}
+
+// updateBool will only update the dst with the value in the section key, if
+// the key is present in the section.
+func updateBool(dst *bool, section ini.Section, key string) {
+ if !section.Has(key) {
+ return
+ }
+ *dst = section.Bool(key)
+}
+
+// updateBoolPtr will only update the dst with the value in the section key,
+// if the key is present in the section.
+func updateBoolPtr(dst **bool, section ini.Section, key string) {
+ if !section.Has(key) {
+ return
+ }
+ *dst = new(bool)
+ **dst = section.Bool(key)
+}
+
+// SharedConfigLoadError is an error for when the shared config file failed to load.
+type SharedConfigLoadError struct {
+ Filename string
+ Err error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigLoadError) Code() string {
+ return "SharedConfigLoadError"
+}
+
+// Message is the description of the error
+func (e SharedConfigLoadError) Message() string {
+ return fmt.Sprintf("failed to load config file, %s", e.Filename)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigLoadError) OrigErr() error {
+ return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigLoadError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigProfileNotExistsError is an error for the shared config when
+// the profile was not found in the config file.
+type SharedConfigProfileNotExistsError struct {
+ Profile string
+ Err error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigProfileNotExistsError) Code() string {
+ return "SharedConfigProfileNotExistsError"
+}
+
+// Message is the description of the error
+func (e SharedConfigProfileNotExistsError) Message() string {
+ return fmt.Sprintf("failed to get profile, %s", e.Profile)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigProfileNotExistsError) OrigErr() error {
+ return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigProfileNotExistsError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigAssumeRoleError is an error for the shared config when the
+// profile contains assume role information, but that information is invalid
+// or not complete.
+type SharedConfigAssumeRoleError struct {
+ RoleARN string
+ SourceProfile string
+}
+
+// Code is the short id of the error.
+func (e SharedConfigAssumeRoleError) Code() string {
+ return "SharedConfigAssumeRoleError"
+}
+
+// Message is the description of the error
+func (e SharedConfigAssumeRoleError) Message() string {
+ return fmt.Sprintf(
+ "failed to load assume role for %s, source profile %s has no shared credentials",
+ e.RoleARN, e.SourceProfile,
+ )
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigAssumeRoleError) OrigErr() error {
+ return nil
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigAssumeRoleError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/ibmiam/common.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/ibmiam/common.go
new file mode 100644
index 0000000000000..37ab64d04e4a8
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/ibmiam/common.go
@@ -0,0 +1,17 @@
+package ibmiam
+
+// IBM COS SDK Code -- START
+const (
+ // LOGGER constants
+ debugLog = "<DEBUG>"
+ signRequestHandlerLog = "ibmiam.SignRequestHandler"
+
+ // Error constants
+ errorExpectedNotFound = "Error Expected Not Found"
+ errorNotMatch = "Error not match"
+
+ // Global constant
+ operation = "Operation"
+)
+
+// IBM COS SDK Code -- END
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/ibmiam/ibmiam.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/ibmiam/ibmiam.go
new file mode 100644
index 0000000000000..1b6e78b774c3a
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/ibmiam/ibmiam.go
@@ -0,0 +1,95 @@
+package ibmiam
+
+// IBM COS SDK Code -- START
+import (
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// SignRequestHandler is a named request handler the SDK will use to sign
+// service client request with using the IBM IAM signature.
+var SignRequestHandler = request.NamedHandler{
+ Name: signRequestHandlerLog, Fn: Sign,
+}
+
+var (
+ // Errors for Sign Request Handler
+ errTokenTypeNotSet = awserr.New(signRequestHandlerLog, "Token Type Not Set", nil)
+ errAccessTokenNotSet = awserr.New(signRequestHandlerLog, "Access Token Not Set", nil)
+ errServiceInstanceIDNotSet = awserr.New(signRequestHandlerLog, "Service Instance Id Not Set", nil)
+)
+
+// Sign signs IBM IAM requests with the token type, access token, and service
+// instance id of the credentials at the time the request is signed.
+//
+// Sign does not return a value. If retrieving the credentials fails, or the
+// token type or access token is not set, the request's Error field is set and
+// its signed header values are cleared.
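+//
+// For illustration, registering this handler on a handler list (sketch):
+//
+//	handlers.Sign.PushBackNamed(ibmiam.SignRequestHandler)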
+func Sign(req *request.Request) {
+
+ // Sets the logger for the Request to be signed
+ logger := req.Config.Logger
+ if !req.Config.LogLevel.Matches(aws.LogDebug) {
+ logger = nil
+ }
+
+ // Obtains the IBM IAM Credentials Object
+	// The object includes:
+ // IBM IAM Token
+ // IBM IAM Service Instance ID
+ value, err := req.Config.Credentials.Get()
+ if err != nil {
+ if logger != nil {
+ logger.Log(debugLog, signRequestHandlerLog, "CREDENTIAL GET ERROR", err)
+ }
+ req.Error = err
+ req.SignedHeaderVals = nil
+ return
+ }
+
+ // Check the type of the Token
+ // If does not exist, return with an error in the request
+	// If it does not exist, return with an error in the request
+ err = errTokenTypeNotSet
+ if logger != nil {
+ logger.Log(debugLog, err)
+ }
+ req.Error = err
+ req.SignedHeaderVals = nil
+ return
+ }
+
+ // Checks the Access Token
+	// If it does not exist, return with an error in the request
+ if value.AccessToken == "" {
+ err = errAccessTokenNotSet
+ if logger != nil {
+ logger.Log(debugLog, err)
+ }
+ req.Error = err
+ req.SignedHeaderVals = nil
+ return
+ }
+
+ // Get the Service Instance ID from the IBM IAM Credentials object
+ serviceInstanceID := req.HTTPRequest.Header.Get("ibm-service-instance-id")
+ if serviceInstanceID == "" && value.ServiceInstanceID != "" {
+ // Log the Service Instance ID
+ if logger != nil {
+ logger.Log(debugLog, "Setting the 'ibm-service-instance-id' from the Credentials")
+
+ }
+ req.HTTPRequest.Header.Set("ibm-service-instance-id", value.ServiceInstanceID)
+ }
+
+ // Use the IBM IAM Token Bearer as the Authorization Header
+ authString := value.TokenType + " " + value.AccessToken
+ req.HTTPRequest.Header.Set("Authorization", authString)
+ if logger != nil {
+ logger.Log(debugLog, signRequestHandlerLog, "Set Header Authorization", authString)
+ }
+}
+
+// IBM COS SDK Code -- END
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/signer_router.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/signer_router.go
new file mode 100644
index 0000000000000..d5fd68735ba58
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/signer_router.go
@@ -0,0 +1,112 @@
+package signer
+
+import (
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+ "github.com/IBM/ibm-cos-sdk-go/aws/signer/ibmiam"
+ "github.com/IBM/ibm-cos-sdk-go/aws/signer/v4"
+)
+
+const (
+ debugLog = "<DEBUG>"
+ signerRouterLog = "SignerRouter"
+)
+
+type requestSignerRouter struct {
+ signers map[string]request.NamedHandler
+}
+
+// SignRequestHandler is a handler that routes the request to a signer according to the credential type
+var SignRequestHandler = defaultRequestSignerRouter()
+
+// DefaultSignerHandlerForProviderType a map with default handlers per credential type
+var DefaultSignerHandlerForProviderType = map[string]request.NamedHandler{
+ "": v4.SignRequestHandler,
+ "oauth": ibmiam.SignRequestHandler,
+ "v4": v4.SignRequestHandler,
+}
+
+func defaultRequestSignerRouter() request.NamedHandler {
+ router := requestSignerRouter{signers: make(map[string]request.NamedHandler)}
+ for k, v := range DefaultSignerHandlerForProviderType {
+ router.signers[k] = v
+ }
+ return request.NamedHandler{
+ Name: "signer.requestsignerrouter", Fn: router.delegateRequestToSigner,
+ }
+}
+
+// To stay as close as possible to the AWS template, the method
+// SignSDKRequestWithCurrentTime had to be made public.
+
+// CustomRequestSignerRouter routes the request to a signer according to the current credentials type
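+//
+// For illustration, a sketch that customizes the v4 signer and swaps the
+// session's sign handler (DisableURIPathEscaping is assumed to exist on
+// v4.Signer as in the upstream AWS SDK):
+//
+//	h := signer.CustomRequestSignerRouter(func(s *v4.Signer) {
+//		s.DisableURIPathEscaping = true
+//	})
+//	sess.Handlers.Sign.SwapNamed(h)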
+func CustomRequestSignerRouter(opts ...func(*v4.Signer)) request.NamedHandler {
+
+ router := requestSignerRouter{signers: make(map[string]request.NamedHandler)}
+ for k, v := range DefaultSignerHandlerForProviderType {
+ router.signers[k] = v
+ }
+
+ customV4Handler := request.NamedHandler{
+ Name: v4.SignRequestHandler.Name,
+ Fn: func(req *request.Request) {
+ v4.SignSDKRequestWithCurrentTime(req, time.Now, opts...)
+ },
+ }
+
+ router.signers[""] = customV4Handler
+ router.signers["v4"] = customV4Handler
+
+ return request.NamedHandler{
+ Name: SignRequestHandler.Name, Fn: router.delegateRequestToSigner,
+ }
+}
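+
+// A minimal usage sketch of swapping in the custom router (the client value
+// svc is hypothetical, and this assumes the upstream SDK's
+// request.HandlerList SwapNamed helper is available):
+//
+//	handler := signer.CustomRequestSignerRouter(v4.WithUnsignedPayload)
+//	svc.Handlers.Sign.SwapNamed(handler)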
+
+// use the request's config to access configuration and logging settings
+func (r requestSignerRouter) delegateRequestToSigner(req *request.Request) {
+
+ logger := req.Config.Logger
+ if !req.Config.LogLevel.Matches(aws.LogDebug) {
+ logger = nil
+ }
+
+ if req.Config.Credentials == credentials.AnonymousCredentials {
+ if logger != nil {
+ logger.Log(debugLog, signerRouterLog, "AnonymousCredentials")
+ }
+ return
+ }
+
+ value, err := req.Config.Credentials.Get()
+ if err != nil {
+ if logger != nil {
+ logger.Log(debugLog, signerRouterLog, "CREDENTIAL GET ERROR", err)
+ }
+ req.Error = err
+ req.SignedHeaderVals = nil
+ return
+ }
+
+ if logger != nil {
+ logger.Log(debugLog, signerRouterLog, "Provider Type", value.ProviderType)
+ }
+
+ if handler, ok := r.signers[value.ProviderType]; ok {
+ if logger != nil {
+ logger.Log(debugLog, signerRouterLog, "Delegating to", handler.Name)
+ }
+ handler.Fn(req)
+ } else {
+ err = awserr.New("SignerRouterMissingHandler", "No Handler Found for Type "+value.ProviderType, nil)
+ if logger != nil {
+ logger.Log(debugLog, signerRouterLog, "No Handler Found", err)
+ }
+ req.Error = err
+ req.SignedHeaderVals = nil
+ return
+ }
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/header_rules.go
new file mode 100644
index 0000000000000..485261fcfef20
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/header_rules.go
@@ -0,0 +1,81 @@
+package v4
+
+import (
+ "github.com/IBM/ibm-cos-sdk-go/internal/strings"
+)
+
+// rules houses a set of rules needed for validation of a
+// string value
+type rules []rule
+
+// rule interface allows for more flexible rules and simply
+// checks whether or not a value adheres to that rule
+type rule interface {
+ IsValid(value string) bool
+}
+
+// IsValid will iterate through all rules and see if any rules
+// apply to the value and supports nested rules
+func (r rules) IsValid(value string) bool {
+ for _, rule := range r {
+ if rule.IsValid(value) {
+ return true
+ }
+ }
+ return false
+}
+
+// mapRule generic rule for maps
+type mapRule map[string]struct{}
+
+// IsValid for the map rule satisfies whether it exists in the map
+func (m mapRule) IsValid(value string) bool {
+ _, ok := m[value]
+ return ok
+}
+
+// allowList is a generic rule for allow listing
+type allowList struct {
+ rule
+}
+
+// IsValid for allow list checks if the value is within the allow list
+func (w allowList) IsValid(value string) bool {
+ return w.rule.IsValid(value)
+}
+
+// excludeList is a generic rule for exclude listing
+type excludeList struct {
+ rule
+}
+
+// IsValid for exclude list checks if the value is within the exclude list
+func (b excludeList) IsValid(value string) bool {
+ return !b.rule.IsValid(value)
+}
+
+type patterns []string
+
+// IsValid for patterns checks each pattern and returns if a match has
+// been found
+func (p patterns) IsValid(value string) bool {
+ for _, pattern := range p {
+ if strings.HasPrefixFold(value, pattern) {
+ return true
+ }
+ }
+ return false
+}
+
+// inclusiveRules rules allow for rules to depend on one another
+type inclusiveRules []rule
+
+// IsValid will return true if all rules are true
+func (r inclusiveRules) IsValid(value string) bool {
+ for _, rule := range r {
+ if !rule.IsValid(value) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/options.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/options.go
new file mode 100644
index 0000000000000..6aa2ed241bb13
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/options.go
@@ -0,0 +1,7 @@
+package v4
+
+// WithUnsignedPayload will enable and set the UnsignedPayload field to
+// true of the signer.
+func WithUnsignedPayload(v4 *Signer) {
+ v4.UnsignedPayload = true
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/request_context_go1.7.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/request_context_go1.7.go
new file mode 100644
index 0000000000000..3b82ba8c6316c
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/request_context_go1.7.go
@@ -0,0 +1,14 @@
+//go:build go1.7
+// +build go1.7
+
+package v4
+
+import (
+ "net/http"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+)
+
+func requestContext(r *http.Request) aws.Context {
+ return r.Context()
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/stream.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/stream.go
new file mode 100644
index 0000000000000..e0f5d86c5ca09
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/stream.go
@@ -0,0 +1,63 @@
+package v4
+
+import (
+ "encoding/hex"
+ "strings"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+)
+
+type credentialValueProvider interface {
+ Get() (credentials.Value, error)
+}
+
+// StreamSigner implements signing of event stream encoded payloads
+type StreamSigner struct {
+ region string
+ service string
+
+ credentials credentialValueProvider
+
+ prevSig []byte
+}
+
+// NewStreamSigner creates a SigV4 signer used to sign Event Stream encoded messages
+func NewStreamSigner(region, service string, seedSignature []byte, credentials *credentials.Credentials) *StreamSigner {
+ return &StreamSigner{
+ region: region,
+ service: service,
+ credentials: credentials,
+ prevSig: seedSignature,
+ }
+}
+
+// GetSignature takes an event stream encoded headers and payload and returns a signature
+func (s *StreamSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) {
+ credValue, err := s.credentials.Get()
+ if err != nil {
+ return nil, err
+ }
+
+ sigKey := deriveSigningKey(s.region, s.service, credValue.SecretAccessKey, date)
+
+ keyPath := buildSigningScope(s.region, s.service, date)
+
+ stringToSign := buildEventStreamStringToSign(headers, payload, s.prevSig, keyPath, date)
+
+ signature := hmacSHA256(sigKey, []byte(stringToSign))
+ s.prevSig = signature
+
+ return signature, nil
+}
+
+func buildEventStreamStringToSign(headers, payload, prevSig []byte, scope string, date time.Time) string {
+ return strings.Join([]string{
+ "AWS4-HMAC-SHA256-PAYLOAD",
+ formatTime(date),
+ scope,
+ hex.EncodeToString(prevSig),
+ hex.EncodeToString(hashSHA256(headers)),
+ hex.EncodeToString(hashSHA256(payload)),
+ }, "\n")
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/uri_path.go
new file mode 100644
index 0000000000000..7711ec7377f1d
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/uri_path.go
@@ -0,0 +1,25 @@
+//go:build go1.5
+// +build go1.5
+
+package v4
+
+import (
+ "net/url"
+ "strings"
+)
+
+func getURIPath(u *url.URL) string {
+ var uri string
+
+ if len(u.Opaque) > 0 {
+ uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
+ } else {
+ uri = u.EscapedPath()
+ }
+
+ if len(uri) == 0 {
+ uri = "/"
+ }
+
+ return uri
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/v4.go
new file mode 100644
index 0000000000000..abc15a248b09d
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/signer/v4/v4.go
@@ -0,0 +1,853 @@
+// Package v4 implements signing for AWS V4 signer
+//
+// Provides request signing for requests that need to be signed with
+// AWS V4 Signatures.
+//
+// Standalone Signer
+//
+// Generally using the signer outside of the SDK should not require any additional
+// logic when using Go v1.5 or higher. The signer does this by taking advantage
+// of the URL.EscapedPath method. If your request URI requires additional escaping
+// you may need to use the URL.Opaque to define what the raw URI should be sent
+// to the service as.
+//
+// The signer will first check the URL.Opaque field, and use its value if set.
+// The signer does require the URL.Opaque field to be set in the form of:
+//
+// "//<hostname>/<path>"
+//
+// // e.g.
+// "//example.com/some/path"
+//
+// The leading "//" and hostname are required or the URL.Opaque escaping will
+// not work correctly.
+//
+// If URL.Opaque is not set the signer will fall back to the URL.EscapedPath()
+// method and use the returned value. If you're using Go v1.4 you must set
+// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
+// Go v1.5 the signer will fall back to URL.Path.
+//
+// AWS v4 signature validation requires that the canonical string's URI path
+// element must be the URI escaped form of the HTTP request's path.
+// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+//
+// The Go HTTP client will perform escaping automatically on the request. Some
+// of this escaping may cause signature validation errors because the HTTP
+// request differs from the URI path or query for which the signature was generated.
+// https://golang.org/pkg/net/url/#URL.EscapedPath
+//
+// Because of this, when using the signer outside of the SDK it is recommended
+// to explicitly escape the request prior to signing, which
+// will help prevent signature validation errors. This can be done by setting
+// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
+// call URL.EscapedPath() if Opaque is not set.
+//
+// If signing a request intended for an HTTP2 server, and you're using Go 1.6.2
+// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the
+// request URL. https://github.com/golang/go/issues/16847 points to a bug in
+// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP
+// message. URL.Opaque generally will force Go to make requests with absolute URL.
+// URL.RawPath does not do this, but RawPath must be a valid escaping of Path
+// or url.EscapedPath will ignore the RawPath escaping.
+//
+// Test `TestStandaloneSign` provides a complete example of using the signer
+// outside of the SDK and pre-escaping the URI path.
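+//
+// A minimal standalone-signing sketch (the credentials, endpoint, service
+// name, and region below are illustrative placeholders, not values defined
+// by this package; the credentials, net/http, and time packages are assumed
+// to be imported):
+//
+//	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
+//	signer := v4.NewSigner(creds)
+//	req, _ := http.NewRequest("GET", "https://example.com/some/path", nil)
+//	_, err := signer.Sign(req, nil, "s3", "us-east-1", time.Now())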
+package v4
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/credentials"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+ "github.com/IBM/ibm-cos-sdk-go/internal/sdkio"
+ "github.com/IBM/ibm-cos-sdk-go/private/protocol/rest"
+)
+
+const (
+ authorizationHeader = "Authorization"
+ authHeaderSignatureElem = "Signature="
+ signatureQueryKey = "X-Amz-Signature"
+
+ authHeaderPrefix = "AWS4-HMAC-SHA256"
+ timeFormat = "20060102T150405Z"
+ shortTimeFormat = "20060102"
+ awsV4Request = "aws4_request"
+
+ // emptyStringSHA256 is a SHA256 of an empty string
+ emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
+)
+
+var ignoredHeaders = rules{
+ excludeList{
+ mapRule{
+ authorizationHeader: struct{}{},
+ "User-Agent": struct{}{},
+ "X-Amzn-Trace-Id": struct{}{},
+ },
+ },
+}
+
+// requiredSignedHeaders is an allow list for building canonical headers.
+var requiredSignedHeaders = rules{
+ allowList{
+ mapRule{
+ "Cache-Control": struct{}{},
+ "Content-Disposition": struct{}{},
+ "Content-Encoding": struct{}{},
+ "Content-Language": struct{}{},
+ "Content-Md5": struct{}{},
+ "Content-Type": struct{}{},
+ "Expires": struct{}{},
+ "If-Match": struct{}{},
+ "If-Modified-Since": struct{}{},
+ "If-None-Match": struct{}{},
+ "If-Unmodified-Since": struct{}{},
+ "Range": struct{}{},
+ "X-Amz-Acl": struct{}{},
+ "X-Amz-Copy-Source": struct{}{},
+ "X-Amz-Copy-Source-If-Match": struct{}{},
+ "X-Amz-Copy-Source-If-Modified-Since": struct{}{},
+ "X-Amz-Copy-Source-If-None-Match": struct{}{},
+ "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
+ "X-Amz-Copy-Source-Range": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+ "X-Amz-Grant-Full-control": struct{}{},
+ "X-Amz-Grant-Read": struct{}{},
+ "X-Amz-Grant-Read-Acp": struct{}{},
+ "X-Amz-Grant-Write": struct{}{},
+ "X-Amz-Grant-Write-Acp": struct{}{},
+ "X-Amz-Metadata-Directive": struct{}{},
+ "X-Amz-Mfa": struct{}{},
+ "X-Amz-Request-Payer": struct{}{},
+ "X-Amz-Server-Side-Encryption": struct{}{},
+ "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+ "X-Amz-Storage-Class": struct{}{},
+ "X-Amz-Tagging": struct{}{},
+ "X-Amz-Website-Redirect-Location": struct{}{},
+ "X-Amz-Content-Sha256": struct{}{},
+ },
+ },
+ patterns{"X-Amz-Meta-"},
+}
+
+// allowedQueryHoisting is an allow list for building query headers, combining
+// an exclude list with a pattern rule.
+var allowedQueryHoisting = inclusiveRules{
+ excludeList{requiredSignedHeaders},
+ patterns{"X-Amz-"},
+}
+
+// Signer applies AWS v4 signing to given request. Use this to sign requests
+// that need to be signed with AWS V4 Signatures.
+type Signer struct {
+ // The authentication credentials the request will be signed against.
+ // This value must be set to sign requests.
+ Credentials *credentials.Credentials
+
+ // Sets the log level the signer should use when reporting information to
+ // the logger. If the logger is nil nothing will be logged. See
+ // aws.LogLevelType for more information on available logging levels
+ //
+ // By default nothing will be logged.
+ Debug aws.LogLevelType
+
+	// The logger that logging information will be written to. If the logger
+	// is nil, nothing will be logged.
+ Logger aws.Logger
+
+ // Disables the Signer's moving HTTP header key/value pairs from the HTTP
+ // request header to the request's query string. This is most commonly used
+ // with pre-signed requests preventing headers from being added to the
+ // request's query string.
+ DisableHeaderHoisting bool
+
+ // Disables the automatic escaping of the URI path of the request for the
+	// signature's canonical string's path. For services that do not need additional
+ // escaping then use this to disable the signer escaping the path.
+ //
+ // S3 is an example of a service that does not need additional escaping.
+ //
+ // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+ DisableURIPathEscaping bool
+
+	// Disables the automatic setting of the HTTP request's Body field with the
+ // io.ReadSeeker passed in to the signer. This is useful if you're using a
+ // custom wrapper around the body for the io.ReadSeeker and want to preserve
+ // the Body value on the Request.Body.
+ //
+ // This does run the risk of signing a request with a body that will not be
+	// sent in the request. You need to ensure that the underlying data of the Body
+	// values are the same.
+ DisableRequestBodyOverwrite bool
+
+ // currentTimeFn returns the time value which represents the current time.
+ // This value should only be used for testing. If it is nil the default
+ // time.Now will be used.
+ currentTimeFn func() time.Time
+
+ // UnsignedPayload will prevent signing of the payload. This will only
+ // work for services that have support for this.
+ UnsignedPayload bool
+}
+
+// NewSigner returns a Signer pointer configured with the credentials and optional
+// option values provided. If no options are provided the Signer will use its
+// default configuration.
+func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
+ v4 := &Signer{
+ Credentials: credentials,
+ }
+
+ for _, option := range options {
+ option(v4)
+ }
+
+ return v4
+}
+
+type signingCtx struct {
+ ServiceName string
+ Region string
+ Request *http.Request
+ Body io.ReadSeeker
+ Query url.Values
+ Time time.Time
+ ExpireTime time.Duration
+ SignedHeaderVals http.Header
+
+ DisableURIPathEscaping bool
+
+ credValues credentials.Value
+ isPresign bool
+ unsignedPayload bool
+
+ bodyDigest string
+ signedHeaders string
+ canonicalHeaders string
+ canonicalString string
+ credentialString string
+ stringToSign string
+ signature string
+ authorization string
+}
+
+// Sign signs AWS v4 requests with the provided body, service name, region the
+// request is made to, and time the request is signed at. The signTime allows
+// you to specify that a request is signed for the future, and cannot be
+// used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. Generally for signed requests this value
+// is not needed as the full request context will be captured by the http.Request
+// value. It is included for reference though.
+//
+// Sign will set the request's Body to be the `body` parameter passed in. If
+// the body is not already an io.ReadCloser, it will be wrapped within one. If
+// a `nil` body parameter is passed to Sign, the request's Body field will
+// also be set to nil. It's important to note that this functionality will not
+// change the request's ContentLength.
+//
+// Sign differs from Presign in that it will sign the request using HTTP
+// header values. This type of signing is intended for http.Request values that
+// will not be shared, or are shared in a way the header values on the request
+// will not be lost.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
+ return v4.signWithBody(r, body, service, region, 0, false, signTime)
+}
+
+// Presign signs AWS v4 requests with the provided body, service name, region
+// the request is made to, and time the request is signed at. The signTime
+// allows you to specify that a request is signed for the future, and cannot
+// be used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. For presigned requests these headers
+// and their values must be included on the HTTP request when it is made. This
+// is helpful to know what header values need to be shared with the party the
+// presigned request will be distributed to.
+//
+// Presign differs from Sign in that it will sign the request using query string
+// instead of header values. This allows you to share the Presigned Request's
+// URL with third parties, or distribute it throughout your system with minimal
+// dependencies.
+//
+// Presign also takes an exp value which is the duration the
+// signed request will be valid after the signing time. This allows you to
+// set when the request will expire.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+//
+// Presigning an S3 request will not compute the body's SHA256 hash by default.
+// This is because the general use case for S3 presigned URLs is to share
+// PUT/GET capabilities. If you would like to include the body's SHA256 in the
+// presigned request's signature you can set the "X-Amz-Content-Sha256"
+// HTTP header and that will be included in the request's signature.
+func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
+ return v4.signWithBody(r, body, service, region, exp, true, signTime)
+}
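+
+// A presign usage sketch (the bucket/key URL is an illustrative placeholder,
+// not an endpoint defined by this package):
+//
+//	req, _ := http.NewRequest("GET", "https://bucket.example.com/key", nil)
+//	_, err := signer.Presign(req, nil, "s3", "us-east-1", 15*time.Minute, time.Now())
+//	// On success, req.URL.RawQuery carries the X-Amz-* signing parameters.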
+
+func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) {
+ currentTimeFn := v4.currentTimeFn
+ if currentTimeFn == nil {
+ currentTimeFn = time.Now
+ }
+
+ ctx := &signingCtx{
+ Request: r,
+ Body: body,
+ Query: r.URL.Query(),
+ Time: signTime,
+ ExpireTime: exp,
+ isPresign: isPresign,
+ ServiceName: service,
+ Region: region,
+ DisableURIPathEscaping: v4.DisableURIPathEscaping,
+ unsignedPayload: v4.UnsignedPayload,
+ }
+
+ for key := range ctx.Query {
+ sort.Strings(ctx.Query[key])
+ }
+
+ if ctx.isRequestSigned() {
+ ctx.Time = currentTimeFn()
+ ctx.handlePresignRemoval()
+ }
+
+ var err error
+ ctx.credValues, err = v4.Credentials.GetWithContext(requestContext(r))
+ if err != nil {
+ return http.Header{}, err
+ }
+
+ ctx.sanitizeHostForHeader()
+ ctx.assignAmzQueryValues()
+ if err := ctx.build(v4.DisableHeaderHoisting); err != nil {
+ return nil, err
+ }
+
+ // If the request is not presigned the body should be attached to it. This
+ // prevents the confusion of wanting to send a signed request without
+ // the body the request was signed for attached.
+ if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) {
+ var reader io.ReadCloser
+ if body != nil {
+ var ok bool
+ if reader, ok = body.(io.ReadCloser); !ok {
+ reader = ioutil.NopCloser(body)
+ }
+ }
+ r.Body = reader
+ }
+
+ if v4.Debug.Matches(aws.LogDebugWithSigning) {
+ v4.logSigningInfo(ctx)
+ }
+
+ return ctx.SignedHeaderVals, nil
+}
+
+func (ctx *signingCtx) sanitizeHostForHeader() {
+ request.SanitizeHostForHeader(ctx.Request)
+}
+
+func (ctx *signingCtx) handlePresignRemoval() {
+ if !ctx.isPresign {
+ return
+ }
+
+	// The credentials have expired for this request. The current signing
+	// is invalid, and needs to be redone because the request will fail.
+ ctx.removePresign()
+
+	// Update the request's query string to ensure the values stay in
+	// sync in case retrieving the new credentials fails.
+ ctx.Request.URL.RawQuery = ctx.Query.Encode()
+}
+
+func (ctx *signingCtx) assignAmzQueryValues() {
+ if ctx.isPresign {
+ ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
+ if ctx.credValues.SessionToken != "" {
+ ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+ } else {
+ ctx.Query.Del("X-Amz-Security-Token")
+ }
+
+ return
+ }
+
+ if ctx.credValues.SessionToken != "" {
+ ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+ }
+}
+
+// SignRequestHandler is a named request handler the SDK will use to sign
+// service client request with using the V4 signature.
+var SignRequestHandler = request.NamedHandler{
+ Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
+}
+
+// SignSDKRequest signs an AWS request with the V4 signature. This
+// request handler should only be used with the SDK's built in service client's
+// API operation requests.
+//
+// This function should not be used on its own, but in conjunction with
+// an AWS service client's API operation call. To sign a standalone request
+// not created by a service client's API operation method use the "Sign" or
+// "Presign" functions of the "Signer" type.
+//
+// If the credentials of the request's config are set to
+// credentials.AnonymousCredentials the request will not be signed.
+func SignSDKRequest(req *request.Request) {
+ SignSDKRequestWithCurrentTime(req, time.Now)
+}
+
+// BuildNamedHandler will build a generic handler for signing.
+func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler {
+ return request.NamedHandler{
+ Name: name,
+ Fn: func(req *request.Request) {
+ SignSDKRequestWithCurrentTime(req, time.Now, opts...)
+ },
+ }
+}
+
+// SignSDKRequestWithCurrentTime will sign the SDK's request using the time
+// function passed in. Behaves the same as SignSDKRequest with the exception
+// the request is signed with the value returned by the current time function.
+func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
+	// If the AnonymousCredentials object is used, the request does not
+	// need to be signed, so skip signing.
+ if req.Config.Credentials == credentials.AnonymousCredentials {
+ return
+ }
+
+ region := req.ClientInfo.SigningRegion
+ if region == "" {
+ region = aws.StringValue(req.Config.Region)
+ }
+
+ name := req.ClientInfo.SigningName
+ if name == "" {
+ name = req.ClientInfo.ServiceName
+ }
+
+ v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
+ v4.Debug = req.Config.LogLevel.Value()
+ v4.Logger = req.Config.Logger
+ v4.DisableHeaderHoisting = req.NotHoist
+ v4.currentTimeFn = curTimeFn
+ if name == "s3" {
+ // S3 service should not have any escaping applied
+ v4.DisableURIPathEscaping = true
+ }
+		// Prevents setting the HTTPRequest's Body, since the Body could be
+		// wrapped in a custom io.Closer that we do not want to be stomped
+		// on top of by the signer.
+ v4.DisableRequestBodyOverwrite = true
+ })
+
+ for _, opt := range opts {
+ opt(v4)
+ }
+
+ curTime := curTimeFn()
+ signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
+ name, region, req.ExpireTime, req.ExpireTime > 0, curTime,
+ )
+ if err != nil {
+ req.Error = err
+ req.SignedHeaderVals = nil
+ return
+ }
+
+ req.SignedHeaderVals = signedHeaders
+ req.LastSignedAt = curTime
+}
+
+const logSignInfoMsg = `DEBUG: Request Signature:
+---[ CANONICAL STRING ]-----------------------------
+%s
+---[ STRING TO SIGN ]--------------------------------
+%s%s
+-----------------------------------------------------`
+const logSignedURLMsg = `
+---[ SIGNED URL ]------------------------------------
+%s`
+
+func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
+ signedURLMsg := ""
+ if ctx.isPresign {
+ signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String())
+ }
+ msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg)
+ v4.Logger.Log(msg)
+}
+
+func (ctx *signingCtx) build(disableHeaderHoisting bool) error {
+ ctx.buildTime() // no depends
+ ctx.buildCredentialString() // no depends
+
+ if err := ctx.buildBodyDigest(); err != nil {
+ return err
+ }
+
+ unsignedHeaders := ctx.Request.Header
+ if ctx.isPresign {
+ if !disableHeaderHoisting {
+ urlValues := url.Values{}
+ urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
+ for k := range urlValues {
+ ctx.Query[k] = urlValues[k]
+ }
+ }
+ }
+
+ ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
+ ctx.buildCanonicalString() // depends on canon headers / signed headers
+ ctx.buildStringToSign() // depends on canon string
+ ctx.buildSignature() // depends on string to sign
+
+ if ctx.isPresign {
+ ctx.Request.URL.RawQuery += "&" + signatureQueryKey + "=" + ctx.signature
+ } else {
+ parts := []string{
+ authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
+ "SignedHeaders=" + ctx.signedHeaders,
+ authHeaderSignatureElem + ctx.signature,
+ }
+ ctx.Request.Header.Set(authorizationHeader, strings.Join(parts, ", "))
+ }
+
+ return nil
+}
+
+// GetSignedRequestSignature attempts to extract the signature of the request.
+// Returning an error if the request is unsigned, or unable to extract the
+// signature.
+func GetSignedRequestSignature(r *http.Request) ([]byte, error) {
+
+ if auth := r.Header.Get(authorizationHeader); len(auth) != 0 {
+ ps := strings.Split(auth, ", ")
+ for _, p := range ps {
+ if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 {
+ sig := p[len(authHeaderSignatureElem):]
+ if len(sig) == 0 {
+ return nil, fmt.Errorf("invalid request signature authorization header")
+ }
+ return hex.DecodeString(sig)
+ }
+ }
+ }
+
+ if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 {
+ return hex.DecodeString(sig)
+ }
+
+ return nil, fmt.Errorf("request not signed")
+}
+
+func (ctx *signingCtx) buildTime() {
+ if ctx.isPresign {
+ duration := int64(ctx.ExpireTime / time.Second)
+ ctx.Query.Set("X-Amz-Date", formatTime(ctx.Time))
+ ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
+ } else {
+ ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time))
+ }
+}
+
+func (ctx *signingCtx) buildCredentialString() {
+ ctx.credentialString = buildSigningScope(ctx.Region, ctx.ServiceName, ctx.Time)
+
+ if ctx.isPresign {
+ ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString)
+ }
+}
+
+func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
+ query := url.Values{}
+ unsignedHeaders := http.Header{}
+ for k, h := range header {
+ if r.IsValid(k) {
+ query[k] = h
+ } else {
+ unsignedHeaders[k] = h
+ }
+ }
+
+ return query, unsignedHeaders
+}
+func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
+ var headers []string
+ headers = append(headers, "host")
+ for k, v := range header {
+ if !r.IsValid(k) {
+ continue // ignored header
+ }
+ if ctx.SignedHeaderVals == nil {
+ ctx.SignedHeaderVals = make(http.Header)
+ }
+
+ lowerCaseKey := strings.ToLower(k)
+ if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok {
+ // include additional values
+ ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...)
+ continue
+ }
+
+ headers = append(headers, lowerCaseKey)
+ ctx.SignedHeaderVals[lowerCaseKey] = v
+ }
+ sort.Strings(headers)
+
+ ctx.signedHeaders = strings.Join(headers, ";")
+
+ if ctx.isPresign {
+ ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders)
+ }
+
+ headerItems := make([]string, len(headers))
+ for i, k := range headers {
+ if k == "host" {
+ if ctx.Request.Host != "" {
+ headerItems[i] = "host:" + ctx.Request.Host
+ } else {
+ headerItems[i] = "host:" + ctx.Request.URL.Host
+ }
+ } else {
+ headerValues := make([]string, len(ctx.SignedHeaderVals[k]))
+ for i, v := range ctx.SignedHeaderVals[k] {
+ headerValues[i] = strings.TrimSpace(v)
+ }
+ headerItems[i] = k + ":" +
+ strings.Join(headerValues, ",")
+ }
+ }
+ stripExcessSpaces(headerItems)
+ ctx.canonicalHeaders = strings.Join(headerItems, "\n")
+}
+
+func (ctx *signingCtx) buildCanonicalString() {
+ ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)
+
+ uri := getURIPath(ctx.Request.URL)
+
+ if !ctx.DisableURIPathEscaping {
+ uri = rest.EscapePath(uri, false)
+ }
+
+ ctx.canonicalString = strings.Join([]string{
+ ctx.Request.Method,
+ uri,
+ ctx.Request.URL.RawQuery,
+ ctx.canonicalHeaders + "\n",
+ ctx.signedHeaders,
+ ctx.bodyDigest,
+ }, "\n")
+}
+
+func (ctx *signingCtx) buildStringToSign() {
+ ctx.stringToSign = strings.Join([]string{
+ authHeaderPrefix,
+ formatTime(ctx.Time),
+ ctx.credentialString,
+ hex.EncodeToString(hashSHA256([]byte(ctx.canonicalString))),
+ }, "\n")
+}
+
+func (ctx *signingCtx) buildSignature() {
+ creds := deriveSigningKey(ctx.Region, ctx.ServiceName, ctx.credValues.SecretAccessKey, ctx.Time)
+ signature := hmacSHA256(creds, []byte(ctx.stringToSign))
+ ctx.signature = hex.EncodeToString(signature)
+}
+
+func (ctx *signingCtx) buildBodyDigest() error {
+ hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
+ if hash == "" {
+ includeSHA256Header := ctx.unsignedPayload ||
+ ctx.ServiceName == "s3" ||
+ ctx.ServiceName == "s3-object-lambda" ||
+ ctx.ServiceName == "glacier"
+
+ s3Presign := ctx.isPresign &&
+ (ctx.ServiceName == "s3" ||
+ ctx.ServiceName == "s3-object-lambda")
+
+ if ctx.unsignedPayload || s3Presign {
+ hash = "UNSIGNED-PAYLOAD"
+ includeSHA256Header = !s3Presign
+ } else if ctx.Body == nil {
+ hash = emptyStringSHA256
+ } else {
+ if !aws.IsReaderSeekable(ctx.Body) {
+ return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body)
+ }
+ hashBytes, err := makeSha256Reader(ctx.Body)
+ if err != nil {
+ return err
+ }
+ hash = hex.EncodeToString(hashBytes)
+ }
+
+ if includeSHA256Header {
+ ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
+ }
+ }
+ ctx.bodyDigest = hash
+
+ return nil
+}
+
+// isRequestSigned returns if the request is currently signed or presigned
+func (ctx *signingCtx) isRequestSigned() bool {
+ if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" {
+ return true
+ }
+ if ctx.Request.Header.Get("Authorization") != "" {
+ return true
+ }
+
+ return false
+}
+
+// removePresign removes signing flags for both signed and presigned requests.
+func (ctx *signingCtx) removePresign() {
+ ctx.Query.Del("X-Amz-Algorithm")
+ ctx.Query.Del("X-Amz-Signature")
+ ctx.Query.Del("X-Amz-Security-Token")
+ ctx.Query.Del("X-Amz-Date")
+ ctx.Query.Del("X-Amz-Expires")
+ ctx.Query.Del("X-Amz-Credential")
+ ctx.Query.Del("X-Amz-SignedHeaders")
+}
+
+func hmacSHA256(key []byte, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+func hashSHA256(data []byte) []byte {
+ hash := sha256.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) {
+ hash := sha256.New()
+ start, err := reader.Seek(0, sdkio.SeekCurrent)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+		// ensure an error is returned if unable to seek back to the start of the payload.
+ _, err = reader.Seek(start, sdkio.SeekStart)
+ }()
+
+ // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies
+ // smaller than 32KB. Fall back to io.Copy if we fail to determine the size.
+ size, err := aws.SeekerLen(reader)
+ if err != nil {
+ io.Copy(hash, reader)
+ } else {
+ io.CopyN(hash, reader, size)
+ }
+
+ return hash.Sum(nil), nil
+}
+
+const doubleSpace = " "
+
+// stripExcessSpaces will rewrite the passed in slice's string values to not
+// contain multiple side-by-side spaces.
+func stripExcessSpaces(vals []string) {
+ var j, k, l, m, spaces int
+ for i, str := range vals {
+ // Trim trailing spaces
+ for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
+ }
+
+ // Trim leading spaces
+ for k = 0; k < j && str[k] == ' '; k++ {
+ }
+ str = str[k : j+1]
+
+ // Strip multiple spaces.
+ j = strings.Index(str, doubleSpace)
+ if j < 0 {
+ vals[i] = str
+ continue
+ }
+
+ buf := []byte(str)
+ for k, m, l = j, j, len(buf); k < l; k++ {
+ if buf[k] == ' ' {
+ if spaces == 0 {
+ // First space.
+ buf[m] = buf[k]
+ m++
+ }
+ spaces++
+ } else {
+ // End of multiple spaces.
+ spaces = 0
+ buf[m] = buf[k]
+ m++
+ }
+ }
+
+ vals[i] = string(buf[:m])
+ }
+}
+
+func buildSigningScope(region, service string, dt time.Time) string {
+ return strings.Join([]string{
+ formatShortTime(dt),
+ region,
+ service,
+ awsV4Request,
+ }, "/")
+}
+
+func deriveSigningKey(region, service, secretKey string, dt time.Time) []byte {
+ kDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(formatShortTime(dt)))
+ kRegion := hmacSHA256(kDate, []byte(region))
+ kService := hmacSHA256(kRegion, []byte(service))
+ signingKey := hmacSHA256(kService, []byte(awsV4Request))
+ return signingKey
+}
+
+func formatShortTime(dt time.Time) string {
+ return dt.UTC().Format(shortTimeFormat)
+}
+
+func formatTime(dt time.Time) string {
+ return dt.UTC().Format(timeFormat)
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/types.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/types.go
new file mode 100644
index 0000000000000..de696e73daa7b
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/types.go
@@ -0,0 +1,264 @@
+package aws
+
+import (
+ "io"
+ "strings"
+ "sync"
+
+ "github.com/IBM/ibm-cos-sdk-go/internal/sdkio"
+)
+
+// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser. Allows the
+// SDK to accept an io.Reader that is not also an io.Seeker for unsigned
+// streaming payload API operations.
+//
+// A ReadSeekCloser wrapping a nonseekable io.Reader used in an API
+// operation's input will prevent that operation being retried in the case of
+// network errors, and cause operation requests to fail if the operation
+// requires payload signing.
+//
+// Note: If using S3 PutObject to stream an object upload, the SDK's S3
+// Upload manager (s3manager.Uploader) provides support for streaming with the
+// ability to retry network errors.
+func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
+ return ReaderSeekerCloser{r}
+}
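+
+// A minimal sketch of wrapping a reader (strings.NewReader is already
+// seekable, so the wrapper here is purely illustrative):
+//
+//	body := aws.ReadSeekCloser(strings.NewReader("payload"))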
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+type ReaderSeekerCloser struct {
+ r io.Reader
+}
+
+// IsReaderSeekable returns whether the underlying reader type can be seeked. An
+// io.Reader might not actually be seekable if it is the ReaderSeekerCloser
+// type.
+func IsReaderSeekable(r io.Reader) bool {
+ switch v := r.(type) {
+ case ReaderSeekerCloser:
+ return v.IsSeeker()
+ case *ReaderSeekerCloser:
+ return v.IsSeeker()
+ case io.ReadSeeker:
+ return true
+ default:
+ return false
+ }
+}
+
+// Read reads from the reader up to the size of p. The number of bytes read,
+// and an error if one occurred, will be returned.
+//
+// If the reader is not an io.Reader, zero bytes read and a nil error will be
+// returned.
+//
+// Performs the same functionality as io.Reader Read
+func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
+ switch t := r.r.(type) {
+ case io.Reader:
+ return t.Read(p)
+ }
+ return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+ switch t := r.r.(type) {
+ case io.Seeker:
+ return t.Seek(offset, whence)
+ }
+ return int64(0), nil
+}
+
+// IsSeeker returns if the underlying reader is also a seeker.
+func (r ReaderSeekerCloser) IsSeeker() bool {
+ _, ok := r.r.(io.Seeker)
+ return ok
+}
+
+// HasLen returns the length of the underlying reader if the value implements
+// the Len() int method.
+func (r ReaderSeekerCloser) HasLen() (int, bool) {
+ type lenner interface {
+ Len() int
+ }
+
+ if lr, ok := r.r.(lenner); ok {
+ return lr.Len(), true
+ }
+
+ return 0, false
+}
+
+// GetLen returns the length of the bytes remaining in the underlying reader.
+// Checks first for Len(), then io.Seeker to determine the size of the
+// underlying reader.
+//
+// Will return -1 if the length cannot be determined.
+func (r ReaderSeekerCloser) GetLen() (int64, error) {
+ if l, ok := r.HasLen(); ok {
+ return int64(l), nil
+ }
+
+ if s, ok := r.r.(io.Seeker); ok {
+ return seekerLen(s)
+ }
+
+ return -1, nil
+}
+
+// SeekerLen attempts to get the number of bytes remaining at the seeker's
+// current position. Returns the number of bytes remaining or error.
+func SeekerLen(s io.Seeker) (int64, error) {
+	// Determine if the seeker is actually seekable. ReaderSeekerCloser
+	// hides the fact that an io.Reader might not actually be seekable.
+ switch v := s.(type) {
+ case ReaderSeekerCloser:
+ return v.GetLen()
+ case *ReaderSeekerCloser:
+ return v.GetLen()
+ }
+
+ return seekerLen(s)
+}
+
+func seekerLen(s io.Seeker) (int64, error) {
+ curOffset, err := s.Seek(0, sdkio.SeekCurrent)
+ if err != nil {
+ return 0, err
+ }
+
+ endOffset, err := s.Seek(0, sdkio.SeekEnd)
+ if err != nil {
+ return 0, err
+ }
+
+ _, err = s.Seek(curOffset, sdkio.SeekStart)
+ if err != nil {
+ return 0, err
+ }
+
+ return endOffset - curOffset, nil
+}
+
+// Close closes the ReaderSeekerCloser.
+//
+// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
+func (r ReaderSeekerCloser) Close() error {
+ switch t := r.r.(type) {
+ case io.Closer:
+ return t.Close()
+ }
+ return nil
+}
+
+// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt interface.
+// Can be used with the s3manager.Downloader to download content to a buffer
+// in memory. Safe to use concurrently.
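+//
+// A minimal usage sketch (the contents written are illustrative):
+//
+//	buf := aws.NewWriteAtBuffer([]byte{})
+//	buf.WriteAt([]byte("hello"), 0) // buf.Bytes() is now "hello"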
+type WriteAtBuffer struct {
+ buf []byte
+ m sync.Mutex
+
+ // GrowthCoeff defines the growth rate of the internal buffer. By
+ // default, the growth rate is 1, where expanding the internal
+ // buffer will allocate only enough capacity to fit the new expected
+ // length.
+ GrowthCoeff float64
+}
+
+// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
+// provided by buf.
+func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
+ return &WriteAtBuffer{buf: buf}
+}
+
+// WriteAt writes a slice of bytes to a buffer starting at the position provided.
+// The number of bytes written will be returned, or an error. Can overwrite
+// previously written slices if the writes overlap.
+func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
+ pLen := len(p)
+ expLen := pos + int64(pLen)
+ b.m.Lock()
+ defer b.m.Unlock()
+ if int64(len(b.buf)) < expLen {
+ if int64(cap(b.buf)) < expLen {
+ if b.GrowthCoeff < 1 {
+ b.GrowthCoeff = 1
+ }
+ newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
+ copy(newBuf, b.buf)
+ b.buf = newBuf
+ }
+ b.buf = b.buf[:expLen]
+ }
+ copy(b.buf[pos:], p)
+ return pLen, nil
+}
+
+// Bytes returns a slice of bytes written to the buffer.
+func (b *WriteAtBuffer) Bytes() []byte {
+ b.m.Lock()
+ defer b.m.Unlock()
+ return b.buf
+}
+
+// MultiCloser is a utility to close multiple io.Closers within a single
+// statement.
+type MultiCloser []io.Closer
+
+// Close closes all of the io.Closers making up the MultiCloser. Any
+// errors that occur while closing will be returned in the order they
+// occur.
+func (m MultiCloser) Close() error {
+ var errs errors
+ for _, c := range m {
+ err := c.Close()
+ if err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if len(errs) != 0 {
+ return errs
+ }
+
+ return nil
+}
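+
+// A minimal usage sketch (r1 and r2 are hypothetical io.Closer values):
+//
+//	closers := aws.MultiCloser{r1, r2}
+//	defer closers.Close()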
+
+type errors []error
+
+func (es errors) Error() string {
+ var parts []string
+ for _, e := range es {
+ parts = append(parts, e.Error())
+ }
+
+ return strings.Join(parts, "\n")
+}
+
+// CopySeekableBody copies the seekable body to an io.Writer
+func CopySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) {
+ curPos, err := src.Seek(0, sdkio.SeekCurrent)
+ if err != nil {
+ return 0, err
+ }
+
+ // copy errors may be assumed to be from the body.
+ n, err := io.Copy(dst, src)
+ if err != nil {
+ return n, err
+ }
+
+ // seek back to the first position after reading to reset
+ // the body for transmission.
+ _, err = src.Seek(curPos, sdkio.SeekStart)
+ if err != nil {
+ return n, err
+ }
+
+ return n, nil
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/url.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/url.go
new file mode 100644
index 0000000000000..fed561bd597cb
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/url.go
@@ -0,0 +1,13 @@
+//go:build go1.8
+// +build go1.8
+
+package aws
+
+import "net/url"
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Wrapper of net/url#URL.Hostname for backwards Go version compatibility.
+func URLHostname(url *url.URL) string {
+ return url.Hostname()
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/aws/version.go b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/version.go
new file mode 100644
index 0000000000000..6df52c15c6607
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/aws/version.go
@@ -0,0 +1,12 @@
+// Package aws provides core functionality for making requests to IBM COS services.
+package aws
+
+// IBM COS SDK Code -- START
+
+// SDKName is the name of this SDK
+const SDKName = "ibm-cos-sdk-go"
+
+// SDKVersion is the version of this SDK
+const SDKVersion = "1.9.4"
+
+// IBM COS SDK Code -- END
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ast.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ast.go
new file mode 100644
index 0000000000000..e83a99886bccd
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ast.go
@@ -0,0 +1,120 @@
+package ini
+
+// ASTKind represents different states in the parse table
+// and the type of AST that is being constructed
+type ASTKind int
+
+// ASTKind* is used in the parse table to transition between
+// the different states
+const (
+ ASTKindNone = ASTKind(iota)
+ ASTKindStart
+ ASTKindExpr
+ ASTKindEqualExpr
+ ASTKindStatement
+ ASTKindSkipStatement
+ ASTKindExprStatement
+ ASTKindSectionStatement
+ ASTKindNestedSectionStatement
+ ASTKindCompletedNestedSectionStatement
+ ASTKindCommentStatement
+ ASTKindCompletedSectionStatement
+)
+
+func (k ASTKind) String() string {
+ switch k {
+ case ASTKindNone:
+ return "none"
+ case ASTKindStart:
+ return "start"
+ case ASTKindExpr:
+ return "expr"
+ case ASTKindStatement:
+ return "stmt"
+ case ASTKindSectionStatement:
+ return "section_stmt"
+ case ASTKindExprStatement:
+ return "expr_stmt"
+ case ASTKindCommentStatement:
+ return "comment"
+ case ASTKindNestedSectionStatement:
+ return "nested_section_stmt"
+ case ASTKindCompletedSectionStatement:
+ return "completed_stmt"
+ case ASTKindSkipStatement:
+ return "skip"
+ default:
+ return ""
+ }
+}
+
+// The AST type allows us to determine what kind of node we
+// are on, so casting may not be necessary.
+//
+// The root is always the first node in Children
+type AST struct {
+ Kind ASTKind
+ Root Token
+ RootToken bool
+ Children []AST
+}
+
+func newAST(kind ASTKind, root AST, children ...AST) AST {
+ return AST{
+ Kind: kind,
+ Children: append([]AST{root}, children...),
+ }
+}
+
+func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST {
+ return AST{
+ Kind: kind,
+ Root: root,
+ RootToken: true,
+ Children: children,
+ }
+}
+
+// AppendChild will append to the list of children an AST has.
+func (a *AST) AppendChild(child AST) {
+ a.Children = append(a.Children, child)
+}
+
+// GetRoot will return the root AST which can be the first entry
+// in the children list or a token.
+func (a *AST) GetRoot() AST {
+ if a.RootToken {
+ return *a
+ }
+
+ if len(a.Children) == 0 {
+ return AST{}
+ }
+
+ return a.Children[0]
+}
+
+// GetChildren will return the current AST's list of children
+func (a *AST) GetChildren() []AST {
+ if len(a.Children) == 0 {
+ return []AST{}
+ }
+
+ if a.RootToken {
+ return a.Children
+ }
+
+ return a.Children[1:]
+}
+
+// SetChildren will set and override all children of the AST.
+func (a *AST) SetChildren(children []AST) {
+ if a.RootToken {
+ a.Children = children
+ } else {
+ a.Children = append(a.Children[:1], children...)
+ }
+}
+
+// Start is used to indicate the starting state of the parse table.
+var Start = newAST(ASTKindStart, AST{})
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/comma_token.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/comma_token.go
new file mode 100644
index 0000000000000..0895d53cbe656
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/comma_token.go
@@ -0,0 +1,11 @@
+package ini
+
+var commaRunes = []rune(",")
+
+func isComma(b rune) bool {
+ return b == ','
+}
+
+func newCommaToken() Token {
+ return newToken(TokenComma, commaRunes, NoneType)
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/comment_token.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/comment_token.go
new file mode 100644
index 0000000000000..0b76999ba1f37
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/comment_token.go
@@ -0,0 +1,35 @@
+package ini
+
+// isComment will return whether or not the next byte(s) is a
+// comment.
+func isComment(b []rune) bool {
+ if len(b) == 0 {
+ return false
+ }
+
+ switch b[0] {
+ case ';':
+ return true
+ case '#':
+ return true
+ }
+
+ return false
+}
+
+// newCommentToken will create a comment token and
+// return how many bytes were read.
+func newCommentToken(b []rune) (Token, int, error) {
+ i := 0
+ for ; i < len(b); i++ {
+ if b[i] == '\n' {
+ break
+ }
+
+ if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' {
+ break
+ }
+ }
+
+ return newToken(TokenComment, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/doc.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/doc.go
new file mode 100644
index 0000000000000..1e55bbd07b91b
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/doc.go
@@ -0,0 +1,42 @@
+// Package ini is an LL(1) parser for configuration files.
+//
+// Example:
+// sections, err := ini.OpenFile("/path/to/file")
+// if err != nil {
+// panic(err)
+// }
+//
+// profile := "foo"
+// section, ok := sections.GetSection(profile)
+// if !ok {
+// fmt.Printf("section %q could not be found", profile)
+// }
+//
+// Below is the BNF that describes this parser
+// Grammar:
+// stmt -> section | stmt'
+// stmt' -> epsilon | expr
+// expr -> value (stmt)* | equal_expr (stmt)*
+// equal_expr -> value ( ':' | '=' ) equal_expr'
+// equal_expr' -> number | string | quoted_string
+// quoted_string -> " quoted_string'
+// quoted_string' -> string quoted_string_end
+// quoted_string_end -> "
+//
+// section -> [ section'
+// section' -> section_value section_close
+// section_value -> number | string_subset | boolean | quoted_string_subset
+// quoted_string_subset -> " quoted_string_subset'
+// quoted_string_subset' -> string_subset quoted_string_end
+// quoted_string_subset -> "
+// section_close -> ]
+//
+// value -> number | string_subset | boolean
+// string -> ? UTF-8 Code-Points except '\n' (U+000A) and '\r\n' (U+000D U+000A) ?
+// string_subset -> ? Code-points excepted by <string> grammar except ':' (U+003A), '=' (U+003D), '[' (U+005B), and ']' (U+005D) ?
+//
+// SkipState will skip (NL WS)+
+//
+// comment -> # comment' | ; comment'
+// comment' -> epsilon | value
+package ini
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/empty_token.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/empty_token.go
new file mode 100644
index 0000000000000..04345a54c20d5
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/empty_token.go
@@ -0,0 +1,4 @@
+package ini
+
+// emptyToken is used to satisfy the Token interface
+var emptyToken = newToken(TokenNone, []rune{}, NoneType)
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/expression.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/expression.go
new file mode 100644
index 0000000000000..91ba2a59dd5e7
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/expression.go
@@ -0,0 +1,24 @@
+package ini
+
+// newExpression will return an expression AST.
+// Expr represents an expression
+//
+// grammar:
+// expr -> string | number
+func newExpression(tok Token) AST {
+ return newASTWithRootToken(ASTKindExpr, tok)
+}
+
+func newEqualExpr(left AST, tok Token) AST {
+ return newASTWithRootToken(ASTKindEqualExpr, tok, left)
+}
+
+// EqualExprKey will return the LHS value in the equal expr
+func EqualExprKey(ast AST) string {
+ children := ast.GetChildren()
+ if len(children) == 0 || ast.Kind != ASTKindEqualExpr {
+ return ""
+ }
+
+ return string(children[0].Root.Raw())
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/fuzz.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/fuzz.go
new file mode 100644
index 0000000000000..6e545b63bc419
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/fuzz.go
@@ -0,0 +1,18 @@
+//go:build gofuzz
+// +build gofuzz
+
+package ini
+
+import (
+ "bytes"
+)
+
+func Fuzz(data []byte) int {
+ b := bytes.NewReader(data)
+
+ if _, err := Parse(b); err != nil {
+ return 0
+ }
+
+ return 1
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ini.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ini.go
new file mode 100644
index 0000000000000..d72432096a422
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ini.go
@@ -0,0 +1,51 @@
+package ini
+
+import (
+ "io"
+ "os"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+)
+
+// OpenFile takes a path to a given file, and will open and parse
+// that file.
+func OpenFile(path string) (Sections, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err)
+ }
+ defer f.Close()
+
+ return Parse(f)
+}
+
+// Parse will parse the given file using the shared config
+// visitor.
+func Parse(f io.Reader) (Sections, error) {
+ tree, err := ParseAST(f)
+ if err != nil {
+ return Sections{}, err
+ }
+
+ v := NewDefaultVisitor()
+ if err = Walk(tree, v); err != nil {
+ return Sections{}, err
+ }
+
+ return v.Sections, nil
+}
+
+// ParseBytes will parse the given bytes and return the parsed sections.
+func ParseBytes(b []byte) (Sections, error) {
+ tree, err := ParseASTBytes(b)
+ if err != nil {
+ return Sections{}, err
+ }
+
+ v := NewDefaultVisitor()
+ if err = Walk(tree, v); err != nil {
+ return Sections{}, err
+ }
+
+ return v.Sections, nil
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ini_lexer.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ini_lexer.go
new file mode 100644
index 0000000000000..b015c9dabea21
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ini_lexer.go
@@ -0,0 +1,165 @@
+package ini
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+)
+
+const (
+ // ErrCodeUnableToReadFile is used when a file is failed to be
+ // opened or read from.
+ ErrCodeUnableToReadFile = "FailedRead"
+)
+
+// TokenType represents the different token types
+type TokenType int
+
+func (t TokenType) String() string {
+ switch t {
+ case TokenNone:
+ return "none"
+ case TokenLit:
+ return "literal"
+ case TokenSep:
+ return "sep"
+ case TokenOp:
+ return "op"
+ case TokenWS:
+ return "ws"
+ case TokenNL:
+ return "newline"
+ case TokenComment:
+ return "comment"
+ case TokenComma:
+ return "comma"
+ default:
+ return ""
+ }
+}
+
+// TokenType enums
+const (
+ TokenNone = TokenType(iota)
+ TokenLit
+ TokenSep
+ TokenComma
+ TokenOp
+ TokenWS
+ TokenNL
+ TokenComment
+)
+
+type iniLexer struct{}
+
+// Tokenize will return a list of tokens during lexical analysis of the
+// io.Reader.
+func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err)
+ }
+
+ return l.tokenize(b)
+}
+
+func (l *iniLexer) tokenize(b []byte) ([]Token, error) {
+ runes := bytes.Runes(b)
+ var err error
+ n := 0
+ tokenAmount := countTokens(runes)
+ tokens := make([]Token, tokenAmount)
+ count := 0
+
+ for len(runes) > 0 && count < tokenAmount {
+ switch {
+ case isWhitespace(runes[0]):
+ tokens[count], n, err = newWSToken(runes)
+ case isComma(runes[0]):
+ tokens[count], n = newCommaToken(), 1
+ case isComment(runes):
+ tokens[count], n, err = newCommentToken(runes)
+ case isNewline(runes):
+ tokens[count], n, err = newNewlineToken(runes)
+ case isSep(runes):
+ tokens[count], n, err = newSepToken(runes)
+ case isOp(runes):
+ tokens[count], n, err = newOpToken(runes)
+ default:
+ tokens[count], n, err = newLitToken(runes)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ count++
+
+ runes = runes[n:]
+ }
+
+ return tokens[:count], nil
+}
+
+func countTokens(runes []rune) int {
+ count, n := 0, 0
+ var err error
+
+ for len(runes) > 0 {
+ switch {
+ case isWhitespace(runes[0]):
+ _, n, err = newWSToken(runes)
+ case isComma(runes[0]):
+ _, n = newCommaToken(), 1
+ case isComment(runes):
+ _, n, err = newCommentToken(runes)
+ case isNewline(runes):
+ _, n, err = newNewlineToken(runes)
+ case isSep(runes):
+ _, n, err = newSepToken(runes)
+ case isOp(runes):
+ _, n, err = newOpToken(runes)
+ default:
+ _, n, err = newLitToken(runes)
+ }
+
+ if err != nil {
+ return 0
+ }
+
+ count++
+ runes = runes[n:]
+ }
+
+ return count + 1
+}
+
+// Token carries metadata about a given value.
+type Token struct {
+ t TokenType
+ ValueType ValueType
+ base int
+ raw []rune
+}
+
+var emptyValue = Value{}
+
+func newToken(t TokenType, raw []rune, v ValueType) Token {
+ return Token{
+ t: t,
+ raw: raw,
+ ValueType: v,
+ }
+}
+
+// Raw returns the raw runes that were consumed
+func (tok Token) Raw() []rune {
+ return tok.raw
+}
+
+// Type returns the token type
+func (tok Token) Type() TokenType {
+ return tok.t
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ini_parser.go
new file mode 100644
index 0000000000000..760c41353747b
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ini_parser.go
@@ -0,0 +1,362 @@
+package ini
+
+import (
+ "fmt"
+ "io"
+)
+
+// ParseState represents the current state of the parser.
+type ParseState uint
+
+// State enums for the parse table
+const (
+ InvalidState ParseState = iota
+ // stmt -> value stmt'
+ StatementState
+ // stmt' -> MarkComplete | op stmt
+ StatementPrimeState
+ // value -> number | string | boolean | quoted_string
+ ValueState
+ // section -> [ section'
+ OpenScopeState
+ // section' -> value section_close
+ SectionState
+ // section_close -> ]
+ CloseScopeState
+ // SkipState will skip (NL WS)+
+ SkipState
+ // SkipTokenState will skip any token and push the previous
+ // state onto the stack.
+ SkipTokenState
+ // comment -> # comment' | ; comment'
+ // comment' -> MarkComplete | value
+ CommentState
+ // MarkComplete state will complete statements and move that
+ // to the completed AST list
+ MarkCompleteState
+ // TerminalState signifies that the tokens have been fully parsed
+ TerminalState
+)
+
+// parseTable is a state machine to dictate the grammar above.
+var parseTable = map[ASTKind]map[TokenType]ParseState{
+ ASTKindStart: {
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: TerminalState,
+ },
+ ASTKindCommentStatement: {
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindExpr: {
+ TokenOp: StatementPrimeState,
+ TokenLit: ValueState,
+ TokenSep: OpenScopeState,
+ TokenWS: ValueState,
+ TokenNL: SkipState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindEqualExpr: {
+ TokenLit: ValueState,
+ TokenSep: ValueState,
+ TokenOp: ValueState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipState,
+ TokenNone: SkipState,
+ },
+ ASTKindStatement: {
+ TokenLit: SectionState,
+ TokenSep: CloseScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindExprStatement: {
+ TokenLit: ValueState,
+ TokenSep: ValueState,
+ TokenOp: ValueState,
+ TokenWS: ValueState,
+ TokenNL: MarkCompleteState,
+ TokenComment: CommentState,
+ TokenNone: TerminalState,
+ TokenComma: SkipState,
+ },
+ ASTKindSectionStatement: {
+ TokenLit: SectionState,
+ TokenOp: SectionState,
+ TokenSep: CloseScopeState,
+ TokenWS: SectionState,
+ TokenNL: SkipTokenState,
+ },
+ ASTKindCompletedSectionStatement: {
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindSkipStatement: {
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: TerminalState,
+ },
+}
+
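+// As a worked example of the table above, the statement "key = value"
+// (tokens: lit ws op ws lit) steps through the following states:
+//
+//	Start         + lit("key")   -> StatementState      (push Expr)
+//	Expr          + ws           -> ValueState          (append raw)
+//	Expr          + op("=")      -> StatementPrimeState (push EqualExpr)
+//	EqualExpr     + ws           -> SkipTokenState
+//	EqualExpr     + lit("value") -> ValueState          (push ExprStatement)
+//	ExprStatement + none         -> TerminalState       (mark complete)
+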
+// ParseAST will parse input from an io.Reader using
+// an LL(1) parser.
+func ParseAST(r io.Reader) ([]AST, error) {
+ lexer := iniLexer{}
+ tokens, err := lexer.Tokenize(r)
+ if err != nil {
+ return []AST{}, err
+ }
+
+ return parse(tokens)
+}
+
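+// A minimal usage sketch (the input below is illustrative only):
+//
+//	tree, err := ParseAST(strings.NewReader("[default]\nkey = value\n"))
+//	if err != nil {
+//		// handle the parse error
+//	}
+//	// tree holds a completed section statement followed by an expression
+//	// statement, ready to be walked by a Visitor (see DefaultVisitor).
+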
+// ParseASTBytes will parse input from a byte slice using
+// an LL(1) parser.
+func ParseASTBytes(b []byte) ([]AST, error) {
+ lexer := iniLexer{}
+ tokens, err := lexer.tokenize(b)
+ if err != nil {
+ return []AST{}, err
+ }
+
+ return parse(tokens)
+}
+
+func parse(tokens []Token) ([]AST, error) {
+ start := Start
+ stack := newParseStack(3, len(tokens))
+
+ stack.Push(start)
+ s := newSkipper()
+
+loop:
+ for stack.Len() > 0 {
+ k := stack.Pop()
+
+ var tok Token
+ if len(tokens) == 0 {
+ // this occurs when all the tokens have been processed
+ // but reduction of what's left on the stack needs to
+ // occur.
+ tok = emptyToken
+ } else {
+ tok = tokens[0]
+ }
+
+ step := parseTable[k.Kind][tok.Type()]
+ if s.ShouldSkip(tok) {
+ // being in a skip state with no tokens will break out of
+ // the parse loop since there is nothing left to process.
+ if len(tokens) == 0 {
+ break loop
+ }
+		// if the skip state is set, we skip tokens until it is cleared.
+ step = SkipTokenState
+ }
+
+ switch step {
+ case TerminalState:
+			// Finished parsing. Mark what should be the last
+			// statement as complete. If there is anything left
+			// on the stack, an error in parsing has occurred.
+ if k.Kind != ASTKindStart {
+ stack.MarkComplete(k)
+ }
+ break loop
+ case SkipTokenState:
+ // When skipping a token, the previous state was popped off the stack.
+ // To maintain the correct state, the previous state will be pushed
+ // onto the stack.
+ stack.Push(k)
+ case StatementState:
+ if k.Kind != ASTKindStart {
+ stack.MarkComplete(k)
+ }
+ expr := newExpression(tok)
+ stack.Push(expr)
+ case StatementPrimeState:
+ if tok.Type() != TokenOp {
+ stack.MarkComplete(k)
+ continue
+ }
+
+ if k.Kind != ASTKindExpr {
+ return nil, NewParseError(
+ fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k),
+ )
+ }
+
+ k = trimSpaces(k)
+ expr := newEqualExpr(k, tok)
+ stack.Push(expr)
+ case ValueState:
+ // ValueState requires the previous state to either be an equal expression
+ // or an expression statement.
+ //
+ // This grammar occurs when the RHS is a number, word, or quoted string.
+ // equal_expr -> lit op equal_expr'
+ // equal_expr' -> number | string | quoted_string
+ // quoted_string -> " quoted_string'
+ // quoted_string' -> string quoted_string_end
+ // quoted_string_end -> "
+ //
+ // otherwise
+ // expr_stmt -> equal_expr (expr_stmt')*
+ // expr_stmt' -> ws S | op S | MarkComplete
+ // S -> equal_expr' expr_stmt'
+ switch k.Kind {
+ case ASTKindEqualExpr:
+ // assigning a value to some key
+ k.AppendChild(newExpression(tok))
+ stack.Push(newExprStatement(k))
+ case ASTKindExpr:
+ k.Root.raw = append(k.Root.raw, tok.Raw()...)
+ stack.Push(k)
+ case ASTKindExprStatement:
+ root := k.GetRoot()
+ children := root.GetChildren()
+ if len(children) == 0 {
+ return nil, NewParseError(
+ fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind),
+ )
+ }
+
+ rhs := children[len(children)-1]
+
+ if rhs.Root.ValueType != QuotedStringType {
+ rhs.Root.ValueType = StringType
+ rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...)
+
+ }
+
+ children[len(children)-1] = rhs
+ root.SetChildren(children)
+
+ stack.Push(k)
+ }
+ case OpenScopeState:
+ if !runeCompare(tok.Raw(), openBrace) {
+ return nil, NewParseError("expected '['")
+ }
+ // If OpenScopeState is not at the start, we must mark the previous ast as complete
+ //
+ // for example: if previous ast was a skip statement;
+ // we should mark it as complete before we create a new statement
+ if k.Kind != ASTKindStart {
+ stack.MarkComplete(k)
+ }
+
+ stmt := newStatement()
+ stack.Push(stmt)
+ case CloseScopeState:
+ if !runeCompare(tok.Raw(), closeBrace) {
+ return nil, NewParseError("expected ']'")
+ }
+
+ k = trimSpaces(k)
+ stack.Push(newCompletedSectionStatement(k))
+ case SectionState:
+ var stmt AST
+
+ switch k.Kind {
+ case ASTKindStatement:
+ // If there are multiple literals inside of a scope declaration,
+ // then the current token's raw value will be appended to the Name.
+ //
+ // This handles cases like [ profile default ]
+ //
+ // k will represent a SectionStatement with the children representing
+ // the label of the section
+ stmt = newSectionStatement(tok)
+ case ASTKindSectionStatement:
+ k.Root.raw = append(k.Root.raw, tok.Raw()...)
+ stmt = k
+ default:
+ return nil, NewParseError(
+ fmt.Sprintf("invalid statement: expected statement: %v", k.Kind),
+ )
+ }
+
+ stack.Push(stmt)
+ case MarkCompleteState:
+ if k.Kind != ASTKindStart {
+ stack.MarkComplete(k)
+ }
+
+ if stack.Len() == 0 {
+ stack.Push(start)
+ }
+ case SkipState:
+ stack.Push(newSkipStatement(k))
+ s.Skip()
+ case CommentState:
+ if k.Kind == ASTKindStart {
+ stack.Push(k)
+ } else {
+ stack.MarkComplete(k)
+ }
+
+ stmt := newCommentStatement(tok)
+ stack.Push(stmt)
+ default:
+ return nil, NewParseError(
+ fmt.Sprintf("invalid state with ASTKind %v and TokenType %v",
+ k, tok.Type()))
+ }
+
+ if len(tokens) > 0 {
+ tokens = tokens[1:]
+ }
+ }
+
+ // this occurs when a statement has not been completed
+ if stack.top > 1 {
+		return nil, NewParseError("incomplete ini expression")
+ }
+
+ // returns a sublist which excludes the start symbol
+ return stack.List(), nil
+}
+
+// trimSpaces will trim spaces on the left and right hand side of
+// the literal.
+func trimSpaces(k AST) AST {
+ // trim left hand side of spaces
+ for i := 0; i < len(k.Root.raw); i++ {
+ if !isWhitespace(k.Root.raw[i]) {
+ break
+ }
+
+ k.Root.raw = k.Root.raw[1:]
+ i--
+ }
+
+ // trim right hand side of spaces
+ for i := len(k.Root.raw) - 1; i >= 0; i-- {
+ if !isWhitespace(k.Root.raw[i]) {
+ break
+ }
+
+ k.Root.raw = k.Root.raw[:len(k.Root.raw)-1]
+ }
+
+ return k
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/literal_tokens.go
new file mode 100644
index 0000000000000..34a481afbd4f7
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/literal_tokens.go
@@ -0,0 +1,340 @@
+package ini
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+var (
+ runesTrue = []rune("true")
+ runesFalse = []rune("false")
+)
+
+var literalValues = [][]rune{
+ runesTrue,
+ runesFalse,
+}
+
+func isBoolValue(b []rune) bool {
+ for _, lv := range literalValues {
+ if isCaselessLitValue(lv, b) {
+ return true
+ }
+ }
+ return false
+}
+
+func isLitValue(want, have []rune) bool {
+ if len(have) < len(want) {
+ return false
+ }
+
+ for i := 0; i < len(want); i++ {
+ if want[i] != have[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+// isCaselessLitValue is a caseless value comparison, assumes want is already lower-cased for efficiency.
+func isCaselessLitValue(want, have []rune) bool {
+ if len(have) < len(want) {
+ return false
+ }
+
+ for i := 0; i < len(want); i++ {
+ if want[i] != unicode.ToLower(have[i]) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// isNumberValue will return whether or not the leading characters in
+// a byte slice is a number. A number is delimited by whitespace or
+// the newline token.
+//
+// A number is defined to be in a binary, octal, decimal (int | float), hex format,
+// or in scientific notation.
+func isNumberValue(b []rune) bool {
+ negativeIndex := 0
+ helper := numberHelper{}
+ needDigit := false
+
+ for i := 0; i < len(b); i++ {
+ negativeIndex++
+
+ switch b[i] {
+ case '-':
+ if helper.IsNegative() || negativeIndex != 1 {
+ return false
+ }
+ helper.Determine(b[i])
+ needDigit = true
+ continue
+ case 'e', 'E':
+ if err := helper.Determine(b[i]); err != nil {
+ return false
+ }
+ negativeIndex = 0
+ needDigit = true
+ continue
+ case 'b':
+ if helper.numberFormat == hex {
+ break
+ }
+ fallthrough
+ case 'o', 'x':
+ needDigit = true
+ if i == 0 {
+ return false
+ }
+
+ fallthrough
+ case '.':
+ if err := helper.Determine(b[i]); err != nil {
+ return false
+ }
+ needDigit = true
+ continue
+ }
+
+ if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) {
+ return !needDigit
+ }
+
+ if !helper.CorrectByte(b[i]) {
+ return false
+ }
+ needDigit = false
+ }
+
+ return !needDigit
+}
+
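+// For instance (illustrative, matching the formats listed above):
+//
+//	isNumberValue([]rune("0x2B")) // true  (hex)
+//	isNumberValue([]rune("1e-4")) // true  (scientific notation)
+//	isNumberValue([]rune("12ab")) // false (trailing non-digit characters)
+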
+func isValid(b []rune) (bool, int, error) {
+ if len(b) == 0 {
+ // TODO: should probably return an error
+ return false, 0, nil
+ }
+
+ return isValidRune(b[0]), 1, nil
+}
+
+func isValidRune(r rune) bool {
+ return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n'
+}
+
+// ValueType is an enum that will signify what type
+// the Value is
+type ValueType int
+
+func (v ValueType) String() string {
+ switch v {
+ case NoneType:
+ return "NONE"
+ case DecimalType:
+ return "FLOAT"
+ case IntegerType:
+ return "INT"
+ case StringType:
+ return "STRING"
+ case BoolType:
+ return "BOOL"
+ }
+
+ return ""
+}
+
+// ValueType enums
+const (
+ NoneType = ValueType(iota)
+ DecimalType
+ IntegerType
+ StringType
+ QuotedStringType
+ BoolType
+)
+
+// Value is a union container
+type Value struct {
+ Type ValueType
+ raw []rune
+
+ integer int64
+ decimal float64
+ boolean bool
+ str string
+}
+
+func newValue(t ValueType, base int, raw []rune) (Value, error) {
+ v := Value{
+ Type: t,
+ raw: raw,
+ }
+ var err error
+
+ switch t {
+ case DecimalType:
+ v.decimal, err = strconv.ParseFloat(string(raw), 64)
+ case IntegerType:
+ if base != 10 {
+ raw = raw[2:]
+ }
+
+ v.integer, err = strconv.ParseInt(string(raw), base, 64)
+ case StringType:
+ v.str = string(raw)
+ case QuotedStringType:
+ v.str = string(raw[1 : len(raw)-1])
+ case BoolType:
+ v.boolean = isCaselessLitValue(runesTrue, v.raw)
+ }
+
+ // issue 2253
+ //
+ // if the value trying to be parsed is too large, then we will use
+ // the 'StringType' and raw value instead.
+ if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange {
+ v.Type = StringType
+ v.str = string(raw)
+ err = nil
+ }
+
+ return v, err
+}
+
+// Append will append values and change the type to a string
+// type.
+func (v *Value) Append(tok Token) {
+ r := tok.Raw()
+ if v.Type != QuotedStringType {
+ v.Type = StringType
+ r = tok.raw[1 : len(tok.raw)-1]
+ }
+ if tok.Type() != TokenLit {
+ v.raw = append(v.raw, tok.Raw()...)
+ } else {
+ v.raw = append(v.raw, r...)
+ }
+}
+
+func (v Value) String() string {
+ switch v.Type {
+ case DecimalType:
+ return fmt.Sprintf("decimal: %f", v.decimal)
+ case IntegerType:
+ return fmt.Sprintf("integer: %d", v.integer)
+ case StringType:
+ return fmt.Sprintf("string: %s", string(v.raw))
+ case QuotedStringType:
+ return fmt.Sprintf("quoted string: %s", string(v.raw))
+ case BoolType:
+ return fmt.Sprintf("bool: %t", v.boolean)
+ default:
+ return "union not set"
+ }
+}
+
+func newLitToken(b []rune) (Token, int, error) {
+ n := 0
+ var err error
+
+ token := Token{}
+ if b[0] == '"' {
+ n, err = getStringValue(b)
+ if err != nil {
+ return token, n, err
+ }
+
+ token = newToken(TokenLit, b[:n], QuotedStringType)
+ } else if isNumberValue(b) {
+ var base int
+ base, n, err = getNumericalValue(b)
+ if err != nil {
+ return token, 0, err
+ }
+
+ value := b[:n]
+ vType := IntegerType
+ if contains(value, '.') || hasExponent(value) {
+ vType = DecimalType
+ }
+ token = newToken(TokenLit, value, vType)
+ token.base = base
+ } else if isBoolValue(b) {
+ n, err = getBoolValue(b)
+
+ token = newToken(TokenLit, b[:n], BoolType)
+ } else {
+ n, err = getValue(b)
+ token = newToken(TokenLit, b[:n], StringType)
+ }
+
+ return token, n, err
+}
+
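+// To illustrate the classification above (a sketch, not exercised here):
+//
+//	newLitToken([]rune(`"hi"`)) // QuotedStringType
+//	newLitToken([]rune("0x2B")) // IntegerType, base 16
+//	newLitToken([]rune("1e-4")) // DecimalType (has an exponent)
+//	newLitToken([]rune("true")) // BoolType
+//	newLitToken([]rune("abc"))  // StringType
+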
+// IntValue returns an integer value
+func (v Value) IntValue() int64 {
+ return v.integer
+}
+
+// FloatValue returns a float value
+func (v Value) FloatValue() float64 {
+ return v.decimal
+}
+
+// BoolValue returns a bool value
+func (v Value) BoolValue() bool {
+ return v.boolean
+}
+
+func isTrimmable(r rune) bool {
+ switch r {
+ case '\n', ' ':
+ return true
+ }
+ return false
+}
+
+// StringValue returns the string value
+func (v Value) StringValue() string {
+ switch v.Type {
+ case StringType:
+ return strings.TrimFunc(string(v.raw), isTrimmable)
+ case QuotedStringType:
+ // preserve all characters in the quotes
+ return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1]))
+ default:
+ return strings.TrimFunc(string(v.raw), isTrimmable)
+ }
+}
+
+func contains(runes []rune, c rune) bool {
+ for i := 0; i < len(runes); i++ {
+ if runes[i] == c {
+ return true
+ }
+ }
+
+ return false
+}
+
+func runeCompare(v1 []rune, v2 []rune) bool {
+ if len(v1) != len(v2) {
+ return false
+ }
+
+ for i := 0; i < len(v1); i++ {
+ if v1[i] != v2[i] {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/newline_token.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/newline_token.go
new file mode 100644
index 0000000000000..e52ac399f17d4
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/newline_token.go
@@ -0,0 +1,30 @@
+package ini
+
+func isNewline(b []rune) bool {
+ if len(b) == 0 {
+ return false
+ }
+
+ if b[0] == '\n' {
+ return true
+ }
+
+ if len(b) < 2 {
+ return false
+ }
+
+ return b[0] == '\r' && b[1] == '\n'
+}
+
+func newNewlineToken(b []rune) (Token, int, error) {
+ i := 1
+ if b[0] == '\r' && isNewline(b[1:]) {
+ i++
+ }
+
+ if !isNewline([]rune(b[:i])) {
+ return emptyToken, 0, NewParseError("invalid new line token")
+ }
+
+ return newToken(TokenNL, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/number_helper.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/number_helper.go
new file mode 100644
index 0000000000000..a45c0bc56622a
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/number_helper.go
@@ -0,0 +1,152 @@
+package ini
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+)
+
+const (
+ none = numberFormat(iota)
+ binary
+ octal
+ decimal
+ hex
+ exponent
+)
+
+type numberFormat int
+
+// numberHelper is used to dictate what format a number is in
+// and what to do for negative values. Since -1e-4 is a valid
+// number, we cannot simply check for duplicate negatives.
+type numberHelper struct {
+ numberFormat numberFormat
+
+ negative bool
+ negativeExponent bool
+}
+
+func (b numberHelper) Exists() bool {
+ return b.numberFormat != none
+}
+
+func (b numberHelper) IsNegative() bool {
+ return b.negative || b.negativeExponent
+}
+
+func (b *numberHelper) Determine(c rune) error {
+ if b.Exists() {
+ return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c)))
+ }
+
+ switch c {
+ case 'b':
+ b.numberFormat = binary
+ case 'o':
+ b.numberFormat = octal
+ case 'x':
+ b.numberFormat = hex
+ case 'e', 'E':
+ b.numberFormat = exponent
+ case '-':
+ if b.numberFormat != exponent {
+ b.negative = true
+ } else {
+ b.negativeExponent = true
+ }
+ case '.':
+ b.numberFormat = decimal
+ default:
+ return NewParseError(fmt.Sprintf("invalid number character: %v", string(c)))
+ }
+
+ return nil
+}
+
+func (b numberHelper) CorrectByte(c rune) bool {
+ switch {
+ case b.numberFormat == binary:
+ if !isBinaryByte(c) {
+ return false
+ }
+ case b.numberFormat == octal:
+ if !isOctalByte(c) {
+ return false
+ }
+ case b.numberFormat == hex:
+ if !isHexByte(c) {
+ return false
+ }
+ case b.numberFormat == decimal:
+ if !isDigit(c) {
+ return false
+ }
+ case b.numberFormat == exponent:
+ if !isDigit(c) {
+ return false
+ }
+ case b.negativeExponent:
+ if !isDigit(c) {
+ return false
+ }
+ case b.negative:
+ if !isDigit(c) {
+ return false
+ }
+ default:
+ if !isDigit(c) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (b numberHelper) Base() int {
+ switch b.numberFormat {
+ case binary:
+ return 2
+ case octal:
+ return 8
+ case hex:
+ return 16
+ default:
+ return 10
+ }
+}
+
+func (b numberHelper) String() string {
+ buf := bytes.Buffer{}
+ i := 0
+
+ switch b.numberFormat {
+ case binary:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": binary format\n")
+ case octal:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": octal format\n")
+ case hex:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": hex format\n")
+ case exponent:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": exponent format\n")
+ default:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": integer format\n")
+ }
+
+ if b.negative {
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": negative format\n")
+ }
+
+ if b.negativeExponent {
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n")
+ }
+
+ return buf.String()
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/op_tokens.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/op_tokens.go
new file mode 100644
index 0000000000000..8a84c7cbe0809
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/op_tokens.go
@@ -0,0 +1,39 @@
+package ini
+
+import (
+ "fmt"
+)
+
+var (
+ equalOp = []rune("=")
+ equalColonOp = []rune(":")
+)
+
+func isOp(b []rune) bool {
+ if len(b) == 0 {
+ return false
+ }
+
+ switch b[0] {
+ case '=':
+ return true
+ case ':':
+ return true
+ default:
+ return false
+ }
+}
+
+func newOpToken(b []rune) (Token, int, error) {
+ tok := Token{}
+
+ switch b[0] {
+ case '=':
+ tok = newToken(TokenOp, equalOp, NoneType)
+ case ':':
+ tok = newToken(TokenOp, equalColonOp, NoneType)
+ default:
+ return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0]))
+ }
+ return tok, 1, nil
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/parse_error.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/parse_error.go
new file mode 100644
index 0000000000000..45728701931ce
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/parse_error.go
@@ -0,0 +1,43 @@
+package ini
+
+import "fmt"
+
+const (
+ // ErrCodeParseError is returned when a parsing error
+ // has occurred.
+ ErrCodeParseError = "INIParseError"
+)
+
+// ParseError is an error which is returned during any part of
+// the parsing process.
+type ParseError struct {
+ msg string
+}
+
+// NewParseError will return a new ParseError where message
+// is the description of the error.
+func NewParseError(message string) *ParseError {
+ return &ParseError{
+ msg: message,
+ }
+}
+
+// Code will return the ErrCodeParseError
+func (err *ParseError) Code() string {
+ return ErrCodeParseError
+}
+
+// Message returns the error's message
+func (err *ParseError) Message() string {
+ return err.msg
+}
+
+// OrigError returns nil since there will never be any
+// original error.
+func (err *ParseError) OrigError() error {
+ return nil
+}
+
+func (err *ParseError) Error() string {
+ return fmt.Sprintf("%s: %s", err.Code(), err.Message())
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/parse_stack.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/parse_stack.go
new file mode 100644
index 0000000000000..7f01cf7c70367
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/parse_stack.go
@@ -0,0 +1,60 @@
+package ini
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// ParseStack is a stack that contains a container, the stack portion,
+// and the list which is the list of ASTs that have been successfully
+// parsed.
+type ParseStack struct {
+ top int
+ container []AST
+ list []AST
+ index int
+}
+
+func newParseStack(sizeContainer, sizeList int) ParseStack {
+ return ParseStack{
+ container: make([]AST, sizeContainer),
+ list: make([]AST, sizeList),
+ }
+}
+
+// Pop will return and truncate the last container element.
+func (s *ParseStack) Pop() AST {
+ s.top--
+ return s.container[s.top]
+}
+
+// Push will add the new AST to the container
+func (s *ParseStack) Push(ast AST) {
+ s.container[s.top] = ast
+ s.top++
+}
+
+// MarkComplete will append the AST to the list of completed statements
+func (s *ParseStack) MarkComplete(ast AST) {
+ s.list[s.index] = ast
+ s.index++
+}
+
+// List will return the completed statements
+func (s ParseStack) List() []AST {
+ return s.list[:s.index]
+}
+
+// Len will return the length of the container
+func (s *ParseStack) Len() int {
+ return s.top
+}
+
+func (s ParseStack) String() string {
+ buf := bytes.Buffer{}
+ for i, node := range s.list {
+ buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node))
+ }
+
+ return buf.String()
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/sep_tokens.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/sep_tokens.go
new file mode 100644
index 0000000000000..f82095ba2594e
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/sep_tokens.go
@@ -0,0 +1,41 @@
+package ini
+
+import (
+ "fmt"
+)
+
+var (
+ emptyRunes = []rune{}
+)
+
+func isSep(b []rune) bool {
+ if len(b) == 0 {
+ return false
+ }
+
+ switch b[0] {
+ case '[', ']':
+ return true
+ default:
+ return false
+ }
+}
+
+var (
+ openBrace = []rune("[")
+ closeBrace = []rune("]")
+)
+
+func newSepToken(b []rune) (Token, int, error) {
+ tok := Token{}
+
+ switch b[0] {
+ case '[':
+ tok = newToken(TokenSep, openBrace, NoneType)
+ case ']':
+ tok = newToken(TokenSep, closeBrace, NoneType)
+ default:
+ return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0]))
+ }
+ return tok, 1, nil
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/skipper.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/skipper.go
new file mode 100644
index 0000000000000..da7a4049cfae5
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/skipper.go
@@ -0,0 +1,45 @@
+package ini
+
+// skipper is used to skip certain blocks of an ini file.
+// Currently it is used to skip nested blocks of ini
+// files. See the example below:
+//
+// [ foo ]
+// nested = ; this section will be skipped
+// a=b
+// c=d
+// bar=baz ; this will be included
+type skipper struct {
+ shouldSkip bool
+ TokenSet bool
+ prevTok Token
+}
+
+func newSkipper() skipper {
+ return skipper{
+ prevTok: emptyToken,
+ }
+}
+
+func (s *skipper) ShouldSkip(tok Token) bool {
+	// the skip state is cleared only if the previous token was a newline (NL)
+	// and the current token is not whitespace (WS).
+ if s.shouldSkip &&
+ s.prevTok.Type() == TokenNL &&
+ tok.Type() != TokenWS {
+ s.Continue()
+ return false
+ }
+ s.prevTok = tok
+ return s.shouldSkip
+}
+
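+// For example (mirroring the ini snippet above), once Skip has been
+// called the token stream
+//
+//	lit(a) op(=) lit(b) NL ws lit(c) op(=) lit(d) NL lit(bar) ...
+//
+// keeps being skipped until lit(bar), the first non-whitespace token
+// directly following a newline, at which point Continue resets the state.
+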
+func (s *skipper) Skip() {
+ s.shouldSkip = true
+}
+
+func (s *skipper) Continue() {
+ s.shouldSkip = false
+	// an empty token is assigned as we return to the default state, where shouldSkip is false
+ s.prevTok = emptyToken
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/statement.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/statement.go
new file mode 100644
index 0000000000000..18f3fe893170c
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/statement.go
@@ -0,0 +1,35 @@
+package ini
+
+// Statement is an empty AST mostly used for transitioning states.
+func newStatement() AST {
+ return newAST(ASTKindStatement, AST{})
+}
+
+// SectionStatement represents a section AST
+func newSectionStatement(tok Token) AST {
+ return newASTWithRootToken(ASTKindSectionStatement, tok)
+}
+
+// ExprStatement represents a completed expression AST
+func newExprStatement(ast AST) AST {
+ return newAST(ASTKindExprStatement, ast)
+}
+
+// CommentStatement represents a comment in the ini definition.
+//
+// grammar:
+// comment -> #comment' | ;comment'
+// comment' -> epsilon | value
+func newCommentStatement(tok Token) AST {
+ return newAST(ASTKindCommentStatement, newExpression(tok))
+}
+
+// CompletedSectionStatement represents a completed section
+func newCompletedSectionStatement(ast AST) AST {
+ return newAST(ASTKindCompletedSectionStatement, ast)
+}
+
+// SkipStatement is used to skip whole statements
+func newSkipStatement(ast AST) AST {
+ return newAST(ASTKindSkipStatement, ast)
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/value_util.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/value_util.go
new file mode 100644
index 0000000000000..b5480fdeb359b
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/value_util.go
@@ -0,0 +1,284 @@
+package ini
+
+import (
+ "fmt"
+)
+
+// getStringValue will return the number of bytes consumed by a
+// quoted string value
+//
+// an error will be returned if the string is not properly formatted
+func getStringValue(b []rune) (int, error) {
+ if b[0] != '"' {
+ return 0, NewParseError("strings must start with '\"'")
+ }
+
+ endQuote := false
+ i := 1
+
+ for ; i < len(b) && !endQuote; i++ {
+ if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped {
+ endQuote = true
+ break
+ } else if escaped {
+ /*c, err := getEscapedByte(b[i])
+ if err != nil {
+ return 0, err
+ }
+
+ b[i-1] = c
+ b = append(b[:i], b[i+1:]...)
+ i--*/
+
+ continue
+ }
+ }
+
+ if !endQuote {
+ return 0, NewParseError("missing '\"' in string value")
+ }
+
+ return i + 1, nil
+}
+
+// getBoolValue will return the number of bytes consumed by a
+// boolean literal
+//
+// an error will be returned if the boolean is not of a correct
+// value
+func getBoolValue(b []rune) (int, error) {
+ if len(b) < 4 {
+ return 0, NewParseError("invalid boolean value")
+ }
+
+ n := 0
+ for _, lv := range literalValues {
+ if len(lv) > len(b) {
+ continue
+ }
+
+ if isCaselessLitValue(lv, b) {
+ n = len(lv)
+ }
+ }
+
+ if n == 0 {
+ return 0, NewParseError("invalid boolean value")
+ }
+
+ return n, nil
+}
+
+// getNumericalValue will return the base of the number and the
+// number of bytes read
+//
+// an error will be returned if the number is not of a correct
+// value
+func getNumericalValue(b []rune) (int, int, error) {
+ if !isDigit(b[0]) {
+ return 0, 0, NewParseError("invalid digit value")
+ }
+
+ i := 0
+ helper := numberHelper{}
+
+loop:
+ for negativeIndex := 0; i < len(b); i++ {
+ negativeIndex++
+
+ if !isDigit(b[i]) {
+ switch b[i] {
+ case '-':
+ if helper.IsNegative() || negativeIndex != 1 {
+ return 0, 0, NewParseError("parse error '-'")
+ }
+
+ n := getNegativeNumber(b[i:])
+ i += (n - 1)
+ helper.Determine(b[i])
+ continue
+ case '.':
+ if err := helper.Determine(b[i]); err != nil {
+ return 0, 0, err
+ }
+ case 'e', 'E':
+ if err := helper.Determine(b[i]); err != nil {
+ return 0, 0, err
+ }
+
+ negativeIndex = 0
+ case 'b':
+ if helper.numberFormat == hex {
+ break
+ }
+ fallthrough
+ case 'o', 'x':
+ if i == 0 && b[i] != '0' {
+ return 0, 0, NewParseError("incorrect base format, expected leading '0'")
+ }
+
+ if i != 1 {
+ return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i))
+ }
+
+ if err := helper.Determine(b[i]); err != nil {
+ return 0, 0, err
+ }
+ default:
+ if isWhitespace(b[i]) {
+ break loop
+ }
+
+ if isNewline(b[i:]) {
+ break loop
+ }
+
+ if !(helper.numberFormat == hex && isHexByte(b[i])) {
+ if i+2 < len(b) && !isNewline(b[i:i+2]) {
+ return 0, 0, NewParseError("invalid numerical character")
+ } else if !isNewline([]rune{b[i]}) {
+ return 0, 0, NewParseError("invalid numerical character")
+ }
+
+ break loop
+ }
+ }
+ }
+ }
+
+ return helper.Base(), i, nil
+}
+
+// isDigit will return whether or not the rune is a decimal digit
+func isDigit(b rune) bool {
+ return b >= '0' && b <= '9'
+}
+
+func hasExponent(v []rune) bool {
+ return contains(v, 'e') || contains(v, 'E')
+}
+
+func isBinaryByte(b rune) bool {
+ switch b {
+ case '0', '1':
+ return true
+ default:
+ return false
+ }
+}
+
+func isOctalByte(b rune) bool {
+ switch b {
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ return true
+ default:
+ return false
+ }
+}
+
+func isHexByte(b rune) bool {
+ if isDigit(b) {
+ return true
+ }
+ return (b >= 'A' && b <= 'F') ||
+ (b >= 'a' && b <= 'f')
+}
+
+func getValue(b []rune) (int, error) {
+ i := 0
+
+ for i < len(b) {
+ if isNewline(b[i:]) {
+ break
+ }
+
+ if isOp(b[i:]) {
+ break
+ }
+
+ valid, n, err := isValid(b[i:])
+ if err != nil {
+ return 0, err
+ }
+
+ if !valid {
+ break
+ }
+
+ i += n
+ }
+
+ return i, nil
+}
+
+// getNegativeNumber will return the length of the negative number at
+// the start of the rune slice. It iterates through the characters until
+// a non-digit has been found.
+func getNegativeNumber(b []rune) int {
+ if b[0] != '-' {
+ return 0
+ }
+
+ i := 1
+ for ; i < len(b); i++ {
+ if !isDigit(b[i]) {
+ return i
+ }
+ }
+
+ return i
+}
+
+// isEscaped will return whether or not the character is an escaped
+// character.
+func isEscaped(value []rune, b rune) bool {
+ if len(value) == 0 {
+ return false
+ }
+
+ switch b {
+ case '\'': // single quote
+ case '"': // quote
+ case 'n': // newline
+ case 't': // tab
+ case '\\': // backslash
+ default:
+ return false
+ }
+
+ return value[len(value)-1] == '\\'
+}
+
+func getEscapedByte(b rune) (rune, error) {
+ switch b {
+ case '\'': // single quote
+ return '\'', nil
+ case '"': // quote
+ return '"', nil
+ case 'n': // newline
+ return '\n', nil
+	case 't': // tab
+ return '\t', nil
+ case '\\': // backslash
+ return '\\', nil
+ default:
+ return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b))
+ }
+}
+
+func removeEscapedCharacters(b []rune) []rune {
+ for i := 0; i < len(b); i++ {
+ if isEscaped(b[:i], b[i]) {
+ c, err := getEscapedByte(b[i])
+ if err != nil {
+ return b
+ }
+
+ b[i-1] = c
+ b = append(b[:i], b[i+1:]...)
+ i--
+ }
+ }
+
+ return b
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/visitor.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/visitor.go
new file mode 100644
index 0000000000000..081cf43342419
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/visitor.go
@@ -0,0 +1,169 @@
+package ini
+
+import (
+ "fmt"
+ "sort"
+)
+
+// Visitor is an interface used by walkers that will
+// traverse an array of ASTs.
+type Visitor interface {
+ VisitExpr(AST) error
+ VisitStatement(AST) error
+}
+
+// DefaultVisitor is used to visit statements and expressions
+// and ensure that they are both of the correct format.
+// In addition, upon visiting this will build sections and populate
+// the Sections field which can be used to retrieve profile
+// configuration.
+type DefaultVisitor struct {
+ scope string
+ Sections Sections
+}
+
+// NewDefaultVisitor returns a DefaultVisitor
+func NewDefaultVisitor() *DefaultVisitor {
+ return &DefaultVisitor{
+ Sections: Sections{
+ container: map[string]Section{},
+ },
+ }
+}
+
+// VisitExpr visits expression statements and records their key/value
+// pairs in the section currently in scope.
+func (v *DefaultVisitor) VisitExpr(expr AST) error {
+ t := v.Sections.container[v.scope]
+ if t.values == nil {
+ t.values = values{}
+ }
+
+ switch expr.Kind {
+ case ASTKindExprStatement:
+ opExpr := expr.GetRoot()
+ switch opExpr.Kind {
+ case ASTKindEqualExpr:
+ children := opExpr.GetChildren()
+ if len(children) <= 1 {
+ return NewParseError("unexpected token type")
+ }
+
+ rhs := children[1]
+
+ // The right-hand value side the equality expression is allowed to contain '[', ']', ':', '=' in the values.
+ // If the token is not either a literal or one of the token types that identifies those four additional
+ // tokens then error.
+ if !(rhs.Root.Type() == TokenLit || rhs.Root.Type() == TokenOp || rhs.Root.Type() == TokenSep) {
+ return NewParseError("unexpected token type")
+ }
+
+ key := EqualExprKey(opExpr)
+ v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw())
+ if err != nil {
+ return err
+ }
+
+ t.values[key] = v
+ default:
+ return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+ }
+ default:
+ return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+ }
+
+ v.Sections.container[v.scope] = t
+ return nil
+}
+
+// VisitStatement visits completed section statements and updates the
+// visitor's scope to the new section name.
+func (v *DefaultVisitor) VisitStatement(stmt AST) error {
+ switch stmt.Kind {
+ case ASTKindCompletedSectionStatement:
+ child := stmt.GetRoot()
+ if child.Kind != ASTKindSectionStatement {
+ return NewParseError(fmt.Sprintf("unsupported child statement: %T", child))
+ }
+
+ name := string(child.Root.Raw())
+ v.Sections.container[name] = Section{}
+ v.scope = name
+ default:
+ return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind))
+ }
+
+ return nil
+}
+
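+// A minimal end-to-end sketch (the ini content below is illustrative):
+//
+//	tree, _ := ParseASTBytes([]byte("[default]\nregion = us-west-2\n"))
+//	v := NewDefaultVisitor()
+//	if err := Walk(tree, v); err != nil {
+//		// handle the walk error
+//	}
+//	if sec, ok := v.Sections.GetSection("default"); ok {
+//		_ = sec.String("region") // "us-west-2"
+//	}
+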
+// Sections is a map of Section structures that represent
+// a configuration.
+type Sections struct {
+ container map[string]Section
+}
+
+// GetSection will return section p. If section p does not exist,
+// false will be returned as the second return value.
+func (t Sections) GetSection(p string) (Section, bool) {
+ v, ok := t.container[p]
+ return v, ok
+}
+
+// values represents a map of union values.
+type values map[string]Value
+
+// List will return a list of all sections that were successfully
+// parsed.
+func (t Sections) List() []string {
+ keys := make([]string, len(t.container))
+ i := 0
+ for k := range t.container {
+ keys[i] = k
+ i++
+ }
+
+ sort.Strings(keys)
+ return keys
+}
+
+// Section contains a name and values. This represents
+// a sectioned entry in a configuration file.
+type Section struct {
+ Name string
+ values values
+}
+
+// Has will return whether or not an entry exists in a given section
+func (t Section) Has(k string) bool {
+ _, ok := t.values[k]
+ return ok
+}
+
+// ValueType will return what type the union is set to. If
+// k was not found, the NoneType will be returned.
+func (t Section) ValueType(k string) (ValueType, bool) {
+ v, ok := t.values[k]
+ return v.Type, ok
+}
+
+// Bool returns a bool value at k
+func (t Section) Bool(k string) bool {
+ return t.values[k].BoolValue()
+}
+
+// Int returns an integer value at k
+func (t Section) Int(k string) int64 {
+ return t.values[k].IntValue()
+}
+
+// Float64 returns a float value at k
+func (t Section) Float64(k string) float64 {
+ return t.values[k].FloatValue()
+}
+
+// String returns the string value at k
+func (t Section) String(k string) string {
+ _, ok := t.values[k]
+ if !ok {
+ return ""
+ }
+ return t.values[k].StringValue()
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/walker.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/walker.go
new file mode 100644
index 0000000000000..99915f7f777ce
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/walker.go
@@ -0,0 +1,25 @@
+package ini
+
+// Walk will traverse the AST using the v, the Visitor.
+func Walk(tree []AST, v Visitor) error {
+ for _, node := range tree {
+ switch node.Kind {
+ case ASTKindExpr,
+ ASTKindExprStatement:
+
+ if err := v.VisitExpr(node); err != nil {
+ return err
+ }
+ case ASTKindStatement,
+ ASTKindCompletedSectionStatement,
+ ASTKindNestedSectionStatement,
+ ASTKindCompletedNestedSectionStatement:
+
+ if err := v.VisitStatement(node); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ws_token.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ws_token.go
new file mode 100644
index 0000000000000..7ffb4ae06ff0c
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/ini/ws_token.go
@@ -0,0 +1,24 @@
+package ini
+
+import (
+ "unicode"
+)
+
+// isWhitespace will return whether or not the character is
+// a whitespace character.
+//
+// Whitespace is any Unicode space character other than '\n' or '\r'.
+func isWhitespace(c rune) bool {
+ return unicode.IsSpace(c) && c != '\n' && c != '\r'
+}
+
+func newWSToken(b []rune) (Token, int, error) {
+ i := 0
+ for ; i < len(b); i++ {
+ if !isWhitespace(b[i]) {
+ break
+ }
+ }
+
+ return newToken(TokenWS, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn/accesspoint_arn.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn/accesspoint_arn.go
new file mode 100644
index 0000000000000..9cf7bb66c0934
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn/accesspoint_arn.go
@@ -0,0 +1,50 @@
+package arn
+
+import (
+ "strings"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/arn"
+)
+
+// AccessPointARN provides a representation of an S3 access point ARN.
+type AccessPointARN struct {
+ arn.ARN
+ AccessPointName string
+}
+
+// GetARN returns the base ARN for the Access Point resource
+func (a AccessPointARN) GetARN() arn.ARN {
+ return a.ARN
+}
+
+// ParseAccessPointResource attempts to parse the ARN's resource as an
+// AccessPoint resource.
+//
+// Supported Access point resource format:
+// - Access point format: arn:{partition}:s3:{region}:{accountId}:accesspoint/{accesspointName}
+// - example: arn:aws:s3:us-west-2:012345678901:accesspoint/myaccesspoint
+//
+func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) {
+ if len(a.Region) == 0 {
+ return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "region not set"}
+ }
+ if len(a.AccountID) == 0 {
+ return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "account-id not set"}
+ }
+ if len(resParts) == 0 {
+ return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"}
+ }
+ if len(resParts) > 1 {
+ return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "sub resource not supported"}
+ }
+
+ resID := resParts[0]
+ if len(strings.TrimSpace(resID)) == 0 {
+ return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"}
+ }
+
+ return AccessPointARN{
+ ARN: a,
+ AccessPointName: resID,
+ }, nil
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn/arn.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn/arn.go
new file mode 100644
index 0000000000000..abf6427056733
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn/arn.go
@@ -0,0 +1,95 @@
+package arn
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/arn"
+)
+
+var supportedServiceARN = []string{
+ "s3",
+ "s3-outposts",
+ "s3-object-lambda",
+}
+
+func isSupportedServiceARN(service string) bool {
+ for _, name := range supportedServiceARN {
+ if name == service {
+ return true
+ }
+ }
+ return false
+}
+
+// Resource provides the interface abstracting ARNs of specific resource
+// types.
+type Resource interface {
+ GetARN() arn.ARN
+ String() string
+}
+
+// ResourceParser provides the function for parsing an ARN's resource
+// component into a typed resource.
+type ResourceParser func(arn.ARN) (Resource, error)
+
+// ParseResource parses an AWS ARN into a typed resource for the S3 API.
+func ParseResource(s string, resParser ResourceParser) (resARN Resource, err error) {
+ a, err := arn.Parse(s)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(a.Partition) == 0 {
+ return nil, InvalidARNError{ARN: a, Reason: "partition not set"}
+ }
+
+ if !isSupportedServiceARN(a.Service) {
+ return nil, InvalidARNError{ARN: a, Reason: "service is not supported"}
+ }
+
+ //IBM Unsupported
+ /*if strings.HasPrefix(a.Region, "fips-") || strings.HasSuffix(a.Region, "-fips") {
+ return nil, InvalidARNError{ARN: a, Reason: "FIPS region not allowed in ARN"}
+ }*/
+
+ if len(a.Resource) == 0 {
+ return nil, InvalidARNError{ARN: a, Reason: "resource not set"}
+ }
+
+ return resParser(a)
+}
+
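+// A sketch of how a caller might wire this up (illustrative only; the
+// closure stands in for a service-specific ResourceParser):
+//
+//	res, err := ParseResource("arn:aws:s3:us-west-2:012345678901:accesspoint/myap",
+//		func(a arn.ARN) (Resource, error) {
+//			return ParseAccessPointResource(a, SplitResource(a.Resource)[1:])
+//		})
+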
+// SplitResource splits the resource components by the ARN resource delimiters.
+func SplitResource(v string) []string {
+ var parts []string
+ var offset int
+
+ for offset <= len(v) {
+ idx := strings.IndexAny(v[offset:], "/:")
+ if idx < 0 {
+ parts = append(parts, v[offset:])
+ break
+ }
+ parts = append(parts, v[offset:idx+offset])
+ offset += idx + 1
+ }
+
+ return parts
+}
+
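+// For example:
+//
+//	SplitResource("accesspoint/myap")          // ["accesspoint", "myap"]
+//	SplitResource("outpost/op-01/bucket/mybk") // ["outpost", "op-01", "bucket", "mybk"]
+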
+// IsARN returns whether the given string is an ARN
+func IsARN(s string) bool {
+ return arn.IsARN(s)
+}
+
+// InvalidARNError provides the error for an invalid ARN error.
+type InvalidARNError struct {
+ ARN arn.ARN
+ Reason string
+}
+
+// Error returns a string describing the InvalidARNError
+func (e InvalidARNError) Error() string {
+ return fmt.Sprintf("invalid Amazon %s ARN, %s, %s", e.ARN.Service, e.Reason, e.ARN.String())
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn/outpost_arn.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn/outpost_arn.go
new file mode 100644
index 0000000000000..45bb994af7c3f
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn/outpost_arn.go
@@ -0,0 +1,126 @@
+package arn
+
+import (
+ "strings"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/arn"
+)
+
+// OutpostARN interface that should be satisfied by outpost ARNs
+type OutpostARN interface {
+ Resource
+ GetOutpostID() string
+}
+
+// ParseOutpostARNResource will parse a provided ARN's resource using the appropriate ARN format
+// and return a specific OutpostARN type
+//
+// Currently supported outpost ARN formats:
+// * Outpost AccessPoint ARN format:
+// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName}
+// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint
+//
+// * Outpost Bucket ARN format:
+// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/bucket/{bucketName}
+// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/bucket/mybucket
+//
+// Other outpost ARN formats may be supported and added in the future.
+//
+func ParseOutpostARNResource(a arn.ARN, resParts []string) (OutpostARN, error) {
+ if len(a.Region) == 0 {
+ return nil, InvalidARNError{ARN: a, Reason: "region not set"}
+ }
+
+ if len(a.AccountID) == 0 {
+ return nil, InvalidARNError{ARN: a, Reason: "account-id not set"}
+ }
+
+ // verify if outpost id is present and valid
+ if len(resParts) == 0 || len(strings.TrimSpace(resParts[0])) == 0 {
+ return nil, InvalidARNError{ARN: a, Reason: "outpost resource-id not set"}
+ }
+
+ // verify possible resource type exists
+ if len(resParts) < 3 {
+ return nil, InvalidARNError{
+ ARN: a, Reason: "incomplete outpost resource type. Expected bucket or access-point resource to be present",
+ }
+ }
+
+	// Since we know this is an outpost ARN, fetch the outpost ID
+ outpostID := strings.TrimSpace(resParts[0])
+
+ switch resParts[1] {
+ case "accesspoint":
+ accesspointARN, err := ParseAccessPointResource(a, resParts[2:])
+ if err != nil {
+ return OutpostAccessPointARN{}, err
+ }
+ return OutpostAccessPointARN{
+ AccessPointARN: accesspointARN,
+ OutpostID: outpostID,
+ }, nil
+
+ case "bucket":
+ bucketName, err := parseBucketResource(a, resParts[2:])
+ if err != nil {
+ return nil, err
+ }
+ return OutpostBucketARN{
+ ARN: a,
+ BucketName: bucketName,
+ OutpostID: outpostID,
+ }, nil
+
+ default:
+ return nil, InvalidARNError{ARN: a, Reason: "unknown resource set for outpost ARN"}
+ }
+}
+
+// OutpostAccessPointARN represents outpost access point ARN.
+type OutpostAccessPointARN struct {
+ AccessPointARN
+ OutpostID string
+}
+
+// GetOutpostID returns the outpost id of outpost access point arn
+func (o OutpostAccessPointARN) GetOutpostID() string {
+ return o.OutpostID
+}
+
+// OutpostBucketARN represents the outpost bucket ARN.
+type OutpostBucketARN struct {
+ arn.ARN
+ BucketName string
+ OutpostID string
+}
+
+// GetOutpostID returns the outpost id of outpost bucket arn
+func (o OutpostBucketARN) GetOutpostID() string {
+ return o.OutpostID
+}
+
+// GetARN retrieves the base ARN from the outpost bucket ARN resource
+func (o OutpostBucketARN) GetARN() arn.ARN {
+ return o.ARN
+}
+
+// parseBucketResource attempts to parse the ARN's bucket resource and retrieve the
+// bucket resource id.
+//
+// parseBucketResource only parses the bucket resource id.
+//
+func parseBucketResource(a arn.ARN, resParts []string) (bucketName string, err error) {
+ if len(resParts) == 0 {
+ return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"}
+ }
+ if len(resParts) > 1 {
+ return bucketName, InvalidARNError{ARN: a, Reason: "sub resource not supported"}
+ }
+
+ bucketName = strings.TrimSpace(resParts[0])
+ if len(bucketName) == 0 {
+ return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"}
+ }
+ return bucketName, err
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go
new file mode 100644
index 0000000000000..513154cc0e317
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go
@@ -0,0 +1,15 @@
+package arn
+
+// S3ObjectLambdaARN represents an ARN for the s3-object-lambda service
+type S3ObjectLambdaARN interface {
+ Resource
+
+ isS3ObjectLambdasARN()
+}
+
+// S3ObjectLambdaAccessPointARN is an S3ObjectLambdaARN for the Access Point resource type
+type S3ObjectLambdaAccessPointARN struct {
+ AccessPointARN
+}
+
+func (s S3ObjectLambdaAccessPointARN) isS3ObjectLambdasARN() {}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/endpoint_errors.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/endpoint_errors.go
new file mode 100644
index 0000000000000..d537110e7e684
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/endpoint_errors.go
@@ -0,0 +1,203 @@
+package s3shared
+
+import (
+ "fmt"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn"
+)
+
+const (
+ invalidARNErrorErrCode = "InvalidARNError"
+ configurationErrorErrCode = "ConfigurationError"
+)
+
+// InvalidARNError denotes the error for Invalid ARN
+type InvalidARNError struct {
+ message string
+ resource arn.Resource
+ origErr error
+}
+
+// Error returns the InvalidARNError
+func (e InvalidARNError) Error() string {
+ var extra string
+ if e.resource != nil {
+ extra = "ARN: " + e.resource.String()
+ }
+ return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr)
+}
+
+// Code returns the invalid ARN error code
+func (e InvalidARNError) Code() string {
+ return invalidARNErrorErrCode
+}
+
+// Message returns the message for Invalid ARN error
+func (e InvalidARNError) Message() string {
+ return e.message
+}
+
+// OrigErr is the original error wrapped by Invalid ARN Error
+func (e InvalidARNError) OrigErr() error {
+ return e.origErr
+}
+
+// NewInvalidARNError denotes invalid arn error
+func NewInvalidARNError(resource arn.Resource, err error) InvalidARNError {
+ return InvalidARNError{
+ message: "invalid ARN",
+ origErr: err,
+ resource: resource,
+ }
+}
+
+// NewInvalidARNWithCustomEndpointError ARN not supported for custom clients endpoints
+func NewInvalidARNWithCustomEndpointError(resource arn.Resource, err error) InvalidARNError {
+ return InvalidARNError{
+ message: "resource ARN not supported with custom client endpoints",
+ origErr: err,
+ resource: resource,
+ }
+}
+
+// NewInvalidARNWithUnsupportedPartitionError ARN not supported for the target partition
+func NewInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) InvalidARNError {
+ return InvalidARNError{
+ message: "resource ARN not supported for the target ARN partition",
+ origErr: err,
+ resource: resource,
+ }
+}
+
+// NewInvalidARNWithFIPSError ARN not supported for FIPS region
+//
+// Deprecated: FIPS will not appear in the ARN region component.
+func NewInvalidARNWithFIPSError(resource arn.Resource, err error) InvalidARNError {
+ return InvalidARNError{
+ message: "resource ARN not supported for FIPS region",
+ resource: resource,
+ origErr: err,
+ }
+}
+
+// ConfigurationError is used to denote a client configuration error
+type ConfigurationError struct {
+ message string
+ resource arn.Resource
+ clientPartitionID string
+ clientRegion string
+ origErr error
+}
+
+// Error returns the Configuration error string
+func (e ConfigurationError) Error() string {
+ extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s",
+ e.resource, e.clientPartitionID, e.clientRegion)
+
+ return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr)
+}
+
+// Code returns the configuration error's error code
+func (e ConfigurationError) Code() string {
+ return configurationErrorErrCode
+}
+
+// Message returns the configuration error message
+func (e ConfigurationError) Message() string {
+ return e.message
+}
+
+// OrigErr is the original error wrapped by Configuration Error
+func (e ConfigurationError) OrigErr() error {
+ return e.origErr
+}
+
+// NewClientPartitionMismatchError denotes a partition mismatch between the client and the provided ARN
+func NewClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+ return ConfigurationError{
+ message: "client partition does not match provided ARN partition",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
+
+// NewClientRegionMismatchError denotes cross region access error
+func NewClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+ return ConfigurationError{
+ message: "client region does not match provided ARN region",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
+
+// NewFailedToResolveEndpointError denotes endpoint resolving error
+func NewFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+ return ConfigurationError{
+ message: "endpoint resolver failed to find an endpoint for the provided ARN region",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
+
+// NewClientConfiguredForFIPSError denotes client config error for unsupported cross region FIPS access
+func NewClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+ return ConfigurationError{
+ message: "client configured for fips but cross-region resource ARN provided",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
+
+// NewFIPSConfigurationError denotes a configuration error when a client or request is configured for FIPS
+// IBM Unsupported
+/*func NewFIPSConfigurationError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+ return ConfigurationError{
+ message: "use of ARN is not supported when client or request is configured for FIPS",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}*/
+
+// NewClientConfiguredForAccelerateError denotes client config error for unsupported S3 accelerate
+func NewClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+ return ConfigurationError{
+ message: "client configured for S3 Accelerate but is not supported with resource ARN",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
+
+// NewClientConfiguredForCrossRegionFIPSError denotes client config error for unsupported cross region FIPS request
+func NewClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+ return ConfigurationError{
+ message: "client configured for FIPS with cross-region enabled but is supported with cross-region resource ARN",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
+
+// NewClientConfiguredForDualStackError denotes client config error for unsupported S3 Dual-stack
+func NewClientConfiguredForDualStackError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+ return ConfigurationError{
+ message: "client configured for S3 Dual-stack but is not supported with resource ARN",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/resource_request.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/resource_request.go
new file mode 100644
index 0000000000000..1431c697016c2
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/resource_request.go
@@ -0,0 +1,56 @@
+package s3shared
+
+import (
+ "strings"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+ "github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn"
+)
+
+// ResourceRequest represents the request and arn resource
+type ResourceRequest struct {
+ Resource arn.Resource
+ Request *request.Request
+}
+
+// AllowCrossRegion returns a bool value to denote if S3UseARNRegion flag is set
+func (r ResourceRequest) AllowCrossRegion() bool {
+ return aws.BoolValue(r.Request.Config.S3UseARNRegion)
+}
+
+// UseFIPS returns true if request config region is FIPS
+func (r ResourceRequest) UseFIPS() bool {
+ return IsFIPS(aws.StringValue(r.Request.Config.Region))
+}
+
+// ResourceConfiguredForFIPS returns true if the resource ARN's region is FIPS
+func (r ResourceRequest) ResourceConfiguredForFIPS() bool {
+ return false // IBM does not support AWS ARN
+}
+
+// IsCrossPartition returns true if the client is configured for a partition
+// other than the one the resource ARN's region resolves to.
+func (r ResourceRequest) IsCrossPartition() bool {
+ return r.Request.ClientInfo.PartitionID != r.Resource.GetARN().Partition
+}
+
+// IsCrossRegion returns true if ARN region is different than client configured region
+func (r ResourceRequest) IsCrossRegion() bool {
+ return IsCrossRegion(r.Request, r.Resource.GetARN().Region)
+}
+
+// HasCustomEndpoint returns true if custom client endpoint is provided
+func (r ResourceRequest) HasCustomEndpoint() bool {
+ return len(aws.StringValue(r.Request.Config.Endpoint)) > 0
+}
+
+// IsFIPS returns true if region is a fips region
+func IsFIPS(clientRegion string) bool {
+ return strings.HasPrefix(clientRegion, "fips-") || strings.HasSuffix(clientRegion, "-fips")
+}
+
+// IsCrossRegion returns true if the request's signing region is not the same as otherRegion
+func IsCrossRegion(req *request.Request, otherRegion string) bool {
+ return req.ClientInfo.SigningRegion != otherRegion
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/s3err/error.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/s3err/error.go
new file mode 100644
index 0000000000000..f555483f0c773
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/s3shared/s3err/error.go
@@ -0,0 +1,57 @@
+package s3err
+
+import (
+ "fmt"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// RequestFailure provides additional S3 specific metadata for the request
+// failure.
+type RequestFailure struct {
+ awserr.RequestFailure
+
+ hostID string
+}
+
+// NewRequestFailure returns a request failure error decorated with S3
+// specific metadata.
+func NewRequestFailure(err awserr.RequestFailure, hostID string) *RequestFailure {
+ return &RequestFailure{RequestFailure: err, hostID: hostID}
+}
+
+func (r RequestFailure) Error() string {
+ extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
+ r.StatusCode(), r.RequestID(), r.hostID)
+ return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+func (r RequestFailure) String() string {
+ return r.Error()
+}
+
+// HostID returns the HostID request response value.
+func (r RequestFailure) HostID() string {
+ return r.hostID
+}
+
+// RequestFailureWrapperHandler returns a handler to wrap an
+// awserr.RequestFailure with the S3 request ID 2 from the response.
+func RequestFailureWrapperHandler() request.NamedHandler {
+ return request.NamedHandler{
+ Name: "awssdk.s3.errorHandler",
+ Fn: func(req *request.Request) {
+ reqErr, ok := req.Error.(awserr.RequestFailure)
+ if !ok || reqErr == nil {
+ return
+ }
+
+ hostID := req.HTTPResponse.Header.Get("X-Amz-Id-2")
+ if req.Error == nil {
+ return
+ }
+
+ req.Error = NewRequestFailure(reqErr, hostID)
+ },
+ }
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkio/byte.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkio/byte.go
new file mode 100644
index 0000000000000..6c443988bbc9a
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkio/byte.go
@@ -0,0 +1,12 @@
+package sdkio
+
+const (
+ // Byte is 8 bits
+ Byte int64 = 1
+ // KibiByte (KiB) is 1024 Bytes
+ KibiByte = Byte * 1024
+ // MebiByte (MiB) is 1024 KiB
+ MebiByte = KibiByte * 1024
+ // GibiByte (GiB) is 1024 MiB
+ GibiByte = MebiByte * 1024
+)
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkio/io_go1.7.go
new file mode 100644
index 0000000000000..65e7c60c4def2
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkio/io_go1.7.go
@@ -0,0 +1,13 @@
+//go:build go1.7
+// +build go1.7
+
+package sdkio
+
+import "io"
+
+// Alias for Go 1.7 io package Seeker constants
+const (
+ SeekStart = io.SeekStart // seek relative to the origin of the file
+ SeekCurrent = io.SeekCurrent // seek relative to the current offset
+ SeekEnd = io.SeekEnd // seek relative to the end
+)
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkmath/floor.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkmath/floor.go
new file mode 100644
index 0000000000000..a8452878324d6
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkmath/floor.go
@@ -0,0 +1,16 @@
+//go:build go1.10
+// +build go1.10
+
+package sdkmath
+
+import "math"
+
+// Round returns the nearest integer, rounding half away from zero.
+//
+// Special cases are:
+// Round(±0) = ±0
+// Round(±Inf) = ±Inf
+// Round(NaN) = NaN
+func Round(x float64) float64 {
+ return math.Round(x)
+}
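+
+// For example, Round(0.5) == 1, Round(-0.5) == -1, and Round(2.5) == 3:
+// half-values round away from zero rather than to the nearest even integer
+// (contrast math.RoundToEven).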
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkmath/floor_go1.9.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkmath/floor_go1.9.go
new file mode 100644
index 0000000000000..a3ae3e5dba8c7
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkmath/floor_go1.9.go
@@ -0,0 +1,57 @@
+//go:build !go1.10
+// +build !go1.10
+
+package sdkmath
+
+import "math"
+
+// Copied from the Go standard library's (Go 1.12) math/floor.go for use in
+// Go versions prior to Go 1.10.
+const (
+ uvone = 0x3FF0000000000000
+ mask = 0x7FF
+ shift = 64 - 11 - 1
+ bias = 1023
+ signMask = 1 << 63
+ fracMask = 1<<shift - 1
+)
+
+// Round returns the nearest integer, rounding half away from zero.
+//
+// Special cases are:
+// Round(±0) = ±0
+// Round(±Inf) = ±Inf
+// Round(NaN) = NaN
+//
+// Copied from the Go standard library's (Go 1.12) math/floor.go for use in
+// Go versions prior to Go 1.10.
+func Round(x float64) float64 {
+ // Round is a faster implementation of:
+ //
+ // func Round(x float64) float64 {
+ // t := Trunc(x)
+ // if Abs(x-t) >= 0.5 {
+ // return t + Copysign(1, x)
+ // }
+ // return t
+ // }
+ bits := math.Float64bits(x)
+ e := uint(bits>>shift) & mask
+ if e < bias {
+ // Round abs(x) < 1 including denormals.
+ bits &= signMask // +-0
+ if e == bias-1 {
+ bits |= uvone // +-1
+ }
+ } else if e < bias+shift {
+ // Round any abs(x) >= 1 containing a fractional component [0,1).
+ //
+ // Numbers with larger exponents are returned unchanged since they
+ // must be either an integer, infinity, or NaN.
+ const half = 1 << (shift - 1)
+ e -= bias
+ bits += half >> e
+ bits &^= fracMask >> e
+ }
+ return math.Float64frombits(bits)
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkrand/locked_source.go
new file mode 100644
index 0000000000000..0c9802d877066
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkrand/locked_source.go
@@ -0,0 +1,29 @@
+package sdkrand
+
+import (
+ "math/rand"
+ "sync"
+ "time"
+)
+
+// lockedSource is a thread-safe implementation of rand.Source
+type lockedSource struct {
+ lk sync.Mutex
+ src rand.Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+ r.lk.Lock()
+ n = r.src.Int63()
+ r.lk.Unlock()
+ return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+ r.lk.Lock()
+ r.src.Seed(seed)
+ r.lk.Unlock()
+}
+
+// SeededRand is a new RNG using a thread-safe implementation of rand.Source
+var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkrand/read.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkrand/read.go
new file mode 100644
index 0000000000000..4bae66ceed5ac
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sdkrand/read.go
@@ -0,0 +1,12 @@
+//go:build go1.6
+// +build go1.6
+
+package sdkrand
+
+import "math/rand"
+
+// Read provides the stub for the math/rand Rand.Read method, supported for
+// Go versions 1.6 and greater.
+func Read(r *rand.Rand, p []byte) (int, error) {
+ return r.Read(p)
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/shareddefaults/ecs_container.go
new file mode 100644
index 0000000000000..7da8a49ce522a
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/shareddefaults/ecs_container.go
@@ -0,0 +1,12 @@
+package shareddefaults
+
+const (
+	// ECSCredsProviderEnvVar is an environment variable key used to
+ // determine which path needs to be hit.
+ ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
+)
+
+// ECSContainerCredentialsURI is the endpoint to retrieve container
+// credentials. This can be overridden to test to ensure the credential process
+// is behaving correctly.
+var ECSContainerCredentialsURI = "http://169.254.170.2"
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/shareddefaults/shared_config.go
new file mode 100644
index 0000000000000..ebcbc2b40a3fb
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/shareddefaults/shared_config.go
@@ -0,0 +1,40 @@
+package shareddefaults
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+)
+
+// SharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/credentials
+// - Windows: %USERPROFILE%\.aws\credentials
+func SharedCredentialsFilename() string {
+ return filepath.Join(UserHomeDir(), ".aws", "credentials")
+}
+
+// SharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+ return filepath.Join(UserHomeDir(), ".aws", "config")
+}
+
+// UserHomeDir returns the home directory for the user the process is
+// running under.
+func UserHomeDir() string {
+ if runtime.GOOS == "windows" { // Windows
+ return os.Getenv("USERPROFILE")
+ }
+
+ // *nix
+ return os.Getenv("HOME")
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/strings/strings.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/strings/strings.go
new file mode 100644
index 0000000000000..d008ae27cb319
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/strings/strings.go
@@ -0,0 +1,11 @@
+package strings
+
+import (
+ "strings"
+)
+
+// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings,
+// under Unicode case-folding.
+func HasPrefixFold(s, prefix string) bool {
+ return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix)
+}
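+
+// For example, HasPrefixFold("S3://bucket/key", "s3://") returns true because
+// the comparison folds case, whereas strings.HasPrefix would return false for
+// the same inputs.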
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sync/singleflight/LICENSE b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sync/singleflight/LICENSE
new file mode 100644
index 0000000000000..6a66aea5eafe0
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sync/singleflight/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sync/singleflight/singleflight.go
new file mode 100644
index 0000000000000..14ad0c5891151
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/internal/sync/singleflight/singleflight.go
@@ -0,0 +1,120 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package singleflight provides a duplicate function call suppression
+// mechanism.
+package singleflight
+
+import "sync"
+
+// call is an in-flight or completed singleflight.Do call
+type call struct {
+ wg sync.WaitGroup
+
+ // These fields are written once before the WaitGroup is done
+ // and are only read after the WaitGroup is done.
+ val interface{}
+ err error
+
+ // forgotten indicates whether Forget was called with this call's key
+ // while the call was still in flight.
+ forgotten bool
+
+ // These fields are read and written with the singleflight
+ // mutex held before the WaitGroup is done, and are read but
+ // not written after the WaitGroup is done.
+ dups int
+ chans []chan<- Result
+}
+
+// Group represents a class of work and forms a namespace in
+// which units of work can be executed with duplicate suppression.
+type Group struct {
+ mu sync.Mutex // protects m
+ m map[string]*call // lazily initialized
+}
+
+// Result holds the results of Do, so they can be passed
+// on a channel.
+type Result struct {
+ Val interface{}
+ Err error
+ Shared bool
+}
+
+// Do executes and returns the results of the given function, making
+// sure that only one execution is in-flight for a given key at a
+// time. If a duplicate comes in, the duplicate caller waits for the
+// original to complete and receives the same results.
+// The return value shared indicates whether v was given to multiple callers.
+func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) {
+ g.mu.Lock()
+ if g.m == nil {
+ g.m = make(map[string]*call)
+ }
+ if c, ok := g.m[key]; ok {
+ c.dups++
+ g.mu.Unlock()
+ c.wg.Wait()
+ return c.val, c.err, true
+ }
+ c := new(call)
+ c.wg.Add(1)
+ g.m[key] = c
+ g.mu.Unlock()
+
+ g.doCall(c, key, fn)
+ return c.val, c.err, c.dups > 0
+}
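+
+// A minimal usage sketch (fetchCredentials is a hypothetical call returning
+// (interface{}, error)): concurrent callers passing the same key share a
+// single execution of fn.
+//
+//	var g singleflight.Group
+//	v, err, shared := g.Do("creds", func() (interface{}, error) {
+//		return fetchCredentials()
+//	})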
+
+// DoChan is like Do but returns a channel that will receive the
+// results when they are ready.
+func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result {
+ ch := make(chan Result, 1)
+ g.mu.Lock()
+ if g.m == nil {
+ g.m = make(map[string]*call)
+ }
+ if c, ok := g.m[key]; ok {
+ c.dups++
+ c.chans = append(c.chans, ch)
+ g.mu.Unlock()
+ return ch
+ }
+ c := &call{chans: []chan<- Result{ch}}
+ c.wg.Add(1)
+ g.m[key] = c
+ g.mu.Unlock()
+
+ go g.doCall(c, key, fn)
+
+ return ch
+}
+
+// doCall handles the single call for a key.
+func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) {
+ c.val, c.err = fn()
+ c.wg.Done()
+
+ g.mu.Lock()
+ if !c.forgotten {
+ delete(g.m, key)
+ }
+ for _, ch := range c.chans {
+ ch <- Result{c.val, c.err, c.dups > 0}
+ }
+ g.mu.Unlock()
+}
+
+// Forget tells the singleflight to forget about a key. Future calls
+// to Do for this key will call the function rather than waiting for
+// an earlier call to complete.
+func (g *Group) Forget(key string) {
+ g.mu.Lock()
+ if c, ok := g.m[key]; ok {
+ c.forgotten = true
+ }
+ delete(g.m, key)
+ g.mu.Unlock()
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/checksum/content_md5.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/checksum/content_md5.go
new file mode 100644
index 0000000000000..47aaa4a52537e
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/checksum/content_md5.go
@@ -0,0 +1,53 @@
+package checksum
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+ "fmt"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+const contentMD5Header = "Content-Md5"
+
+// AddBodyContentMD5Handler computes and sets the HTTP Content-MD5 header for requests that
+// require it.
+func AddBodyContentMD5Handler(r *request.Request) {
+ // if Content-MD5 header is already present, return
+ if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) != 0 {
+ return
+ }
+
+ // if S3DisableContentMD5Validation flag is set, return
+ if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
+ return
+ }
+
+ // if request is presigned, return
+ if r.IsPresigned() {
+ return
+ }
+
+ // if body is not seekable, return
+ if !aws.IsReaderSeekable(r.Body) {
+ if r.Config.Logger != nil {
+ r.Config.Logger.Log(fmt.Sprintf(
+ "Unable to compute Content-MD5 for unseekable body, S3.%s",
+ r.Operation.Name))
+ }
+ return
+ }
+
+ h := md5.New()
+
+ if _, err := aws.CopySeekableBody(h, r.Body); err != nil {
+ r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err)
+ return
+ }
+
+ // encode the md5 checksum in base64 and set the request header.
+ v := base64.StdEncoding.EncodeToString(h.Sum(nil))
+ r.HTTPRequest.Header.Set(contentMD5Header, v)
+}
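+
+// For example, an empty request body has MD5 d41d8cd98f00b204e9800998ecf8427e,
+// so the Content-Md5 header would be set to its base64 form,
+// "1B2M2Y8AsgTpgAmY7PhCfg==".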
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/host.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/host.go
new file mode 100644
index 0000000000000..82938e66dc1c4
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/host.go
@@ -0,0 +1,104 @@
+package protocol
+
+import (
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+ "net"
+ "strconv"
+ "strings"
+)
+
+// ValidateEndpointHostHandler is a request handler that will validate that
+// the request endpoint's host is a valid RFC 3986 host.
+var ValidateEndpointHostHandler = request.NamedHandler{
+ Name: "awssdk.protocol.ValidateEndpointHostHandler",
+ Fn: func(r *request.Request) {
+ err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host)
+ if err != nil {
+ r.Error = err
+ }
+ },
+}
+
+// ValidateEndpointHost validates that the host string passed in is a valid RFC
+// 3986 host. Returns error if the host is not valid.
+func ValidateEndpointHost(opName, host string) error {
+ paramErrs := request.ErrInvalidParams{Context: opName}
+
+ var hostname string
+ var port string
+ var err error
+
+ if strings.Contains(host, ":") {
+ hostname, port, err = net.SplitHostPort(host)
+
+ if err != nil {
+ paramErrs.Add(request.NewErrParamFormat("endpoint", err.Error(), host))
+ }
+
+ if !ValidPortNumber(port) {
+ paramErrs.Add(request.NewErrParamFormat("endpoint port number", "[0-65535]", port))
+ }
+ } else {
+ hostname = host
+ }
+
+ labels := strings.Split(hostname, ".")
+ for i, label := range labels {
+ if i == len(labels)-1 && len(label) == 0 {
+ // Allow trailing dot for FQDN hosts.
+ continue
+ }
+
+ if !ValidHostLabel(label) {
+ paramErrs.Add(request.NewErrParamFormat(
+ "endpoint host label", "[a-zA-Z0-9-]{1,63}", label))
+ }
+ }
+
+ if len(hostname) == 0 {
+ paramErrs.Add(request.NewErrParamMinLen("endpoint host", 1))
+ }
+
+ if len(hostname) > 255 {
+ paramErrs.Add(request.NewErrParamMaxLen(
+ "endpoint host", 255, host,
+ ))
+ }
+
+ if paramErrs.Len() > 0 {
+ return paramErrs
+ }
+ return nil
+}
+
+// ValidHostLabel returns whether the label is a valid RFC 3986 host label.
+func ValidHostLabel(label string) bool {
+ if l := len(label); l == 0 || l > 63 {
+ return false
+ }
+ for _, r := range label {
+ switch {
+ case r >= '0' && r <= '9':
+ case r >= 'A' && r <= 'Z':
+ case r >= 'a' && r <= 'z':
+ case r == '-':
+ default:
+ return false
+ }
+ }
+
+ return true
+}
+
+// ValidPortNumber returns whether the port is a valid RFC 3986 port
+func ValidPortNumber(port string) bool {
+ i, err := strconv.Atoi(port)
+ if err != nil {
+ return false
+ }
+
+ if i < 0 || i > 65535 {
+ return false
+ }
+ return true
+}
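+
+// For example, ValidateEndpointHost("PutObject", "bucket.s3.example.com:443")
+// returns nil, while a label containing an underscore or a port such as
+// "70000" yields an ErrInvalidParams error.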
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/host_prefix.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/host_prefix.go
new file mode 100644
index 0000000000000..446fb77c35006
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/host_prefix.go
@@ -0,0 +1,54 @@
+package protocol
+
+import (
+ "strings"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// HostPrefixHandlerName is the handler name for the host prefix request
+// handler.
+const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler"
+
+// NewHostPrefixHandler constructs a build handler
+func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler {
+ builder := HostPrefixBuilder{
+ Prefix: prefix,
+ LabelsFn: labelsFn,
+ }
+
+ return request.NamedHandler{
+ Name: HostPrefixHandlerName,
+ Fn: builder.Build,
+ }
+}
+
+// HostPrefixBuilder provides the request handler to expand and prepend
+// the host prefix into the operation's request endpoint host.
+type HostPrefixBuilder struct {
+ Prefix string
+ LabelsFn func() map[string]string
+}
+
+// Build updates the passed in Request with the HostPrefix template expanded.
+func (h HostPrefixBuilder) Build(r *request.Request) {
+ if aws.BoolValue(r.Config.DisableEndpointHostPrefix) {
+ return
+ }
+
+ var labels map[string]string
+ if h.LabelsFn != nil {
+ labels = h.LabelsFn()
+ }
+
+ prefix := h.Prefix
+ for name, value := range labels {
+ prefix = strings.Replace(prefix, "{"+name+"}", value, -1)
+ }
+
+ r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host
+ if len(r.HTTPRequest.Host) > 0 {
+ r.HTTPRequest.Host = prefix + r.HTTPRequest.Host
+ }
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/idempotency.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/idempotency.go
new file mode 100644
index 0000000000000..53831dff9842d
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/idempotency.go
@@ -0,0 +1,75 @@
+package protocol
+
+import (
+ "crypto/rand"
+ "fmt"
+ "reflect"
+)
+
+// RandReader is the random reader the protocol package will use to read
+// random bytes from. This is exported for testing, and should not be used.
+var RandReader = rand.Reader
+
+const idempotencyTokenFillTag = `idempotencyToken`
+
+// CanSetIdempotencyToken returns true if the struct field should be
+// automatically populated with an Idempotency token.
+//
+// Only *string and string type fields that are tagged with idempotencyToken
+// which are not already set can be auto filled.
+func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
+ switch u := v.Interface().(type) {
+ // To auto fill an Idempotency token the field must be a string,
+ // tagged for auto fill, and have a zero value.
+ case *string:
+ return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+ case string:
+ return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+ }
+
+ return false
+}
+
+// GetIdempotencyToken returns a randomly generated idempotency token.
+func GetIdempotencyToken() string {
+ b := make([]byte, 16)
+ RandReader.Read(b)
+
+ return UUIDVersion4(b)
+}
+
+// SetIdempotencyToken will set the value provided with an Idempotency Token,
+// given that the value can be set. Will panic if the value is not settable.
+func SetIdempotencyToken(v reflect.Value) {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() && v.CanSet() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+ v = reflect.Indirect(v)
+
+ if !v.CanSet() {
+ panic(fmt.Sprintf("unable to set idempotnecy token %v", v))
+ }
+
+ b := make([]byte, 16)
+ _, err := rand.Read(b)
+ if err != nil {
+ // TODO handle error
+ return
+ }
+
+ v.Set(reflect.ValueOf(UUIDVersion4(b)))
+}
+
+// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided
+func UUIDVersion4(u []byte) string {
+ // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+ // 13th character is "4"
+ u[6] = (u[6] | 0x40) & 0x4F
+ // 17th character is "8", "9", "a", or "b"
+ u[8] = (u[8] | 0x80) & 0xBF
+
+ return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
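+
+// For example, (u[6]|0x40)&0x4F forces that byte into the range 0x40-0x4F,
+// so the 13th hex digit is always "4", and (u[8]|0x80)&0xBF keeps u[8] in
+// 0x80-0xBF, yielding "8", "9", "A", or "B" (printed uppercase by %X).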
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/json/jsonutil/build.go
new file mode 100644
index 0000000000000..f1b33dc717519
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/json/jsonutil/build.go
@@ -0,0 +1,298 @@
+// Package jsonutil provides JSON serialization of AWS requests and responses.
+package jsonutil
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/private/protocol"
+)
+
+var timeType = reflect.ValueOf(time.Time{}).Type()
+var byteSliceType = reflect.ValueOf([]byte{}).Type()
+
+// BuildJSON builds a JSON string for a given object v.
+func BuildJSON(v interface{}) ([]byte, error) {
+ var buf bytes.Buffer
+
+ err := buildAny(reflect.ValueOf(v), &buf, "")
+ return buf.Bytes(), err
+}
+
+func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+ origVal := value
+ value = reflect.Indirect(value)
+ if !value.IsValid() {
+ return nil
+ }
+
+ vtype := value.Type()
+
+ t := tag.Get("type")
+ if t == "" {
+ switch vtype.Kind() {
+ case reflect.Struct:
+ // also it can't be a time object
+ if value.Type() != timeType {
+ t = "structure"
+ }
+ case reflect.Slice:
+ // also it can't be a byte slice
+ if _, ok := value.Interface().([]byte); !ok {
+ t = "list"
+ }
+ case reflect.Map:
+ // cannot be a JSONValue map
+ if _, ok := value.Interface().(aws.JSONValue); !ok {
+ t = "map"
+ }
+ }
+ }
+
+ switch t {
+ case "structure":
+ if field, ok := vtype.FieldByName("_"); ok {
+ tag = field.Tag
+ }
+ return buildStruct(value, buf, tag)
+ case "list":
+ return buildList(value, buf, tag)
+ case "map":
+ return buildMap(value, buf, tag)
+ default:
+ return buildScalar(origVal, buf, tag)
+ }
+}
+
+func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+ if !value.IsValid() {
+ return nil
+ }
+
+ // unwrap payloads
+ if payload := tag.Get("payload"); payload != "" {
+ field, _ := value.Type().FieldByName(payload)
+ tag = field.Tag
+ value = elemOf(value.FieldByName(payload))
+ if !value.IsValid() && tag.Get("type") != "structure" {
+ return nil
+ }
+ }
+
+ buf.WriteByte('{')
+ defer buf.WriteString("}")
+
+ if !value.IsValid() {
+ return nil
+ }
+
+ t := value.Type()
+ first := true
+ for i := 0; i < t.NumField(); i++ {
+ member := value.Field(i)
+
+ // This allocates the most memory.
+ // Additionally, we cannot skip nil fields due to
+ // idempotency auto filling.
+ field := t.Field(i)
+
+ if field.PkgPath != "" {
+ continue // ignore unexported fields
+ }
+ if field.Tag.Get("json") == "-" {
+ continue
+ }
+ if field.Tag.Get("location") != "" {
+ continue // ignore non-body elements
+ }
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
+ if protocol.CanSetIdempotencyToken(member, field) {
+ token := protocol.GetIdempotencyToken()
+ member = reflect.ValueOf(&token)
+ }
+
+ if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() {
+ continue // ignore unset fields
+ }
+
+ if first {
+ first = false
+ } else {
+ buf.WriteByte(',')
+ }
+
+ // figure out what this field is called
+ name := field.Name
+ if locName := field.Tag.Get("locationName"); locName != "" {
+ name = locName
+ }
+
+ writeString(name, buf)
+ buf.WriteString(`:`)
+
+ err := buildAny(member, buf, field.Tag)
+ if err != nil {
+ return err
+ }
+
+ }
+
+ return nil
+}
+
+func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+ buf.WriteString("[")
+
+ for i := 0; i < value.Len(); i++ {
+ buildAny(value.Index(i), buf, "")
+
+ if i < value.Len()-1 {
+ buf.WriteString(",")
+ }
+ }
+
+ buf.WriteString("]")
+
+ return nil
+}
+
+type sortedValues []reflect.Value
+
+func (sv sortedValues) Len() int { return len(sv) }
+func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
+func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() }
+
+func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+ buf.WriteString("{")
+
+ sv := sortedValues(value.MapKeys())
+ sort.Sort(sv)
+
+ for i, k := range sv {
+ if i > 0 {
+ buf.WriteByte(',')
+ }
+
+ writeString(k.String(), buf)
+ buf.WriteString(`:`)
+
+ buildAny(value.MapIndex(k), buf, "")
+ }
+
+ buf.WriteString("}")
+
+ return nil
+}
+
+func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+ // prevents allocation on the heap.
+ scratch := [64]byte{}
+ switch value := reflect.Indirect(v); value.Kind() {
+ case reflect.String:
+ writeString(value.String(), buf)
+ case reflect.Bool:
+ if value.Bool() {
+ buf.WriteString("true")
+ } else {
+ buf.WriteString("false")
+ }
+ case reflect.Int64:
+ buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10))
+ case reflect.Float64:
+ f := value.Float()
+ if math.IsInf(f, 0) || math.IsNaN(f) {
+ return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)}
+ }
+ buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64))
+ default:
+ switch converted := value.Interface().(type) {
+ case time.Time:
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.UnixTimeFormatName
+ }
+
+ ts := protocol.FormatTime(format, converted)
+ if format != protocol.UnixTimeFormatName {
+ ts = `"` + ts + `"`
+ }
+
+ buf.WriteString(ts)
+ case []byte:
+ if !value.IsNil() {
+ buf.WriteByte('"')
+ if len(converted) < 1024 {
+ // for small buffers, using Encode directly is much faster.
+ dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted)))
+ base64.StdEncoding.Encode(dst, converted)
+ buf.Write(dst)
+ } else {
+ // for large buffers, avoid unnecessary extra temporary
+ // buffer space.
+ enc := base64.NewEncoder(base64.StdEncoding, buf)
+ enc.Write(converted)
+ enc.Close()
+ }
+ buf.WriteByte('"')
+ }
+ case aws.JSONValue:
+ str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape)
+ if err != nil {
+ return fmt.Errorf("unable to encode JSONValue, %v", err)
+ }
+ buf.WriteString(str)
+ default:
+ return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type())
+ }
+ }
+ return nil
+}
+
+var hex = "0123456789abcdef"
+
+func writeString(s string, buf *bytes.Buffer) {
+ buf.WriteByte('"')
+ for i := 0; i < len(s); i++ {
+ if s[i] == '"' {
+ buf.WriteString(`\"`)
+ } else if s[i] == '\\' {
+ buf.WriteString(`\\`)
+ } else if s[i] == '\b' {
+ buf.WriteString(`\b`)
+ } else if s[i] == '\f' {
+ buf.WriteString(`\f`)
+ } else if s[i] == '\r' {
+ buf.WriteString(`\r`)
+ } else if s[i] == '\t' {
+ buf.WriteString(`\t`)
+ } else if s[i] == '\n' {
+ buf.WriteString(`\n`)
+ } else if s[i] < 32 {
+ buf.WriteString("\\u00")
+ buf.WriteByte(hex[s[i]>>4])
+ buf.WriteByte(hex[s[i]&0xF])
+ } else {
+ buf.WriteByte(s[i])
+ }
+ }
+ buf.WriteByte('"')
+}
+
+// Returns the reflection element of a value, if it is a pointer.
+func elemOf(value reflect.Value) reflect.Value {
+ for value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ return value
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/json/jsonutil/unmarshal.go
new file mode 100644
index 0000000000000..1a60f43d884a6
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/json/jsonutil/unmarshal.go
@@ -0,0 +1,304 @@
+package jsonutil
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "math/big"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/private/protocol"
+)
+
+var millisecondsFloat = new(big.Float).SetInt64(1e3)
+
+// UnmarshalJSONError unmarshals the reader's JSON document into the passed-in
+// type. The value to unmarshal the JSON document into must be a pointer to the
+// type.
+func UnmarshalJSONError(v interface{}, stream io.Reader) error {
+ var errBuf bytes.Buffer
+ body := io.TeeReader(stream, &errBuf)
+
+ err := json.NewDecoder(body).Decode(v)
+ if err != nil {
+ msg := "failed decoding error message"
+ if err == io.EOF {
+ msg = "error message missing"
+ err = nil
+ }
+ return awserr.NewUnmarshalError(err, msg, errBuf.Bytes())
+ }
+
+ return nil
+}
+
+// UnmarshalJSON reads a stream and unmarshals the results in object v.
+func UnmarshalJSON(v interface{}, stream io.Reader) error {
+ var out interface{}
+
+ decoder := json.NewDecoder(stream)
+ decoder.UseNumber()
+ err := decoder.Decode(&out)
+ if err == io.EOF {
+ return nil
+ } else if err != nil {
+ return err
+ }
+
+ return unmarshaler{}.unmarshalAny(reflect.ValueOf(v), out, "")
+}
+
+// UnmarshalJSONCaseInsensitive reads a stream and unmarshals the result into the
+// object v. Ignores casing for structure members.
+func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error {
+ var out interface{}
+
+ decoder := json.NewDecoder(stream)
+ decoder.UseNumber()
+ err := decoder.Decode(&out)
+ if err == io.EOF {
+ return nil
+ } else if err != nil {
+ return err
+ }
+
+ return unmarshaler{
+ caseInsensitive: true,
+ }.unmarshalAny(reflect.ValueOf(v), out, "")
+}
+
+type unmarshaler struct {
+ caseInsensitive bool
+}
+
+func (u unmarshaler) unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+ vtype := value.Type()
+ if vtype.Kind() == reflect.Ptr {
+ vtype = vtype.Elem() // check kind of actual element type
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch vtype.Kind() {
+ case reflect.Struct:
+ // also it can't be a time object
+ if _, ok := value.Interface().(*time.Time); !ok {
+ t = "structure"
+ }
+ case reflect.Slice:
+ // also it can't be a byte slice
+ if _, ok := value.Interface().([]byte); !ok {
+ t = "list"
+ }
+ case reflect.Map:
+ // cannot be a JSONValue map
+ if _, ok := value.Interface().(aws.JSONValue); !ok {
+ t = "map"
+ }
+ }
+ }
+
+ switch t {
+ case "structure":
+ if field, ok := vtype.FieldByName("_"); ok {
+ tag = field.Tag
+ }
+ return u.unmarshalStruct(value, data, tag)
+ case "list":
+ return u.unmarshalList(value, data, tag)
+ case "map":
+ return u.unmarshalMap(value, data, tag)
+ default:
+ return u.unmarshalScalar(value, data, tag)
+ }
+}
+
+func (u unmarshaler) unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+ if data == nil {
+ return nil
+ }
+ mapData, ok := data.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("JSON value is not a structure (%#v)", data)
+ }
+
+ t := value.Type()
+ if value.Kind() == reflect.Ptr {
+ if value.IsNil() { // create the structure if it's nil
+ s := reflect.New(value.Type().Elem())
+ value.Set(s)
+ value = s
+ }
+
+ value = value.Elem()
+ t = t.Elem()
+ }
+
+ // unwrap any payloads
+ if payload := tag.Get("payload"); payload != "" {
+ field, _ := t.FieldByName(payload)
+ return u.unmarshalAny(value.FieldByName(payload), data, field.Tag)
+ }
+
+ for i := 0; i < t.NumField(); i++ {
+ field := t.Field(i)
+ if field.PkgPath != "" {
+ continue // ignore unexported fields
+ }
+
+ // figure out what this field is called
+ name := field.Name
+ if locName := field.Tag.Get("locationName"); locName != "" {
+ name = locName
+ }
+ if u.caseInsensitive {
+ if _, ok := mapData[name]; !ok {
+ // Fallback to uncased name search if the exact name didn't match.
+ for kn, v := range mapData {
+ if strings.EqualFold(kn, name) {
+ mapData[name] = v
+ }
+ }
+ }
+ }
+
+ member := value.FieldByIndex(field.Index)
+ err := u.unmarshalAny(member, mapData[name], field.Tag)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (u unmarshaler) unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+ if data == nil {
+ return nil
+ }
+ listData, ok := data.([]interface{})
+ if !ok {
+ return fmt.Errorf("JSON value is not a list (%#v)", data)
+ }
+
+ if value.IsNil() {
+ l := len(listData)
+ value.Set(reflect.MakeSlice(value.Type(), l, l))
+ }
+
+ for i, c := range listData {
+ err := u.unmarshalAny(value.Index(i), c, "")
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (u unmarshaler) unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+ if data == nil {
+ return nil
+ }
+ mapData, ok := data.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("JSON value is not a map (%#v)", data)
+ }
+
+ if value.IsNil() {
+ value.Set(reflect.MakeMap(value.Type()))
+ }
+
+ for k, v := range mapData {
+ kvalue := reflect.ValueOf(k)
+ vvalue := reflect.New(value.Type().Elem()).Elem()
+
+ u.unmarshalAny(vvalue, v, "")
+ value.SetMapIndex(kvalue, vvalue)
+ }
+
+ return nil
+}
+
+func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+
+ switch d := data.(type) {
+ case nil:
+ return nil // nothing to do here
+ case string:
+ switch value.Interface().(type) {
+ case *string:
+ value.Set(reflect.ValueOf(&d))
+ case []byte:
+ b, err := base64.StdEncoding.DecodeString(d)
+ if err != nil {
+ return err
+ }
+ value.Set(reflect.ValueOf(b))
+ case *time.Time:
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.ISO8601TimeFormatName
+ }
+
+ t, err := protocol.ParseTime(format, d)
+ if err != nil {
+ return err
+ }
+ value.Set(reflect.ValueOf(&t))
+ case aws.JSONValue:
+ // No need to use escaping as the value is a non-quoted string.
+ v, err := protocol.DecodeJSONValue(d, protocol.NoEscape)
+ if err != nil {
+ return err
+ }
+ value.Set(reflect.ValueOf(v))
+ default:
+ return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
+ }
+ case json.Number:
+ switch value.Interface().(type) {
+ case *int64:
+			// Retain the old behavior where we would just truncate the float64.
+			// Calling d.Int64() here could cause an invalid syntax error due to the usage of strconv.ParseInt.
+ f, err := d.Float64()
+ if err != nil {
+ return err
+ }
+ di := int64(f)
+ value.Set(reflect.ValueOf(&di))
+ case *float64:
+ f, err := d.Float64()
+ if err != nil {
+ return err
+ }
+ value.Set(reflect.ValueOf(&f))
+ case *time.Time:
+ float, ok := new(big.Float).SetString(d.String())
+ if !ok {
+ return fmt.Errorf("unsupported float time representation: %v", d.String())
+ }
+ float = float.Mul(float, millisecondsFloat)
+ ms, _ := float.Int64()
+ t := time.Unix(0, ms*1e6).UTC()
+ value.Set(reflect.ValueOf(&t))
+ default:
+ return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
+ }
+ case bool:
+ switch value.Interface().(type) {
+ case *bool:
+ value.Set(reflect.ValueOf(&d))
+ default:
+ return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
+ }
+ default:
+ return fmt.Errorf("unsupported JSON value (%v)", data)
+ }
+ return nil
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/jsonvalue.go
new file mode 100644
index 0000000000000..513377fa9219e
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/jsonvalue.go
@@ -0,0 +1,76 @@
+package protocol
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "strconv"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+)
+
+// EscapeMode is the mode that should be used for escaping a value
+type EscapeMode uint
+
+// The modes for escaping a value before it is marshaled, and unmarshaled.
+const (
+ NoEscape EscapeMode = iota
+ Base64Escape
+ QuotedEscape
+)
+
+// EncodeJSONValue marshals the value into a JSON string, and optionally base64
+// encodes the string before returning it.
+//
+// Will panic if the escape mode is unknown.
+func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) {
+ b, err := json.Marshal(v)
+ if err != nil {
+ return "", err
+ }
+
+ switch escape {
+ case NoEscape:
+ return string(b), nil
+ case Base64Escape:
+ return base64.StdEncoding.EncodeToString(b), nil
+ case QuotedEscape:
+ return strconv.Quote(string(b)), nil
+ }
+
+ panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape))
+}
+
+// DecodeJSONValue will attempt to decode the string input as a JSONValue.
+// Optionally base64 decoding the value first before JSON unmarshaling.
+//
+// Will panic if the escape mode is unknown.
+func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) {
+ var b []byte
+ var err error
+
+ switch escape {
+ case NoEscape:
+ b = []byte(v)
+ case Base64Escape:
+ b, err = base64.StdEncoding.DecodeString(v)
+ case QuotedEscape:
+ var u string
+ u, err = strconv.Unquote(v)
+ b = []byte(u)
+ default:
+ panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape))
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ m := aws.JSONValue{}
+ err = json.Unmarshal(b, &m)
+ if err != nil {
+ return nil, err
+ }
+
+ return m, nil
+}
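+
+// A round-trip sketch: EncodeJSONValue(aws.JSONValue{"k": "v"}, Base64Escape)
+// returns the base64 encoding of {"k":"v"}, and passing that string to
+// DecodeJSONValue with the same escape mode restores the original map.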
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/payload.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/payload.go
new file mode 100644
index 0000000000000..b33ac985a101f
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/payload.go
@@ -0,0 +1,81 @@
+package protocol
+
+import (
+ "io"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/client/metadata"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// PayloadUnmarshaler provides the interface for unmarshaling a payload's
+// reader into an SDK shape.
+type PayloadUnmarshaler interface {
+ UnmarshalPayload(io.Reader, interface{}) error
+}
+
+// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a
+// HandlerList. This provides support for unmarshaling a payload reader to
+// a shape without needing an SDK request first.
+type HandlerPayloadUnmarshal struct {
+ Unmarshalers request.HandlerList
+}
+
+// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using
+// the Unmarshalers HandlerList provided. Returns an error if unmarshaling
+// fails.
+func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error {
+ req := &request.Request{
+ HTTPRequest: &http.Request{},
+ HTTPResponse: &http.Response{
+ StatusCode: 200,
+ Header: http.Header{},
+ Body: ioutil.NopCloser(r),
+ },
+ Data: v,
+ }
+
+ h.Unmarshalers.Run(req)
+
+ return req.Error
+}
+
+// PayloadMarshaler provides the interface for marshaling an SDK shape into an
+// io.Writer.
+type PayloadMarshaler interface {
+ MarshalPayload(io.Writer, interface{}) error
+}
+
+// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList.
+// This provides support for marshaling an SDK shape into an io.Writer without
+// needing an SDK request first.
+type HandlerPayloadMarshal struct {
+ Marshalers request.HandlerList
+}
+
+// MarshalPayload marshals the SDK shape into the io.Writer using the
+// Marshalers HandlerList provided. Returns an error if marshaling
+// fails.
+func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error {
+ req := request.New(
+ aws.Config{},
+ metadata.ClientInfo{},
+ request.Handlers{},
+ nil,
+ &request.Operation{HTTPMethod: "PUT"},
+ v,
+ nil,
+ )
+
+ h.Marshalers.Run(req)
+
+ if req.Error != nil {
+ return req.Error
+ }
+
+ io.Copy(w, req.GetBody())
+
+ return nil
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/protocol.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/protocol.go
new file mode 100644
index 0000000000000..baffdfe265661
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/protocol.go
@@ -0,0 +1,49 @@
+package protocol
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// RequireHTTPMinProtocol request handler is used to enforce that
+// the target endpoint supports the given major and minor HTTP protocol version.
+type RequireHTTPMinProtocol struct {
+ Major, Minor int
+}
+
+// Handler will mark the request.Request with an error if the
+// target endpoint did not connect with the required HTTP protocol
+// major and minor version.
+func (p RequireHTTPMinProtocol) Handler(r *request.Request) {
+ if r.Error != nil || r.HTTPResponse == nil {
+ return
+ }
+
+ if !strings.HasPrefix(r.HTTPResponse.Proto, "HTTP") {
+ r.Error = newMinHTTPProtoError(p.Major, p.Minor, r)
+ }
+
+ if r.HTTPResponse.ProtoMajor < p.Major || r.HTTPResponse.ProtoMinor < p.Minor {
+ r.Error = newMinHTTPProtoError(p.Major, p.Minor, r)
+ }
+}
+
+// ErrCodeMinimumHTTPProtocolError error code is returned when the target endpoint
+// did not match the required HTTP major and minor protocol version.
+const ErrCodeMinimumHTTPProtocolError = "MinimumHTTPProtocolError"
+
+func newMinHTTPProtoError(major, minor int, r *request.Request) error {
+ return awserr.NewRequestFailure(
+ awserr.New("MinimumHTTPProtocolError",
+ fmt.Sprintf(
+ "operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s",
+ major, minor, r.HTTPResponse.Proto,
+ ),
+ nil,
+ ),
+ r.HTTPResponse.StatusCode, r.RequestID,
+ )
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/query/build.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/query/build.go
new file mode 100644
index 0000000000000..05b702167c0c3
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/query/build.go
@@ -0,0 +1,36 @@
+// Package query provides serialization of AWS query requests and responses.
+package query
+
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/query.json build_test.go
+
+import (
+ "net/url"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+ "github.com/IBM/ibm-cos-sdk-go/private/protocol/query/queryutil"
+)
+
+// BuildHandler is a named request handler for building query protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build}
+
+// Build builds a request for an AWS Query service.
+func Build(r *request.Request) {
+ body := url.Values{
+ "Action": {r.Operation.Name},
+ "Version": {r.ClientInfo.APIVersion},
+ }
+ if err := queryutil.Parse(body, r.Params, false); err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err)
+ return
+ }
+
+ if !r.IsPresigned() {
+ r.HTTPRequest.Method = "POST"
+ r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
+ r.SetBufferBody([]byte(body.Encode()))
+ } else { // This is a pre-signed request
+ r.HTTPRequest.Method = "GET"
+ r.HTTPRequest.URL.RawQuery = body.Encode()
+ }
+}
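+
+// For a hypothetical ListQueues operation with API version 2012-11-05, the
+// resulting POST body would look like "Action=ListQueues&Version=2012-11-05",
+// with any operation parameters appended by queryutil.Parse.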
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/query/queryutil/queryutil.go
new file mode 100644
index 0000000000000..4f869600b1162
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/query/queryutil/queryutil.go
@@ -0,0 +1,246 @@
+package queryutil
+
+import (
+ "encoding/base64"
+ "fmt"
+ "net/url"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/private/protocol"
+)
+
+// Parse parses an object i and fills a url.Values object. The isEC2 flag
+// indicates if this is the EC2 Query sub-protocol.
+func Parse(body url.Values, i interface{}, isEC2 bool) error {
+ q := queryParser{isEC2: isEC2}
+ return q.parseValue(body, reflect.ValueOf(i), "", "")
+}
+
+func elemOf(value reflect.Value) reflect.Value {
+ for value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ return value
+}
+
+type queryParser struct {
+ isEC2 bool
+}
+
+func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+ value = elemOf(value)
+
+ // no need to handle zero values
+ if !value.IsValid() {
+ return nil
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch value.Kind() {
+ case reflect.Struct:
+ t = "structure"
+ case reflect.Slice:
+ t = "list"
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ return q.parseStruct(v, value, prefix)
+ case "list":
+ return q.parseList(v, value, prefix, tag)
+ case "map":
+ return q.parseMap(v, value, prefix, tag)
+ default:
+ return q.parseScalar(v, value, prefix, tag)
+ }
+}
+
+func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error {
+ if !value.IsValid() {
+ return nil
+ }
+
+ t := value.Type()
+ for i := 0; i < value.NumField(); i++ {
+ elemValue := elemOf(value.Field(i))
+ field := t.Field(i)
+
+ if field.PkgPath != "" {
+ continue // ignore unexported fields
+ }
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
+ if protocol.CanSetIdempotencyToken(value.Field(i), field) {
+ token := protocol.GetIdempotencyToken()
+ elemValue = reflect.ValueOf(token)
+ }
+
+ var name string
+ if q.isEC2 {
+ name = field.Tag.Get("queryName")
+ }
+ if name == "" {
+ if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+ name = field.Tag.Get("locationNameList")
+ } else if locName := field.Tag.Get("locationName"); locName != "" {
+ name = locName
+ }
+ if name != "" && q.isEC2 {
+ name = strings.ToUpper(name[0:1]) + name[1:]
+ }
+ }
+ if name == "" {
+ name = field.Name
+ }
+
+ if prefix != "" {
+ name = prefix + "." + name
+ }
+
+ if err := q.parseValue(v, elemValue, name, field.Tag); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+ // If it's empty, generate an empty value
+ if !value.IsNil() && value.Len() == 0 {
+ v.Set(prefix, "")
+ return nil
+ }
+
+ if _, ok := value.Interface().([]byte); ok {
+ return q.parseScalar(v, value, prefix, tag)
+ }
+
+ // check for unflattened list member
+ if !q.isEC2 && tag.Get("flattened") == "" {
+ if listName := tag.Get("locationNameList"); listName == "" {
+ prefix += ".member"
+ } else {
+ prefix += "." + listName
+ }
+ }
+
+ for i := 0; i < value.Len(); i++ {
+ slicePrefix := prefix
+ if slicePrefix == "" {
+ slicePrefix = strconv.Itoa(i + 1)
+ } else {
+ slicePrefix = slicePrefix + "." + strconv.Itoa(i+1)
+ }
+ if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+ // If it's empty, generate an empty value
+ if !value.IsNil() && value.Len() == 0 {
+ v.Set(prefix, "")
+ return nil
+ }
+
+ // check for unflattened list member
+ if !q.isEC2 && tag.Get("flattened") == "" {
+ prefix += ".entry"
+ }
+
+ // sort keys for improved serialization consistency.
+ // this is not strictly necessary for protocol support.
+ mapKeyValues := value.MapKeys()
+ mapKeys := map[string]reflect.Value{}
+ mapKeyNames := make([]string, len(mapKeyValues))
+ for i, mapKey := range mapKeyValues {
+ name := mapKey.String()
+ mapKeys[name] = mapKey
+ mapKeyNames[i] = name
+ }
+ sort.Strings(mapKeyNames)
+
+ for i, mapKeyName := range mapKeyNames {
+ mapKey := mapKeys[mapKeyName]
+ mapValue := value.MapIndex(mapKey)
+
+ kname := tag.Get("locationNameKey")
+ if kname == "" {
+ kname = "key"
+ }
+ vname := tag.Get("locationNameValue")
+ if vname == "" {
+ vname = "value"
+ }
+
+ // serialize key
+ var keyName string
+ if prefix == "" {
+ keyName = strconv.Itoa(i+1) + "." + kname
+ } else {
+ keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname
+ }
+
+ if err := q.parseValue(v, mapKey, keyName, ""); err != nil {
+ return err
+ }
+
+ // serialize value
+ var valueName string
+ if prefix == "" {
+ valueName = strconv.Itoa(i+1) + "." + vname
+ } else {
+ valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname
+ }
+
+ if err := q.parseValue(v, mapValue, valueName, ""); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error {
+ switch value := r.Interface().(type) {
+ case string:
+ v.Set(name, value)
+ case []byte:
+ if !r.IsNil() {
+ v.Set(name, base64.StdEncoding.EncodeToString(value))
+ }
+ case bool:
+ v.Set(name, strconv.FormatBool(value))
+ case int64:
+ v.Set(name, strconv.FormatInt(value, 10))
+ case int:
+ v.Set(name, strconv.Itoa(value))
+ case float64:
+ v.Set(name, strconv.FormatFloat(value, 'f', -1, 64))
+ case float32:
+ v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
+ case time.Time:
+ const ISO8601UTC = "2006-01-02T15:04:05Z"
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.ISO8601TimeFormatName
+ }
+
+ v.Set(name, protocol.FormatTime(format, value))
+ default:
+ return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
+ }
+ return nil
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/query/unmarshal.go
new file mode 100644
index 0000000000000..0fb0eccbe8546
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/query/unmarshal.go
@@ -0,0 +1,39 @@
+package query
+
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/query.json unmarshal_test.go
+
+import (
+ "encoding/xml"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+ "github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// UnmarshalHandler is a named request handler for unmarshaling query protocol requests
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// Unmarshal unmarshals a response for an AWS Query service.
+func Unmarshal(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ if r.DataFilled() {
+ decoder := xml.NewDecoder(r.HTTPResponse.Body)
+ err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+ }
+}
+
+// UnmarshalMeta unmarshals header response values for an AWS Query service.
+func UnmarshalMeta(r *request.Request) {
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/query/unmarshal_error.go
new file mode 100644
index 0000000000000..9d6f859d1133f
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/query/unmarshal_error.go
@@ -0,0 +1,69 @@
+package query
+
+import (
+ "encoding/xml"
+ "fmt"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+ "github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// UnmarshalErrorHandler is a named request handler to unmarshal request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
+
+type xmlErrorResponse struct {
+ Code string `xml:"Error>Code"`
+ Message string `xml:"Error>Message"`
+ RequestID string `xml:"RequestId"`
+}
+
+type xmlResponseError struct {
+ xmlErrorResponse
+}
+
+func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ const svcUnavailableTagName = "ServiceUnavailableException"
+ const errorResponseTagName = "ErrorResponse"
+
+ switch start.Name.Local {
+ case svcUnavailableTagName:
+ e.Code = svcUnavailableTagName
+ e.Message = "service is unavailable"
+ return d.Skip()
+
+ case errorResponseTagName:
+ return d.DecodeElement(&e.xmlErrorResponse, &start)
+
+ default:
+ return fmt.Errorf("unknown error response tag, %v", start)
+ }
+}
+
+// UnmarshalError unmarshals an error response for an AWS Query service.
+func UnmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ var respErr xmlResponseError
+ err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization,
+ "failed to unmarshal error message", err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+
+ reqID := respErr.RequestID
+ if len(reqID) == 0 {
+ reqID = r.RequestID
+ }
+
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(respErr.Code, respErr.Message, nil),
+ r.HTTPResponse.StatusCode,
+ reqID,
+ )
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/rest/build.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/rest/build.go
new file mode 100644
index 0000000000000..196b2f44f703f
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/rest/build.go
@@ -0,0 +1,333 @@
+// Package rest provides RESTful serialization of AWS requests and responses.
+package rest
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+ "github.com/IBM/ibm-cos-sdk-go/private/protocol"
+)
+
+// Whether the byte value can be sent without escaping in AWS URLs
+var noEscape [256]bool
+
+var errValueNotSet = fmt.Errorf("value not set")
+
+var byteSliceType = reflect.TypeOf([]byte{})
+
+func init() {
+ for i := 0; i < len(noEscape); i++ {
+ // AWS expects every character except these to be escaped
+ noEscape[i] = (i >= 'A' && i <= 'Z') ||
+ (i >= 'a' && i <= 'z') ||
+ (i >= '0' && i <= '9') ||
+ i == '-' ||
+ i == '.' ||
+ i == '_' ||
+ i == '~'
+ }
+}
+
+// BuildHandler is a named request handler for building rest protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build}
+
+// Build builds the REST component of a service request.
+func Build(r *request.Request) {
+ if r.ParamsFilled() {
+ v := reflect.ValueOf(r.Params).Elem()
+ buildLocationElements(r, v, false)
+ buildBody(r, v)
+ }
+}
+
+// BuildAsGET builds the REST component of a service request with the ability to hoist
+// data from the body.
+func BuildAsGET(r *request.Request) {
+ if r.ParamsFilled() {
+ v := reflect.ValueOf(r.Params).Elem()
+ buildLocationElements(r, v, true)
+ buildBody(r, v)
+ }
+}
+
+func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) {
+ query := r.HTTPRequest.URL.Query()
+
+ // Setup the raw path to match the base path pattern. This is needed
+ // so that when the path is mutated a custom escaped version can be
+ // stored in RawPath that will be used by the Go client.
+ r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path
+
+ for i := 0; i < v.NumField(); i++ {
+ m := v.Field(i)
+ if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
+ continue
+ }
+
+ if m.IsValid() {
+ field := v.Type().Field(i)
+ name := field.Tag.Get("locationName")
+ if name == "" {
+ name = field.Name
+ }
+ if kind := m.Kind(); kind == reflect.Ptr {
+ m = m.Elem()
+ } else if kind == reflect.Interface {
+ if !m.Elem().IsValid() {
+ continue
+ }
+ }
+ if !m.IsValid() {
+ continue
+ }
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
+ // Support the ability to customize values to be marshaled as a
+ // blob even though they were modeled as a string. Required for S3
+ // API operations like SSECustomerKey is modeled as string but
+ // required to be base64 encoded in request.
+ if field.Tag.Get("marshal-as") == "blob" {
+ m = m.Convert(byteSliceType)
+ }
+
+ var err error
+ switch field.Tag.Get("location") {
+ case "headers": // header maps
+ err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag)
+ case "header":
+ err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag)
+ case "uri":
+ err = buildURI(r.HTTPRequest.URL, m, name, field.Tag)
+ case "querystring":
+ err = buildQueryString(query, m, name, field.Tag)
+ default:
+ if buildGETQuery {
+ err = buildQueryString(query, m, name, field.Tag)
+ }
+ }
+ r.Error = err
+ }
+ if r.Error != nil {
+ return
+ }
+ }
+
+ r.HTTPRequest.URL.RawQuery = query.Encode()
+ if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) {
+ cleanPath(r.HTTPRequest.URL)
+ }
+}
+
+func buildBody(r *request.Request, v reflect.Value) {
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ pfield, _ := v.Type().FieldByName(payloadName)
+ if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
+ payload := reflect.Indirect(v.FieldByName(payloadName))
+ if payload.IsValid() && payload.Interface() != nil {
+ switch reader := payload.Interface().(type) {
+ case io.ReadSeeker:
+ r.SetReaderBody(reader)
+ case []byte:
+ r.SetBufferBody(reader)
+ case string:
+ r.SetStringBody(reader)
+ default:
+ r.Error = awserr.New(request.ErrCodeSerialization,
+ "failed to encode REST request",
+ fmt.Errorf("unknown payload type %s", payload.Type()))
+ }
+ }
+ }
+ }
+ }
+}
+
+func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error {
+ str, err := convertType(v, tag)
+ if err == errValueNotSet {
+ return nil
+ } else if err != nil {
+ return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err)
+ }
+
+ name = strings.TrimSpace(name)
+ str = strings.TrimSpace(str)
+
+ header.Add(name, str)
+
+ return nil
+}
+
+func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error {
+ prefix := tag.Get("locationName")
+ for _, key := range v.MapKeys() {
+ str, err := convertType(v.MapIndex(key), tag)
+ if err == errValueNotSet {
+ continue
+ } else if err != nil {
+ return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err)
+
+ }
+ keyStr := strings.TrimSpace(key.String())
+ str = strings.TrimSpace(str)
+
+ header.Add(prefix+keyStr, str)
+ }
+ return nil
+}
+
+func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error {
+ value, err := convertType(v, tag)
+ if err == errValueNotSet {
+ return nil
+ } else if err != nil {
+ return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err)
+ }
+
+ u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1)
+ u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1)
+
+ u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1)
+ u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1)
+
+ return nil
+}
+
+func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error {
+ switch value := v.Interface().(type) {
+ case []*string:
+ for _, item := range value {
+ query.Add(name, *item)
+ }
+ case map[string]*string:
+ for key, item := range value {
+ query.Add(key, *item)
+ }
+ case map[string][]*string:
+ for key, items := range value {
+ for _, item := range items {
+ query.Add(key, *item)
+ }
+ }
+ default:
+ str, err := convertType(v, tag)
+ if err == errValueNotSet {
+ return nil
+ } else if err != nil {
+ return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err)
+ }
+ query.Set(name, str)
+ }
+
+ return nil
+}
+
+func cleanPath(u *url.URL) {
+ hasSlash := strings.HasSuffix(u.Path, "/")
+
+ // clean up path, removing duplicate `/`
+ u.Path = path.Clean(u.Path)
+ u.RawPath = path.Clean(u.RawPath)
+
+ if hasSlash && !strings.HasSuffix(u.Path, "/") {
+ u.Path += "/"
+ u.RawPath += "/"
+ }
+}
+
+// EscapePath escapes part of a URL path in Amazon style
+func EscapePath(path string, encodeSep bool) string {
+ var buf bytes.Buffer
+ for i := 0; i < len(path); i++ {
+ c := path[i]
+ if noEscape[c] || (c == '/' && !encodeSep) {
+ buf.WriteByte(c)
+ } else {
+ fmt.Fprintf(&buf, "%%%02X", c)
+ }
+ }
+ return buf.String()
+}
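+
+// Editor's note: a minimal usage sketch with assumed values (not SDK source):
+//
+//	// '/' is kept when encodeSep is false, so only the space is escaped:
+//	EscapePath("photos/summer 2021.jpg", false) // "photos/summer%202021.jpg"
+//	// with encodeSep true the separator is escaped as well:
+//	EscapePath("a/b", true) // "a%2Fb"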
+
+func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) {
+ v = reflect.Indirect(v)
+ if !v.IsValid() {
+ return "", errValueNotSet
+ }
+
+ switch value := v.Interface().(type) {
+ case string:
+ if tag.Get("suppressedJSONValue") == "true" && tag.Get("location") == "header" {
+ value = base64.StdEncoding.EncodeToString([]byte(value))
+ }
+ str = value
+ case []*string:
+ if tag.Get("location") != "header" || tag.Get("enum") == "" {
+ return "", fmt.Errorf("%T is only supported with location header and enum shapes", value)
+ }
+ buff := &bytes.Buffer{}
+ for i, sv := range value {
+ if sv == nil || len(*sv) == 0 {
+ continue
+ }
+ if i != 0 {
+ buff.WriteRune(',')
+ }
+ item := *sv
+			if strings.Contains(item, ",") || strings.Contains(item, `"`) {
+ item = strconv.Quote(item)
+ }
+ buff.WriteString(item)
+ }
+		str = buff.String()
+ case []byte:
+ str = base64.StdEncoding.EncodeToString(value)
+ case bool:
+ str = strconv.FormatBool(value)
+ case int64:
+ str = strconv.FormatInt(value, 10)
+ case float64:
+ str = strconv.FormatFloat(value, 'f', -1, 64)
+ case time.Time:
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.RFC822TimeFormatName
+ if tag.Get("location") == "querystring" {
+ format = protocol.ISO8601TimeFormatName
+ }
+ }
+ str = protocol.FormatTime(format, value)
+ case aws.JSONValue:
+ if len(value) == 0 {
+ return "", errValueNotSet
+ }
+ escaping := protocol.NoEscape
+ if tag.Get("location") == "header" {
+ escaping = protocol.Base64Escape
+ }
+ str, err = protocol.EncodeJSONValue(value, escaping)
+ if err != nil {
+ return "", fmt.Errorf("unable to encode JSONValue, %v", err)
+ }
+ default:
+ err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type())
+ return "", err
+ }
+
+ return str, nil
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/rest/payload.go
new file mode 100644
index 0000000000000..b54c99edae46f
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/rest/payload.go
@@ -0,0 +1,54 @@
+package rest
+
+import "reflect"
+
+// PayloadMember returns the payload field member of i if there is one, or nil.
+func PayloadMember(i interface{}) interface{} {
+ if i == nil {
+ return nil
+ }
+
+ v := reflect.ValueOf(i).Elem()
+ if !v.IsValid() {
+ return nil
+ }
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ field, _ := v.Type().FieldByName(payloadName)
+ if field.Tag.Get("type") != "structure" {
+ return nil
+ }
+
+ payload := v.FieldByName(payloadName)
+ if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) {
+ return payload.Interface()
+ }
+ }
+ }
+ return nil
+}
+
+const nopayloadPayloadType = "nopayload"
+
+// PayloadType returns the type of a payload field member of i if there is one,
+// or "".
+func PayloadType(i interface{}) string {
+ v := reflect.Indirect(reflect.ValueOf(i))
+ if !v.IsValid() {
+ return ""
+ }
+
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if noPayload := field.Tag.Get(nopayloadPayloadType); noPayload != "" {
+ return nopayloadPayloadType
+ }
+
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ if member, ok := v.Type().FieldByName(payloadName); ok {
+ return member.Tag.Get("type")
+ }
+ }
+ }
+
+ return ""
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/rest/unmarshal.go
new file mode 100644
index 0000000000000..6c0d162f5ebc7
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/rest/unmarshal.go
@@ -0,0 +1,264 @@
+package rest
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+ awsStrings "github.com/IBM/ibm-cos-sdk-go/internal/strings"
+ "github.com/IBM/ibm-cos-sdk-go/private/protocol"
+)
+
+// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// Unmarshal unmarshals the REST component of a response in a REST service.
+func Unmarshal(r *request.Request) {
+ if r.DataFilled() {
+ v := reflect.Indirect(reflect.ValueOf(r.Data))
+ if err := unmarshalBody(r, v); err != nil {
+ r.Error = err
+ }
+ }
+}
+
+// UnmarshalMeta unmarshals the REST metadata of a response in a REST service
+func UnmarshalMeta(r *request.Request) {
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+ if r.RequestID == "" {
+ // Alternative version of request id in the header
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id")
+ }
+ if r.DataFilled() {
+ if err := UnmarshalResponse(r.HTTPResponse, r.Data, aws.BoolValue(r.Config.LowerCaseHeaderMaps)); err != nil {
+ r.Error = err
+ }
+ }
+}
+
+// UnmarshalResponse attempts to unmarshal the REST response headers to
+// the data type passed in. The type must be a pointer. An error is returned
+// with any error unmarshaling the response into the target datatype.
+func UnmarshalResponse(resp *http.Response, data interface{}, lowerCaseHeaderMaps bool) error {
+ v := reflect.Indirect(reflect.ValueOf(data))
+ return unmarshalLocationElements(resp, v, lowerCaseHeaderMaps)
+}
+
+func unmarshalBody(r *request.Request, v reflect.Value) error {
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ pfield, _ := v.Type().FieldByName(payloadName)
+ if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
+ payload := v.FieldByName(payloadName)
+ if payload.IsValid() {
+ switch payload.Interface().(type) {
+ case []byte:
+ defer r.HTTPResponse.Body.Close()
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
+ }
+
+ payload.Set(reflect.ValueOf(b))
+
+ case *string:
+ defer r.HTTPResponse.Body.Close()
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
+ }
+
+ str := string(b)
+ payload.Set(reflect.ValueOf(&str))
+
+ default:
+ switch payload.Type().String() {
+ case "io.ReadCloser":
+ payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
+
+ case "io.ReadSeeker":
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ return awserr.New(request.ErrCodeSerialization,
+ "failed to read response body", err)
+ }
+ payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b))))
+
+ default:
+ io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+ r.HTTPResponse.Body.Close()
+ return awserr.New(request.ErrCodeSerialization,
+ "failed to decode REST response",
+ fmt.Errorf("unknown payload type %s", payload.Type()))
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+func unmarshalLocationElements(resp *http.Response, v reflect.Value, lowerCaseHeaderMaps bool) error {
+ for i := 0; i < v.NumField(); i++ {
+ m, field := v.Field(i), v.Type().Field(i)
+ if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) {
+ continue
+ }
+
+ if m.IsValid() {
+ name := field.Tag.Get("locationName")
+ if name == "" {
+ name = field.Name
+ }
+
+ switch field.Tag.Get("location") {
+ case "statusCode":
+ unmarshalStatusCode(m, resp.StatusCode)
+
+ case "header":
+ err := unmarshalHeader(m, resp.Header.Get(name), field.Tag)
+ if err != nil {
+ return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
+ }
+
+ case "headers":
+ prefix := field.Tag.Get("locationName")
+ err := unmarshalHeaderMap(m, resp.Header, prefix, lowerCaseHeaderMaps)
+ if err != nil {
+ return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+func unmarshalStatusCode(v reflect.Value, statusCode int) {
+ if !v.IsValid() {
+ return
+ }
+
+ switch v.Interface().(type) {
+ case *int64:
+ s := int64(statusCode)
+ v.Set(reflect.ValueOf(&s))
+ }
+}
+
+func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string, normalize bool) error {
+ if len(headers) == 0 {
+ return nil
+ }
+ switch r.Interface().(type) {
+ case map[string]*string: // we only support string map value types
+ out := map[string]*string{}
+ for k, v := range headers {
+ if awsStrings.HasPrefixFold(k, prefix) {
+				if normalize {
+ k = strings.ToLower(k)
+ } else {
+ k = http.CanonicalHeaderKey(k)
+ }
+ out[k[len(prefix):]] = &v[0]
+ }
+ }
+ if len(out) != 0 {
+ r.Set(reflect.ValueOf(out))
+ }
+
+ }
+ return nil
+}
+
+func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error {
+ switch tag.Get("type") {
+ case "jsonvalue":
+ if len(header) == 0 {
+ return nil
+ }
+ case "blob":
+ if len(header) == 0 {
+ return nil
+ }
+ default:
+ if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
+ return nil
+ }
+ }
+
+ switch v.Interface().(type) {
+ case *string:
+ if tag.Get("suppressedJSONValue") == "true" && tag.Get("location") == "header" {
+ b, err := base64.StdEncoding.DecodeString(header)
+ if err != nil {
+ return fmt.Errorf("failed to decode JSONValue, %v", err)
+ }
+ header = string(b)
+ }
+ v.Set(reflect.ValueOf(&header))
+ case []byte:
+ b, err := base64.StdEncoding.DecodeString(header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(b))
+ case *bool:
+ b, err := strconv.ParseBool(header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&b))
+ case *int64:
+ i, err := strconv.ParseInt(header, 10, 64)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&i))
+ case *float64:
+ f, err := strconv.ParseFloat(header, 64)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&f))
+ case *time.Time:
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.RFC822TimeFormatName
+ }
+ t, err := protocol.ParseTime(format, header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&t))
+ case aws.JSONValue:
+ escaping := protocol.NoEscape
+ if tag.Get("location") == "header" {
+ escaping = protocol.Base64Escape
+ }
+ m, err := protocol.DecodeJSONValue(header, escaping)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(m))
+ default:
+		err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type())
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/restxml/restxml.go
new file mode 100644
index 0000000000000..6e9ab32cdf313
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/restxml/restxml.go
@@ -0,0 +1,79 @@
+// Package restxml provides RESTful XML serialization of AWS
+// requests and responses.
+package restxml
+
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-xml.json build_test.go
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go
+
+import (
+ "bytes"
+ "encoding/xml"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+ "github.com/IBM/ibm-cos-sdk-go/private/protocol/query"
+ "github.com/IBM/ibm-cos-sdk-go/private/protocol/rest"
+ "github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// BuildHandler is a named request handler for building restxml protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build}
+
+// UnmarshalHandler is a named request handler for unmarshaling restxml protocol requests
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError}
+
+// Build builds a request payload for the REST XML protocol.
+func Build(r *request.Request) {
+ rest.Build(r)
+
+ if t := rest.PayloadType(r.Params); t == "structure" || t == "" {
+ var buf bytes.Buffer
+ err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf))
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization,
+ "failed to encode rest XML request", err),
+ 0,
+ r.RequestID,
+ )
+ return
+ }
+ r.SetBufferBody(buf.Bytes())
+ }
+}
+
+// Unmarshal unmarshals a payload response for the REST XML protocol.
+func Unmarshal(r *request.Request) {
+ if t := rest.PayloadType(r.Data); t == "structure" || t == "" {
+ defer r.HTTPResponse.Body.Close()
+ decoder := xml.NewDecoder(r.HTTPResponse.Body)
+ err := xmlutil.UnmarshalXML(r.Data, decoder, "")
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization,
+ "failed to decode REST XML response", err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+ } else {
+ rest.Unmarshal(r)
+ }
+}
+
+// UnmarshalMeta unmarshals response headers for the REST XML protocol.
+func UnmarshalMeta(r *request.Request) {
+ rest.UnmarshalMeta(r)
+}
+
+// UnmarshalError unmarshals a response error for the REST XML protocol.
+func UnmarshalError(r *request.Request) {
+ query.UnmarshalError(r)
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/timestamp.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/timestamp.go
new file mode 100644
index 0000000000000..4ba7999b4c566
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/timestamp.go
@@ -0,0 +1,150 @@
+package protocol
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "strconv"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/internal/sdkmath"
+)
+
+// Names of time formats supported by the SDK
+const (
+ RFC822TimeFormatName = "rfc822"
+ ISO8601TimeFormatName = "iso8601"
+ UnixTimeFormatName = "unixTimestamp"
+)
+
+// Time formats supported by the SDK
+// Output time is intended to not contain decimals
+const (
+	// RFC 7231#section-7.1.1.1 timestamp format, e.g. Tue, 29 Apr 2014 18:30:38 GMT
+ RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
+ rfc822TimeFormatSingleDigitDay = "Mon, _2 Jan 2006 15:04:05 GMT"
+ rfc822TimeFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT"
+
+	// This format is used for output time without sub-second precision
+ RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
+
+	// RFC3339, a subset of the ISO8601 timestamp format, e.g. 2014-04-29T18:30:38Z
+ ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z"
+ iso8601TimeFormatNoZ = "2006-01-02T15:04:05.999999999"
+
+ // This format is used for output time with fractional second precision up to milliseconds
+ ISO8601OutputTimeFormat = "2006-01-02T15:04:05.999999999Z"
+)
+
+// IsKnownTimestampFormat returns whether the timestamp format name
+// is known to the SDK's protocols.
+func IsKnownTimestampFormat(name string) bool {
+ switch name {
+ case RFC822TimeFormatName:
+ fallthrough
+ case ISO8601TimeFormatName:
+ fallthrough
+ case UnixTimeFormatName:
+ return true
+ default:
+ return false
+ }
+}
+
+// FormatTime returns a string value of the time.
+func FormatTime(name string, t time.Time) string {
+ t = t.UTC().Truncate(time.Millisecond)
+
+ switch name {
+ case RFC822TimeFormatName:
+ return t.Format(RFC822OutputTimeFormat)
+ case ISO8601TimeFormatName:
+ return t.Format(ISO8601OutputTimeFormat)
+ case UnixTimeFormatName:
+ ms := t.UnixNano() / int64(time.Millisecond)
+ return strconv.FormatFloat(float64(ms)/1e3, 'f', -1, 64)
+ default:
+ panic("unknown timestamp format name, " + name)
+ }
+}
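+
+// Editor's note: an illustrative sketch (not SDK source). Given
+// t := time.Date(2014, 4, 29, 18, 30, 38, 0, time.UTC):
+//
+//	FormatTime(RFC822TimeFormatName, t)  // "Tue, 29 Apr 2014 18:30:38 GMT"
+//	FormatTime(ISO8601TimeFormatName, t) // "2014-04-29T18:30:38Z"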
+
+// ParseTime attempts to parse the time given the format. Returns
+// the time if it was able to be parsed, and fails otherwise.
+func ParseTime(formatName, value string) (time.Time, error) {
+ switch formatName {
+ case RFC822TimeFormatName: // Smithy HTTPDate format
+ return tryParse(value,
+ RFC822TimeFormat,
+ rfc822TimeFormatSingleDigitDay,
+ rfc822TimeFormatSingleDigitDayTwoDigitYear,
+ time.RFC850,
+ time.ANSIC,
+ )
+ case ISO8601TimeFormatName: // Smithy DateTime format
+ return tryParse(value,
+ ISO8601TimeFormat,
+ iso8601TimeFormatNoZ,
+ time.RFC3339Nano,
+ time.RFC3339,
+ )
+ case UnixTimeFormatName:
+ v, err := strconv.ParseFloat(value, 64)
+ _, dec := math.Modf(v)
+		dec = sdkmath.Round(dec*1e3) / 1e3 // Rounds 0.1229999 to 0.123
+ if err != nil {
+ return time.Time{}, err
+ }
+ return time.Unix(int64(v), int64(dec*(1e9))), nil
+ default:
+ panic("unknown timestamp format name, " + formatName)
+ }
+}
+
+func tryParse(v string, formats ...string) (time.Time, error) {
+ var errs parseErrors
+ for _, f := range formats {
+ t, err := time.Parse(f, v)
+ if err != nil {
+ errs = append(errs, parseError{
+ Format: f,
+ Err: err,
+ })
+ continue
+ }
+ return t, nil
+ }
+
+ return time.Time{}, fmt.Errorf("unable to parse time string, %v", errs)
+}
+
+type parseErrors []parseError
+
+func (es parseErrors) Error() string {
+ var s bytes.Buffer
+ for _, e := range es {
+ fmt.Fprintf(&s, "\n * %q: %v", e.Format, e.Err)
+ }
+
+ return "parse errors:" + s.String()
+}
+
+type parseError struct {
+ Format string
+ Err error
+}
+
+// IBM COS SDK Code -- START
+
+// ParseIbmTime checks whether the first character of the date string is a
+// letter; if so, it tries to parse the value as an RFC822-formatted date.
+func ParseIbmTime(formatName, value string) (time.Time, error) {
+ if formatName == ISO8601TimeFormatName && len(value) != 0 {
+ ch := value[0]
+ if ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') {
+ formatName = RFC822TimeFormatName
+ }
+ }
+ return ParseTime(formatName, value)
+}
+
+// IBM COS SDK Code -- END
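+
+// Editor's note: an illustrative sketch (not SDK source). With the ISO8601
+// format name, a value that begins with a letter is re-parsed as RFC822:
+//
+//	ParseIbmTime(ISO8601TimeFormatName, "2014-04-29T18:30:38Z")          // ISO8601 path
+//	ParseIbmTime(ISO8601TimeFormatName, "Tue, 29 Apr 2014 18:30:38 GMT") // RFC822 fallback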
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/unmarshal.go
new file mode 100644
index 0000000000000..183a4fabd795e
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/unmarshal.go
@@ -0,0 +1,27 @@
+package protocol
+
+import (
+ "io"
+ "io/ioutil"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body
+var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody}
+
+// UnmarshalDiscardBody is a request handler to empty a response's body and close it.
+func UnmarshalDiscardBody(r *request.Request) {
+ if r.HTTPResponse == nil || r.HTTPResponse.Body == nil {
+ return
+ }
+
+ io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+ r.HTTPResponse.Body.Close()
+}
+
+// ResponseMetadata provides the SDK response metadata attributes.
+type ResponseMetadata struct {
+ StatusCode int
+ RequestID string
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/unmarshal_error.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/unmarshal_error.go
new file mode 100644
index 0000000000000..ac5ef29e0ff96
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/unmarshal_error.go
@@ -0,0 +1,65 @@
+package protocol
+
+import (
+ "net/http"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// UnmarshalErrorHandler provides unmarshaling of API response errors for
+// both typed and untyped errors.
+type UnmarshalErrorHandler struct {
+ unmarshaler ErrorUnmarshaler
+}
+
+// ErrorUnmarshaler is an abstract interface for concrete implementations to
+// unmarshal protocol specific response errors.
+type ErrorUnmarshaler interface {
+ UnmarshalError(*http.Response, ResponseMetadata) (error, error)
+}
+
+// NewUnmarshalErrorHandler returns an UnmarshalErrorHandler
+// initialized for the set of exception names to the error unmarshalers
+func NewUnmarshalErrorHandler(unmarshaler ErrorUnmarshaler) *UnmarshalErrorHandler {
+ return &UnmarshalErrorHandler{
+ unmarshaler: unmarshaler,
+ }
+}
+
+// UnmarshalErrorHandlerName is the name of the named handler.
+const UnmarshalErrorHandlerName = "awssdk.protocol.UnmarshalError"
+
+// NamedHandler returns a NamedHandler for the unmarshaler using the set of
+// errors the unmarshaler was initialized for.
+func (u *UnmarshalErrorHandler) NamedHandler() request.NamedHandler {
+ return request.NamedHandler{
+ Name: UnmarshalErrorHandlerName,
+ Fn: u.UnmarshalError,
+ }
+}
+
+// UnmarshalError will attempt to unmarshal the API response's error message
+// into either a generic SDK error type, or a typed error corresponding to the
+// errors exception name.
+func (u *UnmarshalErrorHandler) UnmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ respMeta := ResponseMetadata{
+ StatusCode: r.HTTPResponse.StatusCode,
+ RequestID: r.RequestID,
+ }
+
+ v, err := u.unmarshaler.UnmarshalError(r.HTTPResponse, respMeta)
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization,
+ "failed to unmarshal response error", err),
+ respMeta.StatusCode,
+ respMeta.RequestID,
+ )
+ return
+ }
+
+ r.Error = v
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil/build.go
new file mode 100644
index 0000000000000..dd8cdc372a6d9
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil/build.go
@@ -0,0 +1,317 @@
+// Package xmlutil provides XML serialization of AWS requests and responses.
+package xmlutil
+
+import (
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/private/protocol"
+)
+
+// BuildXML will serialize params into an xml.Encoder. Error will be returned
+// if the serialization of any of the params or nested values fails.
+func BuildXML(params interface{}, e *xml.Encoder) error {
+ return buildXML(params, e, false)
+}
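+
+// Editor's note: a typical call site, mirroring how the restxml protocol uses
+// this package (sketch, not SDK source):
+//
+//	var buf bytes.Buffer
+//	if err := BuildXML(params, xml.NewEncoder(&buf)); err != nil {
+//	    // handle the serialization error
+//	}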
+
+func buildXML(params interface{}, e *xml.Encoder, sorted bool) error {
+ b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
+ root := NewXMLElement(xml.Name{})
+ if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
+ return err
+ }
+ for _, c := range root.Children {
+ for _, v := range c {
+ return StructToXML(e, v, sorted)
+ }
+ }
+ return nil
+}
+
+// elemOf returns the reflection element of a value, dereferencing any pointers.
+func elemOf(value reflect.Value) reflect.Value {
+ for value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ return value
+}
+
+// An xmlBuilder serializes values from Go code to XML
+type xmlBuilder struct {
+ encoder *xml.Encoder
+ namespaces map[string]string
+}
+
+// buildValue is a generic XMLNode builder for any type. It builds the value
+// according to its specific type: struct, list, map, or scalar.
+//
+// It also accepts a "type" tag value that sets which XMLNode type the value
+// should be converted to. If no type is provided, reflection is used to
+// determine the value's type.
+func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ value = elemOf(value)
+ if !value.IsValid() { // no need to handle zero values
+ return nil
+ } else if tag.Get("location") != "" { // don't handle non-body location values
+ return nil
+ }
+
+ xml := tag.Get("xml")
+ if len(xml) != 0 {
+ name := strings.SplitAfterN(xml, ",", 2)[0]
+ if name == "-" {
+ return nil
+ }
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch value.Kind() {
+ case reflect.Struct:
+ t = "structure"
+ case reflect.Slice:
+ t = "list"
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ if field, ok := value.Type().FieldByName("_"); ok {
+ tag = tag + reflect.StructTag(" ") + field.Tag
+ }
+ return b.buildStruct(value, current, tag)
+ case "list":
+ return b.buildList(value, current, tag)
+ case "map":
+ return b.buildMap(value, current, tag)
+ default:
+ return b.buildScalar(value, current, tag)
+ }
+}
+
+// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested
+// types are converted to XMLNodes also.
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if !value.IsValid() {
+ return nil
+ }
+
+ // unwrap payloads
+ if payload := tag.Get("payload"); payload != "" {
+ field, _ := value.Type().FieldByName(payload)
+ tag = field.Tag
+ value = elemOf(value.FieldByName(payload))
+
+ if !value.IsValid() {
+ return nil
+ }
+ }
+
+ child := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+
+ // there is an xmlNamespace associated with this struct
+ if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" {
+ ns := xml.Attr{
+ Name: xml.Name{Local: "xmlns"},
+ Value: uri,
+ }
+ if prefix != "" {
+ b.namespaces[prefix] = uri // register the namespace
+ ns.Name.Local = "xmlns:" + prefix
+ }
+
+ child.Attr = append(child.Attr, ns)
+ }
+
+ var payloadFields, nonPayloadFields int
+
+ t := value.Type()
+ for i := 0; i < value.NumField(); i++ {
+ member := elemOf(value.Field(i))
+ field := t.Field(i)
+
+ if field.PkgPath != "" {
+ continue // ignore unexported fields
+ }
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
+ mTag := field.Tag
+ if mTag.Get("location") != "" { // skip non-body members
+ nonPayloadFields++
+ continue
+ }
+ payloadFields++
+
+ if protocol.CanSetIdempotencyToken(value.Field(i), field) {
+ token := protocol.GetIdempotencyToken()
+ member = reflect.ValueOf(token)
+ }
+
+ memberName := mTag.Get("locationName")
+ if memberName == "" {
+ memberName = field.Name
+ mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`)
+ }
+ if err := b.buildValue(member, child, mTag); err != nil {
+ return err
+ }
+ }
+
+ // Only case where the child shape is not added is if the shape only contains
+ // non-payload fields, e.g headers/query.
+ if !(payloadFields == 0 && nonPayloadFields > 0) {
+ current.AddChild(child)
+ }
+
+ return nil
+}
+
+// buildList adds the value's list items to the current XMLNode as children nodes. All
+// nested values in the list are converted to XMLNodes also.
+func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if value.IsNil() { // don't build omitted lists
+ return nil
+ }
+
+ // check for unflattened list member
+ flattened := tag.Get("flattened") != ""
+
+ xname := xml.Name{Local: tag.Get("locationName")}
+ if flattened {
+ for i := 0; i < value.Len(); i++ {
+ child := NewXMLElement(xname)
+ current.AddChild(child)
+ if err := b.buildValue(value.Index(i), child, ""); err != nil {
+ return err
+ }
+ }
+ } else {
+ list := NewXMLElement(xname)
+ current.AddChild(list)
+
+ for i := 0; i < value.Len(); i++ {
+ iname := tag.Get("locationNameList")
+ if iname == "" {
+ iname = "member"
+ }
+
+ child := NewXMLElement(xml.Name{Local: iname})
+ list.AddChild(child)
+ if err := b.buildValue(value.Index(i), child, ""); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All
+// nested values in the map are converted to XMLNodes also.
+//
+// Error will be returned if it is unable to build the map's values into XMLNodes
+func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if value.IsNil() { // don't build omitted maps
+ return nil
+ }
+
+ maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+ current.AddChild(maproot)
+ current = maproot
+
+ kname, vname := "key", "value"
+ if n := tag.Get("locationNameKey"); n != "" {
+ kname = n
+ }
+ if n := tag.Get("locationNameValue"); n != "" {
+ vname = n
+ }
+
+ // sorting is not required for compliance, but it makes testing easier
+ keys := make([]string, value.Len())
+ for i, k := range value.MapKeys() {
+ keys[i] = k.String()
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ v := value.MapIndex(reflect.ValueOf(k))
+
+ mapcur := current
+ if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps
+ child := NewXMLElement(xml.Name{Local: "entry"})
+ mapcur.AddChild(child)
+ mapcur = child
+ }
+
+ kchild := NewXMLElement(xml.Name{Local: kname})
+ kchild.Text = k
+ vchild := NewXMLElement(xml.Name{Local: vname})
+ mapcur.AddChild(kchild)
+ mapcur.AddChild(vchild)
+
+ if err := b.buildValue(v, vchild, ""); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// buildScalar will convert the value into a string and append it as an attribute or child
+// of the current XMLNode.
+//
+// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value.
+//
+// Error will be returned if the value type is unsupported.
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ var str string
+ switch converted := value.Interface().(type) {
+ case string:
+ str = converted
+ case []byte:
+ if !value.IsNil() {
+ str = base64.StdEncoding.EncodeToString(converted)
+ }
+ case bool:
+ str = strconv.FormatBool(converted)
+ case int64:
+ str = strconv.FormatInt(converted, 10)
+ case int:
+ str = strconv.Itoa(converted)
+ case float64:
+ str = strconv.FormatFloat(converted, 'f', -1, 64)
+ case float32:
+ str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
+ case time.Time:
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.ISO8601TimeFormatName
+ }
+
+ str = protocol.FormatTime(format, converted)
+ default:
+ return fmt.Errorf("unsupported value for param %s: %v (%s)",
+ tag.Get("locationName"), value.Interface(), value.Type().Name())
+ }
+
+ xname := xml.Name{Local: tag.Get("locationName")}
+ if tag.Get("xmlAttribute") != "" { // put into current node's attribute list
+ attr := xml.Attr{Name: xname, Value: str}
+ current.Attr = append(current.Attr, attr)
+ } else if len(xname.Local) == 0 {
+ current.Text = str
+ } else { // regular text node
+ current.AddChild(&XMLNode{Name: xname, Text: str})
+ }
+ return nil
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil/sort.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil/sort.go
new file mode 100644
index 0000000000000..c1a511851f6ee
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil/sort.go
@@ -0,0 +1,32 @@
+package xmlutil
+
+import (
+ "encoding/xml"
+ "strings"
+)
+
+type xmlAttrSlice []xml.Attr
+
+func (x xmlAttrSlice) Len() int {
+ return len(x)
+}
+
+func (x xmlAttrSlice) Less(i, j int) bool {
+ spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space
+ localI, localJ := x[i].Name.Local, x[j].Name.Local
+ valueI, valueJ := x[i].Value, x[j].Value
+
+ spaceCmp := strings.Compare(spaceI, spaceJ)
+ localCmp := strings.Compare(localI, localJ)
+ valueCmp := strings.Compare(valueI, valueJ)
+
+ if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) {
+ return true
+ }
+
+ return false
+}
+
+func (x xmlAttrSlice) Swap(i, j int) {
+ x[i], x[j] = x[j], x[i]
+}
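+
+// Editor's note (sketch, not SDK source): StructToXML sorts attributes with
+// sort.Sort(xmlAttrSlice(attrs)), ordering by namespace, then local name,
+// then value:
+//
+//	attrs := []xml.Attr{
+//	    {Name: xml.Name{Local: "b"}, Value: "2"},
+//	    {Name: xml.Name{Local: "a"}, Value: "1"},
+//	}
+//	sort.Sort(xmlAttrSlice(attrs)) // attrs is now [a="1", b="2"]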
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
new file mode 100644
index 0000000000000..0d504037b85c4
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
@@ -0,0 +1,301 @@
+package xmlutil
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/private/protocol"
+)
+
+// UnmarshalXMLError unmarshals the XML error from the stream into the value
+// type specified. The value must be a pointer. If the message fails to
+// unmarshal, the message content will be included in the returned error as an
+// awserr.UnmarshalError.
+func UnmarshalXMLError(v interface{}, stream io.Reader) error {
+ var errBuf bytes.Buffer
+ body := io.TeeReader(stream, &errBuf)
+
+ err := xml.NewDecoder(body).Decode(v)
+ if err != nil && err != io.EOF {
+ return awserr.NewUnmarshalError(err,
+ "failed to unmarshal error message", errBuf.Bytes())
+ }
+
+ return nil
+}
+
+// UnmarshalXML deserializes the XML read from an xml.Decoder into the
+// container v. v needs to match the shape of the XML expected to be decoded;
+// if the shapes don't match, unmarshaling will fail.
+func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error {
+ n, err := XMLToStruct(d, nil)
+ if err != nil {
+ return err
+ }
+ if n.Children != nil {
+ for _, root := range n.Children {
+ for _, c := range root {
+ if wrappedChild, ok := c.Children[wrapper]; ok {
+ c = wrappedChild[0] // pull out wrapped element
+ }
+
+ err = parse(reflect.ValueOf(v), c, "")
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+ }
+ }
+ return nil
+ }
+ return nil
+}
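+
+// Editor's note: a typical call site, mirroring the restxml protocol's use of
+// this function (sketch, not SDK source):
+//
+//	decoder := xml.NewDecoder(resp.Body)
+//	if err := UnmarshalXML(output, decoder, ""); err != nil {
+//	    // handle the deserialization error
+//	}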
+
+// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect
+// will be used to determine the type from r.
+func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ xml := tag.Get("xml")
+ if len(xml) != 0 {
+ name := strings.SplitAfterN(xml, ",", 2)[0]
+ if name == "-" {
+ return nil
+ }
+ }
+
+ rtype := r.Type()
+ if rtype.Kind() == reflect.Ptr {
+ rtype = rtype.Elem() // check kind of actual element type
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch rtype.Kind() {
+ case reflect.Struct:
+ // also it can't be a time object
+ if _, ok := r.Interface().(*time.Time); !ok {
+ t = "structure"
+ }
+ case reflect.Slice:
+ // also it can't be a byte slice
+ if _, ok := r.Interface().([]byte); !ok {
+ t = "list"
+ }
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ if field, ok := rtype.FieldByName("_"); ok {
+ tag = field.Tag
+ }
+ return parseStruct(r, node, tag)
+ case "list":
+ return parseList(r, node, tag)
+ case "map":
+ return parseMap(r, node, tag)
+ default:
+ return parseScalar(r, node, tag)
+ }
+}
+
+// parseStruct deserializes a structure and its fields from an XMLNode. Any nested
+// types in the structure will also be deserialized.
+func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ t := r.Type()
+ if r.Kind() == reflect.Ptr {
+ if r.IsNil() { // create the structure if it's nil
+ s := reflect.New(r.Type().Elem())
+ r.Set(s)
+ r = s
+ }
+
+ r = r.Elem()
+ t = t.Elem()
+ }
+
+ // unwrap any payloads
+ if payload := tag.Get("payload"); payload != "" {
+ field, _ := t.FieldByName(payload)
+ return parseStruct(r.FieldByName(payload), node, field.Tag)
+ }
+
+ for i := 0; i < t.NumField(); i++ {
+ field := t.Field(i)
+ if c := field.Name[0:1]; strings.ToLower(c) == c {
+ continue // ignore unexported fields
+ }
+
+ // figure out what this field is called
+ name := field.Name
+ if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+ name = field.Tag.Get("locationNameList")
+ } else if locName := field.Tag.Get("locationName"); locName != "" {
+ name = locName
+ }
+
+ // try to find the field by name in elements
+ elems := node.Children[name]
+
+ if elems == nil { // try to find the field in attributes
+ if val, ok := node.findElem(name); ok {
+ elems = []*XMLNode{{Text: val}}
+ }
+ }
+
+ member := r.FieldByName(field.Name)
+ for _, elem := range elems {
+ err := parse(member, elem, field.Tag)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// parseList deserializes a list of values from an XML node. Each list entry
+// will also be deserialized.
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ t := r.Type()
+
+ if tag.Get("flattened") == "" { // look at all item entries
+ mname := "member"
+ if name := tag.Get("locationNameList"); name != "" {
+ mname = name
+ }
+
+ if Children, ok := node.Children[mname]; ok {
+ if r.IsNil() {
+ r.Set(reflect.MakeSlice(t, len(Children), len(Children)))
+ }
+
+ for i, c := range Children {
+ err := parse(r.Index(i), c, "")
+ if err != nil {
+ return err
+ }
+ }
+ }
+ } else { // flattened list means this is a single element
+ if r.IsNil() {
+ r.Set(reflect.MakeSlice(t, 0, 0))
+ }
+
+ childR := reflect.Zero(t.Elem())
+ r.Set(reflect.Append(r, childR))
+ err := parse(r.Index(r.Len()-1), node, "")
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
+// will also be deserialized as map entries.
+func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ if r.IsNil() {
+ r.Set(reflect.MakeMap(r.Type()))
+ }
+
+ if tag.Get("flattened") == "" { // look at all child entries
+ for _, entry := range node.Children["entry"] {
+ parseMapEntry(r, entry, tag)
+ }
+ } else { // this element is itself an entry
+ parseMapEntry(r, node, tag)
+ }
+
+ return nil
+}
+
+// parseMapEntry deserializes a map entry from an XML node.
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ kname, vname := "key", "value"
+ if n := tag.Get("locationNameKey"); n != "" {
+ kname = n
+ }
+ if n := tag.Get("locationNameValue"); n != "" {
+ vname = n
+ }
+
+ keys, ok := node.Children[kname]
+ values := node.Children[vname]
+ if ok {
+ for i, key := range keys {
+ keyR := reflect.ValueOf(key.Text)
+ value := values[i]
+ valueR := reflect.New(r.Type().Elem()).Elem()
+
+ parse(valueR, value, "")
+ r.SetMapIndex(keyR, valueR)
+ }
+ }
+ return nil
+}
+
+// parseScalar deserializes an XMLNode value into a concrete type based on the
+// interface type of r.
+//
+// Error is returned if the deserialization fails due to invalid type conversion,
+// or unsupported interface type.
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ switch r.Interface().(type) {
+ case *string:
+ r.Set(reflect.ValueOf(&node.Text))
+ return nil
+ case []byte:
+ b, err := base64.StdEncoding.DecodeString(node.Text)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(b))
+ case *bool:
+ v, err := strconv.ParseBool(node.Text)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&v))
+ case *int64:
+ v, err := strconv.ParseInt(node.Text, 10, 64)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&v))
+ case *float64:
+ v, err := strconv.ParseFloat(node.Text, 64)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&v))
+ case *time.Time:
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.ISO8601TimeFormatName
+ }
+
+ // IBM COS SDK Code -- START
+ t, err := protocol.ParseIbmTime(format, node.Text)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&t))
+ // IBM COS SDK Code -- END
+ default:
+ return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type())
+ }
+ return nil
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
new file mode 100644
index 0000000000000..c85b79fddd28b
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
@@ -0,0 +1,173 @@
+package xmlutil
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "sort"
+)
+
+// An XMLNode contains the values to be encoded or decoded.
+type XMLNode struct {
+ Name xml.Name `json:",omitempty"`
+ Children map[string][]*XMLNode `json:",omitempty"`
+ Text string `json:",omitempty"`
+ Attr []xml.Attr `json:",omitempty"`
+
+ namespaces map[string]string
+ parent *XMLNode
+}
+
+// textEncoder is a string type alias that implements the TextMarshaler interface.
+// This alias type is used to ensure that the line feed (\n) (U+000A) is escaped.
+type textEncoder string
+
+func (t textEncoder) MarshalText() ([]byte, error) {
+ return []byte(t), nil
+}
+
+// NewXMLElement returns a pointer to a new XMLNode initialized to default values.
+func NewXMLElement(name xml.Name) *XMLNode {
+ return &XMLNode{
+ Name: name,
+ Children: map[string][]*XMLNode{},
+ Attr: []xml.Attr{},
+ }
+}
+
+// AddChild adds child to the XMLNode.
+func (n *XMLNode) AddChild(child *XMLNode) {
+ child.parent = n
+ if _, ok := n.Children[child.Name.Local]; !ok {
+ n.Children[child.Name.Local] = []*XMLNode{}
+ }
+ n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child)
+}
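+
+// Editor's note: a minimal sketch of assembling a small tree by hand, with
+// assumed element names (not SDK source):
+//
+//	root := NewXMLElement(xml.Name{Local: "Root"})
+//	child := NewXMLElement(xml.Name{Local: "Child"})
+//	child.Text = "value"
+//	root.AddChild(child) // root.Children["Child"] == []*XMLNode{child}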
+
+// XMLToStruct converts an xml.Decoder stream to an XMLNode with nested values.
+func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) {
+ out := &XMLNode{}
+ for {
+ tok, err := d.Token()
+ if err != nil {
+ if err == io.EOF {
+ break
+ } else {
+ return out, err
+ }
+ }
+
+ if tok == nil {
+ break
+ }
+
+ switch typed := tok.(type) {
+ case xml.CharData:
+ out.Text = string(typed.Copy())
+ case xml.StartElement:
+ el := typed.Copy()
+ out.Attr = el.Attr
+ if out.Children == nil {
+ out.Children = map[string][]*XMLNode{}
+ }
+
+ name := typed.Name.Local
+ slice := out.Children[name]
+ if slice == nil {
+ slice = []*XMLNode{}
+ }
+ node, e := XMLToStruct(d, &el)
+ out.findNamespaces()
+ if e != nil {
+ return out, e
+ }
+ node.Name = typed.Name
+ node.findNamespaces()
+ tempOut := *out
+ // Save into a temp variable, simply because out gets squashed during
+ // loop iterations
+ node.parent = &tempOut
+ slice = append(slice, node)
+ out.Children[name] = slice
+ case xml.EndElement:
+ if s != nil && s.Name.Local == typed.Name.Local { // matching end token
+ return out, nil
+ }
+ out = &XMLNode{}
+ }
+ }
+ return out, nil
+}
+
+func (n *XMLNode) findNamespaces() {
+ ns := map[string]string{}
+ for _, a := range n.Attr {
+ if a.Name.Space == "xmlns" {
+ ns[a.Value] = a.Name.Local
+ }
+ }
+
+ n.namespaces = ns
+}
+
+func (n *XMLNode) findElem(name string) (string, bool) {
+ for node := n; node != nil; node = node.parent {
+ for _, a := range node.Attr {
+ namespace := a.Name.Space
+ if v, ok := node.namespaces[namespace]; ok {
+ namespace = v
+ }
+ if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) {
+ return a.Value, true
+ }
+ }
+ }
+ return "", false
+}
+
+// StructToXML writes an XMLNode to an xml.Encoder as tokens.
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
+ // Sort Attributes
+ attrs := node.Attr
+ if sorted {
+		sortedAttrs := make([]xml.Attr, 0, len(attrs))
+ for _, k := range node.Attr {
+ sortedAttrs = append(sortedAttrs, k)
+ }
+ sort.Sort(xmlAttrSlice(sortedAttrs))
+ attrs = sortedAttrs
+ }
+
+ startElement := xml.StartElement{Name: node.Name, Attr: attrs}
+
+ if node.Text != "" {
+ e.EncodeElement(textEncoder(node.Text), startElement)
+ return e.Flush()
+ }
+
+ e.EncodeToken(startElement)
+
+ if sorted {
+ sortedNames := []string{}
+ for k := range node.Children {
+ sortedNames = append(sortedNames, k)
+ }
+ sort.Strings(sortedNames)
+
+ for _, k := range sortedNames {
+ for _, v := range node.Children[k] {
+ StructToXML(e, v, sorted)
+ }
+ }
+ } else {
+ for _, c := range node.Children {
+ for _, v := range c {
+ StructToXML(e, v, sorted)
+ }
+ }
+ }
+
+ e.EncodeToken(startElement.End())
+
+ return e.Flush()
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/api.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/api.go
new file mode 100644
index 0000000000000..e183afbc0a5bf
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/api.go
@@ -0,0 +1,21586 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awsutil"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+ "github.com/IBM/ibm-cos-sdk-go/private/checksum"
+ "github.com/IBM/ibm-cos-sdk-go/private/protocol"
+ "github.com/IBM/ibm-cos-sdk-go/private/protocol/restxml"
+)
+
+const opAbortMultipartUpload = "AbortMultipartUpload"
+
+// AbortMultipartUploadRequest generates a "aws/request.Request" representing the
+// client's request for the AbortMultipartUpload operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AbortMultipartUpload for more information on using the AbortMultipartUpload
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the AbortMultipartUploadRequest method.
+// req, resp := client.AbortMultipartUploadRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload
+func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) {
+ op := &request.Operation{
+ Name: opAbortMultipartUpload,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &AbortMultipartUploadInput{}
+ }
+
+ output = &AbortMultipartUploadOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// AbortMultipartUpload API operation for Amazon Simple Storage Service.
+//
+// This action aborts a multipart upload. After a multipart upload is aborted,
+// no additional parts can be uploaded using that upload ID. The storage consumed
+// by any previously uploaded parts will be freed. However, if any part uploads
+// are currently in progress, those part uploads might or might not succeed.
+// As a result, it might be necessary to abort a given multipart upload multiple
+// times in order to completely free all storage consumed by all parts.
+//
+// To verify that all parts have been removed, so you don't get charged for
+// the part storage, you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+// action and ensure that the parts list is empty.
+//
+// For information about permissions required to use the multipart upload API, see
+// Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
+//
+// The following operations are related to AbortMultipartUpload:
+//
+// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation AbortMultipartUpload for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchUpload "NoSuchUpload"
+// The specified multipart upload does not exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload
+func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) {
+ req, out := c.AbortMultipartUploadRequest(input)
+ return out, req.Send()
+}
+
+// AbortMultipartUploadWithContext is the same as AbortMultipartUpload with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AbortMultipartUpload for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) AbortMultipartUploadWithContext(ctx aws.Context, input *AbortMultipartUploadInput, opts ...request.Option) (*AbortMultipartUploadOutput, error) {
+ req, out := c.AbortMultipartUploadRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opAddLegalHold = "AddLegalHold"
+
+// AddLegalHoldRequest generates a "aws/request.Request" representing the
+// client's request for the AddLegalHold operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AddLegalHold for more information on using the AddLegalHold
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the AddLegalHoldRequest method.
+// req, resp := client.AddLegalHoldRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AddLegalHold
+func (c *S3) AddLegalHoldRequest(input *AddLegalHoldInput) (req *request.Request, output *AddLegalHoldOutput) {
+ op := &request.Operation{
+ Name: opAddLegalHold,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}/{Key+}?legalHold",
+ }
+
+ if input == nil {
+ input = &AddLegalHoldInput{}
+ }
+
+ output = &AddLegalHoldOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// AddLegalHold API operation for Amazon Simple Storage Service.
+//
+// Add a legal hold on an object. The legal hold identifiers are stored in the
+// object metadata along with the timestamp of when they are POSTed to the object.
+// The presence of any legal hold identifiers prevents the modification or deletion
+// of the object data, even if the retention period has expired. Legal Holds
+// can only be added to objects in a bucket with a protection policy. Otherwise
+// a 400 error will be returned.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation AddLegalHold for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AddLegalHold
+func (c *S3) AddLegalHold(input *AddLegalHoldInput) (*AddLegalHoldOutput, error) {
+ req, out := c.AddLegalHoldRequest(input)
+ return out, req.Send()
+}
+
+// AddLegalHoldWithContext is the same as AddLegalHold with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AddLegalHold for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) AddLegalHoldWithContext(ctx aws.Context, input *AddLegalHoldInput, opts ...request.Option) (*AddLegalHoldOutput, error) {
+ req, out := c.AddLegalHoldRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
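+
+// A minimal sketch of adding a legal hold from caller code. The bucket, key,
+// and identifier values are placeholders, and the legal-hold identifier field
+// name used here is an assumption to be checked against AddLegalHoldInput.
+//
+//    _, err := client.AddLegalHold(&s3.AddLegalHoldInput{
+//        Bucket:               aws.String("protected-bucket"), // must have a protection policy
+//        Key:                  aws.String("my-key"),
+//        RetentionLegalHoldId: aws.String("legal-hold-1"),     // assumed field name
+//    })
+//    // A 400 error indicates the bucket has no protection policy.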
+
+const opCompleteMultipartUpload = "CompleteMultipartUpload"
+
+// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the
+// client's request for the CompleteMultipartUpload operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CompleteMultipartUpload for more information on using the CompleteMultipartUpload
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CompleteMultipartUploadRequest method.
+// req, resp := client.CompleteMultipartUploadRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload
+func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) {
+ op := &request.Operation{
+ Name: opCompleteMultipartUpload,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &CompleteMultipartUploadInput{}
+ }
+
+ output = &CompleteMultipartUploadOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
+ return
+}
+
+// CompleteMultipartUpload API operation for Amazon Simple Storage Service.
+//
+// Completes a multipart upload by assembling previously uploaded parts.
+//
+// You first initiate the multipart upload and then upload all parts using the
+// UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+// operation. After successfully uploading all relevant parts of an upload,
+// you call this action to complete the upload. Upon receiving this request,
+// Amazon S3 concatenates all the parts in ascending order by part number to
+// create a new object. In the Complete Multipart Upload request, you must provide
+// the parts list. You must ensure that the parts list is complete. This action
+// concatenates the parts that you provide in the list. For each part in the
+// list, you must provide the part number and the ETag value, returned after
+// that part was uploaded.
+//
+// Processing of a Complete Multipart Upload request could take several minutes
+// to complete. After Amazon S3 begins processing the request, it sends an HTTP
+// response header that specifies a 200 OK response. While processing is in
+// progress, Amazon S3 periodically sends white space characters to keep the
+// connection from timing out. Because a request could fail after the initial
+// 200 OK response has been sent, it is important that you check the response
+// body to determine whether the request succeeded.
+//
+// Note that if CompleteMultipartUpload fails, applications should be prepared
+// to retry the failed requests. For more information, see Amazon S3 Error Best
+// Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html).
+//
+// For more information about multipart uploads, see Uploading Objects Using
+// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html).
+//
+// For information about permissions required to use the multipart upload API,
+// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
+//
+// CompleteMultipartUpload has the following special errors:
+//
+// * Error code: EntityTooSmall Description: Your proposed upload is smaller
+// than the minimum allowed object size. Each part must be at least 5 MB
+// in size, except the last part. 400 Bad Request
+//
+// * Error code: InvalidPart Description: One or more of the specified parts
+// could not be found. The part might not have been uploaded, or the specified
+// entity tag might not have matched the part's entity tag. 400 Bad Request
+//
+// * Error code: InvalidPartOrder Description: The list of parts was not
+// in ascending order. The parts list must be specified in order by part
+// number. 400 Bad Request
+//
+// * Error code: NoSuchUpload Description: The specified multipart upload
+// does not exist. The upload ID might be invalid, or the multipart upload
+// might have been aborted or completed. 404 Not Found
+//
+// The following operations are related to CompleteMultipartUpload:
+//
+// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation CompleteMultipartUpload for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload
+func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) {
+ req, out := c.CompleteMultipartUploadRequest(input)
+ return out, req.Send()
+}
+
+// CompleteMultipartUploadWithContext is the same as CompleteMultipartUpload with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CompleteMultipartUpload for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) CompleteMultipartUploadWithContext(ctx aws.Context, input *CompleteMultipartUploadInput, opts ...request.Option) (*CompleteMultipartUploadOutput, error) {
+ req, out := c.CompleteMultipartUploadRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
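+
+// Sketch: completing an upload by supplying the full, ordered parts list that
+// the documentation above requires. Bucket, key, upload ID, and ETags are
+// placeholders collected from earlier UploadPart responses.
+//
+//    _, err := client.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+//        Bucket:   aws.String("my-bucket"),
+//        Key:      aws.String("my-key"),
+//        UploadId: aws.String(uploadID),
+//        MultipartUpload: &s3.CompletedMultipartUpload{
+//            Parts: []*s3.CompletedPart{
+//                {PartNumber: aws.Int64(1), ETag: etagPart1},
+//                {PartNumber: aws.Int64(2), ETag: etagPart2},
+//            },
+//        },
+//    })
+//    // Check err even after a 200 OK: the response body can carry an error.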
+
+const opCopyObject = "CopyObject"
+
+// CopyObjectRequest generates a "aws/request.Request" representing the
+// client's request for the CopyObject operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CopyObject for more information on using the CopyObject
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CopyObjectRequest method.
+// req, resp := client.CopyObjectRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject
+func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) {
+ op := &request.Operation{
+ Name: opCopyObject,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &CopyObjectInput{}
+ }
+
+ output = &CopyObjectOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CopyObject API operation for Amazon Simple Storage Service.
+//
+// Creates a copy of an object that is already stored in Amazon S3.
+//
+// You can store individual objects of up to 5 TB in Amazon S3. You create a
+// copy of your object up to 5 GB in size in a single atomic action using this
+// API. However, to copy an object greater than 5 GB, you must use the multipart
+// upload Upload Part - Copy API. For more information, see Copy Object Using
+// the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html).
+//
+// All copy requests must be authenticated. Additionally, you must have read
+// access to the source object and write access to the destination bucket. For
+// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
+// Both the Region that you want to copy the object from and the Region that
+// you want to copy the object to must be enabled for your account.
+//
+// A copy request might return an error when Amazon S3 receives the copy request
+// or while Amazon S3 is copying the files. If the error occurs before the copy
+// action starts, you receive a standard Amazon S3 error. If the error occurs
+// during the copy operation, the error response is embedded in the 200 OK response.
+// This means that a 200 OK response can contain either a success or an error.
+// Design your application to parse the contents of the response and handle
+// it appropriately.
+//
+// If the copy is successful, you receive a response with information about
+// the copied object.
+//
+// If the request is an HTTP 1.1 request, the response is chunk encoded. If
+// it were not, it would not contain the content-length, and you would need
+// to read the entire body.
+//
+// The copy request charge is based on the storage class and Region that you
+// specify for the destination object. For pricing information, see Amazon S3
+// pricing (http://aws.amazon.com/s3/pricing/).
+//
+// Amazon S3 transfer acceleration does not support cross-Region copies. If
+// you request a cross-Region copy using a transfer acceleration endpoint, you
+// get a 400 Bad Request error. For more information, see Transfer Acceleration
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html).
+//
+// Metadata
+//
+// When copying an object, you can preserve all metadata (default) or specify
+// new metadata. However, the ACL is not preserved and is set to private for
+// the user making the request. To override the default ACL setting, specify
+// a new ACL when generating a copy request. For more information, see Using
+// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html).
+//
+// To specify whether you want the object metadata copied from the source object
+// or replaced with metadata provided in the request, you can optionally add
+// the x-amz-metadata-directive header. When you grant permissions, you can
+// use the s3:x-amz-metadata-directive condition key to enforce certain metadata
+// behavior when objects are uploaded. For more information, see Specifying
+// Conditions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html)
+// in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific
+// condition keys, see Actions, Resources, and Condition Keys for Amazon S3
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html).
+//
+// x-amz-copy-source-if Headers
+//
+// To copy an object only under certain conditions, such as whether the ETag
+// matches or whether the object was modified before or after a specified date,
+// use the following request parameters:
+//
+// * x-amz-copy-source-if-match
+//
+// * x-amz-copy-source-if-none-match
+//
+// * x-amz-copy-source-if-unmodified-since
+//
+// * x-amz-copy-source-if-modified-since
+//
+// If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+// headers are present in the request and evaluate as follows, Amazon S3 returns
+// 200 OK and copies the data:
+//
+// * x-amz-copy-source-if-match condition evaluates to true
+//
+// * x-amz-copy-source-if-unmodified-since condition evaluates to false
+//
+// If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since
+// headers are present in the request and evaluate as follows, Amazon S3 returns
+// the 412 Precondition Failed response code:
+//
+// * x-amz-copy-source-if-none-match condition evaluates to false
+//
+// * x-amz-copy-source-if-modified-since condition evaluates to true
+//
+// All headers with the x-amz- prefix, including x-amz-copy-source, must be
+// signed.
+//
+// Server-side encryption
+//
+// When you perform a CopyObject operation, you can optionally use the appropriate
+// encryption-related headers to encrypt the object using server-side encryption
+// with AWS managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided
+// encryption key. With server-side encryption, Amazon S3 encrypts your data
+// as it writes it to disks in its data centers and decrypts the data when you
+// access it. For more information about server-side encryption, see Using Server-Side
+// Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html).
+//
+// If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the
+// object. For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html)
+// in the Amazon S3 User Guide.
+//
+// Access Control List (ACL)-Specific Request Headers
+//
+// When copying an object, you can optionally use headers to grant ACL-based
+// permissions. By default, all objects are private. Only the owner has full
+// access control. When adding a new object, you can grant permissions to individual
+// AWS accounts or to predefined groups defined by Amazon S3. These permissions
+// are then added to the ACL on the object. For more information, see Access
+// Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
+// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html).
+//
+// Storage Class Options
+//
+// You can use the CopyObject action to change the storage class of an object
+// that is already stored in Amazon S3 using the StorageClass parameter. For
+// more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+// in the Amazon S3 Service Developer Guide.
+//
+// Versioning
+//
+// By default, x-amz-copy-source identifies the current version of an object
+// to copy. If the current version is a delete marker, Amazon S3 behaves as
+// if the object was deleted. To copy a different version, use the versionId
+// subresource.
+//
+// If you enable versioning on the target bucket, Amazon S3 generates a unique
+// version ID for the object being copied. This version ID is different from
+// the version ID of the source object. Amazon S3 returns the version ID of
+// the copied object in the x-amz-version-id response header in the response.
+//
+// If you do not enable versioning or suspend it on the target bucket, the version
+// ID that Amazon S3 generates is always null.
+//
+// If the source object's storage class is GLACIER, you must restore a copy
+// of this object before you can use it as a source object for the copy operation.
+// For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html).
+//
+// The following operations are related to CopyObject:
+//
+// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// For more information, see Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation CopyObject for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError"
+// The source object of the COPY action is not in the active tier and is only
+// stored in Amazon S3 Glacier.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject
+func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) {
+ req, out := c.CopyObjectRequest(input)
+ return out, req.Send()
+}
+
+// CopyObjectWithContext is the same as CopyObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CopyObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) CopyObjectWithContext(ctx aws.Context, input *CopyObjectInput, opts ...request.Option) (*CopyObjectOutput, error) {
+ req, out := c.CopyObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
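+
+// Sketch: a conditional copy that also replaces the object metadata, using
+// the x-amz-copy-source-if-match and x-amz-metadata-directive behavior
+// described above. Buckets, keys, and the source ETag are placeholders.
+//
+//    _, err := client.CopyObject(&s3.CopyObjectInput{
+//        Bucket:            aws.String("dst-bucket"),
+//        Key:               aws.String("dst-key"),
+//        CopySource:        aws.String("src-bucket/src-key"),
+//        CopySourceIfMatch: sourceETag, // copy only if the source is unchanged
+//        MetadataDirective: aws.String("REPLACE"),
+//        Metadata:          map[string]*string{"owner": aws.String("team-a")},
+//    })
+//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeObjectNotInActiveTierError {
+//        // Restore the archived source object before retrying the copy.
+//    }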
+
+const opCreateBucket = "CreateBucket"
+
+// CreateBucketRequest generates a "aws/request.Request" representing the
+// client's request for the CreateBucket operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateBucket for more information on using the CreateBucket
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateBucketRequest method.
+// req, resp := client.CreateBucketRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket
+func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) {
+ op := &request.Operation{
+ Name: opCreateBucket,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}",
+ }
+
+ if input == nil {
+ input = &CreateBucketInput{}
+ }
+
+ output = &CreateBucketOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateBucket API operation for Amazon Simple Storage Service.
+//
+// Creates a new S3 bucket. To create a bucket, you must register with Amazon
+// S3 and have a valid AWS Access Key ID to authenticate requests. Anonymous
+// requests are never allowed to create buckets. By creating the bucket, you
+// become the bucket owner.
+//
+// Not every string is an acceptable bucket name. For information about bucket
+// naming restrictions, see Working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html).
+//
+// If you want to create an Amazon S3 on Outposts bucket, see Create Bucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html).
+//
+// By default, the bucket is created in the US East (N. Virginia) Region. You
+// can optionally specify a Region in the request body. You might choose a Region
+// to optimize latency, minimize costs, or address regulatory requirements.
+// For example, if you reside in Europe, you will probably find it advantageous
+// to create buckets in the Europe (Ireland) Region. For more information, see
+// Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro).
+//
+// If you send your create bucket request to the s3.amazonaws.com endpoint,
+// the request goes to the us-east-1 Region. Accordingly, the signature calculations
+// in Signature Version 4 must use us-east-1 as the Region, even if the location
+// constraint in the request specifies another Region where the bucket is to
+// be created. If you create a bucket in a Region other than US East (N. Virginia),
+// your application must be able to handle 307 redirect. For more information,
+// see Virtual hosting of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html).
+//
+// When creating a bucket using this operation, you can optionally specify the
+// accounts or groups that should be granted specific permissions on the bucket.
+// There are two ways to grant the appropriate permissions using the request
+// headers.
+//
+// * Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports
+// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a
+// predefined set of grantees and permissions. For more information, see
+// Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// * Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write,
+// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control
+// headers. These headers map to the set of permissions Amazon S3 supports
+// in an ACL. For more information, see Access control list (ACL) overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). You
+// specify each grantee as a type=value pair, where the type is one of the
+// following: id – if the value specified is the canonical user ID of an
+// AWS account uri – if you are granting permissions to a predefined group
+// emailAddress – if the value specified is the email address of an AWS
+// account Using email addresses to specify a grantee is only supported in
+// the following AWS Regions: US East (N. Virginia) US West (N. California)
+// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific
+// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all
+// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in
+// the AWS General Reference. For example, the following x-amz-grant-read
+// header grants the AWS accounts identified by account IDs permissions to
+// read object data and its metadata: x-amz-grant-read: id="11112222333",
+// id="444455556666"
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// The following operations are related to CreateBucket:
+//
+// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation CreateBucket for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeBucketAlreadyExists "BucketAlreadyExists"
+// The requested bucket name is not available. The bucket namespace is shared
+// by all users of the system. Select a different name and try again.
+//
+// * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou"
+// The bucket you tried to create already exists, and you own it. Amazon S3
+// returns this error in all AWS Regions except in the North Virginia Region.
+// For legacy compatibility, if you re-create an existing bucket that you already
+// own in the North Virginia Region, Amazon S3 returns 200 OK and resets the
+// bucket access control lists (ACLs).
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket
+func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) {
+ req, out := c.CreateBucketRequest(input)
+ return out, req.Send()
+}
+
+// CreateBucketWithContext is the same as CreateBucket with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateBucket for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) {
+ req, out := c.CreateBucketRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
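+
+// Sketch: creating a bucket outside us-east-1 and tolerating re-creation of
+// a bucket you already own, per the error codes listed above. The bucket name
+// and Region are placeholders.
+//
+//    _, err := client.CreateBucket(&s3.CreateBucketInput{
+//        Bucket: aws.String("my-bucket"),
+//        CreateBucketConfiguration: &s3.CreateBucketConfiguration{
+//            LocationConstraint: aws.String("eu-west-1"),
+//        },
+//    })
+//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeBucketAlreadyOwnedByYou {
+//        err = nil // already ours; treat as success
+//    }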
+
+const opCreateMultipartUpload = "CreateMultipartUpload"
+
+// CreateMultipartUploadRequest generates a "aws/request.Request" representing the
+// client's request for the CreateMultipartUpload operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateMultipartUpload for more information on using the CreateMultipartUpload
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateMultipartUploadRequest method.
+// req, resp := client.CreateMultipartUploadRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload
+func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) {
+ op := &request.Operation{
+ Name: opCreateMultipartUpload,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}/{Key+}?uploads",
+ }
+
+ if input == nil {
+ input = &CreateMultipartUploadInput{}
+ }
+
+ output = &CreateMultipartUploadOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateMultipartUpload API operation for Amazon Simple Storage Service.
+//
+// This action initiates a multipart upload and returns an upload ID. This upload
+// ID is used to associate all of the parts in the specific multipart upload.
+// You specify this upload ID in each of your subsequent upload part requests
+// (see UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)).
+// You also include this upload ID in the final request to either complete or
+// abort the multipart upload request.
+//
+// For more information about multipart uploads, see Multipart Upload Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html).
+//
+// If you have configured a lifecycle rule to abort incomplete multipart uploads,
+// the upload must complete within the number of days specified in the bucket
+// lifecycle configuration. Otherwise, the incomplete multipart upload becomes
+// eligible for an abort action and Amazon S3 aborts the multipart upload. For
+// more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config).
+//
+// For information about the permissions required to use the multipart upload
+// API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
+//
+// For request signing, multipart upload is just a series of regular requests.
+// You initiate a multipart upload, send one or more requests to upload parts,
+// and then complete the multipart upload process. You sign each request individually.
+// There is nothing special about signing multipart upload requests. For more
+// information about signing, see Authenticating Requests (AWS Signature Version
+// 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html).
+//
+// After you initiate a multipart upload and upload one or more parts, to stop
+// being charged for storing the uploaded parts, you must either complete or
+// abort the multipart upload. Amazon S3 frees up the space used to store the
+// parts and stops charging you for storing them only after you either complete
+// or abort a multipart upload.
+//
+// You can optionally request server-side encryption. For server-side encryption,
+// Amazon S3 encrypts your data as it writes it to disks in its data centers
+// and decrypts it when you access it. You can provide your own encryption key,
+// or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or
+// Amazon S3-managed encryption keys. If you choose to provide your own encryption
+// key, the request headers you provide in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
+// requests must match the headers you used in the request to initiate the upload
+// by using CreateMultipartUpload.
+//
+// To perform a multipart upload with encryption using an AWS KMS CMK, the requester
+// must have permission to the kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey*,
+// and kms:DescribeKey actions on the key. These permissions are required because
+// Amazon S3 must decrypt and read data from the encrypted file parts before
+// it completes the multipart upload.
+//
+// If your AWS Identity and Access Management (IAM) user or role is in the same
+// AWS account as the AWS KMS CMK, then you must have these permissions on the
+// key policy. If your IAM user or role belongs to a different account than
+// the key, then you must have the permissions on both the key policy and your
+// IAM user or role.
+//
+// For more information, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html).
+//
+// Access Permissions
+//
+// When copying an object, you can optionally specify the accounts or groups
+// that should be granted specific permissions on the new object. There are
+// two ways to grant the permissions using the request headers:
+//
+// * Specify a canned ACL with the x-amz-acl request header. For more information,
+// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp,
+// x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters
+// map to the set of permissions that Amazon S3 supports in an ACL. For more
+// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// Server-Side-Encryption-Specific Request Headers
+//
+// You can optionally tell Amazon S3 to encrypt data at rest using server-side
+// encryption. Server-side encryption is for data encryption at rest. Amazon
+// S3 encrypts your data as it writes it to disks in its data centers and decrypts
+// it when you access it. The option you use depends on whether you want to
+// use AWS managed encryption keys or provide your own encryption key.
+//
+// * Use encryption keys managed by Amazon S3 or customer master keys (CMKs)
+// stored in AWS Key Management Service (AWS KMS) – If you want AWS to
+// manage the keys used to encrypt data, specify the following headers in
+// the request. x-amz-server-side-encryption x-amz-server-side-encryption-aws-kms-key-id
+// x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms,
+// but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon
+// S3 uses the AWS managed CMK in AWS KMS to protect the data. All GET and
+// PUT requests for an object protected by AWS KMS fail if you don't make
+// them with SSL or by using SigV4. For more information about server-side
+// encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data
+// Using Server-Side Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html).
+//
+// * Use customer-provided encryption keys – If you want to manage your
+// own encryption keys, provide all the following headers in the request.
+// x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key
+// x-amz-server-side-encryption-customer-key-MD5 For more information about
+// server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting
+// Data Using Server-Side Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html).
+//
+// Access-Control-List (ACL)-Specific Request Headers
+//
+// You also can use the following access control–related headers with this
+// operation. By default, all objects are private. Only the owner has full access
+// control. When adding a new object, you can grant permissions to individual
+// AWS accounts or to predefined groups defined by Amazon S3. These permissions
+// are then added to the access control list (ACL) on the object. For more information,
+// see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html).
+// With this operation, you can grant access permissions using one of the following
+// two methods:
+//
+// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined
+// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees
+// and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// * Specify access permissions explicitly — To explicitly grant access
+// permissions to specific AWS accounts or groups, use the following headers.
+// Each header maps to specific permissions that Amazon S3 supports in an
+// ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+// In the header, you specify a list of grantees who get the specific permission.
+// To grant permissions explicitly, use: x-amz-grant-read x-amz-grant-write
+// x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You
+// specify each grantee as a type=value pair, where the type is one of the
+// following: id – if the value specified is the canonical user ID of an
+// AWS account uri – if you are granting permissions to a predefined group
+// emailAddress – if the value specified is the email address of an AWS
+// account Using email addresses to specify a grantee is only supported in
+// the following AWS Regions: US East (N. Virginia) US West (N. California)
+// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific
+// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all
+// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in
+// the AWS General Reference. For example, the following x-amz-grant-read
+// header grants the AWS accounts identified by account IDs permissions to
+// read object data and its metadata: x-amz-grant-read: id="11112222333",
+// id="444455556666"
+//
+// The following operations are related to CreateMultipartUpload:
+//
+// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation CreateMultipartUpload for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload
+func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) {
+ req, out := c.CreateMultipartUploadRequest(input)
+ return out, req.Send()
+}
+
+// CreateMultipartUploadWithContext is the same as CreateMultipartUpload with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateMultipartUpload for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMultipartUploadInput, opts ...request.Option) (*CreateMultipartUploadOutput, error) {
+ req, out := c.CreateMultipartUploadRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
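+
+// Sketch: initiating an upload and keeping the upload ID for the subsequent
+// UploadPart and CompleteMultipartUpload (or AbortMultipartUpload) calls.
+// Bucket and key are placeholders; the encryption header is optional.
+//
+//    out, err := client.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
+//        Bucket:               aws.String("my-bucket"),
+//        Key:                  aws.String("my-key"),
+//        ServerSideEncryption: aws.String("AES256"),
+//    })
+//    if err == nil {
+//        uploadID := aws.StringValue(out.UploadId) // required by every later part request
+//        _ = uploadID
+//    }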
+
+const opDeleteBucket = "DeleteBucket"
+
+// DeleteBucketRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucket operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBucket for more information on using the DeleteBucket
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteBucketRequest method.
+// req, resp := client.DeleteBucketRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket
+func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucket,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}",
+ }
+
+ if input == nil {
+ input = &DeleteBucketInput{}
+ }
+
+ output = &DeleteBucketOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucket API operation for Amazon Simple Storage Service.
+//
+// Deletes the S3 bucket. All objects (including all object versions and delete
+// markers) in the bucket must be deleted before the bucket itself can be deleted.
+//
+// Related Resources
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucket for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket
+func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) {
+ req, out := c.DeleteBucketRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketWithContext is the same as DeleteBucket with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucket for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) {
+ req, out := c.DeleteBucketRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
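+
+// Sketch: deleting a bucket. As documented above, every object (including all
+// versions and delete markers) must be removed first; the bucket name is a
+// placeholder.
+//
+//    _, err := client.DeleteBucket(&s3.DeleteBucketInput{
+//        Bucket: aws.String("my-bucket"),
+//    })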
+
+const opDeleteBucketCors = "DeleteBucketCors"
+
+// DeleteBucketCorsRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketCors operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBucketCors for more information on using the DeleteBucketCors
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteBucketCorsRequest method.
+// req, resp := client.DeleteBucketCorsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors
+func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketCors,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?cors",
+ }
+
+ if input == nil {
+ input = &DeleteBucketCorsInput{}
+ }
+
+ output = &DeleteBucketCorsOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucketCors API operation for Amazon Simple Storage Service.
+//
+// Deletes the cors configuration information set for the bucket.
+//
+// To use this operation, you must have permission to perform the s3:PutBucketCORS
+// action. The bucket owner has this permission by default and can grant this
+// permission to others.
+//
+// For information about cors, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html)
+// in the Amazon S3 User Guide.
+//
+// Related Resources:
+//
+// * PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html)
+//
+// * RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketCors for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors
+func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) {
+ req, out := c.DeleteBucketCorsRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketCorsWithContext is the same as DeleteBucketCors with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketCors for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCorsInput, opts ...request.Option) (*DeleteBucketCorsOutput, error) {
+ req, out := c.DeleteBucketCorsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
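+
+// Sketch: removing a bucket's CORS configuration. This requires the
+// s3:PutBucketCORS permission noted above; the bucket name is a placeholder.
+//
+//    _, err := client.DeleteBucketCors(&s3.DeleteBucketCorsInput{
+//        Bucket: aws.String("my-bucket"),
+//    })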
+
+const opDeleteBucketLifecycle = "DeleteBucketLifecycle"
+
+// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketLifecycle operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBucketLifecycle for more information on using the DeleteBucketLifecycle
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteBucketLifecycleRequest method.
+// req, resp := client.DeleteBucketLifecycleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle
+func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketLifecycle,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?lifecycle",
+ }
+
+ if input == nil {
+ input = &DeleteBucketLifecycleInput{}
+ }
+
+ output = &DeleteBucketLifecycleOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucketLifecycle API operation for Amazon Simple Storage Service.
+//
+// Deletes the lifecycle configuration from the specified bucket. Amazon S3
+// removes all the lifecycle configuration rules in the lifecycle subresource
+// associated with the bucket. Your objects never expire, and Amazon S3 no longer
+// automatically deletes any objects on the basis of rules contained in the
+// deleted lifecycle configuration.
+//
+// To use this operation, you must have permission to perform the s3:PutLifecycleConfiguration
+// action. By default, the bucket owner has this permission and the bucket owner
+// can grant this permission to others.
+//
+// There is usually some time lag before lifecycle configuration deletion is
+// fully propagated to all the Amazon S3 systems.
+//
+// For more information about the object expiration, see Elements to Describe
+// Lifecycle Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions).
+//
+// Related actions include:
+//
+// * PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+//
+// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketLifecycle for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle
+func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) {
+ req, out := c.DeleteBucketLifecycleRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketLifecycleWithContext is the same as DeleteBucketLifecycle with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketLifecycle for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketLifecycleWithContext(ctx aws.Context, input *DeleteBucketLifecycleInput, opts ...request.Option) (*DeleteBucketLifecycleOutput, error) {
+ req, out := c.DeleteBucketLifecycleRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
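+
+// Sketch: deleting the lifecycle configuration. Because deletion propagates
+// asynchronously (see above), existing rules may keep applying briefly after
+// the call returns. The bucket name is a placeholder.
+//
+//    _, err := client.DeleteBucketLifecycle(&s3.DeleteBucketLifecycleInput{
+//        Bucket: aws.String("my-bucket"),
+//    })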
+
+const opDeleteBucketReplication = "DeleteBucketReplication"
+
+// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketReplication operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBucketReplication for more information on using the DeleteBucketReplication
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteBucketReplicationRequest method.
+// req, resp := client.DeleteBucketReplicationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication
+func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketReplication,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?replication",
+ }
+
+ if input == nil {
+ input = &DeleteBucketReplicationInput{}
+ }
+
+ output = &DeleteBucketReplicationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucketReplication API operation for Amazon Simple Storage Service.
+//
+// Deletes the replication configuration from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketReplication for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication
+func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) {
+ req, out := c.DeleteBucketReplicationRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketReplicationWithContext is the same as DeleteBucketReplication with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketReplication for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketReplicationWithContext(ctx aws.Context, input *DeleteBucketReplicationInput, opts ...request.Option) (*DeleteBucketReplicationOutput, error) {
+ req, out := c.DeleteBucketReplicationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteBucketWebsite = "DeleteBucketWebsite"
+
+// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketWebsite operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBucketWebsite for more information on using the DeleteBucketWebsite
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteBucketWebsiteRequest method.
+// req, resp := client.DeleteBucketWebsiteRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite
+func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketWebsite,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?website",
+ }
+
+ if input == nil {
+ input = &DeleteBucketWebsiteInput{}
+ }
+
+ output = &DeleteBucketWebsiteOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucketWebsite API operation for Amazon Simple Storage Service.
+//
+// This action removes the website configuration for a bucket. Amazon S3 returns
+// a 200 OK response upon successfully deleting a website configuration on the
+// specified bucket. You will get a 200 OK response if the website configuration
+// you are trying to delete does not exist on the bucket. Amazon S3 returns
+// a 404 response if the bucket specified in the request does not exist.
+//
+// This DELETE action requires the S3:DeleteBucketWebsite permission. By default,
+// only the bucket owner can delete the website configuration attached to a
+// bucket. However, bucket owners can grant other users permission to delete
+// the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite
+// permission.
+//
+// For more information about hosting websites, see Hosting Websites on Amazon
+// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html).
+//
+// The following operations are related to DeleteBucketWebsite:
+//
+// * GetBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html)
+//
+// * PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketWebsite for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite
+func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) {
+ req, out := c.DeleteBucketWebsiteRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketWebsiteWithContext is the same as DeleteBucketWebsite with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketWebsite for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketWebsiteWithContext(ctx aws.Context, input *DeleteBucketWebsiteInput, opts ...request.Option) (*DeleteBucketWebsiteOutput, error) {
+ req, out := c.DeleteBucketWebsiteRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
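+// Example (editor's sketch, not generated code): a minimal use of
+// DeleteBucketWebsiteWithContext with a deadline, per the context guidance
+// above. It assumes svc is an *s3.S3 client built elsewhere and that this
+// SDK's aws package plus the standard context and time packages are imported;
+// the bucket name is a hypothetical placeholder.
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//    _, err := svc.DeleteBucketWebsiteWithContext(ctx, &s3.DeleteBucketWebsiteInput{
+//        Bucket: aws.String("examplebucket"), // hypothetical bucket name
+//    })
+//    if err != nil {
+//        // handle the error, including a possible context deadline exceeded
+//    }
+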
+const opDeleteLegalHold = "DeleteLegalHold"
+
+// DeleteLegalHoldRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteLegalHold operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteLegalHold for more information on using the DeleteLegalHold
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeleteLegalHoldRequest method.
+// req, resp := client.DeleteLegalHoldRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteLegalHold
+func (c *S3) DeleteLegalHoldRequest(input *DeleteLegalHoldInput) (req *request.Request, output *DeleteLegalHoldOutput) {
+ op := &request.Operation{
+ Name: opDeleteLegalHold,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}/{Key+}?legalHold",
+ }
+
+ if input == nil {
+ input = &DeleteLegalHoldInput{}
+ }
+
+ output = &DeleteLegalHoldOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteLegalHold API operation for Amazon Simple Storage Service.
+//
+// Removes a legal hold on an object. The legal hold identifiers are stored in
+// the object metadata along with the timestamp of when they are POSTed to the
+// object. The presence of any legal hold identifiers prevents the modification
+// or deletion of the object data, even if the retention period has expired.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteLegalHold for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteLegalHold
+func (c *S3) DeleteLegalHold(input *DeleteLegalHoldInput) (*DeleteLegalHoldOutput, error) {
+ req, out := c.DeleteLegalHoldRequest(input)
+ return out, req.Send()
+}
+
+// DeleteLegalHoldWithContext is the same as DeleteLegalHold with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteLegalHold for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteLegalHoldWithContext(ctx aws.Context, input *DeleteLegalHoldInput, opts ...request.Option) (*DeleteLegalHoldOutput, error) {
+ req, out := c.DeleteLegalHoldRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteObject = "DeleteObject"
+
+// DeleteObjectRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteObject operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteObject for more information on using the DeleteObject
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeleteObjectRequest method.
+// req, resp := client.DeleteObjectRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject
+func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) {
+ op := &request.Operation{
+ Name: opDeleteObject,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &DeleteObjectInput{}
+ }
+
+ output = &DeleteObjectOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteObject API operation for Amazon Simple Storage Service.
+//
+// Removes the null version (if there is one) of an object and inserts a delete
+// marker, which becomes the latest version of the object. If there isn't a
+// null version, Amazon S3 does not remove any objects but will still respond
+// that the command was successful.
+//
+// To remove a specific version, you must be the bucket owner and you must use
+// the version Id subresource. Using this subresource permanently deletes the
+// version. If the object deleted is a delete marker, Amazon S3 sets the response
+// header, x-amz-delete-marker, to true.
+//
+// If the object you want to delete is in a bucket where the bucket versioning
+// configuration is MFA Delete enabled, you must include the x-amz-mfa request
+// header in the DELETE versionId request. Requests that include x-amz-mfa must
+// use HTTPS.
+//
+// For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html).
+// To see sample requests that use versioning, see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete).
+//
+// You can delete objects by explicitly calling DELETE Object or configure its
+// lifecycle (PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html))
+// to enable Amazon S3 to remove them for you. If you want to block users or
+// accounts from removing or deleting objects from your bucket, you must deny
+// them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration
+// actions.
+//
+// The following action is related to DeleteObject:
+//
+// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteObject for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject
+func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) {
+ req, out := c.DeleteObjectRequest(input)
+ return out, req.Send()
+}
+
+// DeleteObjectWithContext is the same as DeleteObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteObjectWithContext(ctx aws.Context, input *DeleteObjectInput, opts ...request.Option) (*DeleteObjectOutput, error) {
+ req, out := c.DeleteObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
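+// Example (editor's sketch, not generated code): deleting a specific object
+// version from an MFA Delete enabled bucket, per the DeleteObject notes above.
+// The MFA field populates the x-amz-mfa request header. svc is an assumed
+// *s3.S3 client; the bucket, key, version ID, and MFA serial/code are
+// hypothetical placeholders.
+//
+//    out, err := svc.DeleteObject(&s3.DeleteObjectInput{
+//        Bucket:    aws.String("examplebucket"),
+//        Key:       aws.String("photos/2006/February/sample.jpg"),
+//        VersionId: aws.String("3HL4kqCxf3vjVBH40Nrjfkd"), // hypothetical version ID
+//        MFA:       aws.String("arn:aws:iam::123456789012:mfa/user 123456"),
+//    })
+//    if err != nil {
+//        // handle error
+//    }
+//    fmt.Println(aws.BoolValue(out.DeleteMarker)) // true if the deleted version was a delete marker
+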
+const opDeleteObjectTagging = "DeleteObjectTagging"
+
+// DeleteObjectTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteObjectTagging operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteObjectTagging for more information on using the DeleteObjectTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeleteObjectTaggingRequest method.
+// req, resp := client.DeleteObjectTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
+func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) {
+ op := &request.Operation{
+ Name: opDeleteObjectTagging,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}/{Key+}?tagging",
+ }
+
+ if input == nil {
+ input = &DeleteObjectTaggingInput{}
+ }
+
+ output = &DeleteObjectTaggingOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteObjectTagging API operation for Amazon Simple Storage Service.
+//
+// Removes the entire tag set from the specified object. For more information
+// about managing object tags, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
+//
+// To use this operation, you must have permission to perform the s3:DeleteObjectTagging
+// action.
+//
+// To delete tags of a specific object version, add the versionId query parameter
+// in the request. You will need permission for the s3:DeleteObjectVersionTagging
+// action.
+//
+// The following operations are related to DeleteObjectTagging:
+//
+// * PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html)
+//
+// * GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteObjectTagging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
+func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) {
+ req, out := c.DeleteObjectTaggingRequest(input)
+ return out, req.Send()
+}
+
+// DeleteObjectTaggingWithContext is the same as DeleteObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteObjectTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteObjectTaggingWithContext(ctx aws.Context, input *DeleteObjectTaggingInput, opts ...request.Option) (*DeleteObjectTaggingOutput, error) {
+ req, out := c.DeleteObjectTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
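+// Example (editor's sketch, not generated code): removing the tag set from one
+// version of an object via the versionId parameter described above. svc is an
+// assumed *s3.S3 client; the bucket, key, and version ID are hypothetical.
+//
+//    _, err := svc.DeleteObjectTagging(&s3.DeleteObjectTaggingInput{
+//        Bucket:    aws.String("examplebucket"),
+//        Key:       aws.String("exampleobject"),
+//        VersionId: aws.String("ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI"), // hypothetical
+//    })
+//    if err != nil {
+//        // handle error
+//    }
+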
+const opDeleteObjects = "DeleteObjects"
+
+// DeleteObjectsRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteObjects operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteObjects for more information on using the DeleteObjects
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeleteObjectsRequest method.
+// req, resp := client.DeleteObjectsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects
+func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) {
+ op := &request.Operation{
+ Name: opDeleteObjects,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}?delete",
+ }
+
+ if input == nil {
+ input = &DeleteObjectsInput{}
+ }
+
+ output = &DeleteObjectsOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
+ return
+}
+
+// DeleteObjects API operation for Amazon Simple Storage Service.
+//
+// This action enables you to delete multiple objects from a bucket using a
+// single HTTP request. If you know the object keys that you want to delete,
+// then this action provides a suitable alternative to sending individual delete
+// requests, reducing per-request overhead.
+//
+// The request contains a list of up to 1000 keys that you want to delete. In
+// the XML, you provide the object key names, and optionally, version IDs if
+// you want to delete a specific version of the object from a versioning-enabled
+// bucket. For each key, Amazon S3 performs a delete action and returns the
+// result of that delete, success, or failure, in the response. Note that if
+// the object specified in the request is not found, Amazon S3 returns the result
+// as deleted.
+//
+// The action supports two modes for the response: verbose and quiet. By default,
+// the action uses verbose mode in which the response includes the result of
+// deletion of each key in your request. In quiet mode the response includes
+// only keys where the delete action encountered an error. For a successful
+// deletion, the action does not return any information about the delete in
+// the response body.
+//
+// When using this action on an MFA Delete enabled bucket to delete any
+// versioned objects, you must include an MFA token. If you do
+// not provide one, the entire request will fail, even if there are non-versioned
+// objects you are trying to delete. If you provide an invalid token, whether
+// there are versioned keys in the request or not, the entire Multi-Object Delete
+// request will fail. For information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete).
+//
+// Finally, the Content-MD5 header is required for all Multi-Object Delete requests.
+// Amazon S3 uses the header value to ensure that your request body has not
+// been altered in transit.
+//
+// The following operations are related to DeleteObjects:
+//
+// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteObjects for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects
+func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) {
+ req, out := c.DeleteObjectsRequest(input)
+ return out, req.Send()
+}
+
+// DeleteObjectsWithContext is the same as DeleteObjects with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteObjects for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput, opts ...request.Option) (*DeleteObjectsOutput, error) {
+ req, out := c.DeleteObjectsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
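+// Example (editor's sketch, not generated code): a quiet-mode batch delete of
+// two keys, per the verbose/quiet discussion above. The required Content-MD5
+// header is added automatically by the contentMd5Handler registered in
+// DeleteObjectsRequest. svc is an assumed *s3.S3 client; all names are
+// hypothetical.
+//
+//    out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
+//        Bucket: aws.String("examplebucket"),
+//        Delete: &s3.Delete{
+//            Objects: []*s3.ObjectIdentifier{
+//                {Key: aws.String("objectkey1")},
+//                {Key: aws.String("objectkey2")},
+//            },
+//            Quiet: aws.Bool(true), // report only the keys that failed to delete
+//        },
+//    })
+//    if err != nil {
+//        // handle error
+//    }
+//    for _, e := range out.Errors {
+//        fmt.Println(aws.StringValue(e.Key), aws.StringValue(e.Message))
+//    }
+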
+const opDeletePublicAccessBlock = "DeletePublicAccessBlock"
+
+// DeletePublicAccessBlockRequest generates a "aws/request.Request" representing the
+// client's request for the DeletePublicAccessBlock operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeletePublicAccessBlock for more information on using the DeletePublicAccessBlock
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeletePublicAccessBlockRequest method.
+// req, resp := client.DeletePublicAccessBlockRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock
+func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput) (req *request.Request, output *DeletePublicAccessBlockOutput) {
+ op := &request.Operation{
+ Name: opDeletePublicAccessBlock,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?publicAccessBlock",
+ }
+
+ if input == nil {
+ input = &DeletePublicAccessBlockInput{}
+ }
+
+ output = &DeletePublicAccessBlockOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeletePublicAccessBlock API operation for Amazon Simple Storage Service.
+//
+// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use
+// this operation, you must have the s3:PutBucketPublicAccessBlock permission.
+// For more information about permissions, see Permissions Related to Bucket
+// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
+//
+// The following operations are related to DeletePublicAccessBlock:
+//
+// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html)
+//
+// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html)
+//
+// * PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html)
+//
+// * GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeletePublicAccessBlock for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock
+func (c *S3) DeletePublicAccessBlock(input *DeletePublicAccessBlockInput) (*DeletePublicAccessBlockOutput, error) {
+ req, out := c.DeletePublicAccessBlockRequest(input)
+ return out, req.Send()
+}
+
+// DeletePublicAccessBlockWithContext is the same as DeletePublicAccessBlock with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeletePublicAccessBlock for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeletePublicAccessBlockWithContext(ctx aws.Context, input *DeletePublicAccessBlockInput, opts ...request.Option) (*DeletePublicAccessBlockOutput, error) {
+ req, out := c.DeletePublicAccessBlockRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opExtendObjectRetention = "ExtendObjectRetention"
+
+// ExtendObjectRetentionRequest generates a "aws/request.Request" representing the
+// client's request for the ExtendObjectRetention operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ExtendObjectRetention for more information on using the ExtendObjectRetention
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the ExtendObjectRetentionRequest method.
+// req, resp := client.ExtendObjectRetentionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ExtendObjectRetention
+func (c *S3) ExtendObjectRetentionRequest(input *ExtendObjectRetentionInput) (req *request.Request, output *ExtendObjectRetentionOutput) {
+ op := &request.Operation{
+ Name: opExtendObjectRetention,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}/{Key+}?extendRetention",
+ }
+
+ if input == nil {
+ input = &ExtendObjectRetentionInput{}
+ }
+
+ output = &ExtendObjectRetentionOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// ExtendObjectRetention API operation for Amazon Simple Storage Service.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ExtendObjectRetention for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ExtendObjectRetention
+func (c *S3) ExtendObjectRetention(input *ExtendObjectRetentionInput) (*ExtendObjectRetentionOutput, error) {
+ req, out := c.ExtendObjectRetentionRequest(input)
+ return out, req.Send()
+}
+
+// ExtendObjectRetentionWithContext is the same as ExtendObjectRetention with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ExtendObjectRetention for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ExtendObjectRetentionWithContext(ctx aws.Context, input *ExtendObjectRetentionInput, opts ...request.Option) (*ExtendObjectRetentionOutput, error) {
+ req, out := c.ExtendObjectRetentionRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketAcl = "GetBucketAcl"
+
+// GetBucketAclRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketAcl operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetBucketAcl for more information on using the GetBucketAcl
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketAclRequest method.
+// req, resp := client.GetBucketAclRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl
+func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) {
+ op := &request.Operation{
+ Name: opGetBucketAcl,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?acl",
+ }
+
+ if input == nil {
+ input = &GetBucketAclInput{}
+ }
+
+ output = &GetBucketAclOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketAcl API operation for Amazon Simple Storage Service.
+//
+// This implementation of the GET action uses the acl subresource to return
+// the access control list (ACL) of a bucket. To use GET to return the ACL of
+// the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission
+// is granted to the anonymous user, you can return the ACL of the bucket without
+// using an authorization header.
+//
+// Related Resources
+//
+// * ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketAcl for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl
+func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) {
+ req, out := c.GetBucketAclRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketAclWithContext is the same as GetBucketAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketAclWithContext(ctx aws.Context, input *GetBucketAclInput, opts ...request.Option) (*GetBucketAclOutput, error) {
+ req, out := c.GetBucketAclRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
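+// Example (editor's sketch, not generated code): reading a bucket ACL and
+// listing its grants, per the READ_ACP requirement above. svc is an assumed
+// *s3.S3 client and the bucket name is hypothetical.
+//
+//    out, err := svc.GetBucketAcl(&s3.GetBucketAclInput{
+//        Bucket: aws.String("examplebucket"),
+//    })
+//    if err != nil {
+//        // handle error
+//    }
+//    for _, g := range out.Grants {
+//        fmt.Println(aws.StringValue(g.Permission), g.Grantee)
+//    }
+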
+const opGetBucketCors = "GetBucketCors"
+
+// GetBucketCorsRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketCors operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetBucketCors for more information on using the GetBucketCors
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketCorsRequest method.
+// req, resp := client.GetBucketCorsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors
+func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) {
+ op := &request.Operation{
+ Name: opGetBucketCors,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?cors",
+ }
+
+ if input == nil {
+ input = &GetBucketCorsInput{}
+ }
+
+ output = &GetBucketCorsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketCors API operation for Amazon Simple Storage Service.
+//
+// Returns the cors configuration information set for the bucket.
+//
+// To use this operation, you must have permission to perform the s3:GetBucketCORS
+// action. By default, the bucket owner has this permission and can grant it
+// to others.
+//
+// For more information about cors, see Enabling Cross-Origin Resource Sharing
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html).
+//
+// The following operations are related to GetBucketCors:
+//
+// * PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html)
+//
+// * DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketCors for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors
+func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) {
+ req, out := c.GetBucketCorsRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketCorsWithContext is the same as GetBucketCors with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketCors for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput, opts ...request.Option) (*GetBucketCorsOutput, error) {
+ req, out := c.GetBucketCorsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
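+// Example (editor's sketch, not generated code): inspecting the CORS rules
+// returned for a bucket, per the s3:GetBucketCORS notes above. svc is an
+// assumed *s3.S3 client; the bucket name is hypothetical.
+//
+//    out, err := svc.GetBucketCors(&s3.GetBucketCorsInput{
+//        Bucket: aws.String("examplebucket"),
+//    })
+//    if err != nil {
+//        // handle error
+//    }
+//    for _, rule := range out.CORSRules {
+//        fmt.Println(aws.StringValueSlice(rule.AllowedMethods),
+//            aws.StringValueSlice(rule.AllowedOrigins))
+//    }
+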
+const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration"
+
+// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketLifecycleConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetBucketLifecycleConfiguration for more information on using the GetBucketLifecycleConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketLifecycleConfigurationRequest method.
+// req, resp := client.GetBucketLifecycleConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration
+func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketLifecycleConfiguration,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?lifecycle",
+ }
+
+ if input == nil {
+ input = &GetBucketLifecycleConfigurationInput{}
+ }
+
+ output = &GetBucketLifecycleConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service.
+//
+//
+// Bucket lifecycle configuration now supports specifying a lifecycle rule using
+// an object key name prefix, one or more object tags, or a combination of both.
+// Accordingly, this section describes the latest API. The response describes
+// the new filter element that you can use to specify a filter to select a subset
+// of objects to which the rule applies. If you are using a previous version
+// of the lifecycle configuration, it still works. For the earlier action, see
+// GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html).
+//
+// Returns the lifecycle configuration information set on the bucket. For information
+// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html).
+//
+// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration
+// action. The bucket owner has this permission, by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
+//
+// GetBucketLifecycleConfiguration has the following special error:
+//
+// * Error code: NoSuchLifecycleConfiguration Description: The lifecycle
+// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault
+// Code Prefix: Client
+//
+// The following operations are related to GetBucketLifecycleConfiguration:
+//
+// * GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html)
+//
+// * PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)
+//
+// * DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketLifecycleConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration
+func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) {
+ req, out := c.GetBucketLifecycleConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketLifecycleConfigurationWithContext is the same as GetBucketLifecycleConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketLifecycleConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketLifecycleConfigurationWithContext(ctx aws.Context, input *GetBucketLifecycleConfigurationInput, opts ...request.Option) (*GetBucketLifecycleConfigurationOutput, error) {
+ req, out := c.GetBucketLifecycleConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
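+// Example (editor's sketch, not generated code): handling the special
+// NoSuchLifecycleConfiguration error described above via an awserr.Error type
+// assertion; awserr is assumed to be this SDK's aws/awserr package. svc is an
+// assumed *s3.S3 client; the bucket name is hypothetical.
+//
+//    out, err := svc.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
+//        Bucket: aws.String("examplebucket"),
+//    })
+//    if err != nil {
+//        if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NoSuchLifecycleConfiguration" {
+//            // the bucket has no lifecycle configuration
+//        }
+//        return
+//    }
+//    for _, rule := range out.Rules {
+//        fmt.Println(aws.StringValue(rule.ID), aws.StringValue(rule.Status))
+//    }
+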
+const opGetBucketLocation = "GetBucketLocation"
+
+// GetBucketLocationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketLocation operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetBucketLocation for more information on using the GetBucketLocation
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketLocationRequest method.
+// req, resp := client.GetBucketLocationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation
+func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketLocation,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?location",
+ }
+
+ if input == nil {
+ input = &GetBucketLocationInput{}
+ }
+
+ output = &GetBucketLocationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketLocation API operation for Amazon Simple Storage Service.
+//
+// Returns the Region the bucket resides in. You set the bucket's Region using
+// the LocationConstraint request parameter in a CreateBucket request. For more
+// information, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html).
+//
+// To use this implementation of the operation, you must be the bucket owner.
+//
+// The following operations are related to GetBucketLocation:
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketLocation for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation
+func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) {
+ req, out := c.GetBucketLocationRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketLocationWithContext is the same as GetBucketLocation with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketLocation for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketLocationWithContext(ctx aws.Context, input *GetBucketLocationInput, opts ...request.Option) (*GetBucketLocationOutput, error) {
+ req, out := c.GetBucketLocationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
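+// Example (editor's sketch, not generated code): reading the Region a bucket
+// resides in. On AWS S3 an empty LocationConstraint conventionally denotes
+// us-east-1; other deployments may differ. svc is an assumed *s3.S3 client;
+// the bucket name is hypothetical.
+//
+//    out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
+//        Bucket: aws.String("examplebucket"),
+//    })
+//    if err != nil {
+//        // handle error
+//    }
+//    fmt.Println(aws.StringValue(out.LocationConstraint))
+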
+const opGetBucketLogging = "GetBucketLogging"
+
+// GetBucketLoggingRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketLogging operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetBucketLogging for more information on using the GetBucketLogging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketLoggingRequest method.
+// req, resp := client.GetBucketLoggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging
+func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) {
+ op := &request.Operation{
+ Name: opGetBucketLogging,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?logging",
+ }
+
+ if input == nil {
+ input = &GetBucketLoggingInput{}
+ }
+
+ output = &GetBucketLoggingOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketLogging API operation for Amazon Simple Storage Service.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketLogging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging
+func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) {
+ req, out := c.GetBucketLoggingRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketLoggingWithContext is the same as GetBucketLogging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketLogging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketLoggingWithContext(ctx aws.Context, input *GetBucketLoggingInput, opts ...request.Option) (*GetBucketLoggingOutput, error) {
+ req, out := c.GetBucketLoggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketProtectionConfiguration = "GetBucketProtectionConfiguration"
+
+// GetBucketProtectionConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketProtectionConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetBucketProtectionConfiguration for more information on using the GetBucketProtectionConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketProtectionConfigurationRequest method.
+// req, resp := client.GetBucketProtectionConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketProtectionConfiguration
+func (c *S3) GetBucketProtectionConfigurationRequest(input *GetBucketProtectionConfigurationInput) (req *request.Request, output *GetBucketProtectionConfigurationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketProtectionConfiguration,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?protection",
+ }
+
+ if input == nil {
+ input = &GetBucketProtectionConfigurationInput{}
+ }
+
+ output = &GetBucketProtectionConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketProtectionConfiguration API operation for Amazon Simple Storage Service.
+//
+// Returns the protection configuration of a bucket. The EnablePermanentRetention
+// flag is only returned if it is set to true for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketProtectionConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketProtectionConfiguration
+func (c *S3) GetBucketProtectionConfiguration(input *GetBucketProtectionConfigurationInput) (*GetBucketProtectionConfigurationOutput, error) {
+ req, out := c.GetBucketProtectionConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketProtectionConfigurationWithContext is the same as GetBucketProtectionConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketProtectionConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketProtectionConfigurationWithContext(ctx aws.Context, input *GetBucketProtectionConfigurationInput, opts ...request.Option) (*GetBucketProtectionConfigurationOutput, error) {
+ req, out := c.GetBucketProtectionConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketReplication = "GetBucketReplication"
+
+// GetBucketReplicationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketReplication operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetBucketReplication for more information on using the GetBucketReplication
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketReplicationRequest method.
+// req, resp := client.GetBucketReplicationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
+func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketReplication,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?replication",
+ }
+
+ if input == nil {
+ input = &GetBucketReplicationInput{}
+ }
+
+ output = &GetBucketReplicationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketReplication API operation for Amazon Simple Storage Service.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketReplication for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
+func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) {
+ req, out := c.GetBucketReplicationRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketReplicationWithContext is the same as GetBucketReplication with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketReplication for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketReplicationWithContext(ctx aws.Context, input *GetBucketReplicationInput, opts ...request.Option) (*GetBucketReplicationOutput, error) {
+ req, out := c.GetBucketReplicationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketVersioning = "GetBucketVersioning"
+
+// GetBucketVersioningRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketVersioning operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetBucketVersioning for more information on using the GetBucketVersioning
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketVersioningRequest method.
+// req, resp := client.GetBucketVersioningRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning
+func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) {
+ op := &request.Operation{
+ Name: opGetBucketVersioning,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?versioning",
+ }
+
+ if input == nil {
+ input = &GetBucketVersioningInput{}
+ }
+
+ output = &GetBucketVersioningOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketVersioning API operation for Amazon Simple Storage Service.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketVersioning for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning
+func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) {
+ req, out := c.GetBucketVersioningRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketVersioningWithContext is the same as GetBucketVersioning with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketVersioning for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketVersioningWithContext(ctx aws.Context, input *GetBucketVersioningInput, opts ...request.Option) (*GetBucketVersioningOutput, error) {
+ req, out := c.GetBucketVersioningRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
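+// Example (editor's sketch, not generated code): reading a bucket's versioning
+// state, including whether MFA Delete is enabled. svc is an assumed *s3.S3
+// client; the bucket name is hypothetical.
+//
+//    out, err := svc.GetBucketVersioning(&s3.GetBucketVersioningInput{
+//        Bucket: aws.String("examplebucket"),
+//    })
+//    if err != nil {
+//        // handle error
+//    }
+//    fmt.Println(aws.StringValue(out.Status), aws.StringValue(out.MFADelete))
+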
+const opGetBucketWebsite = "GetBucketWebsite"
+
+// GetBucketWebsiteRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketWebsite operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetBucketWebsite for more information on using the GetBucketWebsite
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketWebsiteRequest method.
+// req, resp := client.GetBucketWebsiteRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite
+func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) {
+ op := &request.Operation{
+ Name: opGetBucketWebsite,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?website",
+ }
+
+ if input == nil {
+ input = &GetBucketWebsiteInput{}
+ }
+
+ output = &GetBucketWebsiteOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketWebsite API operation for Amazon Simple Storage Service.
+//
+// Returns the website configuration for a bucket. To host a website on Amazon
+// S3, you can configure a bucket as a website by adding a website configuration.
+// For more information about hosting websites, see Hosting Websites on Amazon
+// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html).
+//
+// This GET action requires the S3:GetBucketWebsite permission. By default,
+// only the bucket owner can read the bucket website configuration. However,
+// bucket owners can allow other users to read the website configuration by
+// writing a bucket policy granting them the S3:GetBucketWebsite permission.
+//
+// The following operations are related to GetBucketWebsite:
+//
+// * DeleteBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html)
+//
+// * PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketWebsite for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite
+func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) {
+ req, out := c.GetBucketWebsiteRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketWebsiteWithContext is the same as GetBucketWebsite with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketWebsite for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketWebsiteWithContext(ctx aws.Context, input *GetBucketWebsiteInput, opts ...request.Option) (*GetBucketWebsiteOutput, error) {
+ req, out := c.GetBucketWebsiteRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
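+
+// A hypothetical sketch of reading the website configuration described
+// above; "examplebucket" is a placeholder and error handling is elided:
+//
+// out, _ := svc.GetBucketWebsite(&s3.GetBucketWebsiteInput{
+//     Bucket: aws.String("examplebucket"),
+// })
+// if out.IndexDocument != nil {
+//     fmt.Println("index document suffix:", *out.IndexDocument.Suffix)
+// }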
+
+const opGetObject = "GetObject"
+
+// GetObjectRequest generates a "aws/request.Request" representing the
+// client's request for the GetObject operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetObject for more information on using the GetObject
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetObjectRequest method.
+// req, resp := client.GetObjectRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject
+func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) {
+ op := &request.Operation{
+ Name: opGetObject,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &GetObjectInput{}
+ }
+
+ output = &GetObjectOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetObject API operation for Amazon Simple Storage Service.
+//
+// Retrieves objects from Amazon S3. To use GET, you must have READ access to
+// the object. If you grant READ access to the anonymous user, you can return
+// the object without using an authorization header.
+//
+// An Amazon S3 bucket has no directory hierarchy such as you would find in
+// a typical computer file system. You can, however, create a logical hierarchy
+// by using object key names that imply a folder structure. For example, instead
+// of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg.
+//
+// To get an object from such a logical hierarchy, specify the full key name
+// for the object in the GET operation. For a virtual hosted-style request example,
+// if you have the object photos/2006/February/sample.jpg, specify the resource
+// as /photos/2006/February/sample.jpg. For a path-style request example, if
+// you have the object photos/2006/February/sample.jpg in the bucket named examplebucket,
+// specify the resource as /examplebucket/photos/2006/February/sample.jpg. For
+// more information about request types, see HTTP Host Header Bucket Specification
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket).
+//
+// To distribute large files to many people, you can save bandwidth costs by
+// using BitTorrent. For more information, see Amazon S3 Torrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html).
+// For more information about returning the ACL of an object, see GetObjectAcl
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html).
+//
+// If the object you are retrieving is stored in the S3 Glacier or S3 Glacier
+// Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering
+// Deep Archive tiers, before you can retrieve the object you must first restore
+// a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html).
+// Otherwise, this action returns an InvalidObjectStateError error. For information
+// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html).
+//
+// Encryption request headers, like x-amz-server-side-encryption, should not
+// be sent for GET requests if your object uses server-side encryption with
+// CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed
+// encryption keys (SSE-S3). If your object does use these types of keys, you’ll
+// get an HTTP 400 BadRequest error.
+//
+// If you encrypt an object by using server-side encryption with customer-provided
+// encryption keys (SSE-C) when you store the object in Amazon S3, then when
+// you GET the object, you must use the following headers:
+//
+// * x-amz-server-side-encryption-customer-algorithm
+//
+// * x-amz-server-side-encryption-customer-key
+//
+// * x-amz-server-side-encryption-customer-key-MD5
+//
+// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided
+// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+//
+// Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging
+// action), the response also returns the x-amz-tagging-count header that provides
+// the count of number of tags associated with the object. You can use GetObjectTagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
+// to retrieve the tag set associated with an object.
+//
+// Permissions
+//
+// You need the s3:GetObject permission for this operation. For more information,
+// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
+// If the object you request does not exist, the error Amazon S3 returns depends
+// on whether you also have the s3:ListBucket permission.
+//
+// * If you have the s3:ListBucket permission on the bucket, Amazon S3 will
+// return an HTTP status code 404 ("no such key") error.
+//
+// * If you don’t have the s3:ListBucket permission, Amazon S3 will return
+// an HTTP status code 403 ("access denied") error.
+//
+// Versioning
+//
+// By default, the GET action returns the current version of an object. To return
+// a different version, use the versionId subresource.
+//
+// If the current version of the object is a delete marker, Amazon S3 behaves
+// as if the object was deleted and includes x-amz-delete-marker: true in the
+// response.
+//
+// For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html).
+//
+// Overriding Response Header Values
+//
+// There are times when you want to override certain response header values
+// in a GET response. For example, you might override the Content-Disposition
+// response header value in your GET request.
+//
+// You can override values for a set of response headers using the following
+// query parameters. These response header values are sent only on a successful
+// request, that is, when status code 200 OK is returned. The set of headers
+// you can override using these parameters is a subset of the headers that Amazon
+// S3 accepts when you create an object. The response headers that you can override
+// for the GET response are Content-Type, Content-Language, Expires, Cache-Control,
+// Content-Disposition, and Content-Encoding. To override these header values
+// in the GET response, you use the following request parameters.
+//
+// You must sign the request, either using an Authorization header or a presigned
+// URL, when using these parameters. They cannot be used with an unsigned (anonymous)
+// request.
+//
+// * response-content-type
+//
+// * response-content-language
+//
+// * response-expires
+//
+// * response-cache-control
+//
+// * response-content-disposition
+//
+// * response-content-encoding
+//
+// Additional Considerations about Request Headers
+//
+// If both the If-Match and If-Unmodified-Since headers are present in the
+// request, and the If-Match condition evaluates to true while the If-Unmodified-Since
+// condition evaluates to false, then S3 returns 200 OK and the data requested.
+//
+// If both the If-None-Match and If-Modified-Since headers are present in
+// the request, and the If-None-Match condition evaluates to false while the
+// If-Modified-Since condition evaluates to true, then S3 returns a 304 Not
+// Modified response code.
+//
+// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
+//
+// The following operations are related to GetObject:
+//
+// * ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html)
+//
+// * GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObject for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchKey "NoSuchKey"
+// The specified key does not exist.
+//
+// * ErrCodeInvalidObjectState "InvalidObjectState"
+// Object is archived and inaccessible until restored.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject
+func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) {
+ req, out := c.GetObjectRequest(input)
+ return out, req.Send()
+}
+
+// GetObjectWithContext is the same as GetObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectWithContext(ctx aws.Context, input *GetObjectInput, opts ...request.Option) (*GetObjectOutput, error) {
+ req, out := c.GetObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
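+
+// A hypothetical sketch combining a context timeout with one of the
+// response-header overrides described above; the bucket, key, timeout, and
+// svc client are placeholders, and context/time/io imports are assumed:
+//
+// ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+// defer cancel()
+// out, err := svc.GetObjectWithContext(ctx, &s3.GetObjectInput{
+//     Bucket:              aws.String("examplebucket"),
+//     Key:                 aws.String("photos/2006/February/sample.jpg"),
+//     ResponseContentType: aws.String("application/octet-stream"),
+// })
+// if err != nil {
+//     return err
+// }
+// defer out.Body.Close()
+// data, err := io.ReadAll(out.Body) // stream and buffer the object body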
+
+const opGetObjectAcl = "GetObjectAcl"
+
+// GetObjectAclRequest generates a "aws/request.Request" representing the
+// client's request for the GetObjectAcl operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetObjectAcl for more information on using the GetObjectAcl
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetObjectAclRequest method.
+// req, resp := client.GetObjectAclRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl
+func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) {
+ op := &request.Operation{
+ Name: opGetObjectAcl,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}?acl",
+ }
+
+ if input == nil {
+ input = &GetObjectAclInput{}
+ }
+
+ output = &GetObjectAclOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetObjectAcl API operation for Amazon Simple Storage Service.
+//
+// Returns the access control list (ACL) of an object. To use this operation,
+// you must have READ_ACP access to the object.
+//
+// This action is not supported by Amazon S3 on Outposts.
+//
+// Versioning
+//
+// By default, GET returns ACL information about the current version of an object.
+// To return ACL information about a different version, use the versionId subresource.
+//
+// The following operations are related to GetObjectAcl:
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
+//
+// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObjectAcl for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchKey "NoSuchKey"
+// The specified key does not exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl
+func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) {
+ req, out := c.GetObjectAclRequest(input)
+ return out, req.Send()
+}
+
+// GetObjectAclWithContext is the same as GetObjectAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObjectAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, opts ...request.Option) (*GetObjectAclOutput, error) {
+ req, out := c.GetObjectAclRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
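+
+// A hypothetical sketch of inspecting the grants returned by GetObjectAcl;
+// names are placeholders and error handling is elided:
+//
+// out, _ := svc.GetObjectAcl(&s3.GetObjectAclInput{
+//     Bucket: aws.String("examplebucket"),
+//     Key:    aws.String("sample.jpg"),
+// })
+// for _, grant := range out.Grants {
+//     fmt.Println("permission:", aws.StringValue(grant.Permission))
+// }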
+
+const opGetObjectTagging = "GetObjectTagging"
+
+// GetObjectTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the GetObjectTagging operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetObjectTagging for more information on using the GetObjectTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetObjectTaggingRequest method.
+// req, resp := client.GetObjectTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging
+func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) {
+ op := &request.Operation{
+ Name: opGetObjectTagging,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}?tagging",
+ }
+
+ if input == nil {
+ input = &GetObjectTaggingInput{}
+ }
+
+ output = &GetObjectTaggingOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetObjectTagging API operation for Amazon Simple Storage Service.
+//
+// Returns the tag-set of an object. You send the GET request against the tagging
+// subresource associated with the object.
+//
+// To use this operation, you must have permission to perform the s3:GetObjectTagging
+// action. By default, the GET action returns information about the current
+// version of an object. For a versioned bucket, you can have multiple versions of an
+// object in your bucket. To retrieve tags of any other version, use the versionId
+// query parameter. You also need permission for the s3:GetObjectVersionTagging
+// action.
+//
+// By default, the bucket owner has this permission and can grant this permission
+// to others.
+//
+// For information about the Amazon S3 object tagging feature, see Object Tagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
+//
+// The following actions are related to GetObjectTagging:
+//
+// * PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html)
+//
+// * DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObjectTagging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging
+func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) {
+ req, out := c.GetObjectTaggingRequest(input)
+ return out, req.Send()
+}
+
+// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObjectTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) {
+ req, out := c.GetObjectTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
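+
+// A hypothetical sketch of reading the tag set of a specific object version,
+// per the versionId behavior described above; all names are placeholders:
+//
+// out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
+//     Bucket:    aws.String("examplebucket"),
+//     Key:       aws.String("sample.jpg"),
+//     VersionId: aws.String("exampleVersionId"),
+// })
+// if err == nil {
+//     for _, tag := range out.TagSet {
+//         fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
+//     }
+// }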
+
+const opGetPublicAccessBlock = "GetPublicAccessBlock"
+
+// GetPublicAccessBlockRequest generates a "aws/request.Request" representing the
+// client's request for the GetPublicAccessBlock operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetPublicAccessBlock for more information on using the GetPublicAccessBlock
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetPublicAccessBlockRequest method.
+// req, resp := client.GetPublicAccessBlockRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock
+func (c *S3) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req *request.Request, output *GetPublicAccessBlockOutput) {
+ op := &request.Operation{
+ Name: opGetPublicAccessBlock,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?publicAccessBlock",
+ }
+
+ if input == nil {
+ input = &GetPublicAccessBlockInput{}
+ }
+
+ output = &GetPublicAccessBlockOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetPublicAccessBlock API operation for Amazon Simple Storage Service.
+//
+// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To
+// use this operation, you must have the s3:GetBucketPublicAccessBlock permission.
+// For more information about Amazon S3 permissions, see Specifying Permissions
+// in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
+//
+// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket
+// or an object, it checks the PublicAccessBlock configuration for both the
+// bucket (or the bucket that contains the object) and the bucket owner's account.
+// If the PublicAccessBlock settings are different between the bucket and the
+// account, Amazon S3 uses the most restrictive combination of the bucket-level
+// and account-level settings.
+//
+// For more information about when Amazon S3 considers a bucket or an object
+// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status).
+//
+// The following operations are related to GetPublicAccessBlock:
+//
+// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html)
+//
+// * PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html)
+//
+// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html)
+//
+// * DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetPublicAccessBlock for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock
+func (c *S3) GetPublicAccessBlock(input *GetPublicAccessBlockInput) (*GetPublicAccessBlockOutput, error) {
+ req, out := c.GetPublicAccessBlockRequest(input)
+ return out, req.Send()
+}
+
+// GetPublicAccessBlockWithContext is the same as GetPublicAccessBlock with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetPublicAccessBlock for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetPublicAccessBlockWithContext(ctx aws.Context, input *GetPublicAccessBlockInput, opts ...request.Option) (*GetPublicAccessBlockOutput, error) {
+ req, out := c.GetPublicAccessBlockRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
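+
+// A hypothetical sketch of checking the bucket-level public-access settings
+// described above; "examplebucket" is a placeholder and error handling is
+// elided:
+//
+// out, _ := svc.GetPublicAccessBlock(&s3.GetPublicAccessBlockInput{
+//     Bucket: aws.String("examplebucket"),
+// })
+// cfg := out.PublicAccessBlockConfiguration
+// fmt.Println("block public ACLs:", aws.BoolValue(cfg.BlockPublicAcls))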
+
+const opHeadBucket = "HeadBucket"
+
+// HeadBucketRequest generates a "aws/request.Request" representing the
+// client's request for the HeadBucket operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See HeadBucket for more information on using the HeadBucket
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the HeadBucketRequest method.
+// req, resp := client.HeadBucketRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket
+func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) {
+ op := &request.Operation{
+ Name: opHeadBucket,
+ HTTPMethod: "HEAD",
+ HTTPPath: "/{Bucket}",
+ }
+
+ if input == nil {
+ input = &HeadBucketInput{}
+ }
+
+ output = &HeadBucketOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// HeadBucket API operation for Amazon Simple Storage Service.
+//
+// This action is useful to determine if a bucket exists and you have permission
+// to access it. The action returns a 200 OK if the bucket exists and you have
+// permission to access it.
+//
+// If the bucket does not exist or you do not have permission to access it,
+// the HEAD request returns a generic 404 Not Found or 403 Forbidden code. A
+// message body is not included, so you cannot determine the exception beyond
+// these error codes.
+//
+// To use this operation, you must have permissions to perform the s3:ListBucket
+// action. The bucket owner has this permission by default and can grant this
+// permission to others. For more information about permissions, see Permissions
+// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation HeadBucket for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchBucket "NoSuchBucket"
+// The specified bucket does not exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket
+func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) {
+ req, out := c.HeadBucketRequest(input)
+ return out, req.Send()
+}
+
+// HeadBucketWithContext is the same as HeadBucket with the addition of
+// the ability to pass a context and additional request options.
+//
+// See HeadBucket for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) HeadBucketWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.Option) (*HeadBucketOutput, error) {
+ req, out := c.HeadBucketRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
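+
+// A hypothetical existence check built on the 404/403 behavior described
+// above; the awserr import and bucket name are assumptions:
+//
+// _, err := svc.HeadBucket(&s3.HeadBucketInput{
+//     Bucket: aws.String("examplebucket"),
+// })
+// if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 404 {
+//     fmt.Println("bucket does not exist")
+// }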
+
+const opHeadObject = "HeadObject"
+
+// HeadObjectRequest generates a "aws/request.Request" representing the
+// client's request for the HeadObject operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See HeadObject for more information on using the HeadObject
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the HeadObjectRequest method.
+// req, resp := client.HeadObjectRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject
+func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) {
+ op := &request.Operation{
+ Name: opHeadObject,
+ HTTPMethod: "HEAD",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &HeadObjectInput{}
+ }
+
+ output = &HeadObjectOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// HeadObject API operation for Amazon Simple Storage Service.
+//
+// The HEAD action retrieves metadata from an object without returning the object
+// itself. This action is useful if you're only interested in an object's metadata.
+// To use HEAD, you must have READ access to the object.
+//
+// A HEAD request has the same options as a GET action on an object. The response
+// is identical to the GET response except that there is no response body. Because
+// of this, if the HEAD request generates an error, it returns a generic 404
+// Not Found or 403 Forbidden code. It is not possible to retrieve the exact
+// exception beyond these error codes.
+//
+// If you encrypt an object by using server-side encryption with customer-provided
+// encryption keys (SSE-C) when you store the object in Amazon S3, then when
+// you retrieve the metadata from the object, you must use the following headers:
+//
+// * x-amz-server-side-encryption-customer-algorithm
+//
+// * x-amz-server-side-encryption-customer-key
+//
+// * x-amz-server-side-encryption-customer-key-MD5
+//
+// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided
+// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+//
+// * Encryption request headers, like x-amz-server-side-encryption, should
+// not be sent for GET requests if your object uses server-side encryption
+// with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon
+// S3–managed encryption keys (SSE-S3). If your object does use these types
+// of keys, you’ll get an HTTP 400 BadRequest error.
+//
+// * The last modified property in this case is the creation date of the
+// object.
+//
+// Request headers are limited to 8 KB in size. For more information, see Common
+// Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html).
+//
+// Consider the following when using request headers:
+//
+// * Consideration 1 – If both the If-Match and If-Unmodified-Since
+// headers are present in the request, and the If-Match condition evaluates
+// to true while the If-Unmodified-Since condition evaluates to false, then
+// Amazon S3 returns 200 OK and the data requested.
+//
+// * Consideration 2 – If both the If-None-Match and If-Modified-Since
+// headers are present in the request, and the If-None-Match condition
+// evaluates to false while the If-Modified-Since condition evaluates to
+// true, then Amazon S3 returns the 304 Not Modified response code.
+//
+// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
+//
+// Permissions
+//
+// You need the s3:GetObject permission for this operation. For more information,
+// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
+// If the object you request does not exist, the error Amazon S3 returns depends
+// on whether you also have the s3:ListBucket permission.
+//
+// * If you have the s3:ListBucket permission on the bucket, Amazon S3 returns
+// an HTTP status code 404 ("no such key") error.
+//
+// * If you don’t have the s3:ListBucket permission, Amazon S3 returns
+// an HTTP status code 403 ("access denied") error.
+//
+// The following action is related to HeadObject:
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses
+// for more information on returned errors.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation HeadObject for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject
+func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) {
+ req, out := c.HeadObjectRequest(input)
+ return out, req.Send()
+}
+
+// HeadObjectWithContext is the same as HeadObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See HeadObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) HeadObjectWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.Option) (*HeadObjectOutput, error) {
+ req, out := c.HeadObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
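+
+// A hypothetical sketch of fetching object metadata without the body;
+// names are placeholders and error handling is elided:
+//
+// out, _ := svc.HeadObject(&s3.HeadObjectInput{
+//     Bucket: aws.String("examplebucket"),
+//     Key:    aws.String("sample.jpg"),
+// })
+// fmt.Println("size:", aws.Int64Value(out.ContentLength),
+//     "last modified:", aws.TimeValue(out.LastModified))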
+
+const opListBuckets = "ListBuckets"
+
+// ListBucketsRequest generates a "aws/request.Request" representing the
+// client's request for the ListBuckets operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListBuckets for more information on using the ListBuckets
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ListBucketsRequest method.
+// req, resp := client.ListBucketsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets
+func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) {
+ op := &request.Operation{
+ Name: opListBuckets,
+ HTTPMethod: "GET",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListBucketsInput{}
+ }
+
+ output = &ListBucketsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListBuckets API operation for Amazon Simple Storage Service.
+//
+// Returns a list of all buckets owned by the authenticated sender of the request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListBuckets for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets
+func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) {
+ req, out := c.ListBucketsRequest(input)
+ return out, req.Send()
+}
+
+// ListBucketsWithContext is the same as ListBuckets with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListBuckets for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, opts ...request.Option) (*ListBucketsOutput, error) {
+ req, out := c.ListBucketsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
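+
+// A hypothetical sketch of listing all buckets owned by the authenticated
+// sender; error handling is elided:
+//
+// out, _ := svc.ListBuckets(&s3.ListBucketsInput{})
+// for _, b := range out.Buckets {
+//     fmt.Println(aws.StringValue(b.Name), aws.TimeValue(b.CreationDate))
+// }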
+
+const opListBucketsExtended = "ListBucketsExtended"
+
+// ListBucketsExtendedRequest generates a "aws/request.Request" representing the
+// client's request for the ListBucketsExtended operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListBucketsExtended for more information on using the ListBucketsExtended
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ListBucketsExtendedRequest method.
+// req, resp := client.ListBucketsExtendedRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsExtended
+func (c *S3) ListBucketsExtendedRequest(input *ListBucketsExtendedInput) (req *request.Request, output *ListBucketsExtendedOutput) {
+ op := &request.Operation{
+ Name: opListBucketsExtended,
+ HTTPMethod: "GET",
+ HTTPPath: "/?extended",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"Buckets[-1].Name"},
+ LimitToken: "MaxKeys",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListBucketsExtendedInput{}
+ }
+
+ output = &ListBucketsExtendedOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListBucketsExtended API operation for Amazon Simple Storage Service.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListBucketsExtended for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsExtended
+func (c *S3) ListBucketsExtended(input *ListBucketsExtendedInput) (*ListBucketsExtendedOutput, error) {
+ req, out := c.ListBucketsExtendedRequest(input)
+ return out, req.Send()
+}
+
+// ListBucketsExtendedWithContext is the same as ListBucketsExtended with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListBucketsExtended for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListBucketsExtendedWithContext(ctx aws.Context, input *ListBucketsExtendedInput, opts ...request.Option) (*ListBucketsExtendedOutput, error) {
+ req, out := c.ListBucketsExtendedRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListBucketsExtendedPages iterates over the pages of a ListBucketsExtended operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListBucketsExtended method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListBucketsExtended operation.
+// pageNum := 0
+// err := client.ListBucketsExtendedPages(params,
+// func(page *s3.ListBucketsExtendedOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *S3) ListBucketsExtendedPages(input *ListBucketsExtendedInput, fn func(*ListBucketsExtendedOutput, bool) bool) error {
+ return c.ListBucketsExtendedPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListBucketsExtendedPagesWithContext same as ListBucketsExtendedPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListBucketsExtendedPagesWithContext(ctx aws.Context, input *ListBucketsExtendedInput, fn func(*ListBucketsExtendedOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListBucketsExtendedInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListBucketsExtendedRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*ListBucketsExtendedOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
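+
+// A hypothetical sketch of walking every page of extended bucket listings;
+// only the callback signature documented above is assumed:
+//
+// err := svc.ListBucketsExtendedPages(&s3.ListBucketsExtendedInput{},
+//     func(page *s3.ListBucketsExtendedOutput, lastPage bool) bool {
+//         fmt.Println(page)
+//         return true // keep iterating until the last page
+//     })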
+
+const opListLegalHolds = "ListLegalHolds"
+
+// ListLegalHoldsRequest generates a "aws/request.Request" representing the
+// client's request for the ListLegalHolds operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListLegalHolds for more information on using the ListLegalHolds
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ListLegalHoldsRequest method.
+// req, resp := client.ListLegalHoldsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListLegalHolds
+func (c *S3) ListLegalHoldsRequest(input *ListLegalHoldsInput) (req *request.Request, output *ListLegalHoldsOutput) {
+ op := &request.Operation{
+ Name: opListLegalHolds,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}?legalHold",
+ }
+
+ if input == nil {
+ input = &ListLegalHoldsInput{}
+ }
+
+ output = &ListLegalHoldsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListLegalHolds API operation for Amazon Simple Storage Service.
+//
+// Returns a list of legal holds on an object.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListLegalHolds for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListLegalHolds
+func (c *S3) ListLegalHolds(input *ListLegalHoldsInput) (*ListLegalHoldsOutput, error) {
+ req, out := c.ListLegalHoldsRequest(input)
+ return out, req.Send()
+}
+
+// ListLegalHoldsWithContext is the same as ListLegalHolds with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListLegalHolds for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListLegalHoldsWithContext(ctx aws.Context, input *ListLegalHoldsInput, opts ...request.Option) (*ListLegalHoldsOutput, error) {
+ req, out := c.ListLegalHoldsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opListMultipartUploads = "ListMultipartUploads"
+
+// ListMultipartUploadsRequest generates a "aws/request.Request" representing the
+// client's request for the ListMultipartUploads operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListMultipartUploads for more information on using the ListMultipartUploads
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ListMultipartUploadsRequest method.
+// req, resp := client.ListMultipartUploadsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads
+func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) {
+ op := &request.Operation{
+ Name: opListMultipartUploads,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?uploads",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"KeyMarker", "UploadIdMarker"},
+ OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"},
+ LimitToken: "MaxUploads",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListMultipartUploadsInput{}
+ }
+
+ output = &ListMultipartUploadsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListMultipartUploads API operation for Amazon Simple Storage Service.
+//
+// This action lists in-progress multipart uploads. An in-progress multipart
+// upload is a multipart upload that has been initiated using the Initiate Multipart
+// Upload request, but has not yet been completed or aborted.
+//
+// This action returns at most 1,000 multipart uploads in the response. 1,000
+// multipart uploads is the maximum number of uploads a response can include,
+// which is also the default value. You can further limit the number of uploads
+// in a response by specifying the max-uploads parameter in the request. If
+// additional multipart uploads satisfy the list criteria, the response will
+// contain an IsTruncated element with the value true. To list the additional
+// multipart uploads, use the key-marker and upload-id-marker request parameters.
+//
+// In the response, the uploads are sorted by key. If your application has initiated
+// more than one multipart upload using the same object key, then uploads in
+// the response are first sorted by key. Additionally, uploads are sorted in
+// ascending order within each key by the upload initiation time.
+//
+// For more information on multipart uploads, see Uploading Objects Using Multipart
+// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html).
+//
+// For information on permissions required to use the multipart upload API,
+// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
+//
+// The following operations are related to ListMultipartUploads:
+//
+// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListMultipartUploads for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads
+func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) {
+ req, out := c.ListMultipartUploadsRequest(input)
+ return out, req.Send()
+}
+
+// ListMultipartUploadsWithContext is the same as ListMultipartUploads with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListMultipartUploads for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipartUploadsInput, opts ...request.Option) (*ListMultipartUploadsOutput, error) {
+ req, out := c.ListMultipartUploadsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListMultipartUploads method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListMultipartUploads operation.
+// pageNum := 0
+// err := client.ListMultipartUploadsPages(params,
+// func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool) error {
+ return c.ListMultipartUploadsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListMultipartUploadsPagesWithContext same as ListMultipartUploadsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListMultipartUploadsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListMultipartUploadsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
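+
+// A hypothetical sketch of enumerating in-progress uploads page by page,
+// per the key-sorted ordering described above; "examplebucket" is a
+// placeholder:
+//
+// err := svc.ListMultipartUploadsPages(
+//     &s3.ListMultipartUploadsInput{Bucket: aws.String("examplebucket")},
+//     func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
+//         for _, u := range page.Uploads {
+//             fmt.Println(aws.StringValue(u.Key), aws.StringValue(u.UploadId))
+//         }
+//         return true
+//     })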
+
+const opListObjectVersions = "ListObjectVersions"
+
+// ListObjectVersionsRequest generates a "aws/request.Request" representing the
+// client's request for the ListObjectVersions operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListObjectVersions for more information on using the ListObjectVersions
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ListObjectVersionsRequest method.
+// req, resp := client.ListObjectVersionsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions
+func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) {
+ op := &request.Operation{
+ Name: opListObjectVersions,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?versions",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"KeyMarker", "VersionIdMarker"},
+ OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"},
+ LimitToken: "MaxKeys",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListObjectVersionsInput{}
+ }
+
+ output = &ListObjectVersionsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListObjectVersions API operation for Amazon Simple Storage Service.
+//
+// Returns metadata about all versions of the objects in a bucket. You can also
+// use request parameters as selection criteria to return metadata about a subset
+// of all the object versions.
+//
+// To use this operation, you must have permissions to perform the s3:ListBucketVersions
+// action. Be aware of the name difference.
+//
+// A 200 OK response can contain valid or invalid XML. Make sure to design your
+// application to parse the contents of the response and handle it appropriately.
+//
+// To use this operation, you must have READ access to the bucket.
+//
+// This action is not supported by Amazon S3 on Outposts.
+//
+// The following operations are related to ListObjectVersions:
+//
+// * ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html)
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListObjectVersions for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions
+func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) {
+ req, out := c.ListObjectVersionsRequest(input)
+ return out, req.Send()
+}
+
+// ListObjectVersionsWithContext is the same as ListObjectVersions with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListObjectVersions for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVersionsInput, opts ...request.Option) (*ListObjectVersionsOutput, error) {
+ req, out := c.ListObjectVersionsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListObjectVersions method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListObjectVersions operation.
+// pageNum := 0
+// err := client.ListObjectVersionsPages(params,
+// func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool) error {
+ return c.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListObjectVersionsPagesWithContext same as ListObjectVersionsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListObjectVersionsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListObjectVersionsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
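+
+// A minimal illustrative sketch of paging through the versions of a key with
+// ListObjectVersionsPages. The bucket and prefix values are placeholders, and
+// the input/output field names assume the types defined later in this file.
+//
+// var versionIDs []string
+// err := client.ListObjectVersionsPages(
+//     &s3.ListObjectVersionsInput{
+//         Bucket: aws.String("example-bucket"),
+//         Prefix: aws.String("example-key"),
+//     },
+//     func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
+//         for _, v := range page.Versions {
+//             // Collect each recorded version ID for the key.
+//             versionIDs = append(versionIDs, aws.StringValue(v.VersionId))
+//         }
+//         return true // keep paging until the listing is no longer truncated
+//     })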
+
+const opListObjects = "ListObjects"
+
+// ListObjectsRequest generates a "aws/request.Request" representing the
+// client's request for the ListObjects operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListObjects for more information on using the ListObjects
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ListObjectsRequest method.
+// req, resp := client.ListObjectsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects
+func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) {
+ op := &request.Operation{
+ Name: opListObjects,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"NextMarker || Contents[-1].Key"},
+ LimitToken: "MaxKeys",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListObjectsInput{}
+ }
+
+ output = &ListObjectsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListObjects API operation for Amazon Simple Storage Service.
+//
+// Returns some or all (up to 1,000) of the objects in a bucket. You can use
+// the request parameters as selection criteria to return a subset of the objects
+// in a bucket. A 200 OK response can contain valid or invalid XML. Be sure
+// to design your application to parse the contents of the response and handle
+// it appropriately.
+//
+// This action has been revised. We recommend that you use the newer version,
+// ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html),
+// when developing applications. For backward compatibility, Amazon S3 continues
+// to support ListObjects.
+//
+// The following operations are related to ListObjects:
+//
+// * ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html)
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// * ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListObjects for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchBucket "NoSuchBucket"
+// The specified bucket does not exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects
+func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) {
+ req, out := c.ListObjectsRequest(input)
+ return out, req.Send()
+}
+
+// ListObjectsWithContext is the same as ListObjects with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListObjects for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, opts ...request.Option) (*ListObjectsOutput, error) {
+ req, out := c.ListObjectsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListObjectsPages iterates over the pages of a ListObjects operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListObjects method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListObjects operation.
+// pageNum := 0
+// err := client.ListObjectsPages(params,
+// func(page *s3.ListObjectsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool) error {
+ return c.ListObjectsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListObjectsPagesWithContext same as ListObjectsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListObjectsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListObjectsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
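+
+// A minimal illustrative sketch of a single bounded ListObjects call. As noted
+// above, new code should prefer ListObjectsV2; this shape is only for callers
+// that must stay on the older API. All values are placeholders.
+//
+// out, err := client.ListObjects(&s3.ListObjectsInput{
+//     Bucket:  aws.String("example-bucket"),
+//     Prefix:  aws.String("logs/"),
+//     MaxKeys: aws.Int64(100), // cap the page at 100 keys
+// })
+// if err == nil {
+//     for _, obj := range out.Contents {
+//         fmt.Println(aws.StringValue(obj.Key))
+//     }
+// }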
+
+const opListObjectsV2 = "ListObjectsV2"
+
+// ListObjectsV2Request generates a "aws/request.Request" representing the
+// client's request for the ListObjectsV2 operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListObjectsV2 for more information on using the ListObjectsV2
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ListObjectsV2Request method.
+// req, resp := client.ListObjectsV2Request(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2
+func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) {
+ op := &request.Operation{
+ Name: opListObjectsV2,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?list-type=2",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"ContinuationToken"},
+ OutputTokens: []string{"NextContinuationToken"},
+ LimitToken: "MaxKeys",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListObjectsV2Input{}
+ }
+
+ output = &ListObjectsV2Output{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListObjectsV2 API operation for Amazon Simple Storage Service.
+//
+// Returns some or all (up to 1,000) of the objects in a bucket. You can use
+// the request parameters as selection criteria to return a subset of the objects
+// in a bucket. A 200 OK response can contain valid or invalid XML. Make sure
+// to design your application to parse the contents of the response and handle
+// it appropriately. Objects are returned sorted in an ascending order of the
+// respective key names in the list.
+//
+// To use this operation, you must have READ access to the bucket.
+//
+// To use this action in an AWS Identity and Access Management (IAM) policy,
+// you must have permissions to perform the s3:ListBucket action. The bucket
+// owner has this permission by default and can grant this permission to others.
+// For more information about permissions, see Permissions Related to Bucket
+// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
+//
+// This section describes the latest revision of this action. We recommend that
+// you use this revised API for application development. For backward compatibility,
+// Amazon S3 continues to support the prior version of this API, ListObjects
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html).
+//
+// To get a list of your buckets, see ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html).
+//
+// The following operations are related to ListObjectsV2:
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListObjectsV2 for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchBucket "NoSuchBucket"
+// The specified bucket does not exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2
+func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) {
+ req, out := c.ListObjectsV2Request(input)
+ return out, req.Send()
+}
+
+// ListObjectsV2WithContext is the same as ListObjectsV2 with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListObjectsV2 for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input, opts ...request.Option) (*ListObjectsV2Output, error) {
+ req, out := c.ListObjectsV2Request(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListObjectsV2 method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListObjectsV2 operation.
+// pageNum := 0
+// err := client.ListObjectsV2Pages(params,
+// func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool) error {
+ return c.ListObjectsV2PagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListObjectsV2PagesWithContext same as ListObjectsV2Pages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListObjectsV2Input
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListObjectsV2Request(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
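+
+// A minimal illustrative sketch of collecting every key in a bucket with the
+// paginated ListObjectsV2 API and a cancellable context. The bucket name is a
+// placeholder, and ctx is assumed to be a previously created aws.Context.
+//
+// var keys []string
+// err := client.ListObjectsV2PagesWithContext(ctx,
+//     &s3.ListObjectsV2Input{Bucket: aws.String("example-bucket")},
+//     func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+//         for _, obj := range page.Contents {
+//             keys = append(keys, aws.StringValue(obj.Key))
+//         }
+//         return true // continue until no NextContinuationToken remains
+//     })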
+
+const opListParts = "ListParts"
+
+// ListPartsRequest generates a "aws/request.Request" representing the
+// client's request for the ListParts operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListParts for more information on using the ListParts
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ListPartsRequest method.
+// req, resp := client.ListPartsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts
+func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) {
+ op := &request.Operation{
+ Name: opListParts,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"PartNumberMarker"},
+ OutputTokens: []string{"NextPartNumberMarker"},
+ LimitToken: "MaxParts",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListPartsInput{}
+ }
+
+ output = &ListPartsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListParts API operation for Amazon Simple Storage Service.
+//
+// Lists the parts that have been uploaded for a specific multipart upload.
+// This operation must include the upload ID, which you obtain by sending the
+// initiate multipart upload request (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)).
+// This request returns a maximum of 1,000 uploaded parts. The default number
+// of parts returned is 1,000 parts. You can restrict the number of parts returned
+// by specifying the max-parts request parameter. If your multipart upload consists
+// of more than 1,000 parts, the response returns an IsTruncated field with
+// the value of true, and a NextPartNumberMarker element. In subsequent ListParts
+// requests you can include the part-number-marker query string parameter and
+// set its value to the NextPartNumberMarker field value from the previous response.
+//
+// For more information on multipart uploads, see Uploading Objects Using Multipart
+// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html).
+//
+// For information on permissions required to use the multipart upload API,
+// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
+//
+// The following operations are related to ListParts:
+//
+// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListParts for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts
+func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) {
+ req, out := c.ListPartsRequest(input)
+ return out, req.Send()
+}
+
+// ListPartsWithContext is the same as ListParts with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListParts for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts ...request.Option) (*ListPartsOutput, error) {
+ req, out := c.ListPartsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListPartsPages iterates over the pages of a ListParts operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListParts method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListParts operation.
+// pageNum := 0
+// err := client.ListPartsPages(params,
+// func(page *s3.ListPartsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *S3) ListPartsPages(input *ListPartsInput, fn func(*ListPartsOutput, bool) bool) error {
+ return c.ListPartsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListPartsPagesWithContext same as ListPartsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, fn func(*ListPartsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListPartsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListPartsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
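+
+// A minimal illustrative sketch of enumerating the parts uploaded so far for
+// one multipart upload. uploadID is assumed to come from an earlier
+// CreateMultipartUpload response; the other values are placeholders.
+//
+// err := client.ListPartsPages(
+//     &s3.ListPartsInput{
+//         Bucket:   aws.String("example-bucket"),
+//         Key:      aws.String("example-key"),
+//         UploadId: aws.String(uploadID),
+//     },
+//     func(page *s3.ListPartsOutput, lastPage bool) bool {
+//         for _, part := range page.Parts {
+//             fmt.Printf("part %d: %d bytes\n",
+//                 aws.Int64Value(part.PartNumber), aws.Int64Value(part.Size))
+//         }
+//         return true
+//     })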
+
+const opPutBucketAcl = "PutBucketAcl"
+
+// PutBucketAclRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketAcl operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketAcl for more information on using the PutBucketAcl
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketAclRequest method.
+// req, resp := client.PutBucketAclRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl
+func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) {
+ op := &request.Operation{
+ Name: opPutBucketAcl,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?acl",
+ }
+
+ if input == nil {
+ input = &PutBucketAclInput{}
+ }
+
+ output = &PutBucketAclOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
+ return
+}
+
+// PutBucketAcl API operation for Amazon Simple Storage Service.
+//
+// Sets the permissions on an existing bucket using access control lists (ACL).
+// For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html).
+// To set the ACL of a bucket, you must have WRITE_ACP permission.
+//
+// You can use one of the following two ways to set a bucket's permissions:
+//
+// * Specify the ACL in the request body
+//
+// * Specify permissions using request headers
+//
+// You cannot specify access permission using both the body and the request
+// headers.
+//
+// Depending on your application needs, you may choose to set the ACL on a bucket
+// using either the request body or the headers. For example, if you have an
+// existing application that updates a bucket ACL using the request body, then
+// you can continue to use that approach.
+//
+// Access Permissions
+//
+// You can set access permissions using one of the following methods:
+//
+// * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports
+// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a
+// predefined set of grantees and permissions. Specify the canned ACL name
+// as the value of x-amz-acl. If you use this header, you cannot use other
+// access control-specific headers in your request. For more information,
+// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp,
+// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using
+// these headers, you specify explicit access permissions and grantees (AWS
+// accounts or Amazon S3 groups) who will receive the permission. If you
+// use these ACL-specific headers, you cannot use the x-amz-acl header to
+// set a canned ACL. These parameters map to the set of permissions that
+// Amazon S3 supports in an ACL. For more information, see Access Control
+// List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+// You specify each grantee as a type=value pair, where the type is one of
+// the following: id – if the value specified is the canonical user ID
+// of an AWS account uri – if you are granting permissions to a predefined
+// group emailAddress – if the value specified is the email address of
+// an AWS account Using email addresses to specify a grantee is only supported
+// in the following AWS Regions: US East (N. Virginia) US West (N. California)
+// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific
+// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all
+// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in
+// the AWS General Reference. For example, the following x-amz-grant-write
+// header grants create, overwrite, and delete objects permission to LogDelivery
+// group predefined by Amazon S3 and two AWS accounts identified by their
+// email addresses. x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery",
+// id="111122223333", id="555566667777"
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// Grantee Values
+//
+// You can specify the person (grantee) to whom you're assigning access rights
+// (using request elements) in the following ways:
+//
+// * By the person's ID: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName>
+// </Grantee> DisplayName is optional and ignored in the request
+//
+// * By URI: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>
+//
+// * By Email address: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="AmazonCustomerByEmail"><EmailAddress><>[email protected]<></EmailAddress>lt;/Grantee>
+// The grantee is resolved to the CanonicalUser and, in a response to a GET
+// Object acl request, appears as the CanonicalUser. Using email addresses
+// to specify a grantee is only supported in the following AWS Regions: US
+// East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific
+// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland)
+// South America (São Paulo) For a list of all the Amazon S3 supported Regions
+// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+// in the AWS General Reference.
+//
+// Related Resources
+//
+// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+//
+// * GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketAcl for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl
+func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) {
+ req, out := c.PutBucketAclRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketAclWithContext is the same as PutBucketAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketAclWithContext(ctx aws.Context, input *PutBucketAclInput, opts ...request.Option) (*PutBucketAclOutput, error) {
+ req, out := c.PutBucketAclRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
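+
+// A minimal illustrative sketch of the canned-ACL path described above: the
+// ACL field maps to the x-amz-acl request header. The bucket name is a
+// placeholder.
+//
+// _, err := client.PutBucketAcl(&s3.PutBucketAclInput{
+//     Bucket: aws.String("example-bucket"),
+//     ACL:    aws.String(s3.BucketCannedACLPublicRead), // canned "public-read" ACL
+// })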
+
+const opPutBucketCors = "PutBucketCors"
+
+// PutBucketCorsRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketCors operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketCors for more information on using the PutBucketCors
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketCorsRequest method.
+// req, resp := client.PutBucketCorsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors
+func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) {
+ op := &request.Operation{
+ Name: opPutBucketCors,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?cors",
+ }
+
+ if input == nil {
+ input = &PutBucketCorsInput{}
+ }
+
+ output = &PutBucketCorsOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
+ return
+}
+
+// PutBucketCors API operation for Amazon Simple Storage Service.
+//
+// Sets the cors configuration for your bucket. If the configuration exists,
+// Amazon S3 replaces it.
+//
+// To use this operation, you must be allowed to perform the s3:PutBucketCORS
+// action. By default, the bucket owner has this permission and can grant it
+// to others.
+//
+// You set this configuration on a bucket so that the bucket can service cross-origin
+// requests. For example, you might want to enable a request whose origin is
+// http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com
+// by using the browser's XMLHttpRequest capability.
+//
+// To enable cross-origin resource sharing (CORS) on a bucket, you add the cors
+// subresource to the bucket. The cors subresource is an XML document in which
+// you configure rules that identify origins and the HTTP methods that can be
+// executed on your bucket. The document is limited to 64 KB in size.
+//
+// When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request)
+// against a bucket, it evaluates the cors configuration on the bucket and uses
+// the first CORSRule rule that matches the incoming browser request to enable
+// a cross-origin request. For a rule to match, the following conditions must
+// be met:
+//
+// * The request's Origin header must match AllowedOrigin elements.
+//
+// * The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method
+// header in case of a pre-flight OPTIONS request must be one of the AllowedMethod
+// elements.
+//
+// * Every header specified in the Access-Control-Request-Headers request
+// header of a pre-flight request must match an AllowedHeader element.
+//
+// For more information about CORS, go to Enabling Cross-Origin Resource Sharing
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon
+// S3 User Guide.
+//
+// Related Resources
+//
+// * GetBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html)
+//
+// * DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html)
+//
+// * RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketCors for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors
+func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) {
+ req, out := c.PutBucketCorsRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketCorsWithContext is the same as PutBucketCors with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketCors for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput, opts ...request.Option) (*PutBucketCorsOutput, error) {
+ req, out := c.PutBucketCorsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
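+
+// A minimal illustrative sketch of a cors configuration with one rule
+// matching the browser scenario described above. The origin and bucket name
+// are placeholders.
+//
+// _, err := client.PutBucketCors(&s3.PutBucketCorsInput{
+//     Bucket: aws.String("example-bucket"),
+//     CORSConfiguration: &s3.CORSConfiguration{
+//         CORSRules: []*s3.CORSRule{{
+//             AllowedOrigins: []*string{aws.String("http://www.example.com")},
+//             AllowedMethods: []*string{aws.String("GET"), aws.String("PUT")},
+//             AllowedHeaders: []*string{aws.String("*")},
+//         }},
+//     },
+// })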
+
+const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration"
+
+// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketLifecycleConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketLifecycleConfiguration for more information on using the PutBucketLifecycleConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketLifecycleConfigurationRequest method.
+// req, resp := client.PutBucketLifecycleConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration
+func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketLifecycleConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?lifecycle",
+ }
+
+ if input == nil {
+ input = &PutBucketLifecycleConfigurationInput{}
+ }
+
+ output = &PutBucketLifecycleConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
+ return
+}
+
+// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service.
+//
+// Creates a new lifecycle configuration for the bucket or replaces an existing
+// lifecycle configuration. For information about lifecycle configuration, see
+// Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
+//
+// Bucket lifecycle configuration now supports specifying a lifecycle rule using
+// an object key name prefix, one or more object tags, or a combination of both.
+// Accordingly, this section describes the latest API. The previous version
+// of the API supported filtering based only on an object key name prefix, which
+// is supported for backward compatibility. For the related API description,
+// see PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html).
+//
+// Rules
+//
+// You specify the lifecycle configuration in your request body. The lifecycle
+// configuration is specified as XML consisting of one or more rules. Each rule
+// consists of the following:
+//
+// * Filter identifying a subset of objects to which the rule applies. The
+// filter can be based on a key name prefix, object tags, or a combination
+// of both.
+//
+// * Status whether the rule is in effect.
+//
+// * One or more lifecycle transition and expiration actions that you want
+// Amazon S3 to perform on the objects identified by the filter. If the state
+// of your bucket is versioning-enabled or versioning-suspended, you can
+// have many versions of the same object (one current version and zero or
+// more noncurrent versions). Amazon S3 provides predefined actions that
+// you can specify for current and noncurrent object versions.
+//
+// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
+// and Lifecycle Configuration Elements (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html).
+//
+// Permissions
+//
+// By default, all Amazon S3 resources are private, including buckets, objects,
+// and related subresources (for example, lifecycle configuration and website
+// configuration). Only the resource owner (that is, the AWS account that created
+// it) can access the resource. The resource owner can optionally grant access
+// permissions to others by writing an access policy. For this operation, a
+// user must get the s3:PutLifecycleConfiguration permission.
+//
+// You can also explicitly deny permissions. Explicit deny also supersedes any
+// other permissions. If you want to block users or accounts from removing or
+// deleting objects from your bucket, you must deny them permissions for the
+// following actions:
+//
+// * s3:DeleteObject
+//
+// * s3:DeleteObjectVersion
+//
+// * s3:PutLifecycleConfiguration
+//
+// For more information about permissions, see Managing Access Permissions to
+// Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
+//
+// The following are related to PutBucketLifecycleConfiguration:
+//
+// * Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html)
+//
+// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html)
+//
+// * DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketLifecycleConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration
+func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) {
+ req, out := c.PutBucketLifecycleConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketLifecycleConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) {
+ req, out := c.PutBucketLifecycleConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
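+
+// A minimal illustrative sketch of one lifecycle rule built from the elements
+// listed above: a key-name-prefix filter, a status, and an expiration action.
+// All values are placeholders.
+//
+// _, err := client.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
+//     Bucket: aws.String("example-bucket"),
+//     LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
+//         Rules: []*s3.LifecycleRule{{
+//             ID:         aws.String("expire-old-logs"),
+//             Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
+//             Status:     aws.String("Enabled"),
+//             Expiration: &s3.LifecycleExpiration{Days: aws.Int64(365)},
+//         }},
+//     },
+// })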
+
+const opPutBucketLogging = "PutBucketLogging"
+
+// PutBucketLoggingRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketLogging operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketLogging for more information on using the PutBucketLogging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketLoggingRequest method.
+// req, resp := client.PutBucketLoggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
+func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) {
+ op := &request.Operation{
+ Name: opPutBucketLogging,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?logging",
+ }
+
+ if input == nil {
+ input = &PutBucketLoggingInput{}
+ }
+
+ output = &PutBucketLoggingOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
+ return
+}
+
+// PutBucketLogging API operation for Amazon Simple Storage Service.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketLogging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
+func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) {
+ req, out := c.PutBucketLoggingRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketLogging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) {
+ req, out := c.PutBucketLoggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
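+
+// A minimal illustrative sketch of enabling access logging by directing log
+// objects to a target bucket under a prefix. Both bucket names are
+// placeholders, and the target bucket must already permit log delivery.
+//
+// _, err := client.PutBucketLogging(&s3.PutBucketLoggingInput{
+//     Bucket: aws.String("example-bucket"),
+//     BucketLoggingStatus: &s3.BucketLoggingStatus{
+//         LoggingEnabled: &s3.LoggingEnabled{
+//             TargetBucket: aws.String("example-log-bucket"),
+//             TargetPrefix: aws.String("access-logs/"),
+//         },
+//     },
+// })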
+
+const opPutBucketProtectionConfiguration = "PutBucketProtectionConfiguration"
+
+// PutBucketProtectionConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketProtectionConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketProtectionConfiguration for more information on using the PutBucketProtectionConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketProtectionConfigurationRequest method.
+// req, resp := client.PutBucketProtectionConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketProtectionConfiguration
+func (c *S3) PutBucketProtectionConfigurationRequest(input *PutBucketProtectionConfigurationInput) (req *request.Request, output *PutBucketProtectionConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketProtectionConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?protection",
+ }
+
+ if input == nil {
+ input = &PutBucketProtectionConfigurationInput{}
+ }
+
+ output = &PutBucketProtectionConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
+ return
+}
+
+// PutBucketProtectionConfiguration API operation for Amazon Simple Storage Service.
+//
+// Sets the protection configuration of an existing bucket. EnablePermanentRetention
+// is optional and, if not included, is considered to be false. Once set to true,
+// it must be included in any subsequent PUT Bucket?protection requests for that
+// bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketProtectionConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketProtectionConfiguration
+func (c *S3) PutBucketProtectionConfiguration(input *PutBucketProtectionConfigurationInput) (*PutBucketProtectionConfigurationOutput, error) {
+ req, out := c.PutBucketProtectionConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketProtectionConfigurationWithContext is the same as PutBucketProtectionConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketProtectionConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketProtectionConfigurationWithContext(ctx aws.Context, input *PutBucketProtectionConfigurationInput, opts ...request.Option) (*PutBucketProtectionConfigurationOutput, error) {
+ req, out := c.PutBucketProtectionConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutBucketReplication = "PutBucketReplication"
+
+// PutBucketReplicationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketReplication operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketReplication for more information on using the PutBucketReplication
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketReplicationRequest method.
+// req, resp := client.PutBucketReplicationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication
+func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketReplication,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?replication",
+ }
+
+ if input == nil {
+ input = &PutBucketReplicationInput{}
+ }
+
+ output = &PutBucketReplicationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
+ return
+}
+
+// PutBucketReplication API operation for Amazon Simple Storage Service.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketReplication for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication
+func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) {
+ req, out := c.PutBucketReplicationRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketReplicationWithContext is the same as PutBucketReplication with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketReplication for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput, opts ...request.Option) (*PutBucketReplicationOutput, error) {
+ req, out := c.PutBucketReplicationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutBucketVersioning = "PutBucketVersioning"
+
+// PutBucketVersioningRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketVersioning operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketVersioning for more information on using the PutBucketVersioning
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketVersioningRequest method.
+// req, resp := client.PutBucketVersioningRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning
+func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) {
+ op := &request.Operation{
+ Name: opPutBucketVersioning,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?versioning",
+ }
+
+ if input == nil {
+ input = &PutBucketVersioningInput{}
+ }
+
+ output = &PutBucketVersioningOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
+ return
+}
+
+// PutBucketVersioning API operation for Amazon Simple Storage Service.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketVersioning for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning
+func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) {
+ req, out := c.PutBucketVersioningRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketVersioningWithContext is the same as PutBucketVersioning with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketVersioning for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketVersioningWithContext(ctx aws.Context, input *PutBucketVersioningInput, opts ...request.Option) (*PutBucketVersioningOutput, error) {
+ req, out := c.PutBucketVersioningRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
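+
+// A minimal illustrative sketch of enabling versioning on a bucket. The
+// bucket name is a placeholder.
+//
+// _, err := client.PutBucketVersioning(&s3.PutBucketVersioningInput{
+//     Bucket: aws.String("example-bucket"),
+//     VersioningConfiguration: &s3.VersioningConfiguration{
+//         Status: aws.String(s3.BucketVersioningStatusEnabled),
+//     },
+// })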
+
+const opPutBucketWebsite = "PutBucketWebsite"
+
+// PutBucketWebsiteRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketWebsite operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketWebsite for more information on using the PutBucketWebsite
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketWebsiteRequest method.
+// req, resp := client.PutBucketWebsiteRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite
+func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) {
+ op := &request.Operation{
+ Name: opPutBucketWebsite,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?website",
+ }
+
+ if input == nil {
+ input = &PutBucketWebsiteInput{}
+ }
+
+ output = &PutBucketWebsiteOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
+ return
+}
+
+// PutBucketWebsite API operation for Amazon Simple Storage Service.
+//
+// Sets the configuration of the website that is specified in the website subresource.
+// To configure a bucket as a website, you can add this subresource on the bucket
+// with website configuration information such as the file name of the index
+// document and any redirect rules. For more information, see Hosting Websites
+// on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html).
+//
+// This PUT action requires the S3:PutBucketWebsite permission. By default,
+// only the bucket owner can configure the website attached to a bucket; however,
+// bucket owners can allow other users to set the website configuration by writing
+// a bucket policy that grants them the S3:PutBucketWebsite permission.
+//
+// To redirect all website requests sent to the bucket's website endpoint, you
+// add a website configuration with the following elements. Because all requests
+// are sent to another website, you don't need to provide an index document
+// name for the bucket.
+//
+// * WebsiteConfiguration
+//
+// * RedirectAllRequestsTo
+//
+// * HostName
+//
+// * Protocol
+//
+// If you want granular control over redirects, you can use the following elements
+// to add routing rules that describe conditions for redirecting requests and
+// information about the redirect destination. In this case, the website configuration
+// must provide an index document for the bucket, because some requests might
+// not be redirected.
+//
+// * WebsiteConfiguration
+//
+// * IndexDocument
+//
+// * Suffix
+//
+// * ErrorDocument
+//
+// * Key
+//
+// * RoutingRules
+//
+// * RoutingRule
+//
+// * Condition
+//
+// * HttpErrorCodeReturnedEquals
+//
+// * KeyPrefixEquals
+//
+// * Redirect
+//
+// * Protocol
+//
+// * HostName
+//
+// * ReplaceKeyPrefixWith
+//
+// * ReplaceKeyWith
+//
+// * HttpRedirectCode
+//
+// Amazon S3 has a limitation of 50 routing rules per website configuration.
+// If you require more than 50 routing rules, you can use object redirect. For
+// more information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html)
+// in the Amazon S3 User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketWebsite for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite
+func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) {
+ req, out := c.PutBucketWebsiteRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketWebsiteWithContext is the same as PutBucketWebsite with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketWebsite for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketWebsiteWithContext(ctx aws.Context, input *PutBucketWebsiteInput, opts ...request.Option) (*PutBucketWebsiteOutput, error) {
+ req, out := c.PutBucketWebsiteRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
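+// The following sketch is illustrative client code, not part of the generated
+// API: "my-bucket" and the document names are placeholder assumptions, and the
+// aws, session, s3, and log imports from this SDK are assumed. It configures a
+// bucket to serve index.html with a custom error page:
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
+//        Bucket: aws.String("my-bucket"), // placeholder bucket name
+//        WebsiteConfiguration: &s3.WebsiteConfiguration{
+//            IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
+//            ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
+//        },
+//    })
+//    if err != nil {
+//        log.Fatal(err) // err carries the awserr.Error code and message
+//    }
+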
+const opPutObject = "PutObject"
+
+// PutObjectRequest generates a "aws/request.Request" representing the
+// client's request for the PutObject operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutObject for more information on using the PutObject
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutObjectRequest method.
+// req, resp := client.PutObjectRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject
+func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) {
+ op := &request.Operation{
+ Name: opPutObject,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &PutObjectInput{}
+ }
+
+ output = &PutObjectOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// PutObject API operation for Amazon Simple Storage Service.
+//
+// Adds an object to a bucket. You must have WRITE permissions on a bucket to
+// add an object to it.
+//
+// Amazon S3 never adds partial objects; if you receive a success response,
+// Amazon S3 added the entire object to the bucket.
+//
+// Amazon S3 is a distributed system. If it receives multiple write requests
+// for the same object simultaneously, it overwrites all but the last object
+// written. Amazon S3 does not provide object locking; if you need this, make
+// sure to build it into your application layer or use versioning instead.
+//
+// To ensure that data is not corrupted traversing the network, use the Content-MD5
+// header. When you use this header, Amazon S3 checks the object against the
+// provided MD5 value and, if they do not match, returns an error. Additionally,
+// you can calculate the MD5 while putting an object to Amazon S3 and compare
+// the returned ETag to the calculated MD5 value.
+//
+// The Content-MD5 header is required for any request to upload an object with
+// a retention period configured using Amazon S3 Object Lock. For more information
+// about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)
+// in the Amazon S3 User Guide.
+//
+// Server-side Encryption
+//
+// You can optionally request server-side encryption. With server-side encryption,
+// Amazon S3 encrypts your data as it writes it to disks in its data centers
+// and decrypts the data when you access it. You have the option to provide
+// your own encryption key or use AWS managed encryption keys (SSE-S3 or SSE-KMS).
+// For more information, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html).
+//
+// If you request server-side encryption using AWS Key Management Service (SSE-KMS),
+// you can enable an S3 Bucket Key at the object-level. For more information,
+// see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html)
+// in the Amazon S3 User Guide.
+//
+// Access Control List (ACL)-Specific Request Headers
+//
+// You can use headers to grant ACL-based permissions. By default, all objects
+// are private. Only the owner has full access control. When adding a new object,
+// you can grant permissions to individual AWS accounts or to predefined groups
+// defined by Amazon S3. These permissions are then added to the ACL on the
+// object. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
+// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html).
+//
+// Storage Class Options
+//
+// By default, Amazon S3 uses the STANDARD Storage Class to store newly created
+// objects. The STANDARD storage class provides high durability and high availability.
+// Depending on performance needs, you can specify a different Storage Class.
+// Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
+// see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+// in the Amazon S3 Service Developer Guide.
+//
+// Versioning
+//
+// If you enable versioning for a bucket, Amazon S3 automatically generates
+// a unique version ID for the object being stored. Amazon S3 returns this ID
+// in the response. When you enable versioning for a bucket, if Amazon S3 receives
+// multiple write requests for the same object simultaneously, it stores all
+// of the objects.
+//
+// For more information about versioning, see Adding Objects to Versioning Enabled
+// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html).
+// For information about returning the versioning state of a bucket, see GetBucketVersioning
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html).
+//
+// Related Resources
+//
+// * CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
+//
+// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObject for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject
+func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) {
+ req, out := c.PutObjectRequest(input)
+ return out, req.Send()
+}
+
+// PutObjectWithContext is the same as PutObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) {
+ req, out := c.PutObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
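+// Illustrative sketch (not generated code; the bucket, key, and body are
+// placeholder assumptions, and the aws, session, s3, and strings imports from
+// this SDK are assumed): uploading a small object with SSE-S3 encryption.
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.PutObject(&s3.PutObjectInput{
+//        Bucket:               aws.String("my-bucket"),
+//        Key:                  aws.String("greeting.txt"),
+//        Body:                 strings.NewReader("hello, world\n"), // any io.ReadSeeker
+//        ServerSideEncryption: aws.String(s3.ServerSideEncryptionAes256),
+//    })
+//
+// As described above, the returned ETag can be compared against a locally
+// computed MD5 of the body to verify integrity.
+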
+const opPutObjectAcl = "PutObjectAcl"
+
+// PutObjectAclRequest generates a "aws/request.Request" representing the
+// client's request for the PutObjectAcl operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutObjectAcl for more information on using the PutObjectAcl
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutObjectAclRequest method.
+// req, resp := client.PutObjectAclRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl
+func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) {
+ op := &request.Operation{
+ Name: opPutObjectAcl,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}?acl",
+ }
+
+ if input == nil {
+ input = &PutObjectAclInput{}
+ }
+
+ output = &PutObjectAclOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
+ return
+}
+
+// PutObjectAcl API operation for Amazon Simple Storage Service.
+//
+// Uses the acl subresource to set the access control list (ACL) permissions
+// for a new or existing object in an S3 bucket. You must have WRITE_ACP permission
+// to set the ACL of an object. For more information, see What permissions can
+// I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions)
+// in the Amazon S3 User Guide.
+//
+// This action is not supported by Amazon S3 on Outposts.
+//
+// Depending on your application needs, you can choose to set the ACL on an
+// object using either the request body or the headers. For example, if you
+// have an existing application that updates a bucket ACL using the request
+// body, you can continue to use that approach. For more information, see Access
+// Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
+// in the Amazon S3 Developer Guide.
+//
+// Access Permissions
+//
+// You can set access permissions using one of the following methods:
+//
+// * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports
+// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a
+// predefined set of grantees and permissions. Specify the canned ACL name
+// as the value of x-amz-acl. If you use this header, you cannot use other
+// access control-specific headers in your request. For more information,
+// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp,
+// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using
+// these headers, you specify explicit access permissions and grantees (AWS
+// accounts or Amazon S3 groups) who will receive the permission. If you
+// use these ACL-specific headers, you cannot use x-amz-acl header to set
+// a canned ACL. These parameters map to the set of permissions that Amazon
+// S3 supports in an ACL. For more information, see Access Control List (ACL)
+// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+// You specify each grantee as a type=value pair, where the type is one of
+// the following: id – if the value specified is the canonical user ID
+// of an AWS account uri – if you are granting permissions to a predefined
+// group emailAddress – if the value specified is the email address of
+// an AWS account Using email addresses to specify a grantee is only supported
+// in the following AWS Regions: US East (N. Virginia) US West (N. California)
+// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific
+// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all
+// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in
+// the AWS General Reference. For example, the following x-amz-grant-read
+// header grants list objects permission to the two AWS accounts identified
+// by their email addresses. x-amz-grant-read: emailAddress="[email protected]",
+// emailAddress="[email protected]"
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// Grantee Values
+//
+// You can specify the person (grantee) to whom you're assigning access rights
+// (using request elements) in the following ways:
+//
+// * By the person's ID: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName>
+// </Grantee> DisplayName is optional and ignored in the request.
+//
+// * By URI: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>
+//
+// * By Email address: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="AmazonCustomerByEmail"><EmailAddress><>[email protected]<></EmailAddress>lt;/Grantee>
+// The grantee is resolved to the CanonicalUser and, in a response to a GET
+// Object acl request, appears as the CanonicalUser. Using email addresses
+// to specify a grantee is only supported in the following AWS Regions: US
+// East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific
+// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland)
+// South America (São Paulo) For a list of all the Amazon S3 supported Regions
+// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+// in the AWS General Reference.
+//
+// Versioning
+//
+// The ACL of an object is set at the object version level. By default, PUT
+// sets the ACL of the current version of an object. To set the ACL of a different
+// version, use the versionId subresource.
+//
+// Related Resources
+//
+// * CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
+//
+// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObjectAcl for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchKey "NoSuchKey"
+// The specified key does not exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl
+func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) {
+ req, out := c.PutObjectAclRequest(input)
+ return out, req.Send()
+}
+
+// PutObjectAclWithContext is the same as PutObjectAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObjectAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, opts ...request.Option) (*PutObjectAclOutput, error) {
+ req, out := c.PutObjectAclRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
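+// Illustrative sketch (placeholder names, not generated code; svc as in the
+// earlier sketches): applying a canned ACL. As noted above, a canned ACL
+// cannot be combined with the explicit x-amz-grant-* headers in one request.
+//
+//    _, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
+//        Bucket: aws.String("my-bucket"),
+//        Key:    aws.String("greeting.txt"),
+//        ACL:    aws.String(s3.ObjectCannedACLPublicRead), // canned ACL
+//    })
+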
+const opPutObjectTagging = "PutObjectTagging"
+
+// PutObjectTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the PutObjectTagging operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutObjectTagging for more information on using the PutObjectTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutObjectTaggingRequest method.
+// req, resp := client.PutObjectTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging
+func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) {
+ op := &request.Operation{
+ Name: opPutObjectTagging,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}?tagging",
+ }
+
+ if input == nil {
+ input = &PutObjectTaggingInput{}
+ }
+
+ output = &PutObjectTaggingOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
+ return
+}
+
+// PutObjectTagging API operation for Amazon Simple Storage Service.
+//
+// Sets the supplied tag-set to an object that already exists in a bucket.
+//
+// A tag is a key-value pair. You can associate tags with an object by sending
+// a PUT request against the tagging subresource that is associated with the
+// object. You can retrieve tags by sending a GET request. For more information,
+// see GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html).
+//
+// For tagging-related restrictions on characters and encodings, see
+// Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html).
+// Note that Amazon S3 limits the maximum number of tags to 10 per object.
+//
+// To use this operation, you must have permission to perform the s3:PutObjectTagging
+// action. By default, the bucket owner has this permission and can grant this
+// permission to others.
+//
+// To put tags of any other version, use the versionId query parameter. You
+// also need permission for the s3:PutObjectVersionTagging action.
+//
+// For information about the Amazon S3 object tagging feature, see Object Tagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
+//
+// Special Errors
+//
+// * Code: InvalidTagError Cause: The tag provided was not a valid tag. This
+// error can occur if the tag did not pass input validation. For more information,
+// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
+//
+// * Code: MalformedXMLError Cause: The XML provided does not match the schema.
+//
+// * Code: OperationAbortedError Cause: A conflicting conditional action
+// is currently in progress against this resource. Please try again.
+//
+// * Code: InternalError Cause: The service was unable to apply the provided
+// tag to the object.
+//
+// Related Resources
+//
+// * GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
+//
+// * DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObjectTagging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging
+func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) {
+ req, out := c.PutObjectTaggingRequest(input)
+ return out, req.Send()
+}
+
+// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObjectTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) {
+ req, out := c.PutObjectTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
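+// Illustrative sketch (placeholder names; svc as in the earlier sketches):
+// sending a tag set for an object. The supplied set replaces any tags already
+// on the object.
+//
+//    _, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
+//        Bucket: aws.String("my-bucket"),
+//        Key:    aws.String("greeting.txt"),
+//        Tagging: &s3.Tagging{TagSet: []*s3.Tag{
+//            {Key: aws.String("project"), Value: aws.String("demo")},
+//        }},
+//    })
+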
+const opPutPublicAccessBlock = "PutPublicAccessBlock"
+
+// PutPublicAccessBlockRequest generates a "aws/request.Request" representing the
+// client's request for the PutPublicAccessBlock operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutPublicAccessBlock for more information on using the PutPublicAccessBlock
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutPublicAccessBlockRequest method.
+// req, resp := client.PutPublicAccessBlockRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock
+func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req *request.Request, output *PutPublicAccessBlockOutput) {
+ op := &request.Operation{
+ Name: opPutPublicAccessBlock,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?publicAccessBlock",
+ }
+
+ if input == nil {
+ input = &PutPublicAccessBlockInput{}
+ }
+
+ output = &PutPublicAccessBlockOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
+ return
+}
+
+// PutPublicAccessBlock API operation for Amazon Simple Storage Service.
+//
+// Creates or modifies the PublicAccessBlock configuration for an Amazon S3
+// bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock
+// permission. For more information about Amazon S3 permissions, see Specifying
+// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
+//
+// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket
+// or an object, it checks the PublicAccessBlock configuration for both the
+// bucket (or the bucket that contains the object) and the bucket owner's account.
+// If the PublicAccessBlock configurations are different between the bucket
+// and the account, Amazon S3 uses the most restrictive combination of the bucket-level
+// and account-level settings.
+//
+// For more information about when Amazon S3 considers a bucket or an object
+// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status).
+//
+// Related Resources
+//
+// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html)
+//
+// * DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html)
+//
+// * GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html)
+//
+// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutPublicAccessBlock for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock
+func (c *S3) PutPublicAccessBlock(input *PutPublicAccessBlockInput) (*PutPublicAccessBlockOutput, error) {
+ req, out := c.PutPublicAccessBlockRequest(input)
+ return out, req.Send()
+}
+
+// PutPublicAccessBlockWithContext is the same as PutPublicAccessBlock with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutPublicAccessBlock for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutPublicAccessBlockWithContext(ctx aws.Context, input *PutPublicAccessBlockInput, opts ...request.Option) (*PutPublicAccessBlockOutput, error) {
+ req, out := c.PutPublicAccessBlockRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
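+// Illustrative sketch (placeholder bucket; svc as in the earlier sketches):
+// turning on all four public-access restrictions for a bucket.
+//
+//    _, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
+//        Bucket: aws.String("my-bucket"),
+//        PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
+//            BlockPublicAcls:       aws.Bool(true),
+//            BlockPublicPolicy:     aws.Bool(true),
+//            IgnorePublicAcls:      aws.Bool(true),
+//            RestrictPublicBuckets: aws.Bool(true),
+//        },
+//    })
+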
+const opRestoreObject = "RestoreObject"
+
+// RestoreObjectRequest generates a "aws/request.Request" representing the
+// client's request for the RestoreObject operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See RestoreObject for more information on using the RestoreObject
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the RestoreObjectRequest method.
+// req, resp := client.RestoreObjectRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
+func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) {
+ op := &request.Operation{
+ Name: opRestoreObject,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}/{Key+}?restore",
+ }
+
+ if input == nil {
+ input = &RestoreObjectInput{}
+ }
+
+ output = &RestoreObjectOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// RestoreObject API operation for Amazon Simple Storage Service.
+//
+// Restores an archived copy of an object back into Amazon S3.
+//
+// This action is not supported by Amazon S3 on Outposts.
+//
+// This action performs the following types of requests:
+//
+// * select - Perform a select query on an archived object
+//
+// * restore an archive - Restore an archived object
+//
+// To use this operation, you must have permissions to perform the s3:RestoreObject
+// action. The bucket owner has this permission by default and can grant this
+// permission to others. For more information about permissions, see Permissions
+// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
+// in the Amazon S3 User Guide.
+//
+// Querying Archives with Select Requests
+//
+// You use a select type of request to perform SQL queries on archived objects.
+// The archived objects that are being queried by the select request must be
+// formatted as uncompressed comma-separated values (CSV) files. You can run
+// queries and custom analytics on your archived data without having to restore
+// your data to a hotter Amazon S3 tier. For an overview about select requests,
+// see Querying Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html)
+// in the Amazon S3 User Guide.
+//
+// When making a select request, do the following:
+//
+// * Define an output location for the select query's output. This must be
+// an Amazon S3 bucket in the same AWS Region as the bucket that contains
+// the archive object that is being queried. The AWS account that initiates
+// the job must have permissions to write to the S3 bucket. You can specify
+// the storage class and encryption for the output objects stored in the
+// bucket. For more information about output, see Querying Archived Objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html)
+// in the Amazon S3 User Guide. For more information about the S3 structure
+// in the request body, see the following: PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+// Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html)
+// in the Amazon S3 User Guide Protecting Data Using Server-Side Encryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
+// in the Amazon S3 User Guide
+//
+// * Define the SQL expression for the SELECT type of restoration for your
+// query in the request body's SelectParameters structure. You can use expressions
+// like the following examples. The following expression returns all records
+// from the specified object. SELECT * FROM Object Assuming that you are
+// not using any headers for data stored in the object, you can specify columns
+// with positional headers. SELECT s._1, s._2 FROM Object s WHERE s._3 >
+// 100 If you have headers and you set the fileHeaderInfo in the CSV structure
+// in the request body to USE, you can specify headers in the query. (If
+// you set the fileHeaderInfo field to IGNORE, the first row is skipped for
+// the query.) You cannot mix ordinal positions with header column names.
+// SELECT s.Id, s.FirstName, s.SSN FROM S3Object s
+//
+// For more information about using SQL with S3 Glacier Select restore, see
+// SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html)
+// in the Amazon S3 User Guide.
+//
+// When making a select request, you can also do the following:
+//
+// * To expedite your queries, specify the Expedited tier. For more information
+// about tiers, see "Restoring Archives," later in this topic.
+//
+// * Specify details about the data serialization format of both the input
+// object that is being queried and the serialization of the CSV-encoded
+// query results.
+//
+// The following are additional important facts about the select feature:
+//
+// * The output results are new Amazon S3 objects. Unlike archive retrievals,
+// they are stored until explicitly deleted, either manually or through a lifecycle
+// policy.
+//
+// * You can issue more than one select request on the same Amazon S3 object.
+// Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests.
+//
+// * Amazon S3 accepts a select request even if the object has already been
+// restored. A select request doesn't return a 409 error response.
+//
+// Restoring objects
+//
+// Objects that you archive to the S3 Glacier or S3 Glacier Deep Archive storage
+// class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep
+// Archive tiers are not accessible in real time. For objects in Archive Access
+// or Deep Archive Access tiers you must first initiate a restore request, and
+// then wait until the object is moved into the Frequent Access tier. For objects
+// in S3 Glacier or S3 Glacier Deep Archive storage classes you must first initiate
+// a restore request, and then wait until a temporary copy of the object is
+// available. To access an archived object, you must restore the object for
+// the duration (number of days) that you specify.
+//
+// To restore a specific object version, you can provide a version ID. If you
+// don't provide a version ID, Amazon S3 restores the current version.
+//
+// When restoring an archived object (or using a select request), you can specify
+// one of the following data access tier options in the Tier element of the
+// request body:
+//
+// * Expedited - Expedited retrievals allow you to quickly access your data
+// stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive
+// tier when occasional urgent requests for a subset of archives are required.
+// For all but the largest archived objects (250 MB+), data accessed using
+// Expedited retrievals is typically made available within 1–5 minutes.
+// Provisioned capacity ensures that retrieval capacity for Expedited retrievals
+// is available when you need it. Expedited retrievals and provisioned capacity
+// are not available for objects stored in the S3 Glacier Deep Archive storage
+// class or S3 Intelligent-Tiering Deep Archive tier.
+//
+// * Standard - Standard retrievals allow you to access any of your archived
+// objects within several hours. This is the default option for retrieval
+// requests that do not specify the retrieval option. Standard retrievals
+// typically finish within 3–5 hours for objects stored in the S3 Glacier
+// storage class or S3 Intelligent-Tiering Archive tier. They typically finish
+// within 12 hours for objects stored in the S3 Glacier Deep Archive storage
+// class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals
+// are free for objects stored in S3 Intelligent-Tiering.
+//
+// * Bulk - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier,
+// enabling you to retrieve large amounts, even petabytes, of data inexpensively.
+// Bulk retrievals typically finish within 5–12 hours for objects stored
+// in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier.
+// They typically finish within 48 hours for objects stored in the S3 Glacier
+// Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.
+// Bulk retrievals are free for objects stored in S3 Intelligent-Tiering.
+//
+// For more information about archive retrieval options and provisioned capacity
+// for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html)
+// in the Amazon S3 User Guide.
+//
+// You can use Amazon S3 restore speed upgrade to change the restore speed to
+// a faster speed while it is in progress. For more information, see Upgrading
+// the speed of an in-progress restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html)
+// in the Amazon S3 User Guide.
+//
+// To get the status of object restoration, you can send a HEAD request. Operations
+// return the x-amz-restore header, which provides information about the restoration
+// status, in the response. You can use Amazon S3 event notifications to notify
+// you when a restore is initiated or completed. For more information, see Configuring
+// Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+// in the Amazon S3 User Guide.
+//
+// After restoring an archived object, you can update the restoration period
+// by reissuing the request with a new period. Amazon S3 updates the restoration
+// period relative to the current time and charges only for the request; there
+// are no data transfer charges. You cannot update the restoration period when
+// Amazon S3 is actively processing your current restore request for the object.
+//
+// If your bucket has a lifecycle configuration with a rule that includes an
+// expiration action, the object expiration overrides the life span that you
+// specify in a restore request. For example, if you restore an object copy
+// for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes
+// the object in 3 days. For more information about lifecycle configuration,
+// see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+// and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
+// in Amazon S3 User Guide.
+//
+// Responses
+//
+// A successful action returns either the 200 OK or 202 Accepted status code.
+//
+// * If the object is not previously restored, then Amazon S3 returns 202
+// Accepted in the response.
+//
+// * If the object is previously restored, Amazon S3 returns 200 OK in the
+// response.
+//
+// Special Errors
+//
+// * Code: RestoreAlreadyInProgress Cause: Object restore is already in progress.
+// (This error does not apply to SELECT type requests.) HTTP Status Code:
+// 409 Conflict SOAP Fault Code Prefix: Client
+//
+// * Code: GlacierExpeditedRetrievalNotAvailable Cause: expedited retrievals
+// are currently not available. Try again later. (Returned if there is insufficient
+// capacity to process the Expedited request. This error applies only to
+// Expedited retrievals and not to S3 Standard or Bulk retrievals.) HTTP
+// Status Code: 503 SOAP Fault Code Prefix: N/A
+//
+// Related Resources
+//
+// * PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+//
+// * GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html)
+//
+// * SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html)
+// in the Amazon S3 User Guide
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation RestoreObject for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError"
+// This action is not allowed against this storage tier.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
+func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) {
+ req, out := c.RestoreObjectRequest(input)
+ return out, req.Send()
+}
+
+// RestoreObjectWithContext is the same as RestoreObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RestoreObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) {
+ req, out := c.RestoreObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
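+// Illustrative sketch (placeholder names; svc as in the earlier sketches):
+// requesting a Standard-tier restore of an archived object for ten days.
+//
+//    _, err := svc.RestoreObject(&s3.RestoreObjectInput{
+//        Bucket: aws.String("my-bucket"),
+//        Key:    aws.String("archived-report.csv"),
+//        RestoreRequest: &s3.RestoreRequest{
+//            Days: aws.Int64(10), // lifetime of the temporary copy
+//            GlacierJobParameters: &s3.GlacierJobParameters{
+//                Tier: aws.String(s3.TierStandard), // or TierExpedited / TierBulk
+//            },
+//        },
+//    })
+//
+// A HEAD request on the object then reports restore progress through the
+// x-amz-restore header, as described above.
+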
+const opUploadPart = "UploadPart"
+
+// UploadPartRequest generates a "aws/request.Request" representing the
+// client's request for the UploadPart operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UploadPart for more information on using the UploadPart
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the UploadPartRequest method.
+// req, resp := client.UploadPartRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
+func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) {
+ op := &request.Operation{
+ Name: opUploadPart,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &UploadPartInput{}
+ }
+
+ output = &UploadPartOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// UploadPart API operation for Amazon Simple Storage Service.
+//
+// Uploads a part in a multipart upload.
+//
+// In this operation, you provide part data in your request. However, you have
+// an option to specify your existing Amazon S3 object as a data source for
+// the part you are uploading. To upload a part from an existing object, you
+// use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
+// operation.
+//
+// You must initiate a multipart upload (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html))
+// before you can upload any part. In response to your initiate request, Amazon
+// S3 returns an upload ID, a unique identifier, that you must include in your
+// upload part request.
+//
+// Part numbers can be any number from 1 to 10,000, inclusive. A part number
+// uniquely identifies a part and also defines its position within the object
+// being created. If you upload a new part using the same part number that was
+// used with a previous part, the previously uploaded part is overwritten. Each
+// part must be at least 5 MB in size, except the last part. There is no size
+// limit on the last part of your multipart upload.
+//
+// To ensure that data is not corrupted when traversing the network, specify
+// the Content-MD5 header in the upload part request. Amazon S3 checks the part
+// data against the provided MD5 value. If they do not match, Amazon S3 returns
+// an error.
+//
+// If the upload request is signed with Signature Version 4, then Amazon S3 uses
+// the x-amz-content-sha256 header as a checksum instead of Content-MD5. For
+// more information see Authenticating Requests: Using the Authorization Header
+// (AWS Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html).
+//
+// Note: After you initiate a multipart upload and upload one or more parts,
+// you must either complete or abort the multipart upload in order to stop getting
+// charged for storage of the uploaded parts. Only after you either complete or
+// abort a multipart upload does Amazon S3 free up the parts storage and stop
+// charging you for it.
+//
+// For more information on multipart uploads, go to Multipart Upload Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the
+// Amazon S3 User Guide.
+//
+// For information on the permissions required to use the multipart upload API,
+// go to Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+// in the Amazon S3 User Guide.
+//
+// You can optionally request server-side encryption where Amazon S3 encrypts
+// your data as it writes it to disks in its data centers and decrypts it for
+// you when you access it. You have the option of providing your own encryption
+// key, or you can use the AWS managed encryption keys. If you choose to provide
+// your own encryption key, the request headers you provide in the request must
+// match the headers you used in the request to initiate the upload by using
+// CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html).
+// For more information, go to Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html)
+// in the Amazon S3 User Guide.
+//
+// Server-side encryption is supported by the S3 Multipart Upload actions. Unless
+// you are using a customer-provided encryption key, you don't need to specify
+// the encryption parameters in each UploadPart request. Instead, you only need
+// to specify the server-side encryption parameters in the initial Initiate
+// Multipart request. For more information, see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html).
+//
+// If you requested server-side encryption using a customer-provided encryption
+// key in your initiate multipart upload request, you must provide identical
+// encryption information in each part upload using the following headers.
+//
+// * x-amz-server-side-encryption-customer-algorithm
+//
+// * x-amz-server-side-encryption-customer-key
+//
+// * x-amz-server-side-encryption-customer-key-MD5
+//
+// Special Errors
+//
+// * Code: NoSuchUpload Cause: The specified multipart upload does not exist.
+// The upload ID might be invalid, or the multipart upload might have been
+// aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault Code
+// Prefix: Client
+//
+// Related Resources
+//
+// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation UploadPart for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
+func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) {
+ req, out := c.UploadPartRequest(input)
+ return out, req.Send()
+}
+
+// UploadPartWithContext is the same as UploadPart with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UploadPart for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) {
+ req, out := c.UploadPartRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
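+// Illustrative sketch of the full flow described above (placeholder names;
+// svc as in the earlier sketches; partData is an assumed []byte of at least
+// 5 MB; the bytes import is assumed; error handling elided for brevity):
+//
+//    create, _ := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
+//        Bucket: aws.String("my-bucket"), Key: aws.String("big-object"),
+//    })
+//    part, _ := svc.UploadPart(&s3.UploadPartInput{
+//        Bucket:     aws.String("my-bucket"),
+//        Key:        aws.String("big-object"),
+//        UploadId:   create.UploadId,
+//        PartNumber: aws.Int64(1), // 1 to 10,000
+//        Body:       bytes.NewReader(partData),
+//    })
+//    _, _ = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+//        Bucket:   aws.String("my-bucket"),
+//        Key:      aws.String("big-object"),
+//        UploadId: create.UploadId,
+//        MultipartUpload: &s3.CompletedMultipartUpload{
+//            Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
+//        },
+//    })
+//
+// Completing (or aborting) the upload is what stops the per-part storage
+// charges mentioned above.
+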
+const opUploadPartCopy = "UploadPartCopy"
+
+// UploadPartCopyRequest generates a "aws/request.Request" representing the
+// client's request for the UploadPartCopy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UploadPartCopy for more information on using the UploadPartCopy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the UploadPartCopyRequest method.
+// req, resp := client.UploadPartCopyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
+func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) {
+ op := &request.Operation{
+ Name: opUploadPartCopy,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &UploadPartCopyInput{}
+ }
+
+ output = &UploadPartCopyOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// UploadPartCopy API operation for Amazon Simple Storage Service.
+//
+// Uploads a part by copying data from an existing object as the data source. You
+// specify the data source by adding the request header x-amz-copy-source in
+// your request and a byte range by adding the request header x-amz-copy-source-range
+// in your request.
+//
+// The minimum allowable part size for a multipart upload is 5 MB. For more
+// information about multipart upload limits, go to Quick Facts (https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html)
+// in the Amazon S3 User Guide.
+//
+// Instead of using an existing object as part data, you might use the UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) action
+// and provide data in your request.
+//
+// You must initiate a multipart upload before you can upload any part. In response
+// to your initiate request, Amazon S3 returns a unique identifier, the upload
+// ID, that you must include in your upload part request.
+//
+// For more information about using the UploadPartCopy operation, see the following:
+//
+// * For conceptual information about multipart uploads, see Uploading Objects
+// Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html)
+// in the Amazon S3 User Guide.
+//
+// * For information about permissions required to use the multipart upload
+// API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+// in the Amazon S3 User Guide.
+//
+// * For information about copying objects using a single atomic action vs.
+// the multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html)
+// in the Amazon S3 User Guide.
+//
+// * For information about using server-side encryption with customer-provided
+// encryption keys with the UploadPartCopy operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
+// and UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html).
+//
+// Note the following additional considerations about the request headers x-amz-copy-source-if-match,
+// x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and
+// x-amz-copy-source-if-modified-since:
+//
+// * Consideration 1 - If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+// headers are present in the request as follows: x-amz-copy-source-if-match
+// condition evaluates to true, and; x-amz-copy-source-if-unmodified-since
+// condition evaluates to false; Amazon S3 returns 200 OK and copies the
+// data.
+//
+// * Consideration 2 - If both of the x-amz-copy-source-if-none-match and
+// x-amz-copy-source-if-modified-since headers are present in the request
+// as follows: x-amz-copy-source-if-none-match condition evaluates to false,
+// and; x-amz-copy-source-if-modified-since condition evaluates to true;
+// Amazon S3 returns a 412 Precondition Failed response code.
+//
+// Versioning
+//
+// If your bucket has versioning enabled, you could have multiple versions of
+// the same object. By default, x-amz-copy-source identifies the current version
+// of the object to copy. If the current version is a delete marker and you
+// don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404
+// error, because the object does not exist. If you specify versionId in the
+// x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns
+// an HTTP 400 error, because you are not allowed to specify a delete marker
+// as a version for the x-amz-copy-source.
+//
+// You can optionally specify a specific version of the source object to copy
+// by adding the versionId subresource as shown in the following example:
+//
+// x-amz-copy-source: /bucket/object?versionId=version id
+//
+// Special Errors
+//
+// * Code: NoSuchUpload Cause: The specified multipart upload does not exist.
+// The upload ID might be invalid, or the multipart upload might have been
+// aborted or completed. HTTP Status Code: 404 Not Found
+//
+// * Code: InvalidRequest Cause: The specified copy source is not supported
+// as a byte-range copy source. HTTP Status Code: 400 Bad Request
+//
+// Related Resources
+//
+// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation UploadPartCopy for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
+func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) {
+ req, out := c.UploadPartCopyRequest(input)
+ return out, req.Send()
+}
+
+// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UploadPartCopy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) {
+ req, out := c.UploadPartCopyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
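+// Illustrative sketch (placeholder names; svc and create.UploadId as in the
+// multipart sketch above): copying the first 5 MB of an existing object in as
+// part 2 of an in-progress multipart upload.
+//
+//    part, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
+//        Bucket:          aws.String("my-bucket"),
+//        Key:             aws.String("big-object"),
+//        UploadId:        create.UploadId,
+//        PartNumber:      aws.Int64(2),
+//        CopySource:      aws.String("source-bucket/source-object"),
+//        CopySourceRange: aws.String("bytes=0-5242879"),
+//    })
+//
+// The part's ETag for the final CompleteMultipartUpload call is returned in
+// part.CopyPartResult.ETag rather than directly on the output.
+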
+// Specifies the days since the initiation of an incomplete multipart upload
+// that Amazon S3 will wait before permanently removing all parts of the upload.
+// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
+// in the Amazon S3 User Guide.
+type AbortIncompleteMultipartUpload struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the number of days after which Amazon S3 aborts an incomplete multipart
+ // upload.
+ DaysAfterInitiation *int64 `type:"integer"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AbortIncompleteMultipartUpload) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AbortIncompleteMultipartUpload) GoString() string {
+ return s.String()
+}
+
+// SetDaysAfterInitiation sets the DaysAfterInitiation field's value.
+func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortIncompleteMultipartUpload {
+ s.DaysAfterInitiation = &v
+ return s
+}
+
+type AbortMultipartUploadInput struct {
+ _ struct{} `locationName:"AbortMultipartUploadRequest" type:"structure"`
+
+ // The bucket name to which the upload was taking place.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this action using S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Key of the object for which the multipart upload was initiated.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Upload ID that identifies the multipart upload.
+ //
+ // UploadId is a required field
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AbortMultipartUploadInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AbortMultipartUploadInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AbortMultipartUploadInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *AbortMultipartUploadInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *AbortMultipartUploadInput) SetExpectedBucketOwner(v string) *AbortMultipartUploadInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *AbortMultipartUploadInput) SetRequestPayer(v string) *AbortMultipartUploadInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadInput {
+ s.UploadId = &v
+ return s
+}
+
+type AbortMultipartUploadOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AbortMultipartUploadOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AbortMultipartUploadOutput) GoString() string {
+ return s.String()
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipartUploadOutput {
+ s.RequestCharged = &v
+ return s
+}
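+
+// A minimal usage sketch for AbortMultipartUpload; all values below are
+// placeholders and svc is assumed to be an *s3.S3 client:
+//
+//	_, err := svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
+//		Bucket:   aws.String("my-bucket"),
+//		Key:      aws.String("my-key"),
+//		UploadId: aws.String("example-upload-id"),
+//	})
+//
+// After a successful abort, ListParts for the same upload ID is expected to
+// fail with NoSuchUpload once any in-flight part uploads have finished.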
+
+// Contains the elements that set the ACL permissions for an object per grantee.
+type AccessControlPolicy struct {
+ _ struct{} `type:"structure"`
+
+ // A list of grants.
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
+
+ // Container for the bucket owner's display name and ID.
+ Owner *Owner `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AccessControlPolicy) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AccessControlPolicy) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AccessControlPolicy) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"}
+ if s.Grants != nil {
+ for i, v := range s.Grants {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetGrants sets the Grants field's value.
+func (s *AccessControlPolicy) SetGrants(v []*Grant) *AccessControlPolicy {
+ s.Grants = v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy {
+ s.Owner = v
+ return s
+}
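+
+// A hedged sketch of assembling an AccessControlPolicy; the grantee and owner
+// IDs below are placeholders:
+//
+//	policy := &s3.AccessControlPolicy{
+//		Grants: []*s3.Grant{{
+//			Grantee: &s3.Grantee{
+//				Type: aws.String("CanonicalUser"),
+//				ID:   aws.String("example-canonical-user-id"),
+//			},
+//			Permission: aws.String("READ"),
+//		}},
+//		Owner: &s3.Owner{ID: aws.String("example-owner-id")},
+//	}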
+
+type AddLegalHoldInput struct {
+ _ struct{} `locationName:"AddLegalHoldRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // RetentionLegalHoldId is a required field
+ RetentionLegalHoldId *string `location:"querystring" locationName:"add" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AddLegalHoldInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AddLegalHoldInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AddLegalHoldInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AddLegalHoldInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.RetentionLegalHoldId == nil {
+ invalidParams.Add(request.NewErrParamRequired("RetentionLegalHoldId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *AddLegalHoldInput) SetBucket(v string) *AddLegalHoldInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *AddLegalHoldInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetKey sets the Key field's value.
+func (s *AddLegalHoldInput) SetKey(v string) *AddLegalHoldInput {
+ s.Key = &v
+ return s
+}
+
+// SetRetentionLegalHoldId sets the RetentionLegalHoldId field's value.
+func (s *AddLegalHoldInput) SetRetentionLegalHoldId(v string) *AddLegalHoldInput {
+ s.RetentionLegalHoldId = &v
+ return s
+}
+
+type AddLegalHoldOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AddLegalHoldOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AddLegalHoldOutput) GoString() string {
+ return s.String()
+}
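+
+// A hedged usage sketch for the AddLegalHold input defined above, assuming the
+// corresponding AddLegalHold client method is generated elsewhere in this
+// package; all values are placeholders:
+//
+//	_, err := svc.AddLegalHold(&s3.AddLegalHoldInput{
+//		Bucket:               aws.String("my-bucket"),
+//		Key:                  aws.String("my-key"),
+//		RetentionLegalHoldId: aws.String("example-hold-id"),
+//	})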
+
+// In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name
+// is globally unique, and the namespace is shared by all AWS accounts.
+type Bucket struct {
+ _ struct{} `type:"structure"`
+
+ // Date the bucket was created. This date can change when making changes to
+ // your bucket, such as editing its bucket policy.
+ CreationDate *time.Time `type:"timestamp"`
+
+ // The name of the bucket.
+ Name *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Bucket) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Bucket) GoString() string {
+ return s.String()
+}
+
+// SetCreationDate sets the CreationDate field's value.
+func (s *Bucket) SetCreationDate(v time.Time) *Bucket {
+ s.CreationDate = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *Bucket) SetName(v string) *Bucket {
+ s.Name = &v
+ return s
+}
+
+type BucketExtended struct {
+ _ struct{} `type:"structure"`
+
+ CreationDate *time.Time `type:"timestamp"`
+
+ CreationTemplateId *string `type:"string"`
+
+ // Specifies the region where the bucket was created.
+ LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
+
+ // The name of the bucket.
+ Name *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BucketExtended) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BucketExtended) GoString() string {
+ return s.String()
+}
+
+// SetCreationDate sets the CreationDate field's value.
+func (s *BucketExtended) SetCreationDate(v time.Time) *BucketExtended {
+ s.CreationDate = &v
+ return s
+}
+
+// SetCreationTemplateId sets the CreationTemplateId field's value.
+func (s *BucketExtended) SetCreationTemplateId(v string) *BucketExtended {
+ s.CreationTemplateId = &v
+ return s
+}
+
+// SetLocationConstraint sets the LocationConstraint field's value.
+func (s *BucketExtended) SetLocationConstraint(v string) *BucketExtended {
+ s.LocationConstraint = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *BucketExtended) SetName(v string) *BucketExtended {
+ s.Name = &v
+ return s
+}
+
+// Container for logging status information.
+type BucketLoggingStatus struct {
+ _ struct{} `type:"structure"`
+
+ // Describes where logs are stored and the prefix that Amazon S3 assigns to
+ // all log object keys for a bucket. For more information, see PUT Bucket logging
+ // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html)
+ // in the Amazon Simple Storage Service API Reference.
+ LoggingEnabled *LoggingEnabled `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BucketLoggingStatus) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BucketLoggingStatus) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BucketLoggingStatus) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"}
+ if s.LoggingEnabled != nil {
+ if err := s.LoggingEnabled.Validate(); err != nil {
+ invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetLoggingEnabled sets the LoggingEnabled field's value.
+func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggingStatus {
+ s.LoggingEnabled = v
+ return s
+}
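+
+// A minimal sketch of a BucketLoggingStatus that enables server access logging
+// to a placeholder target bucket and prefix:
+//
+//	status := &s3.BucketLoggingStatus{
+//		LoggingEnabled: &s3.LoggingEnabled{
+//			TargetBucket: aws.String("example-log-bucket"),
+//			TargetPrefix: aws.String("logs/"),
+//		},
+//	}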
+
+type BucketProtectionDefaultRetention struct {
+ _ struct{} `type:"structure"`
+
+ // Days is a required field
+ Days *int64 `type:"integer" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BucketProtectionDefaultRetention) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BucketProtectionDefaultRetention) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BucketProtectionDefaultRetention) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "BucketProtectionDefaultRetention"}
+ if s.Days == nil {
+ invalidParams.Add(request.NewErrParamRequired("Days"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDays sets the Days field's value.
+func (s *BucketProtectionDefaultRetention) SetDays(v int64) *BucketProtectionDefaultRetention {
+ s.Days = &v
+ return s
+}
+
+type BucketProtectionMaximumRetention struct {
+ _ struct{} `type:"structure"`
+
+ // Days is a required field
+ Days *int64 `type:"integer" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BucketProtectionMaximumRetention) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BucketProtectionMaximumRetention) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BucketProtectionMaximumRetention) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "BucketProtectionMaximumRetention"}
+ if s.Days == nil {
+ invalidParams.Add(request.NewErrParamRequired("Days"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDays sets the Days field's value.
+func (s *BucketProtectionMaximumRetention) SetDays(v int64) *BucketProtectionMaximumRetention {
+ s.Days = &v
+ return s
+}
+
+type BucketProtectionMinimumRetention struct {
+ _ struct{} `type:"structure"`
+
+ // Days is a required field
+ Days *int64 `type:"integer" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BucketProtectionMinimumRetention) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BucketProtectionMinimumRetention) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BucketProtectionMinimumRetention) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "BucketProtectionMinimumRetention"}
+ if s.Days == nil {
+ invalidParams.Add(request.NewErrParamRequired("Days"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDays sets the Days field's value.
+func (s *BucketProtectionMinimumRetention) SetDays(v int64) *BucketProtectionMinimumRetention {
+ s.Days = &v
+ return s
+}
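+
+// A hedged sketch combining the three retention building blocks defined above.
+// The enclosing protection-configuration type is assumed to be defined
+// elsewhere in this package, so only the blocks themselves are shown:
+//
+//	def := (&s3.BucketProtectionDefaultRetention{}).SetDays(30)
+//	min := (&s3.BucketProtectionMinimumRetention{}).SetDays(1)
+//	max := (&s3.BucketProtectionMaximumRetention{}).SetDays(365)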
+
+// Describes the cross-origin access configuration for objects in an Amazon
+// S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon
+// S3 User Guide.
+type CORSConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // A set of origins and methods (cross-origin access that you want to allow).
+ // You can add up to 100 rules to the configuration.
+ //
+ // CORSRules is a required field
+ CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CORSConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CORSConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CORSConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"}
+ if s.CORSRules == nil {
+ invalidParams.Add(request.NewErrParamRequired("CORSRules"))
+ }
+ if s.CORSRules != nil {
+ for i, v := range s.CORSRules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCORSRules sets the CORSRules field's value.
+func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration {
+ s.CORSRules = v
+ return s
+}
+
+// Specifies a cross-origin access rule for an Amazon S3 bucket.
+type CORSRule struct {
+ _ struct{} `type:"structure"`
+
+ // Headers that are specified in the Access-Control-Request-Headers header.
+ // These headers are allowed in a preflight OPTIONS request. In response to
+ // any preflight OPTIONS request, Amazon S3 returns any requested headers that
+ // are allowed.
+ AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"`
+
+ // An HTTP method that you allow the origin to execute. Valid values are GET,
+ // PUT, HEAD, POST, and DELETE.
+ //
+ // AllowedMethods is a required field
+ AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"`
+
+ // One or more origins you want customers to be able to access the bucket from.
+ //
+ // AllowedOrigins is a required field
+ AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"`
+
+ // One or more headers in the response that you want customers to be able to
+ // access from their applications (for example, from a JavaScript XMLHttpRequest
+ // object).
+ ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"`
+
+ // The time in seconds that your browser is to cache the preflight response
+ // for the specified resource.
+ MaxAgeSeconds *int64 `type:"integer"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CORSRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CORSRule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CORSRule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CORSRule"}
+ if s.AllowedMethods == nil {
+ invalidParams.Add(request.NewErrParamRequired("AllowedMethods"))
+ }
+ if s.AllowedOrigins == nil {
+ invalidParams.Add(request.NewErrParamRequired("AllowedOrigins"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAllowedHeaders sets the AllowedHeaders field's value.
+func (s *CORSRule) SetAllowedHeaders(v []*string) *CORSRule {
+ s.AllowedHeaders = v
+ return s
+}
+
+// SetAllowedMethods sets the AllowedMethods field's value.
+func (s *CORSRule) SetAllowedMethods(v []*string) *CORSRule {
+ s.AllowedMethods = v
+ return s
+}
+
+// SetAllowedOrigins sets the AllowedOrigins field's value.
+func (s *CORSRule) SetAllowedOrigins(v []*string) *CORSRule {
+ s.AllowedOrigins = v
+ return s
+}
+
+// SetExposeHeaders sets the ExposeHeaders field's value.
+func (s *CORSRule) SetExposeHeaders(v []*string) *CORSRule {
+ s.ExposeHeaders = v
+ return s
+}
+
+// SetMaxAgeSeconds sets the MaxAgeSeconds field's value.
+func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule {
+ s.MaxAgeSeconds = &v
+ return s
+}
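+
+// A minimal sketch of a CORSConfiguration with a single rule; the origin and
+// limits below are placeholders:
+//
+//	cors := &s3.CORSConfiguration{
+//		CORSRules: []*s3.CORSRule{{
+//			AllowedMethods: aws.StringSlice([]string{"GET", "PUT"}),
+//			AllowedOrigins: aws.StringSlice([]string{"https://example.com"}),
+//			AllowedHeaders: aws.StringSlice([]string{"*"}),
+//			MaxAgeSeconds:  aws.Int64(3000),
+//		}},
+//	}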
+
+// Container for all (if there are any) keys between Prefix and the next occurrence
+// of the string specified by a delimiter. CommonPrefixes lists keys that act
+// like subdirectories in the directory specified by Prefix. For example, if
+// the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july,
+// the common prefix is notes/summer/.
+type CommonPrefix struct {
+ _ struct{} `type:"structure"`
+
+ // Container for the specified common prefix.
+ Prefix *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CommonPrefix) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CommonPrefix) GoString() string {
+ return s.String()
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix {
+ s.Prefix = &v
+ return s
+}
+
+type CompleteMultipartUploadInput struct {
+ _ struct{} `locationName:"CompleteMultipartUploadRequest" type:"structure" payload:"MultipartUpload"`
+
+ // Name of the bucket to which the multipart upload was initiated.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Object key for which the multipart upload was initiated.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // The container for the multipart upload request information.
+ MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Date on which it will be legal to delete or modify the object. This field
+ // can only be specified if Retention-Directive is REPLACE. You can only specify
+	// this or the Retention-Period header. If both are specified, a 400 error will
+	// be returned. If neither is specified, the bucket's DefaultRetention period
+ // will be used.
+ RetentionExpirationDate *time.Time `location:"header" locationName:"Retention-Expiration-Date" type:"timestamp"`
+
+ // A single legal hold to apply to the object. This field can only be specified
+	// if Retention-Directive is REPLACE. A legal hold is a character string with
+	// a maximum length of 64. The object cannot be overwritten or deleted until
+	// all legal holds associated with the object are removed.
+ RetentionLegalHoldId *string `location:"header" locationName:"Retention-Legal-Hold-ID" type:"string"`
+
+ // Retention period to store on the object in seconds. If this field and Retention-Expiration-Date
+	// are specified, a 400 error is returned. If neither is specified, the bucket's
+ // DefaultRetention period will be used. 0 is a legal value assuming the bucket's
+ // minimum retention period is also 0.
+ RetentionPeriod *int64 `location:"header" locationName:"Retention-Period" type:"integer"`
+
+ // ID for the initiated multipart upload.
+ //
+ // UploadId is a required field
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CompleteMultipartUploadInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CompleteMultipartUploadInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CompleteMultipartUploadInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUploadInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *CompleteMultipartUploadInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *CompleteMultipartUploadInput) SetExpectedBucketOwner(v string) *CompleteMultipartUploadInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput {
+ s.Key = &v
+ return s
+}
+
+// SetMultipartUpload sets the MultipartUpload field's value.
+func (s *CompleteMultipartUploadInput) SetMultipartUpload(v *CompletedMultipartUpload) *CompleteMultipartUploadInput {
+ s.MultipartUpload = v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *CompleteMultipartUploadInput) SetRequestPayer(v string) *CompleteMultipartUploadInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetRetentionExpirationDate sets the RetentionExpirationDate field's value.
+func (s *CompleteMultipartUploadInput) SetRetentionExpirationDate(v time.Time) *CompleteMultipartUploadInput {
+ s.RetentionExpirationDate = &v
+ return s
+}
+
+// SetRetentionLegalHoldId sets the RetentionLegalHoldId field's value.
+func (s *CompleteMultipartUploadInput) SetRetentionLegalHoldId(v string) *CompleteMultipartUploadInput {
+ s.RetentionLegalHoldId = &v
+ return s
+}
+
+// SetRetentionPeriod sets the RetentionPeriod field's value.
+func (s *CompleteMultipartUploadInput) SetRetentionPeriod(v int64) *CompleteMultipartUploadInput {
+ s.RetentionPeriod = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartUploadInput {
+ s.UploadId = &v
+ return s
+}
+
+type CompleteMultipartUploadOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket that contains the newly created object.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this action using S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ Bucket *string `type:"string"`
+
+ // Entity tag that identifies the newly created object's data. Objects with
+ // different object data will have different entity tags. The entity tag is
+ // an opaque string. The entity tag may or may not be an MD5 digest of the object
+ // data. If the entity tag is not an MD5 digest of the object data, it will
+	// contain one or more nonhexadecimal characters and/or will consist of fewer
+	// than 32 or more than 32 hexadecimal digits.
+ ETag *string `type:"string"`
+
+ // If the object expiration is configured, this will contain the expiration
+ // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // The object key of the newly created object.
+ Key *string `min:"1" type:"string"`
+
+ // The URI that identifies the newly created object.
+ Location *string `type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for the
+ // object.
+ //
+ // SSEKMSKeyId is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CompleteMultipartUploadOutput's
+ // String and GoString methods.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // If you specified server-side encryption either with an Amazon S3-managed
+ // encryption key or an AWS KMS customer master key (CMK) in your initiate multipart
+ // upload request, the response includes this header. It confirms the encryption
+ // algorithm that Amazon S3 used to encrypt the object.
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // Version ID of the newly created object, in case the bucket has versioning
+ // turned on.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CompleteMultipartUploadOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CompleteMultipartUploadOutput) GoString() string {
+ return s.String()
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUploadOutput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *CompleteMultipartUploadOutput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetETag sets the ETag field's value.
+func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput {
+ s.ETag = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *CompleteMultipartUploadOutput) SetExpiration(v string) *CompleteMultipartUploadOutput {
+ s.Expiration = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *CompleteMultipartUploadOutput) SetKey(v string) *CompleteMultipartUploadOutput {
+ s.Key = &v
+ return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *CompleteMultipartUploadOutput) SetLocation(v string) *CompleteMultipartUploadOutput {
+ s.Location = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *CompleteMultipartUploadOutput) SetRequestCharged(v string) *CompleteMultipartUploadOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CompleteMultipartUploadOutput) SetSSEKMSKeyId(v string) *CompleteMultipartUploadOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CompleteMultipartUploadOutput) SetServerSideEncryption(v string) *CompleteMultipartUploadOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipartUploadOutput {
+ s.VersionId = &v
+ return s
+}
+
+// The container for the completed multipart upload details.
+type CompletedMultipartUpload struct {
+ _ struct{} `type:"structure"`
+
+ // Array of CompletedPart data types.
+ Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CompletedMultipartUpload) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CompletedMultipartUpload) GoString() string {
+ return s.String()
+}
+
+// SetParts sets the Parts field's value.
+func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultipartUpload {
+ s.Parts = v
+ return s
+}
+
+// Details of the parts that were uploaded.
+type CompletedPart struct {
+ _ struct{} `type:"structure"`
+
+ // Entity tag returned when the part was uploaded.
+ ETag *string `type:"string"`
+
+ // Part number that identifies the part. This is a positive integer between
+ // 1 and 10,000.
+ PartNumber *int64 `type:"integer"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CompletedPart) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CompletedPart) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *CompletedPart) SetETag(v string) *CompletedPart {
+ s.ETag = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart {
+ s.PartNumber = &v
+ return s
+}
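+
+// A minimal usage sketch tying CompletedPart and CompletedMultipartUpload to
+// CompleteMultipartUpload; the ETags below are placeholders for the values
+// returned by the corresponding UploadPart calls:
+//
+//	_, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+//		Bucket:   aws.String("my-bucket"),
+//		Key:      aws.String("my-key"),
+//		UploadId: aws.String("example-upload-id"),
+//		MultipartUpload: &s3.CompletedMultipartUpload{
+//			Parts: []*s3.CompletedPart{
+//				{ETag: aws.String(`"etag-part-1"`), PartNumber: aws.Int64(1)},
+//				{ETag: aws.String(`"etag-part-2"`), PartNumber: aws.Int64(2)},
+//			},
+//		},
+//	})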
+
+// A container for describing a condition that must be met for the specified
+// redirect to apply. For example: 1. If a request is for pages in the /docs folder,
+// redirect to the /documents folder. 2. If a request results in HTTP error 4xx,
+// redirect the request to another host where you might process the error.
+type Condition struct {
+ _ struct{} `type:"structure"`
+
+ // The HTTP error code when the redirect is applied. In the event of an error,
+ // if the error code equals this value, then the specified redirect is applied.
+ // Required when parent element Condition is specified and sibling KeyPrefixEquals
+ // is not specified. If both are specified, then both must be true for the redirect
+ // to be applied.
+ HttpErrorCodeReturnedEquals *string `type:"string"`
+
+ // The object key name prefix when the redirect is applied. For example, to
+ // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html.
+ // To redirect request for all pages with the prefix docs/, the key prefix will
+	// To redirect requests for all pages with the prefix docs/, the key prefix will
+ // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals
+ // is not specified. If both conditions are specified, both must be true for
+ // the redirect to be applied.
+ //
+ // Replacement must be made for object keys containing special characters (such
+ // as carriage returns) when using XML requests. For more information, see XML
+ // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ KeyPrefixEquals *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Condition) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Condition) GoString() string {
+ return s.String()
+}
+
+// SetHttpErrorCodeReturnedEquals sets the HttpErrorCodeReturnedEquals field's value.
+func (s *Condition) SetHttpErrorCodeReturnedEquals(v string) *Condition {
+ s.HttpErrorCodeReturnedEquals = &v
+ return s
+}
+
+// SetKeyPrefixEquals sets the KeyPrefixEquals field's value.
+func (s *Condition) SetKeyPrefixEquals(v string) *Condition {
+ s.KeyPrefixEquals = &v
+ return s
+}
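+
+// A minimal sketch of a Condition that matches every key under the docs/
+// prefix; the Condition is assumed to be embedded in a website RoutingRule
+// defined elsewhere:
+//
+//	cond := &s3.Condition{
+//		KeyPrefixEquals: aws.String("docs/"),
+//	}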
+
+type CopyObjectInput struct {
+ _ struct{} `locationName:"CopyObjectRequest" type:"structure"`
+
+ // The canned ACL to apply to the object.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+ // The name of the destination bucket.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this action using S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // Specifies the source object for the copy operation. You specify the value
+ // in one of two formats, depending on whether you want to access the source
+ // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html):
+ //
+ // * For objects not accessed through an access point, specify the name of
+ // the source bucket and the key of the source object, separated by a slash
+ // (/). For example, to copy the object reports/january.pdf from the bucket
+ // awsexamplebucket, use awsexamplebucket/reports/january.pdf. The value
+ // must be URL encoded.
+ //
+ // * For objects accessed through access points, specify the Amazon Resource
+ // Name (ARN) of the object as accessed through the access point, in the
+ // format arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>.
+ // For example, to copy the object reports/january.pdf through access point
+ // my-access-point owned by account 123456789012 in Region us-west-2, use
+ // the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf.
+ // The value must be URL encoded. Amazon S3 supports copy operations using
+ // access points only when the source and destination buckets are in the
+ // same AWS Region. Alternatively, for objects accessed through Amazon S3
+ // on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>.
+ // For example, to copy the object reports/january.pdf through outpost my-outpost
+ // owned by account 123456789012 in Region us-west-2, use the URL encoding
+ // of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf.
+ // The value must be URL encoded.
+ //
+ // To copy a specific version of an object, append ?versionId=<version-id> to
+ // the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).
+ // If you don't specify a version ID, Amazon S3 copies the latest version of
+ // the source object.
+ //
+ // CopySource is a required field
+ CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
+
+ // Copies the object if its entity tag (ETag) matches the specified tag.
+ CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
+
+ // Copies the object if it has been modified since the specified time.
+ CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"`
+
+ // Copies the object if its entity tag (ETag) is different than the specified
+ // ETag.
+ CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
+
+ // Copies the object if it hasn't been modified since the specified time.
+ CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"`
+
+ // Specifies the algorithm to use when decrypting the source object (for example,
+ // AES256).
+ CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+ // the source object. The encryption key provided in this header must be one
+ // that was used when the source object was created.
+ //
+ // CopySourceSSECustomerKey is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CopyObjectInput's
+ // String and GoString methods.
+ CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // The key of the destination object.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Specifies whether the metadata is copied from the source object or replaced
+ // with metadata provided in the request.
+ MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// This header controls how the Protection state of the source object is copied
+	// to the destination object. If copied, the retention period and all legal holds
+	// are copied onto the new object. The legal hold dates are set to the date
+	// of the copy.
+ RetentionDirective *string `location:"header" locationName:"Retention-Directive" type:"string" enum:"RetentionDirective"`
+
+ // Date on which it will be legal to delete or modify the object. This field
+ // can only be specified if Retention-Directive is REPLACE. You can only specify
+	// this or the Retention-Period header. If both are specified, a 400 error will
+	// be returned. If neither is specified, the bucket's DefaultRetention period
+ // will be used.
+ RetentionExpirationDate *time.Time `location:"header" locationName:"Retention-Expiration-Date" type:"timestamp"`
+
+ // A single legal hold to apply to the object. This field can only be specified
+	// if Retention-Directive is REPLACE. A legal hold is a character string with
+	// a maximum length of 64. The object cannot be overwritten or deleted until
+	// all legal holds associated with the object are removed.
+ RetentionLegalHoldId *string `location:"header" locationName:"Retention-Legal-Hold-ID" type:"string"`
+
+ // Retention period to store on the object in seconds. The object can be neither
+ // overwritten nor deleted until the amount of time specified in the retention
+	// period has elapsed. If this field and Retention-Expiration-Date are specified,
+	// a 400 error is returned. If neither is specified, the bucket's DefaultRetention
+ // period will be used. 0 is a legal value assuming the bucket's minimum retention
+ // period is also 0.
+ RetentionPeriod *int64 `location:"header" locationName:"Retention-Period" type:"integer"`
+
+	// Specifies the algorithm to use when encrypting the object (for example,
+	// AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // S3 does not store the encryption key. The key must be appropriate for use
+ // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ //
+ // SSECustomerKey is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CopyObjectInput's
+ // String and GoString methods.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. For information about configuring using any of the officially
+ // supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request
+ // Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
+ // in the Amazon S3 Developer Guide.
+ //
+ // SSEKMSKeyId is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CopyObjectInput's
+ // String and GoString methods.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // By default, Amazon S3 uses the STANDARD Storage Class to store newly created
+ // objects. The STANDARD storage class provides high durability and high availability.
+ // Depending on performance needs, you can specify a different Storage Class.
+ // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
+ // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+ // in the Amazon S3 Service Developer Guide.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // The tag-set for the destination object; this value must be used in
+ // conjunction with the TaggingDirective. The tag-set must be encoded as URL
+ // Query parameters.
+ Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+ // Specifies whether the object tag-set are copied from the source object or
+ // replaced with tag-set provided in the request.
+ TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CopyObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CopyObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CopyObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.CopySource == nil {
+ invalidParams.Add(request.NewErrParamRequired("CopySource"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *CopyObjectInput) SetACL(v string) *CopyObjectInput {
+ s.ACL = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *CopyObjectInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput {
+ s.CacheControl = &v
+ return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *CopyObjectInput) SetContentDisposition(v string) *CopyObjectInput {
+ s.ContentDisposition = &v
+ return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *CopyObjectInput) SetContentEncoding(v string) *CopyObjectInput {
+ s.ContentEncoding = &v
+ return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *CopyObjectInput) SetContentLanguage(v string) *CopyObjectInput {
+ s.ContentLanguage = &v
+ return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *CopyObjectInput) SetContentType(v string) *CopyObjectInput {
+ s.ContentType = &v
+ return s
+}
+
+// SetCopySource sets the CopySource field's value.
+func (s *CopyObjectInput) SetCopySource(v string) *CopyObjectInput {
+ s.CopySource = &v
+ return s
+}
+
+// SetCopySourceIfMatch sets the CopySourceIfMatch field's value.
+func (s *CopyObjectInput) SetCopySourceIfMatch(v string) *CopyObjectInput {
+ s.CopySourceIfMatch = &v
+ return s
+}
+
+// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value.
+func (s *CopyObjectInput) SetCopySourceIfModifiedSince(v time.Time) *CopyObjectInput {
+ s.CopySourceIfModifiedSince = &v
+ return s
+}
+
+// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value.
+func (s *CopyObjectInput) SetCopySourceIfNoneMatch(v string) *CopyObjectInput {
+ s.CopySourceIfNoneMatch = &v
+ return s
+}
+
+// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value.
+func (s *CopyObjectInput) SetCopySourceIfUnmodifiedSince(v time.Time) *CopyObjectInput {
+ s.CopySourceIfUnmodifiedSince = &v
+ return s
+}
+
+// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value.
+func (s *CopyObjectInput) SetCopySourceSSECustomerAlgorithm(v string) *CopyObjectInput {
+ s.CopySourceSSECustomerAlgorithm = &v
+ return s
+}
+
+// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value.
+func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput {
+ s.CopySourceSSECustomerKey = &v
+ return s
+}
+
+func (s *CopyObjectInput) getCopySourceSSECustomerKey() (v string) {
+ if s.CopySourceSSECustomerKey == nil {
+ return v
+ }
+ return *s.CopySourceSSECustomerKey
+}
+
+// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value.
+func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput {
+ s.CopySourceSSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *CopyObjectInput) SetExpires(v time.Time) *CopyObjectInput {
+ s.Expires = &v
+ return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *CopyObjectInput) SetGrantFullControl(v string) *CopyObjectInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *CopyObjectInput) SetGrantRead(v string) *CopyObjectInput {
+ s.GrantRead = &v
+ return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *CopyObjectInput) SetGrantReadACP(v string) *CopyObjectInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *CopyObjectInput) SetGrantWriteACP(v string) *CopyObjectInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *CopyObjectInput) SetKey(v string) *CopyObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *CopyObjectInput) SetMetadata(v map[string]*string) *CopyObjectInput {
+ s.Metadata = v
+ return s
+}
+
+// SetMetadataDirective sets the MetadataDirective field's value.
+func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput {
+ s.MetadataDirective = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetRetentionDirective sets the RetentionDirective field's value.
+func (s *CopyObjectInput) SetRetentionDirective(v string) *CopyObjectInput {
+ s.RetentionDirective = &v
+ return s
+}
+
+// SetRetentionExpirationDate sets the RetentionExpirationDate field's value.
+func (s *CopyObjectInput) SetRetentionExpirationDate(v time.Time) *CopyObjectInput {
+ s.RetentionExpirationDate = &v
+ return s
+}
+
+// SetRetentionLegalHoldId sets the RetentionLegalHoldId field's value.
+func (s *CopyObjectInput) SetRetentionLegalHoldId(v string) *CopyObjectInput {
+ s.RetentionLegalHoldId = &v
+ return s
+}
+
+// SetRetentionPeriod sets the RetentionPeriod field's value.
+func (s *CopyObjectInput) SetRetentionPeriod(v int64) *CopyObjectInput {
+ s.RetentionPeriod = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *CopyObjectInput) SetSSECustomerAlgorithm(v string) *CopyObjectInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+func (s *CopyObjectInput) getSSECustomerKey() (v string) {
+ if s.SSECustomerKey == nil {
+ return v
+ }
+ return *s.SSECustomerKey
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CopyObjectInput) SetServerSideEncryption(v string) *CopyObjectInput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *CopyObjectInput) SetStorageClass(v string) *CopyObjectInput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *CopyObjectInput) SetTagging(v string) *CopyObjectInput {
+ s.Tagging = &v
+ return s
+}
+
+// SetTaggingDirective sets the TaggingDirective field's value.
+func (s *CopyObjectInput) SetTaggingDirective(v string) *CopyObjectInput {
+ s.TaggingDirective = &v
+ return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput {
+ s.WebsiteRedirectLocation = &v
+ return s
+}
+
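+// Illustrative usage sketch (not part of the generated API): how a
+// CopyObjectInput is typically built and sent. Here svc stands in for an
+// assumed S3 service client, and the bucket/key names are placeholders;
+// CopySource takes the "source-bucket/source-key" form.
+//
+//	input := &CopyObjectInput{
+//		Bucket:     aws.String("destination-bucket"),
+//		CopySource: aws.String("source-bucket/source-key"),
+//		Key:        aws.String("destination-key"),
+//	}
+//	out, err := svc.CopyObject(input)
+//	if err != nil {
+//		// handle the request error
+//	}
+//	// out.CopyObjectResult carries the new object's ETag and LastModified.
+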
+type CopyObjectOutput struct {
+ _ struct{} `type:"structure" payload:"CopyObjectResult"`
+
+ // Container for all response elements.
+ CopyObjectResult *CopyObjectResult `type:"structure"`
+
+ // Version of the copied object in the destination bucket.
+ CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"`
+
+ // If the object expiration is configured, the response includes this header.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round-trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for the
+ // object.
+ //
+ // SSEKMSKeyId is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CopyObjectOutput's
+ // String and GoString methods.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // Version ID of the newly created copy.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CopyObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CopyObjectOutput) GoString() string {
+ return s.String()
+}
+
+// SetCopyObjectResult sets the CopyObjectResult field's value.
+func (s *CopyObjectOutput) SetCopyObjectResult(v *CopyObjectResult) *CopyObjectOutput {
+ s.CopyObjectResult = v
+ return s
+}
+
+// SetCopySourceVersionId sets the CopySourceVersionId field's value.
+func (s *CopyObjectOutput) SetCopySourceVersionId(v string) *CopyObjectOutput {
+ s.CopySourceVersionId = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *CopyObjectOutput) SetExpiration(v string) *CopyObjectOutput {
+ s.Expiration = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *CopyObjectOutput) SetRequestCharged(v string) *CopyObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *CopyObjectOutput) SetSSECustomerAlgorithm(v string) *CopyObjectOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CopyObjectOutput) SetServerSideEncryption(v string) *CopyObjectOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput {
+ s.VersionId = &v
+ return s
+}
+
+// Container for all response elements.
+type CopyObjectResult struct {
+ _ struct{} `type:"structure"`
+
+ // Returns the ETag of the new object. The ETag reflects only changes to the
+ // contents of an object, not its metadata. The source and destination ETags
+ // are identical for a successfully copied non-multipart object.
+ ETag *string `type:"string"`
+
+ // Creation date of the object.
+ LastModified *time.Time `type:"timestamp"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CopyObjectResult) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CopyObjectResult) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *CopyObjectResult) SetETag(v string) *CopyObjectResult {
+ s.ETag = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult {
+ s.LastModified = &v
+ return s
+}
+
+// Container for all response elements.
+type CopyPartResult struct {
+ _ struct{} `type:"structure"`
+
+ // Entity tag of the object.
+ ETag *string `type:"string"`
+
+ // Date and time at which the object was uploaded.
+ LastModified *time.Time `type:"timestamp"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CopyPartResult) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CopyPartResult) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *CopyPartResult) SetETag(v string) *CopyPartResult {
+ s.ETag = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult {
+ s.LastModified = &v
+ return s
+}
+
+// The configuration information for the bucket.
+type CreateBucketConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the Region where the bucket will be created. If you don't specify
+ // a Region, the bucket is created in the US East (N. Virginia) Region (us-east-1).
+ LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateBucketConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateBucketConfiguration) GoString() string {
+ return s.String()
+}
+
+// SetLocationConstraint sets the LocationConstraint field's value.
+func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration {
+ s.LocationConstraint = &v
+ return s
+}
+
+type CreateBucketInput struct {
+ _ struct{} `locationName:"CreateBucketRequest" type:"structure" payload:"CreateBucketConfiguration"`
+
+ // The canned ACL to apply to the bucket.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
+
+ // The name of the bucket to create.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The configuration information for the bucket.
+ CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // Allows grantee the read, write, read ACP, and write ACP permissions on the
+ // bucket.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to list the objects in the bucket.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the bucket ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to create, overwrite, and delete any object in the bucket.
+ GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable bucket.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // The root key used by Key Protect to encrypt this bucket. This value must
+ // be the full CRN of the root key.
+ IBMSSEKPCustomerRootKeyCrn *string `location:"header" locationName:"ibm-sse-kp-customer-root-key-crn" type:"string"`
+
+ // The algorithm and key size to use with the encryption key stored by using
+ // Key Protect. This value must be set to the string "AES256".
+ IBMSSEKPEncryptionAlgorithm *string `location:"header" locationName:"ibm-sse-kp-encryption-algorithm" type:"string"`
+
+ // Sets the IBM Service Instance Id in the request.
+ //
+ // Only Valid for IBM IAM Authentication
+ IBMServiceInstanceId *string `location:"header" locationName:"ibm-service-instance-id" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateBucketInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateBucketInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateBucketInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *CreateBucketInput) SetACL(v string) *CreateBucketInput {
+ s.ACL = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *CreateBucketInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value.
+func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput {
+ s.CreateBucketConfiguration = v
+ return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *CreateBucketInput) SetGrantFullControl(v string) *CreateBucketInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *CreateBucketInput) SetGrantRead(v string) *CreateBucketInput {
+ s.GrantRead = &v
+ return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *CreateBucketInput) SetGrantReadACP(v string) *CreateBucketInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWrite sets the GrantWrite field's value.
+func (s *CreateBucketInput) SetGrantWrite(v string) *CreateBucketInput {
+ s.GrantWrite = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
+// SetIBMSSEKPCustomerRootKeyCrn sets the IBMSSEKPCustomerRootKeyCrn field's value.
+func (s *CreateBucketInput) SetIBMSSEKPCustomerRootKeyCrn(v string) *CreateBucketInput {
+ s.IBMSSEKPCustomerRootKeyCrn = &v
+ return s
+}
+
+// SetIBMSSEKPEncryptionAlgorithm sets the IBMSSEKPEncryptionAlgorithm field's value.
+func (s *CreateBucketInput) SetIBMSSEKPEncryptionAlgorithm(v string) *CreateBucketInput {
+ s.IBMSSEKPEncryptionAlgorithm = &v
+ return s
+}
+
+// SetIBMServiceInstanceId sets the IBMServiceInstanceId field's value.
+func (s *CreateBucketInput) SetIBMServiceInstanceId(v string) *CreateBucketInput {
+ s.IBMServiceInstanceId = &v
+ return s
+}
+
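+// Illustrative usage sketch (not part of the generated API): creating a
+// bucket with an optional location constraint and IBM service instance ID.
+// svc is an assumed service client; the bucket name, constraint value, and
+// CRN below are placeholders.
+//
+//	_, err := svc.CreateBucket(&CreateBucketInput{
+//		Bucket: aws.String("example-bucket"),
+//		CreateBucketConfiguration: &CreateBucketConfiguration{
+//			LocationConstraint: aws.String("example-region-constraint"),
+//		},
+//		IBMServiceInstanceId: aws.String("crn:v1:example"),
+//	})
+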
+type CreateBucketOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the Region where the bucket will be created. If you are creating
+ // a bucket on the US East (N. Virginia) Region (us-east-1), you do not need
+ // to specify the location.
+ Location *string `location:"header" locationName:"Location" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateBucketOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateBucketOutput) GoString() string {
+ return s.String()
+}
+
+// SetLocation sets the Location field's value.
+func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput {
+ s.Location = &v
+ return s
+}
+
+type CreateMultipartUploadInput struct {
+ _ struct{} `locationName:"CreateMultipartUploadRequest" type:"structure"`
+
+ // The canned ACL to apply to the object.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+ // The name of the bucket to which to initiate the upload.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this action using S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Object key for which the multipart upload is to be initiated.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // S3 does not store the encryption key. The key must be appropriate for use
+ // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ //
+ // SSECustomerKey is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateMultipartUploadInput's
+ // String and GoString methods.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the ID of the symmetric customer managed AWS KMS CMK to use for
+ // object encryption. All GET and PUT requests for an object protected by AWS
+ // KMS will fail if not made via SSL or using SigV4. For information about configuring
+ // using any of the officially supported AWS SDKs and AWS CLI, see Specifying
+ // the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
+ // in the Amazon S3 Developer Guide.
+ //
+ // SSEKMSKeyId is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateMultipartUploadInput's
+ // String and GoString methods.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // By default, Amazon S3 uses the STANDARD Storage Class to store newly created
+ // objects. The STANDARD storage class provides high durability and high availability.
+ // Depending on performance needs, you can specify a different Storage Class.
+ // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
+ // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+ // in the Amazon S3 Service Developer Guide.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // The tag-set for the object. The tag-set must be encoded as URL Query parameters.
+ Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateMultipartUploadInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateMultipartUploadInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateMultipartUploadInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *CreateMultipartUploadInput) SetACL(v string) *CreateMultipartUploadInput {
+ s.ACL = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *CreateMultipartUploadInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput {
+ s.CacheControl = &v
+ return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *CreateMultipartUploadInput) SetContentDisposition(v string) *CreateMultipartUploadInput {
+ s.ContentDisposition = &v
+ return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *CreateMultipartUploadInput) SetContentEncoding(v string) *CreateMultipartUploadInput {
+ s.ContentEncoding = &v
+ return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *CreateMultipartUploadInput) SetContentLanguage(v string) *CreateMultipartUploadInput {
+ s.ContentLanguage = &v
+ return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *CreateMultipartUploadInput) SetContentType(v string) *CreateMultipartUploadInput {
+ s.ContentType = &v
+ return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *CreateMultipartUploadInput) SetExpires(v time.Time) *CreateMultipartUploadInput {
+ s.Expires = &v
+ return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *CreateMultipartUploadInput) SetGrantFullControl(v string) *CreateMultipartUploadInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *CreateMultipartUploadInput) SetGrantRead(v string) *CreateMultipartUploadInput {
+ s.GrantRead = &v
+ return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *CreateMultipartUploadInput) SetGrantReadACP(v string) *CreateMultipartUploadInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *CreateMultipartUploadInput) SetGrantWriteACP(v string) *CreateMultipartUploadInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *CreateMultipartUploadInput) SetKey(v string) *CreateMultipartUploadInput {
+ s.Key = &v
+ return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMultipartUploadInput {
+ s.Metadata = v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *CreateMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipartUploadInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+func (s *CreateMultipartUploadInput) getSSECustomerKey() (v string) {
+ if s.SSECustomerKey == nil {
+ return v
+ }
+ return *s.SSECustomerKey
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CreateMultipartUploadInput) SetServerSideEncryption(v string) *CreateMultipartUploadInput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartUploadInput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *CreateMultipartUploadInput) SetTagging(v string) *CreateMultipartUploadInput {
+ s.Tagging = &v
+ return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput {
+ s.WebsiteRedirectLocation = &v
+ return s
+}
+
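+// Illustrative usage sketch (not part of the generated API): initiating a
+// multipart upload. svc is an assumed service client and the names are
+// placeholders; the returned UploadId must accompany every subsequent part
+// upload and the completion call.
+//
+//	out, err := svc.CreateMultipartUpload(&CreateMultipartUploadInput{
+//		Bucket: aws.String("example-bucket"),
+//		Key:    aws.String("large-object"),
+//	})
+//	if err != nil {
+//		// handle the request error
+//	}
+//	uploadID := out.UploadId // used by UploadPart and CompleteMultipartUpload
+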
+type CreateMultipartUploadOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If the bucket has a lifecycle rule configured with an action to abort incomplete
+ // multipart uploads and the prefix in the lifecycle rule matches the object
+ // name in the request, the response includes this header. The header indicates
+ // when the initiated multipart upload becomes eligible for an abort operation.
+ // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+ // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config).
+ //
+ // The response also includes the x-amz-abort-rule-id header that provides the
+ // ID of the lifecycle configuration rule that defines this action.
+ AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"`
+
+ // This header is returned along with the x-amz-abort-date header. It identifies
+ // the applicable lifecycle configuration rule that defines the action to abort
+ // incomplete multipart uploads.
+ AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
+
+ // The name of the bucket to which the multipart upload was initiated.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this action using S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ Bucket *string `locationName:"Bucket" type:"string"`
+
+ // Object key for which the multipart upload was initiated.
+ Key *string `min:"1" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round-trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for the
+ // object.
+ //
+ // SSEKMSKeyId is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateMultipartUploadOutput's
+ // String and GoString methods.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // ID for the initiated multipart upload.
+ UploadId *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateMultipartUploadOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateMultipartUploadOutput) GoString() string {
+ return s.String()
+}
+
+// SetAbortDate sets the AbortDate field's value.
+func (s *CreateMultipartUploadOutput) SetAbortDate(v time.Time) *CreateMultipartUploadOutput {
+ s.AbortDate = &v
+ return s
+}
+
+// SetAbortRuleId sets the AbortRuleId field's value.
+func (s *CreateMultipartUploadOutput) SetAbortRuleId(v string) *CreateMultipartUploadOutput {
+ s.AbortRuleId = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUploadOutput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *CreateMultipartUploadOutput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetKey sets the Key field's value.
+func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *CreateMultipartUploadOutput) SetRequestCharged(v string) *CreateMultipartUploadOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *CreateMultipartUploadOutput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CreateMultipartUploadOutput) SetServerSideEncryption(v string) *CreateMultipartUploadOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUploadOutput {
+ s.UploadId = &v
+ return s
+}
+
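+// A minimal sketch of the calls that follow a successful initiation,
+// assuming the same svc client and the uploadID captured above; the part
+// and completion types are defined elsewhere in this SDK, and data is a
+// placeholder byte slice:
+//
+//	part, err := svc.UploadPart(&UploadPartInput{
+//		Bucket:     aws.String("example-bucket"),
+//		Key:        aws.String("large-object"),
+//		UploadId:   uploadID,
+//		PartNumber: aws.Int64(1),
+//		Body:       bytes.NewReader(data),
+//	})
+//	// ... upload any remaining parts, collecting each returned ETag ...
+//	_, err = svc.CompleteMultipartUpload(&CompleteMultipartUploadInput{
+//		Bucket:   aws.String("example-bucket"),
+//		Key:      aws.String("large-object"),
+//		UploadId: uploadID,
+//		MultipartUpload: &CompletedMultipartUpload{
+//			Parts: []*CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
+//		},
+//	})
+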
+// Container for the objects to delete.
+type Delete struct {
+ _ struct{} `type:"structure"`
+
+ // The objects to delete.
+ //
+ // Objects is a required field
+ Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"`
+
+ // Element to enable quiet mode for the request. When you add this element,
+ // you must set its value to true.
+ Quiet *bool `type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Delete) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Delete) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Delete) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Delete"}
+ if s.Objects == nil {
+ invalidParams.Add(request.NewErrParamRequired("Objects"))
+ }
+ if s.Objects != nil {
+ for i, v := range s.Objects {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetObjects sets the Objects field's value.
+func (s *Delete) SetObjects(v []*ObjectIdentifier) *Delete {
+ s.Objects = v
+ return s
+}
+
+// SetQuiet sets the Quiet field's value.
+func (s *Delete) SetQuiet(v bool) *Delete {
+ s.Quiet = &v
+ return s
+}
+
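+// Illustrative usage sketch (not part of the generated API): Delete is the
+// payload of a DeleteObjects request. svc is an assumed service client and
+// the names are placeholders; Quiet suppresses the per-object success
+// entries in the response.
+//
+//	_, err := svc.DeleteObjects(&DeleteObjectsInput{
+//		Bucket: aws.String("example-bucket"),
+//		Delete: &Delete{
+//			Objects: []*ObjectIdentifier{
+//				{Key: aws.String("stale-object-1")},
+//				{Key: aws.String("stale-object-2")},
+//			},
+//			Quiet: aws.Bool(true),
+//		},
+//	})
+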
+type DeleteBucketCorsInput struct {
+ _ struct{} `locationName:"DeleteBucketCorsRequest" type:"structure"`
+
+ // Specifies the bucket whose cors configuration is being deleted.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketCorsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketCorsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketCorsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteBucketCorsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *DeleteBucketCorsInput) SetExpectedBucketOwner(v string) *DeleteBucketCorsInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+type DeleteBucketCorsOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketCorsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketCorsOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteBucketInput struct {
+ _ struct{} `locationName:"DeleteBucketRequest" type:"structure"`
+
+ // Specifies the bucket being deleted.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteBucketInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *DeleteBucketInput) SetExpectedBucketOwner(v string) *DeleteBucketInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
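+// Illustrative usage sketch (not part of the generated API; svc is an
+// assumed service client and the bucket name is a placeholder). The
+// service rejects deletion of a non-empty bucket, so all objects must be
+// removed first:
+//
+//	_, err := svc.DeleteBucket(&DeleteBucketInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+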
+type DeleteBucketLifecycleInput struct {
+ _ struct{} `locationName:"DeleteBucketLifecycleRequest" type:"structure"`
+
+ // The bucket name of the lifecycle to delete.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketLifecycleInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketLifecycleInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketLifecycleInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteBucketLifecycleInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *DeleteBucketLifecycleInput) SetExpectedBucketOwner(v string) *DeleteBucketLifecycleInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
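+// Illustrative usage sketch (not part of the generated API; svc is an
+// assumed service client). Deleting the lifecycle configuration removes
+// all of the bucket's lifecycle rules in a single call:
+//
+//	_, err := svc.DeleteBucketLifecycle(&DeleteBucketLifecycleInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+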
+type DeleteBucketLifecycleOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketLifecycleOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketLifecycleOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteBucketOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteBucketReplicationInput struct {
+ _ struct{} `locationName:"DeleteBucketReplicationRequest" type:"structure"`
+
+ // The bucket name.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketReplicationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketReplicationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketReplicationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteBucketReplicationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *DeleteBucketReplicationInput) SetExpectedBucketOwner(v string) *DeleteBucketReplicationInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+type DeleteBucketReplicationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketReplicationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketReplicationOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteBucketWebsiteInput struct {
+ _ struct{} `locationName:"DeleteBucketWebsiteRequest" type:"structure"`
+
+ // The bucket name for which you want to remove the website configuration.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketWebsiteInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketWebsiteInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketWebsiteInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteBucketWebsiteInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *DeleteBucketWebsiteInput) SetExpectedBucketOwner(v string) *DeleteBucketWebsiteInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+type DeleteBucketWebsiteOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketWebsiteOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketWebsiteOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteLegalHoldInput struct {
+ _ struct{} `locationName:"DeleteLegalHoldRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // RetentionLegalHoldId is a required field
+ RetentionLegalHoldId *string `location:"querystring" locationName:"remove" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteLegalHoldInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteLegalHoldInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteLegalHoldInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteLegalHoldInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.RetentionLegalHoldId == nil {
+ invalidParams.Add(request.NewErrParamRequired("RetentionLegalHoldId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteLegalHoldInput) SetBucket(v string) *DeleteLegalHoldInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteLegalHoldInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteLegalHoldInput) SetKey(v string) *DeleteLegalHoldInput {
+ s.Key = &v
+ return s
+}
+
+// SetRetentionLegalHoldId sets the RetentionLegalHoldId field's value.
+func (s *DeleteLegalHoldInput) SetRetentionLegalHoldId(v string) *DeleteLegalHoldInput {
+ s.RetentionLegalHoldId = &v
+ return s
+}
+
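+// exampleDeleteLegalHoldInput is an illustrative sketch, not part of the
+// generated API: Bucket, Key, and RetentionLegalHoldId are all required for
+// this COS-specific request, and the values below are placeholders.
+func exampleDeleteLegalHoldInput() *DeleteLegalHoldInput {
+	// Each generated setter returns the receiver, so the required fields
+	// can be filled in a single chained expression.
+	return new(DeleteLegalHoldInput).
+		SetBucket("example-bucket").
+		SetKey("example-key").
+		SetRetentionLegalHoldId("example-hold-id")
+}
+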
+type DeleteLegalHoldOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteLegalHoldOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteLegalHoldOutput) GoString() string {
+ return s.String()
+}
+
+// Information about the delete marker.
+type DeleteMarkerEntry struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether the object is (true) or is not (false) the latest version
+ // of an object.
+ IsLatest *bool `type:"boolean"`
+
+ // The object key.
+ Key *string `min:"1" type:"string"`
+
+ // Date and time the object was last modified.
+ LastModified *time.Time `type:"timestamp"`
+
+	// The account that created the delete marker.
+ Owner *Owner `type:"structure"`
+
+ // Version ID of an object.
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteMarkerEntry) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteMarkerEntry) GoString() string {
+ return s.String()
+}
+
+// SetIsLatest sets the IsLatest field's value.
+func (s *DeleteMarkerEntry) SetIsLatest(v bool) *DeleteMarkerEntry {
+ s.IsLatest = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteMarkerEntry) SetKey(v string) *DeleteMarkerEntry {
+ s.Key = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *DeleteMarkerEntry) SetLastModified(v time.Time) *DeleteMarkerEntry {
+ s.LastModified = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *DeleteMarkerEntry) SetOwner(v *Owner) *DeleteMarkerEntry {
+ s.Owner = v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry {
+ s.VersionId = &v
+ return s
+}
+
+// Specifies whether Amazon S3 replicates delete markers. If you specify a Filter
+// in your replication configuration, you must also include a DeleteMarkerReplication
+// element. If your Filter includes a Tag element, the DeleteMarkerReplication
+// Status must be set to Disabled, because Amazon S3 does not support replicating
+// delete markers for tag-based rules. For an example configuration, see Basic
+// Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config).
+//
+// For more information about delete marker replication, see Basic Rule Configuration
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html).
+//
+// If you are using an earlier version of the replication configuration, Amazon
+// S3 handles replication of delete markers differently. For more information,
+// see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations).
+type DeleteMarkerReplication struct {
+ _ struct{} `type:"structure"`
+
+	// Indicates whether to replicate delete markers.
+	//
+ // Status is a required field
+ Status *string `type:"string" required:"true" enum:"DeleteMarkerReplicationStatus"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteMarkerReplication) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteMarkerReplication) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteMarkerReplication) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteMarkerReplication"}
+ if s.Status == nil {
+ invalidParams.Add(request.NewErrParamRequired("Status"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetStatus sets the Status field's value.
+func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication {
+ s.Status = &v
+ return s
+}
+
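+// exampleDeleteMarkerReplication is an illustrative sketch, not part of the
+// generated API: the "Disabled" literal assumes the corresponding
+// DeleteMarkerReplicationStatus enum value defined elsewhere in this package,
+// matching the tag-based-rule constraint described above.
+func exampleDeleteMarkerReplication() (*DeleteMarkerReplication, error) {
+	// Status is the only field and it is required; Validate enforces that.
+	dmr := new(DeleteMarkerReplication).SetStatus("Disabled")
+	return dmr, dmr.Validate()
+}
+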
+type DeleteObjectInput struct {
+ _ struct{} `locationName:"DeleteObjectRequest" type:"structure"`
+
+ // The bucket name of the bucket containing the object.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+	// using this action with S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Key name of the object to delete.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // The concatenation of the authentication device's serial number, a space,
+ // and the value that is displayed on your authentication device. Required to
+ // permanently delete a versioned object if versioning is configured with MFA
+ // delete enabled.
+ MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteObjectInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *DeleteObjectInput) SetExpectedBucketOwner(v string) *DeleteObjectInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetMFA sets the MFA field's value.
+func (s *DeleteObjectInput) SetMFA(v string) *DeleteObjectInput {
+ s.MFA = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *DeleteObjectInput) SetRequestPayer(v string) *DeleteObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput {
+ s.VersionId = &v
+ return s
+}
+
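+// exampleDeleteObjectInput is an illustrative sketch, not part of the
+// generated API: it targets one specific version of an object, as the doc
+// comments above describe. All identifiers are placeholders.
+func exampleDeleteObjectInput() *DeleteObjectInput {
+	return new(DeleteObjectInput).
+		SetBucket("example-bucket"). // or an access point ARN, per the docs
+		SetKey("example-key").
+		SetVersionId("example-version-id") // omit to delete the current version
+}
+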
+type DeleteObjectOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether the versioned object that was permanently deleted was (true)
+ // or was not (false) a delete marker.
+ DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // Returns the version ID of the delete marker created as a result of the DELETE
+ // operation.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteObjectOutput) GoString() string {
+ return s.String()
+}
+
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *DeleteObjectOutput) SetDeleteMarker(v bool) *DeleteObjectOutput {
+ s.DeleteMarker = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *DeleteObjectOutput) SetRequestCharged(v string) *DeleteObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput {
+ s.VersionId = &v
+ return s
+}
+
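+// exampleCheckDeleteObjectOutput is an illustrative sketch, not part of the
+// generated API: on a versioned bucket a simple DELETE creates a delete
+// marker rather than removing data, which the output fields reveal.
+func exampleCheckDeleteObjectOutput(out *DeleteObjectOutput) bool {
+	if out.DeleteMarker != nil && *out.DeleteMarker {
+		// out.VersionId, when set, is the ID of the new delete marker.
+		return true
+	}
+	return false
+}
+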
+type DeleteObjectTaggingInput struct {
+ _ struct{} `locationName:"DeleteObjectTaggingRequest" type:"structure"`
+
+ // The bucket name containing the objects from which to remove the tags.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+	// using this action with S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // The key that identifies the object in the bucket from which to remove all
+ // tags.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // The versionId of the object that the tag-set will be removed from.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteObjectTaggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteObjectTaggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteObjectTaggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteObjectTaggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteObjectTaggingInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *DeleteObjectTaggingInput) SetExpectedBucketOwner(v string) *DeleteObjectTaggingInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput {
+ s.Key = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingInput {
+ s.VersionId = &v
+ return s
+}
+
+type DeleteObjectTaggingOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The versionId of the object the tag-set was removed from.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteObjectTaggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteObjectTaggingOutput) GoString() string {
+ return s.String()
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingOutput {
+ s.VersionId = &v
+ return s
+}
+
+type DeleteObjectsInput struct {
+ _ struct{} `locationName:"DeleteObjectsRequest" type:"structure" payload:"Delete"`
+
+ // The bucket name containing the objects to delete.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+	// using this action with S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Container for the request.
+ //
+ // Delete is a required field
+ Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // The concatenation of the authentication device's serial number, a space,
+ // and the value that is displayed on your authentication device. Required to
+ // permanently delete a versioned object if versioning is configured with MFA
+ // delete enabled.
+ MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteObjectsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteObjectsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteObjectsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Delete == nil {
+ invalidParams.Add(request.NewErrParamRequired("Delete"))
+ }
+ if s.Delete != nil {
+ if err := s.Delete.Validate(); err != nil {
+ invalidParams.AddNested("Delete", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteObjectsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetDelete sets the Delete field's value.
+func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput {
+ s.Delete = v
+ return s
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *DeleteObjectsInput) SetExpectedBucketOwner(v string) *DeleteObjectsInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetMFA sets the MFA field's value.
+func (s *DeleteObjectsInput) SetMFA(v string) *DeleteObjectsInput {
+ s.MFA = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput {
+ s.RequestPayer = &v
+ return s
+}
+
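+// exampleDeleteObjectsInput is an illustrative sketch, not part of the
+// generated API: it batches two keys into the required Delete container.
+// Delete and ObjectIdentifier are defined elsewhere in this package; the
+// names below are placeholders.
+func exampleDeleteObjectsInput() *DeleteObjectsInput {
+	del := new(Delete).SetObjects([]*ObjectIdentifier{
+		new(ObjectIdentifier).SetKey("example-key-1"),
+		new(ObjectIdentifier).SetKey("example-key-2"),
+	})
+	// Validate on the input recurses into the nested Delete container, so
+	// a missing Key inside Objects surfaces alongside top-level problems.
+	return new(DeleteObjectsInput).
+		SetBucket("example-bucket").
+		SetDelete(del)
+}
+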
+type DeleteObjectsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Container element for a successful delete. It identifies the object that
+ // was successfully deleted.
+ Deleted []*DeletedObject `type:"list" flattened:"true"`
+
+ // Container for a failed delete action that describes the object that Amazon
+ // S3 attempted to delete and the error it encountered.
+ Errors []*Error `locationName:"Error" type:"list" flattened:"true"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteObjectsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteObjectsOutput) GoString() string {
+ return s.String()
+}
+
+// SetDeleted sets the Deleted field's value.
+func (s *DeleteObjectsOutput) SetDeleted(v []*DeletedObject) *DeleteObjectsOutput {
+ s.Deleted = v
+ return s
+}
+
+// SetErrors sets the Errors field's value.
+func (s *DeleteObjectsOutput) SetErrors(v []*Error) *DeleteObjectsOutput {
+ s.Errors = v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput {
+ s.RequestCharged = &v
+ return s
+}
+
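+// exampleInspectDeleteObjectsOutput is an illustrative sketch, not part of
+// the generated API: a multi-object delete can partially succeed, so callers
+// typically walk both the Deleted and Errors lists.
+func exampleInspectDeleteObjectsOutput(out *DeleteObjectsOutput) (deleted, failed int) {
+	// Each Deleted entry identifies an object that was removed (or, on a
+	// versioned bucket, covered by a new delete marker).
+	deleted = len(out.Deleted)
+	// Each Errors entry carries the per-object failure Code and Message.
+	failed = len(out.Errors)
+	return deleted, failed
+}
+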
+type DeletePublicAccessBlockInput struct {
+ _ struct{} `locationName:"DeletePublicAccessBlockRequest" type:"structure"`
+
+ // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeletePublicAccessBlockInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeletePublicAccessBlockInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeletePublicAccessBlockInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeletePublicAccessBlockInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeletePublicAccessBlockInput) SetBucket(v string) *DeletePublicAccessBlockInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeletePublicAccessBlockInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *DeletePublicAccessBlockInput) SetExpectedBucketOwner(v string) *DeletePublicAccessBlockInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+type DeletePublicAccessBlockOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeletePublicAccessBlockOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeletePublicAccessBlockOutput) GoString() string {
+ return s.String()
+}
+
+// Information about the deleted object.
+type DeletedObject struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether the versioned object that was permanently deleted was (true)
+ // or was not (false) a delete marker. In a simple DELETE, this header indicates
+ // whether (true) or not (false) a delete marker was created.
+ DeleteMarker *bool `type:"boolean"`
+
+ // The version ID of the delete marker created as a result of the DELETE operation.
+ // If you delete a specific object version, the value returned by this header
+ // is the version ID of the object version deleted.
+ DeleteMarkerVersionId *string `type:"string"`
+
+ // The name of the deleted object.
+ Key *string `min:"1" type:"string"`
+
+ // The version ID of the deleted object.
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeletedObject) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeletedObject) GoString() string {
+ return s.String()
+}
+
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *DeletedObject) SetDeleteMarker(v bool) *DeletedObject {
+ s.DeleteMarker = &v
+ return s
+}
+
+// SetDeleteMarkerVersionId sets the DeleteMarkerVersionId field's value.
+func (s *DeletedObject) SetDeleteMarkerVersionId(v string) *DeletedObject {
+ s.DeleteMarkerVersionId = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeletedObject) SetKey(v string) *DeletedObject {
+ s.Key = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeletedObject) SetVersionId(v string) *DeletedObject {
+ s.VersionId = &v
+ return s
+}
+
+// Specifies information about where to publish analysis or configuration results
+// for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC).
+type Destination struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to
+ // store the results.
+ //
+ // Bucket is a required field
+ Bucket *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Destination) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Destination) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Destination) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Destination"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *Destination) SetBucket(v string) *Destination {
+ s.Bucket = &v
+ return s
+}
+
+func (s *Destination) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// Container for all error elements.
+type Error struct {
+ _ struct{} `type:"structure"`
+
+ // The error code is a string that uniquely identifies an error condition. It
+ // is meant to be read and understood by programs that detect and handle errors
+ // by type.
+ //
+ // Amazon S3 error codes
+ //
+ // * Code: AccessDenied Description: Access Denied HTTP Status Code: 403
+ // Forbidden SOAP Fault Code Prefix: Client
+ //
+ // * Code: AccountProblem Description: There is a problem with your AWS account
+ // that prevents the action from completing successfully. Contact AWS Support
+ // for further assistance. HTTP Status Code: 403 Forbidden SOAP Fault Code
+ // Prefix: Client
+ //
+ // * Code: AllAccessDisabled Description: All access to this Amazon S3 resource
+ // has been disabled. Contact AWS Support for further assistance. HTTP Status
+ // Code: 403 Forbidden SOAP Fault Code Prefix: Client
+ //
+ // * Code: AmbiguousGrantByEmailAddress Description: The email address you
+ // provided is associated with more than one account. HTTP Status Code: 400
+ // Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: AuthorizationHeaderMalformed Description: The authorization header
+	// you provided is invalid. HTTP Status Code: 400 Bad Request SOAP Fault
+	// Code Prefix: N/A
+ //
+ // * Code: BadDigest Description: The Content-MD5 you specified did not match
+ // what we received. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: BucketAlreadyExists Description: The requested bucket name is
+ // not available. The bucket namespace is shared by all users of the system.
+ // Please select a different name and try again. HTTP Status Code: 409 Conflict
+ // SOAP Fault Code Prefix: Client
+ //
+ // * Code: BucketAlreadyOwnedByYou Description: The bucket you tried to create
+ // already exists, and you own it. Amazon S3 returns this error in all AWS
+ // Regions except in the North Virginia Region. For legacy compatibility,
+ // if you re-create an existing bucket that you already own in the North
+ // Virginia Region, Amazon S3 returns 200 OK and resets the bucket access
+ // control lists (ACLs). Code: 409 Conflict (in all Regions except the North
+ // Virginia Region) SOAP Fault Code Prefix: Client
+ //
+ // * Code: BucketNotEmpty Description: The bucket you tried to delete is
+ // not empty. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client
+ //
+ // * Code: CredentialsNotSupported Description: This request does not support
+ // credentials. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: CrossLocationLoggingProhibited Description: Cross-location logging
+ // not allowed. Buckets in one geographic location cannot log information
+ // to a bucket in another location. HTTP Status Code: 403 Forbidden SOAP
+ // Fault Code Prefix: Client
+ //
+ // * Code: EntityTooSmall Description: Your proposed upload is smaller than
+ // the minimum allowed object size. HTTP Status Code: 400 Bad Request SOAP
+ // Fault Code Prefix: Client
+ //
+ // * Code: EntityTooLarge Description: Your proposed upload exceeds the maximum
+ // allowed object size. HTTP Status Code: 400 Bad Request SOAP Fault Code
+ // Prefix: Client
+ //
+ // * Code: ExpiredToken Description: The provided token has expired. HTTP
+ // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: IllegalVersioningConfigurationException Description: Indicates
+ // that the versioning configuration specified in the request is invalid.
+ // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: IncompleteBody Description: You did not provide the number of
+ // bytes specified by the Content-Length HTTP header HTTP Status Code: 400
+ // Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: IncorrectNumberOfFilesInPostRequest Description: POST requires
+ // exactly one file upload per request. HTTP Status Code: 400 Bad Request
+ // SOAP Fault Code Prefix: Client
+ //
+ // * Code: InlineDataTooLarge Description: Inline data exceeds the maximum
+ // allowed size. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: InternalError Description: We encountered an internal error. Please
+ // try again. HTTP Status Code: 500 Internal Server Error SOAP Fault Code
+ // Prefix: Server
+ //
+ // * Code: InvalidAccessKeyId Description: The AWS access key ID you provided
+ // does not exist in our records. HTTP Status Code: 403 Forbidden SOAP Fault
+ // Code Prefix: Client
+ //
+ // * Code: InvalidAddressingHeader Description: You must specify the Anonymous
+ // role. HTTP Status Code: N/A SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidArgument Description: Invalid Argument HTTP Status Code:
+ // 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidBucketName Description: The specified bucket is not valid.
+ // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidBucketState Description: The request is not valid with
+ // the current state of the bucket. HTTP Status Code: 409 Conflict SOAP Fault
+ // Code Prefix: Client
+ //
+ // * Code: InvalidDigest Description: The Content-MD5 you specified is not
+ // valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidEncryptionAlgorithmError Description: The encryption request
+ // you specified is not valid. The valid value is AES256. HTTP Status Code:
+ // 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidLocationConstraint Description: The specified location
+ // constraint is not valid. For more information about Regions, see How to
+ // Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro).
+ // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidObjectState Description: The action is not valid for the
+ // current state of the object. HTTP Status Code: 403 Forbidden SOAP Fault
+ // Code Prefix: Client
+ //
+ // * Code: InvalidPart Description: One or more of the specified parts could
+ // not be found. The part might not have been uploaded, or the specified
+ // entity tag might not have matched the part's entity tag. HTTP Status Code:
+ // 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidPartOrder Description: The list of parts was not in ascending
+ // order. Parts list must be specified in order by part number. HTTP Status
+ // Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidPayer Description: All access to this object has been disabled.
+ // Please contact AWS Support for further assistance. HTTP Status Code: 403
+ // Forbidden SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidPolicyDocument Description: The content of the form does
+ // not meet the conditions specified in the policy document. HTTP Status
+ // Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidRange Description: The requested range cannot be satisfied.
+ // HTTP Status Code: 416 Requested Range Not Satisfiable SOAP Fault Code
+ // Prefix: Client
+ //
+ // * Code: InvalidRequest Description: Please use AWS4-HMAC-SHA256. HTTP
+ // Status Code: 400 Bad Request Code: N/A
+ //
+ // * Code: InvalidRequest Description: SOAP requests must be made over an
+ // HTTPS connection. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is
+ // not supported for buckets with non-DNS compliant names. HTTP Status Code:
+ // 400 Bad Request Code: N/A
+ //
+ // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is
+ // not supported for buckets with periods (.) in their names. HTTP Status
+ // Code: 400 Bad Request Code: N/A
+ //
+ // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate endpoint
+ // only supports virtual style requests. HTTP Status Code: 400 Bad Request
+ // Code: N/A
+ //
+ // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is not
+ // configured on this bucket. HTTP Status Code: 400 Bad Request Code: N/A
+ //
+ // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is disabled
+ // on this bucket. HTTP Status Code: 400 Bad Request Code: N/A
+ //
+ // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is
+ // not supported on this bucket. Contact AWS Support for more information.
+ // HTTP Status Code: 400 Bad Request Code: N/A
+ //
+ // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration cannot
+ // be enabled on this bucket. Contact AWS Support for more information. HTTP
+ // Status Code: 400 Bad Request Code: N/A
+ //
+ // * Code: InvalidSecurity Description: The provided security credentials
+ // are not valid. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: InvalidSOAPRequest Description: The SOAP request body is invalid.
+ // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidStorageClass Description: The storage class you specified
+ // is not valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: InvalidTargetBucketForLogging Description: The target bucket for
+ // logging does not exist, is not owned by you, or does not have the appropriate
+ // grants for the log-delivery group. HTTP Status Code: 400 Bad Request SOAP
+ // Fault Code Prefix: Client
+ //
+ // * Code: InvalidToken Description: The provided token is malformed or otherwise
+ // invalid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: InvalidURI Description: Couldn't parse the specified URI. HTTP
+ // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: KeyTooLongError Description: Your key is too long. HTTP Status
+ // Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: MalformedACLError Description: The XML you provided was not well-formed
+ // or did not validate against our published schema. HTTP Status Code: 400
+ // Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: MalformedPOSTRequest Description: The body of your POST request
+ // is not well-formed multipart/form-data. HTTP Status Code: 400 Bad Request
+ // SOAP Fault Code Prefix: Client
+ //
+ // * Code: MalformedXML Description: This happens when the user sends malformed
+ // XML (XML that doesn't conform to the published XSD) for the configuration.
+ // The error message is, "The XML you provided was not well-formed or did
+ // not validate against our published schema." HTTP Status Code: 400 Bad
+ // Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: MaxMessageLengthExceeded Description: Your request was too big.
+ // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: MaxPostPreDataLengthExceededError Description: Your POST request
+ // fields preceding the upload file were too large. HTTP Status Code: 400
+ // Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: MetadataTooLarge Description: Your metadata headers exceed the
+ // maximum allowed metadata size. HTTP Status Code: 400 Bad Request SOAP
+ // Fault Code Prefix: Client
+ //
+ // * Code: MethodNotAllowed Description: The specified method is not allowed
+ // against this resource. HTTP Status Code: 405 Method Not Allowed SOAP Fault
+ // Code Prefix: Client
+ //
+ // * Code: MissingAttachment Description: A SOAP attachment was expected,
+ // but none were found. HTTP Status Code: N/A SOAP Fault Code Prefix: Client
+ //
+ // * Code: MissingContentLength Description: You must provide the Content-Length
+ // HTTP header. HTTP Status Code: 411 Length Required SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: MissingRequestBodyError Description: This happens when the user
+ // sends an empty XML document as a request. The error message is, "Request
+ // body is empty." HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: MissingSecurityElement Description: The SOAP 1.1 request is missing
+ // a security element. HTTP Status Code: 400 Bad Request SOAP Fault Code
+ // Prefix: Client
+ //
+ // * Code: MissingSecurityHeader Description: Your request is missing a required
+ // header. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: NoLoggingStatusForKey Description: There is no such thing as a
+ // logging status subresource for a key. HTTP Status Code: 400 Bad Request
+ // SOAP Fault Code Prefix: Client
+ //
+ // * Code: NoSuchBucket Description: The specified bucket does not exist.
+ // HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client
+ //
+ // * Code: NoSuchBucketPolicy Description: The specified bucket does not
+ // have a bucket policy. HTTP Status Code: 404 Not Found SOAP Fault Code
+ // Prefix: Client
+ //
+ // * Code: NoSuchKey Description: The specified key does not exist. HTTP
+ // Status Code: 404 Not Found SOAP Fault Code Prefix: Client
+ //
+ // * Code: NoSuchLifecycleConfiguration Description: The lifecycle configuration
+ // does not exist. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix:
+ // Client
+ //
+ // * Code: NoSuchUpload Description: The specified multipart upload does
+ // not exist. The upload ID might be invalid, or the multipart upload might
+ // have been aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault
+ // Code Prefix: Client
+ //
+ // * Code: NoSuchVersion Description: Indicates that the version ID specified
+ // in the request does not match an existing version. HTTP Status Code: 404
+ // Not Found SOAP Fault Code Prefix: Client
+ //
+ // * Code: NotImplemented Description: A header you provided implies functionality
+ // that is not implemented. HTTP Status Code: 501 Not Implemented SOAP Fault
+ // Code Prefix: Server
+ //
+ // * Code: NotSignedUp Description: Your account is not signed up for the
+ // Amazon S3 service. You must sign up before you can use Amazon S3. You
+ // can sign up at the following URL: https://aws.amazon.com/s3 HTTP Status
+ // Code: 403 Forbidden SOAP Fault Code Prefix: Client
+ //
+ // * Code: OperationAborted Description: A conflicting conditional action
+ // is currently in progress against this resource. Try again. HTTP Status
+ // Code: 409 Conflict SOAP Fault Code Prefix: Client
+ //
+ // * Code: PermanentRedirect Description: The bucket you are attempting to
+ // access must be addressed using the specified endpoint. Send all future
+ // requests to this endpoint. HTTP Status Code: 301 Moved Permanently SOAP
+ // Fault Code Prefix: Client
+ //
+ // * Code: PreconditionFailed Description: At least one of the preconditions
+ // you specified did not hold. HTTP Status Code: 412 Precondition Failed
+ // SOAP Fault Code Prefix: Client
+ //
+ // * Code: Redirect Description: Temporary redirect. HTTP Status Code: 307
+ // Moved Temporarily SOAP Fault Code Prefix: Client
+ //
+ // * Code: RestoreAlreadyInProgress Description: Object restore is already
+ // in progress. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client
+ //
+ // * Code: RequestIsNotMultiPartContent Description: Bucket POST must be
+ // of the enclosure-type multipart/form-data. HTTP Status Code: 400 Bad Request
+ // SOAP Fault Code Prefix: Client
+ //
+ // * Code: RequestTimeout Description: Your socket connection to the server
+ // was not read from or written to within the timeout period. HTTP Status
+ // Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: RequestTimeTooSkewed Description: The difference between the request
+ // time and the server's time is too large. HTTP Status Code: 403 Forbidden
+ // SOAP Fault Code Prefix: Client
+ //
+ // * Code: RequestTorrentOfBucketError Description: Requesting the torrent
+ // file of a bucket is not permitted. HTTP Status Code: 400 Bad Request SOAP
+ // Fault Code Prefix: Client
+ //
+ // * Code: SignatureDoesNotMatch Description: The request signature we calculated
+ // does not match the signature you provided. Check your AWS secret access
+ // key and signing method. For more information, see REST Authentication
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html)
+ // and SOAP Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html)
+ // for details. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client
+ //
+ // * Code: ServiceUnavailable Description: Reduce your request rate. HTTP
+ // Status Code: 503 Service Unavailable SOAP Fault Code Prefix: Server
+ //
+ // * Code: SlowDown Description: Reduce your request rate. HTTP Status Code:
+ // 503 Slow Down SOAP Fault Code Prefix: Server
+ //
+ // * Code: TemporaryRedirect Description: You are being redirected to the
+ // bucket while DNS updates. HTTP Status Code: 307 Moved Temporarily SOAP
+ // Fault Code Prefix: Client
+ //
+ // * Code: TokenRefreshRequired Description: The provided token must be refreshed.
+ // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: TooManyBuckets Description: You have attempted to create more
+ // buckets than allowed. HTTP Status Code: 400 Bad Request SOAP Fault Code
+ // Prefix: Client
+ //
+ // * Code: UnexpectedContent Description: This request does not support content.
+ // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: UnresolvableGrantByEmailAddress Description: The email address
+ // you provided does not match any account on record. HTTP Status Code: 400
+ // Bad Request SOAP Fault Code Prefix: Client
+ //
+ // * Code: UserKeyMustBeSpecified Description: The bucket POST must contain
+ // the specified field name. If it is specified, check the order of the fields.
+ // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client
+ Code *string `type:"string"`
+
+ // The error key.
+ Key *string `min:"1" type:"string"`
+
+ // The error message contains a generic description of the error condition in
+ // English. It is intended for a human audience. Simple programs display the
+ // message directly to the end user if they encounter an error condition they
+ // don't know how or don't care to handle. Sophisticated programs with more
+ // exhaustive error handling and proper internationalization are more likely
+ // to ignore the error message.
+ Message *string `type:"string"`
+
+ // The version ID of the error.
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Error) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Error) GoString() string {
+ return s.String()
+}
+
+// SetCode sets the Code field's value.
+func (s *Error) SetCode(v string) *Error {
+ s.Code = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *Error) SetKey(v string) *Error {
+ s.Key = &v
+ return s
+}
+
+// SetMessage sets the Message field's value.
+func (s *Error) SetMessage(v string) *Error {
+ s.Message = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *Error) SetVersionId(v string) *Error {
+ s.VersionId = &v
+ return s
+}
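+
+// A minimal error-handling sketch for the codes documented above, assuming
+// the awserr package from the same SDK module and a configured *S3 client
+// named svc; the bucket and key names are hypothetical placeholders.
+//
+//    _, err := svc.GetObject(&GetObjectInput{
+//        Bucket: aws.String("example-bucket"), // hypothetical bucket
+//        Key:    aws.String("example-key"),    // hypothetical key
+//    })
+//    if err != nil {
+//        if aerr, ok := err.(awserr.Error); ok {
+//            switch aerr.Code() {
+//            case "SlowDown", "ServiceUnavailable":
+//                // reduce the request rate and retry, per the table above
+//            case "OperationAborted":
+//                // a conflicting conditional action is in progress; try again
+//            default:
+//                // aerr.Message() carries the human-readable description
+//            }
+//        }
+//    }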
+
+// The error information.
+type ErrorDocument struct {
+ _ struct{} `type:"structure"`
+
+ // The object key name to use when a 4XX class error occurs.
+ //
+ // Replacement must be made for object keys containing special characters (such
+ // as carriage returns) when using XML requests. For more information, see XML
+ // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ //
+ // Key is a required field
+ Key *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ErrorDocument) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ErrorDocument) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ErrorDocument) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *ErrorDocument) SetKey(v string) *ErrorDocument {
+ s.Key = &v
+ return s
+}
+
+type ExtendObjectRetentionInput struct {
+ _ struct{} `locationName:"ExtendObjectRetentionRequest" type:"structure"`
+
+	// Additional time, in seconds, to add to the existing retention period for
+	// the object. If this field is specified together with New-Retention-Period
+	// and/or New-Retention-Expiration-Date, a 400 error is returned. If none of
+	// these request headers is specified, a 400 error is also returned. The
+	// retention period of an object may be extended up to the bucket's maximum
+	// retention period from the time of the request.
+ AdditionalRetentionPeriod *int64 `location:"header" locationName:"Additional-Retention-Period" type:"integer"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Retention period, in seconds, for the object. Retention is enforced from
+	// the current time until the current time plus the value in this header. The
+	// value must fall within the retention ranges defined for the bucket.
+ ExtendRetentionFromCurrentTime *int64 `location:"header" locationName:"Extend-Retention-From-Current-Time" type:"integer"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ NewRetentionExpirationDate *time.Time `location:"header" locationName:"New-Retention-Expiration-Date" type:"timestamp" timestampFormat:"iso8601"`
+
+ NewRetentionPeriod *int64 `location:"header" locationName:"New-Retention-Period" type:"integer"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExtendObjectRetentionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExtendObjectRetentionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ExtendObjectRetentionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ExtendObjectRetentionInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAdditionalRetentionPeriod sets the AdditionalRetentionPeriod field's value.
+func (s *ExtendObjectRetentionInput) SetAdditionalRetentionPeriod(v int64) *ExtendObjectRetentionInput {
+ s.AdditionalRetentionPeriod = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ExtendObjectRetentionInput) SetBucket(v string) *ExtendObjectRetentionInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *ExtendObjectRetentionInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExtendRetentionFromCurrentTime sets the ExtendRetentionFromCurrentTime field's value.
+func (s *ExtendObjectRetentionInput) SetExtendRetentionFromCurrentTime(v int64) *ExtendObjectRetentionInput {
+ s.ExtendRetentionFromCurrentTime = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *ExtendObjectRetentionInput) SetKey(v string) *ExtendObjectRetentionInput {
+ s.Key = &v
+ return s
+}
+
+// SetNewRetentionExpirationDate sets the NewRetentionExpirationDate field's value.
+func (s *ExtendObjectRetentionInput) SetNewRetentionExpirationDate(v time.Time) *ExtendObjectRetentionInput {
+ s.NewRetentionExpirationDate = &v
+ return s
+}
+
+// SetNewRetentionPeriod sets the NewRetentionPeriod field's value.
+func (s *ExtendObjectRetentionInput) SetNewRetentionPeriod(v int64) *ExtendObjectRetentionInput {
+ s.NewRetentionPeriod = &v
+ return s
+}
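+
+// A request-building sketch for the retention semantics documented on
+// AdditionalRetentionPeriod above: exactly one of the extension fields should
+// be set, since combining them yields a 400 error. Assumes a configured *S3
+// client named svc and that the ExtendObjectRetention operation is exported
+// alongside these types; bucket and key are hypothetical placeholders.
+//
+//    input := &ExtendObjectRetentionInput{}
+//    input.SetBucket("example-bucket") // hypothetical bucket
+//    input.SetKey("example-key")       // hypothetical key
+//    // Extend the existing retention by one day (86400 seconds) and
+//    // deliberately leave the New-Retention-* fields unset.
+//    input.SetAdditionalRetentionPeriod(86400)
+//    if err := input.Validate(); err != nil {
+//        // Bucket and Key are required; Validate reports what is missing.
+//    }
+//    _, err := svc.ExtendObjectRetention(input)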
+
+type ExtendObjectRetentionOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExtendObjectRetentionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExtendObjectRetentionOutput) GoString() string {
+ return s.String()
+}
+
+type GetBucketAclInput struct {
+ _ struct{} `locationName:"GetBucketAclRequest" type:"structure"`
+
+ // Specifies the S3 bucket whose ACL is being requested.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketAclInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketAclInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketAclInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketAclInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *GetBucketAclInput) SetExpectedBucketOwner(v string) *GetBucketAclInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+type GetBucketAclOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of grants.
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
+
+ // Container for the bucket owner's display name and ID.
+ Owner *Owner `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketAclOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketAclOutput) GoString() string {
+ return s.String()
+}
+
+// SetGrants sets the Grants field's value.
+func (s *GetBucketAclOutput) SetGrants(v []*Grant) *GetBucketAclOutput {
+ s.Grants = v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput {
+ s.Owner = v
+ return s
+}
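+
+// A short sketch of reading the ACL response defined above, assuming a
+// configured *S3 client named svc, the aws and fmt packages, and the
+// generated String method on each Grant; the bucket name is a placeholder.
+//
+//    acl, err := svc.GetBucketAcl(&GetBucketAclInput{
+//        Bucket: aws.String("example-bucket"), // hypothetical bucket
+//    })
+//    if err == nil {
+//        for _, grant := range acl.Grants {
+//            // Grantee and Permission describe who may do what.
+//            fmt.Println(grant.String())
+//        }
+//    }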
+
+type GetBucketCorsInput struct {
+ _ struct{} `locationName:"GetBucketCorsRequest" type:"structure"`
+
+ // The bucket name for which to get the cors configuration.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketCorsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketCorsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketCorsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketCorsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *GetBucketCorsInput) SetExpectedBucketOwner(v string) *GetBucketCorsInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+type GetBucketCorsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A set of origins and methods (cross-origin access that you want to allow).
+ // You can add up to 100 rules to the configuration.
+ CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketCorsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketCorsOutput) GoString() string {
+ return s.String()
+}
+
+// SetCORSRules sets the CORSRules field's value.
+func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput {
+ s.CORSRules = v
+ return s
+}
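+
+// A brief sketch of inspecting the returned CORS rules, assuming a configured
+// *S3 client named svc and that CORSRule carries the usual AllowedMethods and
+// AllowedOrigins string-slice fields; the bucket name is a placeholder.
+//
+//    cors, err := svc.GetBucketCors(&GetBucketCorsInput{
+//        Bucket: aws.String("example-bucket"), // hypothetical bucket
+//    })
+//    if err == nil {
+//        for _, rule := range cors.CORSRules {
+//            fmt.Println(aws.StringValueSlice(rule.AllowedMethods),
+//                aws.StringValueSlice(rule.AllowedOrigins))
+//        }
+//    }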
+
+type GetBucketLifecycleConfigurationInput struct {
+ _ struct{} `locationName:"GetBucketLifecycleConfigurationRequest" type:"structure"`
+
+ // The name of the bucket for which to get the lifecycle information.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketLifecycleConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketLifecycleConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketLifecycleConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLifecycleConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *GetBucketLifecycleConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketLifecycleConfigurationInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+type GetBucketLifecycleConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // COS allows only one Rule.
+ Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketLifecycleConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketLifecycleConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// SetRules sets the Rules field's value.
+func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *GetBucketLifecycleConfigurationOutput {
+ s.Rules = v
+ return s
+}
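+
+// Because COS allows only one rule (see the Rules field above), reading the
+// lifecycle configuration usually reduces to inspecting the first entry. A
+// minimal sketch, assuming a configured *S3 client named svc:
+//
+//    lc, err := svc.GetBucketLifecycleConfiguration(&GetBucketLifecycleConfigurationInput{
+//        Bucket: aws.String("example-bucket"), // hypothetical bucket
+//    })
+//    if err == nil && len(lc.Rules) > 0 {
+//        rule := lc.Rules[0] // COS returns at most one rule
+//        fmt.Println(rule.String())
+//    }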
+
+type GetBucketLocationInput struct {
+ _ struct{} `locationName:"GetBucketLocationRequest" type:"structure"`
+
+ // The name of the bucket for which to get the location.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketLocationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketLocationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketLocationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketLocationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *GetBucketLocationInput) SetExpectedBucketOwner(v string) *GetBucketLocationInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+type GetBucketLocationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the Region where the bucket resides. For a list of all the Amazon
+ // S3 supported location constraints by Region, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region).
+ // Buckets in Region us-east-1 have a LocationConstraint of null.
+ LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketLocationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketLocationOutput) GoString() string {
+ return s.String()
+}
+
+// SetLocationConstraint sets the LocationConstraint field's value.
+func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLocationOutput {
+ s.LocationConstraint = &v
+ return s
+}
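+
+// The LocationConstraint documented above can be a nil pointer (for example,
+// buckets in Region us-east-1), so dereference it defensively. A minimal
+// sketch, assuming a configured *S3 client named svc and the aws helpers:
+//
+//    loc, err := svc.GetBucketLocation(&GetBucketLocationInput{
+//        Bucket: aws.String("example-bucket"), // hypothetical bucket
+//    })
+//    if err == nil {
+//        // aws.StringValue returns "" for a nil pointer instead of panicking.
+//        region := aws.StringValue(loc.LocationConstraint)
+//        if region == "" {
+//            region = "us-east-1" // a null constraint means us-east-1 on AWS
+//        }
+//    }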
+
+type GetBucketLoggingInput struct {
+ _ struct{} `locationName:"GetBucketLoggingRequest" type:"structure"`
+
+ // The bucket name for which to get the logging information.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketLoggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketLoggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketLoggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketLoggingInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *GetBucketLoggingInput) SetExpectedBucketOwner(v string) *GetBucketLoggingInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+type GetBucketLoggingOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Describes where logs are stored and the prefix that Amazon S3 assigns to
+ // all log object keys for a bucket. For more information, see PUT Bucket logging
+ // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html)
+ // in the Amazon Simple Storage Service API Reference.
+ LoggingEnabled *LoggingEnabled `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketLoggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketLoggingOutput) GoString() string {
+ return s.String()
+}
+
+// SetLoggingEnabled sets the LoggingEnabled field's value.
+func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucketLoggingOutput {
+ s.LoggingEnabled = v
+ return s
+}
+
+type GetBucketProtectionConfigurationInput struct {
+ _ struct{} `locationName:"GetBucketProtectionConfigurationRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketProtectionConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketProtectionConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketProtectionConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketProtectionConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketProtectionConfigurationInput) SetBucket(v string) *GetBucketProtectionConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketProtectionConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type GetBucketProtectionConfigurationOutput struct {
+ _ struct{} `type:"structure" payload:"ProtectionConfiguration"`
+
+ // Bucket protection configuration
+ ProtectionConfiguration *ProtectionConfiguration `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketProtectionConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketProtectionConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// SetProtectionConfiguration sets the ProtectionConfiguration field's value.
+func (s *GetBucketProtectionConfigurationOutput) SetProtectionConfiguration(v *ProtectionConfiguration) *GetBucketProtectionConfigurationOutput {
+ s.ProtectionConfiguration = v
+ return s
+}
+
+type GetBucketReplicationInput struct {
+ _ struct{} `locationName:"GetBucketReplicationRequest" type:"structure"`
+
+ // The bucket name for which to get the replication information.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketReplicationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketReplicationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketReplicationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketReplicationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *GetBucketReplicationInput) SetExpectedBucketOwner(v string) *GetBucketReplicationInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+type GetBucketReplicationOutput struct {
+ _ struct{} `type:"structure" payload:"ReplicationConfiguration"`
+
+ // A container for replication rules. You can add up to 1,000 rules. The maximum
+ // size of a replication configuration is 2 MB.
+ ReplicationConfiguration *ReplicationConfiguration `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketReplicationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketReplicationOutput) GoString() string {
+ return s.String()
+}
+
+// SetReplicationConfiguration sets the ReplicationConfiguration field's value.
+func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *GetBucketReplicationOutput {
+ s.ReplicationConfiguration = v
+ return s
+}
+
+type GetBucketVersioningInput struct {
+ _ struct{} `locationName:"GetBucketVersioningRequest" type:"structure"`
+
+ // The name of the bucket for which to get the versioning information.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketVersioningInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketVersioningInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketVersioningInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketVersioningInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *GetBucketVersioningInput) SetExpectedBucketOwner(v string) *GetBucketVersioningInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+type GetBucketVersioningOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether MFA delete is enabled in the bucket versioning configuration.
+ // This element is only returned if the bucket has been configured with MFA
+ // delete. If the bucket has never been so configured, this element is not returned.
+ MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"`
+
+ // The versioning state of the bucket.
+ Status *string `type:"string" enum:"BucketVersioningStatus"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketVersioningOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketVersioningOutput) GoString() string {
+ return s.String()
+}
+
+// SetMFADelete sets the MFADelete field's value.
+func (s *GetBucketVersioningOutput) SetMFADelete(v string) *GetBucketVersioningOutput {
+ s.MFADelete = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutput {
+ s.Status = &v
+ return s
+}
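+
+// Status is absent for buckets that were never versioned, and MFADelete only
+// appears when the bucket was ever configured with MFA delete, so both fields
+// need nil-safe reads. A short sketch, assuming a configured *S3 client named
+// svc:
+//
+//    v, err := svc.GetBucketVersioning(&GetBucketVersioningInput{
+//        Bucket: aws.String("example-bucket"), // hypothetical bucket
+//    })
+//    if err == nil {
+//        switch aws.StringValue(v.Status) {
+//        case "Enabled", "Suspended":
+//            // versioning has been configured at some point
+//        default:
+//            // empty string: versioning was never enabled on this bucket
+//        }
+//    }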
+
+type GetBucketWebsiteInput struct {
+ _ struct{} `locationName:"GetBucketWebsiteRequest" type:"structure"`
+
+ // The bucket name for which to get the website configuration.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketWebsiteInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketWebsiteInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketWebsiteInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketWebsiteInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *GetBucketWebsiteInput) SetExpectedBucketOwner(v string) *GetBucketWebsiteInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+type GetBucketWebsiteOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The object key name of the website error document to use for 4XX class errors.
+ ErrorDocument *ErrorDocument `type:"structure"`
+
+ // The name of the index document for the website (for example index.html).
+ IndexDocument *IndexDocument `type:"structure"`
+
+ // Specifies the redirect behavior of all requests to a website endpoint of
+ // an Amazon S3 bucket.
+ RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`
+
+ // Rules that define when a redirect is applied and the redirect behavior.
+ RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketWebsiteOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketWebsiteOutput) GoString() string {
+ return s.String()
+}
+
+// SetErrorDocument sets the ErrorDocument field's value.
+func (s *GetBucketWebsiteOutput) SetErrorDocument(v *ErrorDocument) *GetBucketWebsiteOutput {
+ s.ErrorDocument = v
+ return s
+}
+
+// SetIndexDocument sets the IndexDocument field's value.
+func (s *GetBucketWebsiteOutput) SetIndexDocument(v *IndexDocument) *GetBucketWebsiteOutput {
+ s.IndexDocument = v
+ return s
+}
+
+// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value.
+func (s *GetBucketWebsiteOutput) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *GetBucketWebsiteOutput {
+ s.RedirectAllRequestsTo = v
+ return s
+}
+
+// SetRoutingRules sets the RoutingRules field's value.
+func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWebsiteOutput {
+ s.RoutingRules = v
+ return s
+}
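+
+// The website configuration above is either a blanket redirect or an
+// index/error-document pair, so callers typically branch on which container
+// is non-nil. A minimal sketch, assuming a configured *S3 client named svc
+// and the usual Suffix field on IndexDocument:
+//
+//    w, err := svc.GetBucketWebsite(&GetBucketWebsiteInput{
+//        Bucket: aws.String("example-bucket"), // hypothetical bucket
+//    })
+//    if err == nil {
+//        if w.RedirectAllRequestsTo != nil {
+//            // every request is redirected; the other fields are unused
+//        } else if w.IndexDocument != nil {
+//            fmt.Println(aws.StringValue(w.IndexDocument.Suffix))
+//        }
+//    }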
+
+type GetObjectAclInput struct {
+ _ struct{} `locationName:"GetObjectAclRequest" type:"structure"`
+
+ // The bucket name that contains the object for which to get the ACL information.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // The key of the object for which to get the ACL information.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetObjectAclInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetObjectAclInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectAclInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetObjectAclInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *GetObjectAclInput) SetExpectedBucketOwner(v string) *GetObjectAclInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *GetObjectAclInput) SetRequestPayer(v string) *GetObjectAclInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput {
+ s.VersionId = &v
+ return s
+}
+
+type GetObjectAclOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of grants.
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
+
+ // Container for the bucket owner's display name and ID.
+ Owner *Owner `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetObjectAclOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetObjectAclOutput) GoString() string {
+ return s.String()
+}
+
+// SetGrants sets the Grants field's value.
+func (s *GetObjectAclOutput) SetGrants(v []*Grant) *GetObjectAclOutput {
+ s.Grants = v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *GetObjectAclOutput) SetOwner(v *Owner) *GetObjectAclOutput {
+ s.Owner = v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput {
+ s.RequestCharged = &v
+ return s
+}
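+
+// ACLs can be read for a specific object version via VersionId, as documented
+// on GetObjectAclInput. A short sketch, assuming a configured *S3 client
+// named svc and the usual DisplayName field on Owner; bucket, key, and
+// version ID are hypothetical placeholders.
+//
+//    acl, err := svc.GetObjectAcl(&GetObjectAclInput{
+//        Bucket:    aws.String("example-bucket"),  // hypothetical bucket
+//        Key:       aws.String("example-key"),     // hypothetical key
+//        VersionId: aws.String("example-version"), // hypothetical version ID
+//    })
+//    if err == nil && acl.Owner != nil {
+//        fmt.Println(aws.StringValue(acl.Owner.DisplayName))
+//    }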
+
+type GetObjectInput struct {
+ _ struct{} `locationName:"GetObjectRequest" type:"structure"`
+
+ // The bucket name containing the object.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this action using S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Return the object only if its entity tag (ETag) is the same as the one specified,
+ // otherwise return a 412 (precondition failed).
+ IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
+
+ // Return the object only if it has been modified since the specified time,
+ // otherwise return a 304 (not modified).
+ IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"`
+
+ // Return the object only if its entity tag (ETag) is different from the one
+ // specified, otherwise return a 304 (not modified).
+ IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+ // Return the object only if it has not been modified since the specified time,
+ // otherwise return a 412 (precondition failed).
+ IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
+
+ // Key of the object to get.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of the object being read. This is a positive integer between
+ // 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified.
+ // Useful for downloading just a part of an object.
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
+
+ // Downloads the specified range bytes of an object. For more information about
+ // the HTTP Range header, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
+ // (https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35).
+ //
+ // Amazon S3 doesn't support retrieving multiple ranges of data per GET request.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Sets the Cache-Control header of the response.
+ ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"`
+
+	// Sets the Content-Disposition header of the response.
+ ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"`
+
+ // Sets the Content-Encoding header of the response.
+ ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"`
+
+ // Sets the Content-Language header of the response.
+ ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"`
+
+ // Sets the Content-Type header of the response.
+ ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"`
+
+ // Sets the Expires header of the response.
+ ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp"`
+
+ // Specifies the algorithm to use to when decrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 used to encrypt
+ // the data. This value is used to decrypt the object when recovering it and
+ // must match the one used when storing the data. The key must be appropriate
+ // for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ //
+ // SSECustomerKey is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by GetObjectInput's
+ // String and GoString methods.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetObjectInput) SetBucket(v string) *GetObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetObjectInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *GetObjectInput) SetExpectedBucketOwner(v string) *GetObjectInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetIfMatch sets the IfMatch field's value.
+func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput {
+ s.IfMatch = &v
+ return s
+}
+
+// SetIfModifiedSince sets the IfModifiedSince field's value.
+func (s *GetObjectInput) SetIfModifiedSince(v time.Time) *GetObjectInput {
+ s.IfModifiedSince = &v
+ return s
+}
+
+// SetIfNoneMatch sets the IfNoneMatch field's value.
+func (s *GetObjectInput) SetIfNoneMatch(v string) *GetObjectInput {
+ s.IfNoneMatch = &v
+ return s
+}
+
+// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value.
+func (s *GetObjectInput) SetIfUnmodifiedSince(v time.Time) *GetObjectInput {
+ s.IfUnmodifiedSince = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *GetObjectInput) SetKey(v string) *GetObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *GetObjectInput) SetPartNumber(v int64) *GetObjectInput {
+ s.PartNumber = &v
+ return s
+}
+
+// SetRange sets the Range field's value.
+func (s *GetObjectInput) SetRange(v string) *GetObjectInput {
+ s.Range = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *GetObjectInput) SetRequestPayer(v string) *GetObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetResponseCacheControl sets the ResponseCacheControl field's value.
+func (s *GetObjectInput) SetResponseCacheControl(v string) *GetObjectInput {
+ s.ResponseCacheControl = &v
+ return s
+}
+
+// SetResponseContentDisposition sets the ResponseContentDisposition field's value.
+func (s *GetObjectInput) SetResponseContentDisposition(v string) *GetObjectInput {
+ s.ResponseContentDisposition = &v
+ return s
+}
+
+// SetResponseContentEncoding sets the ResponseContentEncoding field's value.
+func (s *GetObjectInput) SetResponseContentEncoding(v string) *GetObjectInput {
+ s.ResponseContentEncoding = &v
+ return s
+}
+
+// SetResponseContentLanguage sets the ResponseContentLanguage field's value.
+func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput {
+ s.ResponseContentLanguage = &v
+ return s
+}
+
+// SetResponseContentType sets the ResponseContentType field's value.
+func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput {
+ s.ResponseContentType = &v
+ return s
+}
+
+// SetResponseExpires sets the ResponseExpires field's value.
+func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput {
+ s.ResponseExpires = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+func (s *GetObjectInput) getSSECustomerKey() (v string) {
+ if s.SSECustomerKey == nil {
+ return v
+ }
+ return *s.SSECustomerKey
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput {
+ s.VersionId = &v
+ return s
+}
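+
+// A minimal download sketch tying together the Range field documented above;
+// only a single byte range is supported per request. Assumes a configured *S3
+// client named svc plus io and os from the standard library; bucket and key
+// are hypothetical placeholders.
+//
+//    out, err := svc.GetObject(&GetObjectInput{
+//        Bucket: aws.String("example-bucket"), // hypothetical bucket
+//        Key:    aws.String("example-key"),    // hypothetical key
+//        Range:  aws.String("bytes=0-1023"),   // first KiB only
+//    })
+//    if err == nil {
+//        defer out.Body.Close() // Body must always be drained and closed
+//        _, _ = io.Copy(os.Stdout, out.Body)
+//    }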
+
+type GetObjectOutput struct {
+ _ struct{} `type:"structure" payload:"Body"`
+
+ // Indicates that a range of bytes was specified.
+ AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
+
+ // Object data.
+ Body io.ReadCloser `type:"blob"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // Size of the body in bytes.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+ // The portion of the object returned in the response.
+ ContentRange *string `location:"header" locationName:"Content-Range" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // Specifies whether the object retrieved was (true) or was not (false) a Delete
+ // Marker. If false, this response header does not appear in the response.
+ DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+ // An ETag is an opaque identifier assigned by a web server to a specific version
+ // of a resource found at a URL.
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If the object expiration is configured (see PUT Bucket lifecycle), the response
+ // includes this header. It includes the expiry-date and rule-id key-value pairs
+ // providing object expiration information. The value of the rule-id is URL
+ // encoded.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *string `location:"header" locationName:"Expires" type:"string"`
+
+ IBMRestoredCopyStorageClass *string `location:"header" locationName:"x-ibm-restored-copy-storage-class" type:"string" enum:"StorageClass"`
+
+	// This header is only included if an object has transition metadata. It indicates
+	// the transition storage class and the time of transition. If this header and
+	// the x-amz-restore header are both included, this header indicates the time
+	// at which the object was originally archived.
+ IBMTransition *string `location:"header" locationName:"x-ibm-transition" type:"string"`
+
+ // Creation date of the object.
+ LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"`
+
+ // A map of metadata to store with the object in S3.
+ //
+ // By default unmarshaled keys are written as a map keys in following canonicalized format:
+	// By default, unmarshaled keys are written as map keys in the following canonicalized
+	// format: the first letter and any letter following a hyphen are capitalized, and
+	// the rest are lowercase.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // This is set to the number of metadata entries not returned in x-amz-meta
+ // headers. This can happen if you create metadata using an API like SOAP that
+ // supports more flexible metadata than the REST API. For example, using SOAP,
+ // you can create metadata whose values are not legal HTTP headers.
+ MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
+
+ // The count of parts this object has.
+ PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"`
+
+ // Amazon S3 can return this if your request involves a bucket that is either
+ // a source or destination in a replication rule.
+ ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // Provides information about object restoration action and expiration time
+ // of the restored object copy.
+ Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`
+
+ // Date on which it will be legal to delete or modify the object. You can only
+	// specify this or the Retention-Period header. If both are specified, a 400
+	// error will be returned. If neither is specified, the bucket's DefaultRetention
+ // period will be used.
+ RetentionExpirationDate *time.Time `location:"header" locationName:"Retention-Expiration-Date" type:"timestamp"`
+
+ RetentionLegalHoldCount *int64 `location:"header" locationName:"Retention-Legal-Hold-Count" type:"integer"`
+
+ // Retention period to store on the object in seconds. If this field and Retention-Expiration-Date
+	// are specified, a 400 error is returned. If neither is specified, the bucket's
+ // DefaultRetention period will be used. 0 is a legal value assuming the bucket's
+ // minimum retention period is also 0.
+ RetentionPeriod *int64 `location:"header" locationName:"Retention-Period" type:"integer"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round-trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for the
+ // object.
+ //
+ // SSEKMSKeyId is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by GetObjectOutput's
+ // String and GoString methods.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // Provides storage class information of the object. Amazon S3 returns this
+ // header for all objects except for S3 Standard storage class objects.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // The number of tags, if any, on the object.
+ TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"`
+
+ // Version of the object.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetObjectOutput) GoString() string {
+ return s.String()
+}
+
+// SetAcceptRanges sets the AcceptRanges field's value.
+func (s *GetObjectOutput) SetAcceptRanges(v string) *GetObjectOutput {
+ s.AcceptRanges = &v
+ return s
+}
+
+// SetBody sets the Body field's value.
+func (s *GetObjectOutput) SetBody(v io.ReadCloser) *GetObjectOutput {
+ s.Body = v
+ return s
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput {
+ s.CacheControl = &v
+ return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *GetObjectOutput) SetContentDisposition(v string) *GetObjectOutput {
+ s.ContentDisposition = &v
+ return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *GetObjectOutput) SetContentEncoding(v string) *GetObjectOutput {
+ s.ContentEncoding = &v
+ return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *GetObjectOutput) SetContentLanguage(v string) *GetObjectOutput {
+ s.ContentLanguage = &v
+ return s
+}
+
+// SetContentLength sets the ContentLength field's value.
+func (s *GetObjectOutput) SetContentLength(v int64) *GetObjectOutput {
+ s.ContentLength = &v
+ return s
+}
+
+// SetContentRange sets the ContentRange field's value.
+func (s *GetObjectOutput) SetContentRange(v string) *GetObjectOutput {
+ s.ContentRange = &v
+ return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *GetObjectOutput) SetContentType(v string) *GetObjectOutput {
+ s.ContentType = &v
+ return s
+}
+
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *GetObjectOutput) SetDeleteMarker(v bool) *GetObjectOutput {
+ s.DeleteMarker = &v
+ return s
+}
+
+// SetETag sets the ETag field's value.
+func (s *GetObjectOutput) SetETag(v string) *GetObjectOutput {
+ s.ETag = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *GetObjectOutput) SetExpiration(v string) *GetObjectOutput {
+ s.Expiration = &v
+ return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *GetObjectOutput) SetExpires(v string) *GetObjectOutput {
+ s.Expires = &v
+ return s
+}
+
+// SetIBMRestoredCopyStorageClass sets the IBMRestoredCopyStorageClass field's value.
+func (s *GetObjectOutput) SetIBMRestoredCopyStorageClass(v string) *GetObjectOutput {
+ s.IBMRestoredCopyStorageClass = &v
+ return s
+}
+
+// SetIBMTransition sets the IBMTransition field's value.
+func (s *GetObjectOutput) SetIBMTransition(v string) *GetObjectOutput {
+ s.IBMTransition = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *GetObjectOutput) SetLastModified(v time.Time) *GetObjectOutput {
+ s.LastModified = &v
+ return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *GetObjectOutput) SetMetadata(v map[string]*string) *GetObjectOutput {
+ s.Metadata = v
+ return s
+}
+
+// SetMissingMeta sets the MissingMeta field's value.
+func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput {
+ s.MissingMeta = &v
+ return s
+}
+
+// SetPartsCount sets the PartsCount field's value.
+func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput {
+ s.PartsCount = &v
+ return s
+}
+
+// SetReplicationStatus sets the ReplicationStatus field's value.
+func (s *GetObjectOutput) SetReplicationStatus(v string) *GetObjectOutput {
+ s.ReplicationStatus = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *GetObjectOutput) SetRequestCharged(v string) *GetObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetRestore sets the Restore field's value.
+func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput {
+ s.Restore = &v
+ return s
+}
+
+// SetRetentionExpirationDate sets the RetentionExpirationDate field's value.
+func (s *GetObjectOutput) SetRetentionExpirationDate(v time.Time) *GetObjectOutput {
+ s.RetentionExpirationDate = &v
+ return s
+}
+
+// SetRetentionLegalHoldCount sets the RetentionLegalHoldCount field's value.
+func (s *GetObjectOutput) SetRetentionLegalHoldCount(v int64) *GetObjectOutput {
+ s.RetentionLegalHoldCount = &v
+ return s
+}
+
+// SetRetentionPeriod sets the RetentionPeriod field's value.
+func (s *GetObjectOutput) SetRetentionPeriod(v int64) *GetObjectOutput {
+ s.RetentionPeriod = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *GetObjectOutput) SetSSECustomerKeyMD5(v string) *GetObjectOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *GetObjectOutput) SetSSEKMSKeyId(v string) *GetObjectOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *GetObjectOutput) SetServerSideEncryption(v string) *GetObjectOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *GetObjectOutput) SetStorageClass(v string) *GetObjectOutput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetTagCount sets the TagCount field's value.
+func (s *GetObjectOutput) SetTagCount(v int64) *GetObjectOutput {
+ s.TagCount = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectOutput) SetVersionId(v string) *GetObjectOutput {
+ s.VersionId = &v
+ return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput {
+ s.WebsiteRedirectLocation = &v
+ return s
+}
+
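+// Example (illustrative sketch, not generated code): Body is a live stream, so
+// callers must drain and close it; the value is normally returned by
+// (*S3).GetObject rather than constructed directly.
+//
+//	var out GetObjectOutput // placeholder; in practice from (*S3).GetObject
+//	if out.Body != nil {
+//		defer out.Body.Close()
+//		data, err := io.ReadAll(out.Body)
+//		_, _ = data, err
+//	}
+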
+type GetObjectTaggingInput struct {
+ _ struct{} `locationName:"GetObjectTaggingRequest" type:"structure"`
+
+ // The bucket name containing the object for which to get the tagging information.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+	// using this action with S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Object key for which to get the tagging information.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // The versionId of the object for which to get the tagging information.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetObjectTaggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetObjectTaggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectTaggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetObjectTaggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetObjectTaggingInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *GetObjectTaggingInput) SetExpectedBucketOwner(v string) *GetObjectTaggingInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *GetObjectTaggingInput) SetRequestPayer(v string) *GetObjectTaggingInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput {
+ s.VersionId = &v
+ return s
+}
+
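+// Example (illustrative sketch, not generated code): Bucket and Key are the
+// only required fields, and Validate enforces them. The names below are
+// hypothetical placeholders.
+//
+//	in := new(GetObjectTaggingInput).
+//		SetBucket("example-bucket").
+//		SetKey("example-object")
+//	if err := in.Validate(); err != nil {
+//		// a missing Bucket or Key surfaces here as request.ErrInvalidParams
+//	}
+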
+type GetObjectTaggingOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the tag set.
+ //
+ // TagSet is a required field
+ TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
+
+ // The versionId of the object for which you got the tagging information.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetObjectTaggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetObjectTaggingOutput) GoString() string {
+ return s.String()
+}
+
+// SetTagSet sets the TagSet field's value.
+func (s *GetObjectTaggingOutput) SetTagSet(v []*Tag) *GetObjectTaggingOutput {
+ s.TagSet = v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput {
+ s.VersionId = &v
+ return s
+}
+
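+// Example (illustrative sketch, not generated code): TagSet holds *Tag values,
+// so entries are nil-checked and dereferenced when read. The output value is a
+// hypothetical placeholder.
+//
+//	var out GetObjectTaggingOutput
+//	for _, t := range out.TagSet {
+//		if t != nil && t.Key != nil && t.Value != nil {
+//			_ = *t.Key + "=" + *t.Value
+//		}
+//	}
+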
+type GetPublicAccessBlockInput struct {
+ _ struct{} `locationName:"GetPublicAccessBlockRequest" type:"structure"`
+
+ // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you
+ // want to retrieve.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetPublicAccessBlockInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetPublicAccessBlockInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetPublicAccessBlockInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetPublicAccessBlockInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetPublicAccessBlockInput) SetBucket(v string) *GetPublicAccessBlockInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetPublicAccessBlockInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *GetPublicAccessBlockInput) SetExpectedBucketOwner(v string) *GetPublicAccessBlockInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+type GetPublicAccessBlockOutput struct {
+ _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"`
+
+ // The PublicAccessBlock configuration currently in effect for this Amazon S3
+ // bucket.
+ PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetPublicAccessBlockOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetPublicAccessBlockOutput) GoString() string {
+ return s.String()
+}
+
+// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value.
+func (s *GetPublicAccessBlockOutput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *GetPublicAccessBlockOutput {
+ s.PublicAccessBlockConfiguration = v
+ return s
+}
+
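+// Example (illustrative sketch, not generated code): the configuration payload
+// is a pointer and may be nil, so callers should guard before reading it. The
+// output value is a hypothetical placeholder.
+//
+//	var out GetPublicAccessBlockOutput
+//	if cfg := out.PublicAccessBlockConfiguration; cfg != nil {
+//		// inspect cfg here, e.g. its public-ACL blocking flags
+//	}
+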
+// Container for S3 Glacier job parameters.
+type GlacierJobParameters struct {
+ _ struct{} `type:"structure"`
+
+ // Retrieval tier at which the restore will be processed.
+ //
+ // Tier is a required field
+ Tier *string `type:"string" required:"true" enum:"Tier"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GlacierJobParameters) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GlacierJobParameters) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GlacierJobParameters) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GlacierJobParameters"}
+ if s.Tier == nil {
+ invalidParams.Add(request.NewErrParamRequired("Tier"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetTier sets the Tier field's value.
+func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters {
+ s.Tier = &v
+ return s
+}
+
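+// Example (illustrative sketch, not generated code): Tier is the one required
+// field, so Validate rejects an empty GlacierJobParameters.
+//
+//	p := new(GlacierJobParameters).SetTier("Bulk")
+//	if err := p.Validate(); err != nil {
+//		// only reached if Tier was never set
+//	}
+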
+// Container for grant information.
+type Grant struct {
+ _ struct{} `type:"structure"`
+
+ // The person being granted permissions.
+ Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
+
+ // Specifies the permission given to the grantee.
+ Permission *string `type:"string" enum:"Permission"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Grant) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Grant) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Grant) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Grant"}
+ if s.Grantee != nil {
+ if err := s.Grantee.Validate(); err != nil {
+ invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetGrantee sets the Grantee field's value.
+func (s *Grant) SetGrantee(v *Grantee) *Grant {
+ s.Grantee = v
+ return s
+}
+
+// SetPermission sets the Permission field's value.
+func (s *Grant) SetPermission(v string) *Grant {
+ s.Permission = &v
+ return s
+}
+
+// Container for the person being granted permissions.
+type Grantee struct {
+ _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
+
+ // Screen name of the grantee.
+ DisplayName *string `type:"string"`
+
+ // Email address of the grantee.
+ //
+ // Using email addresses to specify a grantee is only supported in the following
+ // AWS Regions:
+ //
+ // * US East (N. Virginia)
+ //
+ // * US West (N. California)
+ //
+ // * US West (Oregon)
+ //
+ // * Asia Pacific (Singapore)
+ //
+ // * Asia Pacific (Sydney)
+ //
+ // * Asia Pacific (Tokyo)
+ //
+ // * Europe (Ireland)
+ //
+ // * South America (São Paulo)
+ //
+ // For a list of all the Amazon S3 supported Regions and endpoints, see Regions
+ // and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+ // in the AWS General Reference.
+ EmailAddress *string `type:"string"`
+
+ // The canonical user ID of the grantee.
+ ID *string `type:"string"`
+
+	// Type of grantee.
+ //
+ // Type is a required field
+ Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"`
+
+ // URI of the grantee group.
+ URI *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Grantee) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Grantee) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Grantee) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Grantee"}
+ if s.Type == nil {
+ invalidParams.Add(request.NewErrParamRequired("Type"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDisplayName sets the DisplayName field's value.
+func (s *Grantee) SetDisplayName(v string) *Grantee {
+ s.DisplayName = &v
+ return s
+}
+
+// SetEmailAddress sets the EmailAddress field's value.
+func (s *Grantee) SetEmailAddress(v string) *Grantee {
+ s.EmailAddress = &v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *Grantee) SetID(v string) *Grantee {
+ s.ID = &v
+ return s
+}
+
+// SetType sets the Type field's value.
+func (s *Grantee) SetType(v string) *Grantee {
+ s.Type = &v
+ return s
+}
+
+// SetURI sets the URI field's value.
+func (s *Grantee) SetURI(v string) *Grantee {
+ s.URI = &v
+ return s
+}
+
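+// Example (illustrative sketch, not generated code): a Grant pairs a Grantee
+// with a Permission, and Grantee.Type is the one required field. The canonical
+// user ID below is a hypothetical placeholder.
+//
+//	g := new(Grant).
+//		SetGrantee(new(Grantee).
+//			SetType("CanonicalUser").
+//			SetID("79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be")).
+//		SetPermission("READ")
+//	if err := g.Validate(); err != nil {
+//		// a Grantee without Type fails validation
+//	}
+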
+type HeadBucketInput struct {
+ _ struct{} `locationName:"HeadBucketRequest" type:"structure"`
+
+ // The bucket name.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+	// using this action with S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s HeadBucketInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s HeadBucketInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HeadBucketInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *HeadBucketInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *HeadBucketInput) SetExpectedBucketOwner(v string) *HeadBucketInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+type HeadBucketOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The root key used by Key Protect to encrypt this bucket. This value must
+ // be the full CRN of the root key.
+ IBMSSEKPCrkId *string `location:"header" locationName:"ibm-sse-kp-customer-root-key-crn" type:"string"`
+
+ // Specifies whether the Bucket has Key Protect enabled.
+ IBMSSEKPEnabled *bool `location:"header" locationName:"ibm-sse-kp-enabled" type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s HeadBucketOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s HeadBucketOutput) GoString() string {
+ return s.String()
+}
+
+// SetIBMSSEKPCrkId sets the IBMSSEKPCrkId field's value.
+func (s *HeadBucketOutput) SetIBMSSEKPCrkId(v string) *HeadBucketOutput {
+ s.IBMSSEKPCrkId = &v
+ return s
+}
+
+// SetIBMSSEKPEnabled sets the IBMSSEKPEnabled field's value.
+func (s *HeadBucketOutput) SetIBMSSEKPEnabled(v bool) *HeadBucketOutput {
+ s.IBMSSEKPEnabled = &v
+ return s
+}
+
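+// Example (illustrative sketch, not generated code): the IBM-specific headers
+// above expose a bucket's Key Protect state; both fields are pointers and may
+// be nil when the bucket is not Key Protect enabled.
+//
+//	var out HeadBucketOutput // placeholder; in practice from (*S3).HeadBucket
+//	if out.IBMSSEKPEnabled != nil && *out.IBMSSEKPEnabled {
+//		// out.IBMSSEKPCrkId, if set, carries the root key CRN
+//	}
+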
+type HeadObjectInput struct {
+ _ struct{} `locationName:"HeadObjectRequest" type:"structure"`
+
+ // The name of the bucket containing the object.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+	// using this action with S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Return the object only if its entity tag (ETag) is the same as the one specified,
+ // otherwise return a 412 (precondition failed).
+ IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
+
+ // Return the object only if it has been modified since the specified time,
+ // otherwise return a 304 (not modified).
+ IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"`
+
+ // Return the object only if its entity tag (ETag) is different from the one
+ // specified, otherwise return a 304 (not modified).
+ IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+ // Return the object only if it has not been modified since the specified time,
+ // otherwise return a 412 (precondition failed).
+ IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
+
+ // The object key.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of the object being read. This is a positive integer between
+ // 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified.
+	// Useful for querying the size of the part and the number of parts in this
+	// object.
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
+
+ // Downloads the specified range bytes of an object. For more information about
+ // the HTTP Range header, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
+ // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35).
+ //
+ // Amazon S3 doesn't support retrieving multiple ranges of data per GET request.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // S3 does not store the encryption key. The key must be appropriate for use
+ // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ //
+ // SSECustomerKey is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by HeadObjectInput's
+ // String and GoString methods.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s HeadObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s HeadObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HeadObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *HeadObjectInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *HeadObjectInput) SetExpectedBucketOwner(v string) *HeadObjectInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetIfMatch sets the IfMatch field's value.
+func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput {
+ s.IfMatch = &v
+ return s
+}
+
+// SetIfModifiedSince sets the IfModifiedSince field's value.
+func (s *HeadObjectInput) SetIfModifiedSince(v time.Time) *HeadObjectInput {
+ s.IfModifiedSince = &v
+ return s
+}
+
+// SetIfNoneMatch sets the IfNoneMatch field's value.
+func (s *HeadObjectInput) SetIfNoneMatch(v string) *HeadObjectInput {
+ s.IfNoneMatch = &v
+ return s
+}
+
+// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value.
+func (s *HeadObjectInput) SetIfUnmodifiedSince(v time.Time) *HeadObjectInput {
+ s.IfUnmodifiedSince = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *HeadObjectInput) SetKey(v string) *HeadObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *HeadObjectInput) SetPartNumber(v int64) *HeadObjectInput {
+ s.PartNumber = &v
+ return s
+}
+
+// SetRange sets the Range field's value.
+func (s *HeadObjectInput) SetRange(v string) *HeadObjectInput {
+ s.Range = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+func (s *HeadObjectInput) getSSECustomerKey() (v string) {
+ if s.SSECustomerKey == nil {
+ return v
+ }
+ return *s.SSECustomerKey
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput {
+ s.VersionId = &v
+ return s
+}
+
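+// Example (illustrative sketch, not generated code): a HEAD request with a
+// freshness precondition; the service answers 304 when the object is
+// unmodified. The names and timestamp below are hypothetical placeholders.
+//
+//	in := new(HeadObjectInput).
+//		SetBucket("example-bucket").
+//		SetKey("example-object").
+//		SetIfModifiedSince(time.Date(2023, time.January, 1, 0, 0, 0, 0, time.UTC))
+//	if err := in.Validate(); err != nil {
+//		// a missing Bucket or Key surfaces here
+//	}
+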
+type HeadObjectOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates that a range of bytes was specified.
+ AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // Size of the body in bytes.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // Specifies whether the object retrieved was (true) or was not (false) a Delete
+ // Marker. If false, this response header does not appear in the response.
+ DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+ // An ETag is an opaque identifier assigned by a web server to a specific version
+ // of a resource found at a URL.
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If the object expiration is configured (see PUT Bucket lifecycle), the response
+ // includes this header. It includes the expiry-date and rule-id key-value pairs
+ // providing object expiration information. The value of the rule-id is URL
+ // encoded.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *string `location:"header" locationName:"Expires" type:"string"`
+
+ IBMRestoredCopyStorageClass *string `location:"header" locationName:"x-ibm-restored-copy-storage-class" type:"string" enum:"StorageClass"`
+
+	// This header is only included if an object has transition metadata. It indicates
+	// the transition storage class and the time of transition. If this header and
+	// the x-amz-restore header are both included, this header indicates the time
+	// at which the object was originally archived.
+ IBMTransition *string `location:"header" locationName:"x-ibm-transition" type:"string"`
+
+ // Creation date of the object.
+ LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"`
+
+ // A map of metadata to store with the object in S3.
+ //
+	// By default, unmarshaled keys are written as map keys in the following canonicalized
+	// format: the first letter and any letter following a hyphen are capitalized, and
+	// the rest are lowercase.
+ // Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // This is set to the number of metadata entries not returned in x-amz-meta
+ // headers. This can happen if you create metadata using an API like SOAP that
+ // supports more flexible metadata than the REST API. For example, using SOAP,
+ // you can create metadata whose values are not legal HTTP headers.
+ MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
+
+ // The count of parts this object has.
+ PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"`
+
+ // Amazon S3 can return this header if your request involves a bucket that is
+ // either a source or a destination in a replication rule.
+ //
+ // In replication, you have a source bucket on which you configure replication
+ // and destination bucket or buckets where Amazon S3 stores object replicas.
+ // When you request an object (GetObject) or object metadata (HeadObject) from
+ // these buckets, Amazon S3 will return the x-amz-replication-status header
+ // in the response as follows:
+ //
+ // * If requesting an object from the source bucket — Amazon S3 will return
+ // the x-amz-replication-status header if the object in your request is eligible
+ // for replication. For example, suppose that in your replication configuration,
+ // you specify object prefix TaxDocs requesting Amazon S3 to replicate objects
+ // with key prefix TaxDocs. Any objects you upload with this key name prefix,
+ // for example TaxDocs/document1.pdf, are eligible for replication. For any
+ // object request with this key name prefix, Amazon S3 will return the x-amz-replication-status
+ // header with value PENDING, COMPLETED or FAILED indicating object replication
+ // status.
+ //
+ // * If requesting an object from a destination bucket — Amazon S3 will
+ // return the x-amz-replication-status header with value REPLICA if the object
+ // in your request is a replica that Amazon S3 created and there is no replica
+ // modification replication in progress.
+ //
+	//    * When replicating objects to multiple destination buckets, the x-amz-replication-status
+	//    header acts differently. The header of the source object will only return
+	//    a value of COMPLETED when replication is successful to all destinations.
+	//    The header will remain at value PENDING until replication has completed
+	//    for all destinations. If one or more destinations fail replication, the
+	//    header will return FAILED.
+ //
+ // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
+ ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If the object is an archived object (an object whose storage class is GLACIER),
+ // the response includes this header if either the archive restoration is in
+	// progress (see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html))
+ // or an archive copy is already restored.
+ //
+ // If an archive copy is already restored, the header value indicates when Amazon
+ // S3 is scheduled to delete the object copy. For example:
+ //
+ // x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00
+ // GMT"
+ //
+ // If the object restoration is in progress, the header returns the value ongoing-request="true".
+ //
+ // For more information about archiving objects, see Transitioning Objects:
+ // General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations).
+ Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`
+
+ // Date on which it will be legal to delete or modify the object. You can only
+	// specify this or the Retention-Period header. If both are specified, a 400
+	// error will be returned. If neither is specified, the bucket's DefaultRetention
+ // period will be used.
+ RetentionExpirationDate *time.Time `location:"header" locationName:"Retention-Expiration-Date" type:"timestamp"`
+
+ RetentionLegalHoldCount *int64 `location:"header" locationName:"Retention-Legal-Hold-Count" type:"integer"`
+
+ // Retention period to store on the object in seconds. If this field and Retention-Expiration-Date
+	// are specified, a 400 error is returned. If neither is specified, the bucket's
+ // DefaultRetention period will be used. 0 is a legal value assuming the bucket's
+ // minimum retention period is also 0.
+ RetentionPeriod *int64 `location:"header" locationName:"Retention-Period" type:"integer"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round-trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for the
+ // object.
+ //
+ // SSEKMSKeyId is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by HeadObjectOutput's
+ // String and GoString methods.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // If the object is stored using server-side encryption either with an AWS KMS
+ // customer master key (CMK) or an Amazon S3-managed encryption key, the response
+ // includes this header with the value of the server-side encryption algorithm
+ // used when storing this object in Amazon S3 (for example, AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // Provides storage class information of the object. Amazon S3 returns this
+ // header for all objects except for S3 Standard storage class objects.
+ //
+ // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html).
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // Version of the object.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s HeadObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s HeadObjectOutput) GoString() string {
+ return s.String()
+}
+
+// SetAcceptRanges sets the AcceptRanges field's value.
+func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput {
+ s.AcceptRanges = &v
+ return s
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput {
+ s.CacheControl = &v
+ return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *HeadObjectOutput) SetContentDisposition(v string) *HeadObjectOutput {
+ s.ContentDisposition = &v
+ return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *HeadObjectOutput) SetContentEncoding(v string) *HeadObjectOutput {
+ s.ContentEncoding = &v
+ return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *HeadObjectOutput) SetContentLanguage(v string) *HeadObjectOutput {
+ s.ContentLanguage = &v
+ return s
+}
+
+// SetContentLength sets the ContentLength field's value.
+func (s *HeadObjectOutput) SetContentLength(v int64) *HeadObjectOutput {
+ s.ContentLength = &v
+ return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *HeadObjectOutput) SetContentType(v string) *HeadObjectOutput {
+ s.ContentType = &v
+ return s
+}
+
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *HeadObjectOutput) SetDeleteMarker(v bool) *HeadObjectOutput {
+ s.DeleteMarker = &v
+ return s
+}
+
+// SetETag sets the ETag field's value.
+func (s *HeadObjectOutput) SetETag(v string) *HeadObjectOutput {
+ s.ETag = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *HeadObjectOutput) SetExpiration(v string) *HeadObjectOutput {
+ s.Expiration = &v
+ return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *HeadObjectOutput) SetExpires(v string) *HeadObjectOutput {
+ s.Expires = &v
+ return s
+}
+
+// SetIBMRestoredCopyStorageClass sets the IBMRestoredCopyStorageClass field's value.
+func (s *HeadObjectOutput) SetIBMRestoredCopyStorageClass(v string) *HeadObjectOutput {
+ s.IBMRestoredCopyStorageClass = &v
+ return s
+}
+
+// SetIBMTransition sets the IBMTransition field's value.
+func (s *HeadObjectOutput) SetIBMTransition(v string) *HeadObjectOutput {
+ s.IBMTransition = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *HeadObjectOutput) SetLastModified(v time.Time) *HeadObjectOutput {
+ s.LastModified = &v
+ return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *HeadObjectOutput) SetMetadata(v map[string]*string) *HeadObjectOutput {
+ s.Metadata = v
+ return s
+}
+
+// SetMissingMeta sets the MissingMeta field's value.
+func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput {
+ s.MissingMeta = &v
+ return s
+}
+
+// SetPartsCount sets the PartsCount field's value.
+func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput {
+ s.PartsCount = &v
+ return s
+}
+
+// SetReplicationStatus sets the ReplicationStatus field's value.
+func (s *HeadObjectOutput) SetReplicationStatus(v string) *HeadObjectOutput {
+ s.ReplicationStatus = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *HeadObjectOutput) SetRequestCharged(v string) *HeadObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetRestore sets the Restore field's value.
+func (s *HeadObjectOutput) SetRestore(v string) *HeadObjectOutput {
+ s.Restore = &v
+ return s
+}
+
+// SetRetentionExpirationDate sets the RetentionExpirationDate field's value.
+func (s *HeadObjectOutput) SetRetentionExpirationDate(v time.Time) *HeadObjectOutput {
+ s.RetentionExpirationDate = &v
+ return s
+}
+
+// SetRetentionLegalHoldCount sets the RetentionLegalHoldCount field's value.
+func (s *HeadObjectOutput) SetRetentionLegalHoldCount(v int64) *HeadObjectOutput {
+ s.RetentionLegalHoldCount = &v
+ return s
+}
+
+// SetRetentionPeriod sets the RetentionPeriod field's value.
+func (s *HeadObjectOutput) SetRetentionPeriod(v int64) *HeadObjectOutput {
+ s.RetentionPeriod = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *HeadObjectOutput) SetSSECustomerAlgorithm(v string) *HeadObjectOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *HeadObjectOutput) SetSSECustomerKeyMD5(v string) *HeadObjectOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *HeadObjectOutput) SetSSEKMSKeyId(v string) *HeadObjectOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *HeadObjectOutput) SetServerSideEncryption(v string) *HeadObjectOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *HeadObjectOutput) SetStorageClass(v string) *HeadObjectOutput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *HeadObjectOutput) SetVersionId(v string) *HeadObjectOutput {
+ s.VersionId = &v
+ return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutput {
+ s.WebsiteRedirectLocation = &v
+ return s
+}
+
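+// exampleHeadObjectSize is an illustrative sketch, not generated API code: it
+// shows how the HeadObjectOutput accessors above might be used to read an
+// object's size. It assumes svc is a *S3 client constructed elsewhere in this
+// package, that the HeadObjectInput shape defined earlier in this file exposes
+// SetBucket/SetKey, and that bucket and key are hypothetical caller-supplied
+// values.
+func exampleHeadObjectSize(svc *S3, bucket, key string) (int64, error) {
+	input := &HeadObjectInput{}
+	input.SetBucket(bucket)
+	input.SetKey(key)
+	out, err := svc.HeadObject(input)
+	if err != nil {
+		return 0, err
+	}
+	// ContentLength is a pointer and may be nil if the header was absent.
+	if out.ContentLength == nil {
+		return 0, nil
+	}
+	return *out.ContentLength, nil
+}
+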
+// Container for the Suffix element.
+type IndexDocument struct {
+ _ struct{} `type:"structure"`
+
+	// A suffix that is appended to a request that is for a directory on the website
+	// endpoint (for example, if the suffix is index.html and you make a request
+	// to samplebucket/images/, the data that is returned will be for the object
+	// with the key name images/index.html). The suffix must not be empty and must
+	// not include a slash character.
+ //
+ // Replacement must be made for object keys containing special characters (such
+ // as carriage returns) when using XML requests. For more information, see XML
+ // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ //
+ // Suffix is a required field
+ Suffix *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s IndexDocument) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s IndexDocument) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *IndexDocument) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "IndexDocument"}
+ if s.Suffix == nil {
+ invalidParams.Add(request.NewErrParamRequired("Suffix"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetSuffix sets the Suffix field's value.
+func (s *IndexDocument) SetSuffix(v string) *IndexDocument {
+ s.Suffix = &v
+ return s
+}
+
+// Container element that identifies who initiated the multipart upload.
+type Initiator struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the Principal.
+ DisplayName *string `type:"string"`
+
+ // If the principal is an AWS account, it provides the Canonical User ID. If
+ // the principal is an IAM User, it provides a user ARN value.
+ ID *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Initiator) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Initiator) GoString() string {
+ return s.String()
+}
+
+// SetDisplayName sets the DisplayName field's value.
+func (s *Initiator) SetDisplayName(v string) *Initiator {
+ s.DisplayName = &v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *Initiator) SetID(v string) *Initiator {
+ s.ID = &v
+ return s
+}
+
+type LegalHold struct {
+ _ struct{} `type:"structure"`
+
+ Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ ID *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LegalHold) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LegalHold) GoString() string {
+ return s.String()
+}
+
+// SetDate sets the Date field's value.
+func (s *LegalHold) SetDate(v time.Time) *LegalHold {
+ s.Date = &v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *LegalHold) SetID(v string) *LegalHold {
+ s.ID = &v
+ return s
+}
+
+// Container for lifecycle rules. You can add as many as 1000 rules.
+type LifecycleConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Rules is a required field
+ Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LifecycleConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LifecycleConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LifecycleConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"}
+ if s.Rules == nil {
+ invalidParams.Add(request.NewErrParamRequired("Rules"))
+ }
+ if s.Rules != nil {
+ for i, v := range s.Rules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRules sets the Rules field's value.
+func (s *LifecycleConfiguration) SetRules(v []*LifecycleRule) *LifecycleConfiguration {
+ s.Rules = v
+ return s
+}
+
+// Container for the expiration for the lifecycle of the object.
+type LifecycleExpiration struct {
+ _ struct{} `type:"structure"`
+
+	// Indicates the date on which the object is to be moved or deleted. The date
+	// should be in GMT ISO 8601 format.
+ Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // Indicates the lifetime, in days, of the objects that are subject to the rule.
+ // The value must be a non-zero positive integer.
+ Days *int64 `type:"integer"`
+
+ // Indicates whether Amazon S3 will remove a delete marker with no noncurrent
+	// versions. If set to true, the delete marker will be expired; if set to false,
+	// the policy takes no action. This cannot be specified with Days or Date in
+ // a Lifecycle Expiration Policy.
+ ExpiredObjectDeleteMarker *bool `type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LifecycleExpiration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LifecycleExpiration) GoString() string {
+ return s.String()
+}
+
+// SetDate sets the Date field's value.
+func (s *LifecycleExpiration) SetDate(v time.Time) *LifecycleExpiration {
+ s.Date = &v
+ return s
+}
+
+// SetDays sets the Days field's value.
+func (s *LifecycleExpiration) SetDays(v int64) *LifecycleExpiration {
+ s.Days = &v
+ return s
+}
+
+// SetExpiredObjectDeleteMarker sets the ExpiredObjectDeleteMarker field's value.
+func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExpiration {
+ s.ExpiredObjectDeleteMarker = &v
+ return s
+}
+
+// A lifecycle rule for individual objects in an Amazon S3 bucket.
+type LifecycleRule struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the days since the initiation of an incomplete multipart upload
+ // that Amazon S3 will wait before permanently removing all parts of the upload.
+ // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+ // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
+ // in the Amazon S3 User Guide.
+ AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
+
+	// Specifies the expiration for the lifecycle of the object in the form of date,
+	// days, and whether the object has a delete marker.
+ Expiration *LifecycleExpiration `type:"structure"`
+
+ // The Filter is used to identify objects that a Lifecycle Rule applies to.
+ // A Filter must have exactly one of Prefix, Tag, or And specified. Filter is
+	// required if the LifecycleRule does not contain a Prefix element.
+ //
+ // Filter is a required field
+ Filter *LifecycleRuleFilter `type:"structure" required:"true"`
+
+ // Unique identifier for the rule. The value cannot be longer than 255 characters.
+ ID *string `type:"string"`
+
+ // Specifies when noncurrent object versions expire. Upon expiration, Amazon
+ // S3 permanently deletes the noncurrent object versions. You set this lifecycle
+ // configuration action on a bucket that has versioning enabled (or suspended)
+ // to request that Amazon S3 delete noncurrent object versions at a specific
+ // period in the object's lifetime.
+ NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
+
+ // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule
+ // is not currently being applied.
+ //
+ // Status is a required field
+ Status *string `type:"string" required:"true" enum:"ExpirationStatus"`
+
+ // Specifies when an Amazon S3 object transitions to a specified storage class.
+ Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LifecycleRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LifecycleRule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LifecycleRule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"}
+ if s.Filter == nil {
+ invalidParams.Add(request.NewErrParamRequired("Filter"))
+ }
+ if s.Status == nil {
+ invalidParams.Add(request.NewErrParamRequired("Status"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value.
+func (s *LifecycleRule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *LifecycleRule {
+ s.AbortIncompleteMultipartUpload = v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *LifecycleRule) SetExpiration(v *LifecycleExpiration) *LifecycleRule {
+ s.Expiration = v
+ return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *LifecycleRule) SetFilter(v *LifecycleRuleFilter) *LifecycleRule {
+ s.Filter = v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *LifecycleRule) SetID(v string) *LifecycleRule {
+ s.ID = &v
+ return s
+}
+
+// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value.
+func (s *LifecycleRule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *LifecycleRule {
+ s.NoncurrentVersionExpiration = v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *LifecycleRule) SetStatus(v string) *LifecycleRule {
+ s.Status = &v
+ return s
+}
+
+// SetTransitions sets the Transitions field's value.
+func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule {
+ s.Transitions = v
+ return s
+}
+
+// The Filter is used to identify objects that a Lifecycle Rule applies to.
+// A Filter must have exactly one of Prefix, Tag, or And specified.
+type LifecycleRuleFilter struct {
+ _ struct{} `type:"structure"`
+
+ // Prefix identifying one or more objects to which the rule applies.
+ //
+ // Replacement must be made for object keys containing special characters (such
+ // as carriage returns) when using XML requests. For more information, see XML
+ // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ Prefix *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LifecycleRuleFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LifecycleRuleFilter) GoString() string {
+ return s.String()
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter {
+ s.Prefix = &v
+ return s
+}
+
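+// exampleLifecycleConfiguration is an illustrative sketch, not generated API
+// code: it chains the setters above to build a lifecycle configuration that
+// expires objects under a hypothetical "logs/" prefix after 30 days, then
+// validates it. All field values here are assumptions for illustration only.
+func exampleLifecycleConfiguration() (*LifecycleConfiguration, error) {
+	rule := &LifecycleRule{}
+	rule.SetID("expire-logs-after-30-days")
+	rule.SetStatus("Enabled")
+	rule.SetFilter((&LifecycleRuleFilter{}).SetPrefix("logs/"))
+	rule.SetExpiration((&LifecycleExpiration{}).SetDays(30))
+
+	cfg := (&LifecycleConfiguration{}).SetRules([]*LifecycleRule{rule})
+	// Validate reports missing required fields (Rules, Filter, Status)
+	// before the request is ever sent.
+	if err := cfg.Validate(); err != nil {
+		return nil, err
+	}
+	return cfg, nil
+}
+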
+type ListBucketsExtendedInput struct {
+ _ struct{} `locationName:"ListBucketsExtendedRequest" type:"structure"`
+
+ // Sets the IBM Service Instance Id in the request.
+ //
+ // Only Valid for IBM IAM Authentication
+ IBMServiceInstanceId *string `location:"header" locationName:"ibm-service-instance-id" type:"string"`
+
+ // Specifies the bucket to start with when listing all buckets.
+ Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+ // Sets the maximum number of keys returned in the response. The response might
+ // contain fewer keys but will never contain more.
+ MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+ // Limits the response to buckets that begin with the specified prefix.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListBucketsExtendedInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListBucketsExtendedInput) GoString() string {
+ return s.String()
+}
+
+// SetIBMServiceInstanceId sets the IBMServiceInstanceId field's value.
+func (s *ListBucketsExtendedInput) SetIBMServiceInstanceId(v string) *ListBucketsExtendedInput {
+ s.IBMServiceInstanceId = &v
+ return s
+}
+
+// SetMarker sets the Marker field's value.
+func (s *ListBucketsExtendedInput) SetMarker(v string) *ListBucketsExtendedInput {
+ s.Marker = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListBucketsExtendedInput) SetMaxKeys(v int64) *ListBucketsExtendedInput {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListBucketsExtendedInput) SetPrefix(v string) *ListBucketsExtendedInput {
+ s.Prefix = &v
+ return s
+}
+
+type ListBucketsExtendedOutput struct {
+ _ struct{} `type:"structure"`
+
+ Buckets []*BucketExtended `locationNameList:"Bucket" type:"list"`
+
+ // Indicates whether the returned list of buckets is truncated.
+ IsTruncated *bool `type:"boolean"`
+
+ // The bucket at or after which the listing began.
+ Marker *string `type:"string"`
+
+ MaxKeys *int64 `type:"integer"`
+
+ // Container for the owner's display name and ID.
+ Owner *Owner `type:"structure"`
+
+ // When a prefix is provided in the request, this field contains the specified
+ // prefix. The result contains only buckets starting with the specified prefix.
+ Prefix *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListBucketsExtendedOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListBucketsExtendedOutput) GoString() string {
+ return s.String()
+}
+
+// SetBuckets sets the Buckets field's value.
+func (s *ListBucketsExtendedOutput) SetBuckets(v []*BucketExtended) *ListBucketsExtendedOutput {
+ s.Buckets = v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListBucketsExtendedOutput) SetIsTruncated(v bool) *ListBucketsExtendedOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetMarker sets the Marker field's value.
+func (s *ListBucketsExtendedOutput) SetMarker(v string) *ListBucketsExtendedOutput {
+ s.Marker = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListBucketsExtendedOutput) SetMaxKeys(v int64) *ListBucketsExtendedOutput {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *ListBucketsExtendedOutput) SetOwner(v *Owner) *ListBucketsExtendedOutput {
+ s.Owner = v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListBucketsExtendedOutput) SetPrefix(v string) *ListBucketsExtendedOutput {
+ s.Prefix = &v
+ return s
+}
+
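+// exampleListAllBucketsExtended is an illustrative sketch, not generated API
+// code: it pages through the extended bucket listing by feeding the last
+// bucket name of each page back in as the Marker, as the field comments above
+// describe. It assumes svc is a *S3 client exposing a ListBucketsExtended
+// method matching the shapes above, and that BucketExtended carries the
+// bucket Name.
+func exampleListAllBucketsExtended(svc *S3) ([]*BucketExtended, error) {
+	var buckets []*BucketExtended
+	input := &ListBucketsExtendedInput{}
+	for {
+		out, err := svc.ListBucketsExtended(input)
+		if err != nil {
+			return nil, err
+		}
+		buckets = append(buckets, out.Buckets...)
+		if out.IsTruncated == nil || !*out.IsTruncated || len(out.Buckets) == 0 {
+			return buckets, nil
+		}
+		// Resume after the last bucket returned in this page.
+		input.Marker = out.Buckets[len(out.Buckets)-1].Name
+	}
+}
+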
+type ListBucketsInput struct {
+ _ struct{} `locationName:"ListBucketsRequest" type:"structure"`
+
+ // Sets the IBM Service Instance Id in the request.
+ //
+ // Only Valid for IBM IAM Authentication
+ IBMServiceInstanceId *string `location:"header" locationName:"ibm-service-instance-id" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListBucketsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListBucketsInput) GoString() string {
+ return s.String()
+}
+
+// SetIBMServiceInstanceId sets the IBMServiceInstanceId field's value.
+func (s *ListBucketsInput) SetIBMServiceInstanceId(v string) *ListBucketsInput {
+ s.IBMServiceInstanceId = &v
+ return s
+}
+
+type ListBucketsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The list of buckets owned by the requestor.
+ Buckets []*Bucket `locationNameList:"Bucket" type:"list"`
+
+ // The owner of the buckets listed.
+ Owner *Owner `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListBucketsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListBucketsOutput) GoString() string {
+ return s.String()
+}
+
+// SetBuckets sets the Buckets field's value.
+func (s *ListBucketsOutput) SetBuckets(v []*Bucket) *ListBucketsOutput {
+ s.Buckets = v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput {
+ s.Owner = v
+ return s
+}
+
+type ListLegalHoldsInput struct {
+ _ struct{} `locationName:"ListLegalHoldsRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListLegalHoldsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListLegalHoldsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListLegalHoldsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListLegalHoldsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListLegalHoldsInput) SetBucket(v string) *ListLegalHoldsInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *ListLegalHoldsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetKey sets the Key field's value.
+func (s *ListLegalHoldsInput) SetKey(v string) *ListLegalHoldsInput {
+ s.Key = &v
+ return s
+}
+
+type ListLegalHoldsOutput struct {
+ _ struct{} `type:"structure"`
+
+ CreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ LegalHolds []*LegalHold `type:"list"`
+
+	// Retention period to store on the object, in seconds. The object can be neither
+	// overwritten nor deleted until the amount of time specified in the retention
+	// period has elapsed. If both this field and Retention-Expiration-Date are
+	// specified, a 400 error is returned. If neither is specified, the bucket's
+	// DefaultRetention period will be used. 0 is a legal value, assuming the bucket's
+	// minimum retention period is also 0.
+ RetentionPeriod *int64 `type:"integer"`
+
+ RetentionPeriodExpirationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListLegalHoldsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListLegalHoldsOutput) GoString() string {
+ return s.String()
+}
+
+// SetCreateTime sets the CreateTime field's value.
+func (s *ListLegalHoldsOutput) SetCreateTime(v time.Time) *ListLegalHoldsOutput {
+ s.CreateTime = &v
+ return s
+}
+
+// SetLegalHolds sets the LegalHolds field's value.
+func (s *ListLegalHoldsOutput) SetLegalHolds(v []*LegalHold) *ListLegalHoldsOutput {
+ s.LegalHolds = v
+ return s
+}
+
+// SetRetentionPeriod sets the RetentionPeriod field's value.
+func (s *ListLegalHoldsOutput) SetRetentionPeriod(v int64) *ListLegalHoldsOutput {
+ s.RetentionPeriod = &v
+ return s
+}
+
+// SetRetentionPeriodExpirationDate sets the RetentionPeriodExpirationDate field's value.
+func (s *ListLegalHoldsOutput) SetRetentionPeriodExpirationDate(v time.Time) *ListLegalHoldsOutput {
+ s.RetentionPeriodExpirationDate = &v
+ return s
+}
+
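+// exampleCountLegalHolds is an illustrative sketch, not generated API code:
+// it fetches the legal holds for a single object and counts them. It assumes
+// this package exposes a ListLegalHolds method on *S3 matching the input and
+// output shapes above; bucket and key are hypothetical caller-supplied values.
+func exampleCountLegalHolds(svc *S3, bucket, key string) (int, error) {
+	input := &ListLegalHoldsInput{}
+	input.SetBucket(bucket)
+	input.SetKey(key)
+	out, err := svc.ListLegalHolds(input)
+	if err != nil {
+		return 0, err
+	}
+	return len(out.LegalHolds), nil
+}
+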
+type ListMultipartUploadsInput struct {
+ _ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"`
+
+ // The name of the bucket to which the multipart upload was initiated.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this action using S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Character you use to group keys.
+ //
+ // All keys that contain the same string between the prefix, if specified, and
+ // the first occurrence of the delimiter after the prefix are grouped under
+ // a single result element, CommonPrefixes. If you don't specify the prefix
+ // parameter, then the substring starts at the beginning of the key. The keys
+ // that are grouped under CommonPrefixes result element are not returned elsewhere
+ // in the response.
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+ // Requests Amazon S3 to encode the object keys in the response and specifies
+ // the encoding method to use. An object key may contain any Unicode character;
+	// however, the XML 1.0 parser cannot parse some characters, such as characters
+ // with an ASCII value from 0 to 10. For characters that are not supported in
+ // XML 1.0, you can add this parameter to request that Amazon S3 encode the
+ // keys in the response.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Together with upload-id-marker, this parameter specifies the multipart upload
+ // after which listing should begin.
+ //
+ // If upload-id-marker is not specified, only the keys lexicographically greater
+ // than the specified key-marker will be included in the list.
+ //
+ // If upload-id-marker is specified, any multipart uploads for a key equal to
+ // the key-marker might also be included, provided those multipart uploads have
+ // upload IDs lexicographically greater than the specified upload-id-marker.
+ KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"`
+
+ // Sets the maximum number of multipart uploads, from 1 to 1,000, to return
+ // in the response body. 1,000 is the maximum number of uploads that can be
+ // returned in a response.
+ MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"`
+
+ // Lists in-progress uploads only for those keys that begin with the specified
+	// prefix. You can use prefixes to separate a bucket into different groupings
+ // of keys. (You can think of using prefix to make groups in the same way you'd
+ // use a folder in a file system.)
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+ // Together with key-marker, specifies the multipart upload after which listing
+ // should begin. If key-marker is not specified, the upload-id-marker parameter
+ // is ignored. Otherwise, any multipart uploads for a key equal to the key-marker
+ // might be included in the list only if they have an upload ID lexicographically
+ // greater than the specified upload-id-marker.
+ UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListMultipartUploadsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListMultipartUploadsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListMultipartUploadsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *ListMultipartUploadsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListMultipartUploadsInput) SetEncodingType(v string) *ListMultipartUploadsInput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *ListMultipartUploadsInput) SetExpectedBucketOwner(v string) *ListMultipartUploadsInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetKeyMarker sets the KeyMarker field's value.
+func (s *ListMultipartUploadsInput) SetKeyMarker(v string) *ListMultipartUploadsInput {
+ s.KeyMarker = &v
+ return s
+}
+
+// SetMaxUploads sets the MaxUploads field's value.
+func (s *ListMultipartUploadsInput) SetMaxUploads(v int64) *ListMultipartUploadsInput {
+ s.MaxUploads = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListMultipartUploadsInput) SetPrefix(v string) *ListMultipartUploadsInput {
+ s.Prefix = &v
+ return s
+}
+
+// SetUploadIdMarker sets the UploadIdMarker field's value.
+func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUploadsInput {
+ s.UploadIdMarker = &v
+ return s
+}
+
+type ListMultipartUploadsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket to which the multipart upload was initiated.
+ Bucket *string `type:"string"`
+
+ // If you specify a delimiter in the request, then the result returns each distinct
+ // key prefix containing the delimiter in a CommonPrefixes element. The distinct
+ // key prefixes are returned in the Prefix child element.
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ // Contains the delimiter you specified in the request. If you don't specify
+ // a delimiter in your request, this element is absent from the response.
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ //
+ // If you specify encoding-type request parameter, Amazon S3 includes this element
+ // in the response, and returns encoded key name values in the following response
+ // elements:
+ //
+ // Delimiter, KeyMarker, Prefix, NextKeyMarker, Key.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // Indicates whether the returned list of multipart uploads is truncated. A
+ // value of true indicates that the list was truncated. The list can be truncated
+ // if the number of multipart uploads exceeds the limit allowed or specified
+ // by max uploads.
+ IsTruncated *bool `type:"boolean"`
+
+ // The key at or after which the listing began.
+ KeyMarker *string `type:"string"`
+
+ // Maximum number of multipart uploads that could have been included in the
+ // response.
+ MaxUploads *int64 `type:"integer"`
+
+ // When a list is truncated, this element specifies the value that should be
+ // used for the key-marker request parameter in a subsequent request.
+ NextKeyMarker *string `type:"string"`
+
+ // When a list is truncated, this element specifies the value that should be
+ // used for the upload-id-marker request parameter in a subsequent request.
+ NextUploadIdMarker *string `type:"string"`
+
+ // When a prefix is provided in the request, this field contains the specified
+ // prefix. The result contains only keys starting with the specified prefix.
+ Prefix *string `type:"string"`
+
+ // Upload ID after which listing began.
+ UploadIdMarker *string `type:"string"`
+
+ // Container for elements related to a particular multipart upload. A response
+ // can contain zero or more Upload elements.
+ Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListMultipartUploadsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListMultipartUploadsOutput) GoString() string {
+ return s.String()
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOutput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *ListMultipartUploadsOutput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput {
+ s.CommonPrefixes = v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListMultipartUploadsOutput) SetDelimiter(v string) *ListMultipartUploadsOutput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListMultipartUploadsOutput) SetEncodingType(v string) *ListMultipartUploadsOutput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListMultipartUploadsOutput) SetIsTruncated(v bool) *ListMultipartUploadsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetKeyMarker sets the KeyMarker field's value.
+func (s *ListMultipartUploadsOutput) SetKeyMarker(v string) *ListMultipartUploadsOutput {
+ s.KeyMarker = &v
+ return s
+}
+
+// SetMaxUploads sets the MaxUploads field's value.
+func (s *ListMultipartUploadsOutput) SetMaxUploads(v int64) *ListMultipartUploadsOutput {
+ s.MaxUploads = &v
+ return s
+}
+
+// SetNextKeyMarker sets the NextKeyMarker field's value.
+func (s *ListMultipartUploadsOutput) SetNextKeyMarker(v string) *ListMultipartUploadsOutput {
+ s.NextKeyMarker = &v
+ return s
+}
+
+// SetNextUploadIdMarker sets the NextUploadIdMarker field's value.
+func (s *ListMultipartUploadsOutput) SetNextUploadIdMarker(v string) *ListMultipartUploadsOutput {
+ s.NextUploadIdMarker = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListMultipartUploadsOutput) SetPrefix(v string) *ListMultipartUploadsOutput {
+ s.Prefix = &v
+ return s
+}
+
+// SetUploadIdMarker sets the UploadIdMarker field's value.
+func (s *ListMultipartUploadsOutput) SetUploadIdMarker(v string) *ListMultipartUploadsOutput {
+ s.UploadIdMarker = &v
+ return s
+}
+
+// SetUploads sets the Uploads field's value.
+func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMultipartUploadsOutput {
+ s.Uploads = v
+ return s
+}
+
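+// exampleListAllUploads is an illustrative sketch, not generated API code: it
+// pages through in-progress multipart uploads by feeding NextKeyMarker and
+// NextUploadIdMarker back into the next request, as the key-marker and
+// upload-id-marker comments above describe. It assumes svc is a *S3 client
+// constructed elsewhere in this package.
+func exampleListAllUploads(svc *S3, bucket string) ([]*MultipartUpload, error) {
+	var uploads []*MultipartUpload
+	input := &ListMultipartUploadsInput{}
+	input.SetBucket(bucket)
+	for {
+		out, err := svc.ListMultipartUploads(input)
+		if err != nil {
+			return nil, err
+		}
+		uploads = append(uploads, out.Uploads...)
+		if out.IsTruncated == nil || !*out.IsTruncated {
+			return uploads, nil
+		}
+		// Resume after the last upload returned in this page.
+		input.KeyMarker = out.NextKeyMarker
+		input.UploadIdMarker = out.NextUploadIdMarker
+	}
+}
+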
+type ListObjectVersionsInput struct {
+ _ struct{} `locationName:"ListObjectVersionsRequest" type:"structure"`
+
+ // The bucket name that contains the objects.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // A delimiter is a character that you specify to group keys. All keys that
+ // contain the same string between the prefix and the first occurrence of the
+ // delimiter are grouped under a single result element in CommonPrefixes. These
+ // groups are counted as one result against the max-keys limitation. These keys
+ // are not returned elsewhere in the response.
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+ // Requests Amazon S3 to encode the object keys in the response and specifies
+ // the encoding method to use. An object key may contain any Unicode character;
+	// however, the XML 1.0 parser cannot parse some characters, such as characters
+ // with an ASCII value from 0 to 10. For characters that are not supported in
+ // XML 1.0, you can add this parameter to request that Amazon S3 encode the
+ // keys in the response.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Specifies the key to start with when listing objects in a bucket.
+ KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"`
+
+ // Sets the maximum number of keys returned in the response. By default the
+ // action returns up to 1,000 key names. The response might contain fewer keys
+ // but will never contain more. If additional keys satisfy the search criteria,
+ // but were not returned because max-keys was exceeded, the response contains
+ // <isTruncated>true</isTruncated>. To return the additional keys, see key-marker
+ // and version-id-marker.
+ MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+ // Use this parameter to select only those keys that begin with the specified
+ // prefix. You can use prefixes to separate a bucket into different groupings
+ // of keys. (You can think of using prefix to make groups in the same way you'd
+ // use a folder in a file system.) You can use prefix with delimiter to roll
+ // up numerous objects into a single result under CommonPrefixes.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+ // Specifies the object version you want to start listing from.
+ VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListObjectVersionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListObjectVersionsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListObjectVersionsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *ListObjectVersionsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectVersionsInput) SetEncodingType(v string) *ListObjectVersionsInput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *ListObjectVersionsInput) SetExpectedBucketOwner(v string) *ListObjectVersionsInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetKeyMarker sets the KeyMarker field's value.
+func (s *ListObjectVersionsInput) SetKeyMarker(v string) *ListObjectVersionsInput {
+ s.KeyMarker = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectVersionsInput) SetMaxKeys(v int64) *ListObjectVersionsInput {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectVersionsInput) SetPrefix(v string) *ListObjectVersionsInput {
+ s.Prefix = &v
+ return s
+}
+
+// SetVersionIdMarker sets the VersionIdMarker field's value.
+func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersionsInput {
+ s.VersionIdMarker = &v
+ return s
+}
+
+type ListObjectVersionsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // All of the keys rolled up into a common prefix count as a single return when
+ // calculating the number of returns.
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ // Container for an object that is a delete marker.
+ DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"`
+
+ // The delimiter grouping the included keys. A delimiter is a character that
+ // you specify to group keys. All keys that contain the same string between
+ // the prefix and the first occurrence of the delimiter are grouped under a
+ // single result element in CommonPrefixes. These groups are counted as one
+ // result against the max-keys limitation. These keys are not returned elsewhere
+ // in the response.
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object key names in the XML response.
+ //
+ // If you specify encoding-type request parameter, Amazon S3 includes this element
+ // in the response, and returns encoded key name values in the following response
+ // elements:
+ //
+ // KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // A flag that indicates whether Amazon S3 returned all of the results that
+ // satisfied the search criteria. If your results were truncated, you can make
+ // a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker
+ // response parameters as a starting place in another request to return the
+ // rest of the results.
+ IsTruncated *bool `type:"boolean"`
+
+ // Marks the last key returned in a truncated response.
+ KeyMarker *string `type:"string"`
+
+ // Specifies the maximum number of objects to return.
+ MaxKeys *int64 `type:"integer"`
+
+ // The bucket name.
+ Name *string `type:"string"`
+
+ // When the number of responses exceeds the value of MaxKeys, NextKeyMarker
+ // specifies the first key not returned that satisfies the search criteria.
+ // Use this value for the key-marker request parameter in a subsequent request.
+ NextKeyMarker *string `type:"string"`
+
+ // When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker
+ // specifies the first object version not returned that satisfies the search
+ // criteria. Use this value for the version-id-marker request parameter in a
+ // subsequent request.
+ NextVersionIdMarker *string `type:"string"`
+
+ // Selects objects that start with the value supplied by this parameter.
+ Prefix *string `type:"string"`
+
+ // Marks the last version of the key returned in a truncated response.
+ VersionIdMarker *string `type:"string"`
+
+ // Container for version information.
+ Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListObjectVersionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListObjectVersionsOutput) GoString() string {
+ return s.String()
+}
+
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListObjectVersionsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectVersionsOutput {
+ s.CommonPrefixes = v
+ return s
+}
+
+// SetDeleteMarkers sets the DeleteMarkers field's value.
+func (s *ListObjectVersionsOutput) SetDeleteMarkers(v []*DeleteMarkerEntry) *ListObjectVersionsOutput {
+ s.DeleteMarkers = v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectVersionsOutput) SetDelimiter(v string) *ListObjectVersionsOutput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectVersionsOutput) SetEncodingType(v string) *ListObjectVersionsOutput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListObjectVersionsOutput) SetIsTruncated(v bool) *ListObjectVersionsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetKeyMarker sets the KeyMarker field's value.
+func (s *ListObjectVersionsOutput) SetKeyMarker(v string) *ListObjectVersionsOutput {
+ s.KeyMarker = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectVersionsOutput) SetMaxKeys(v int64) *ListObjectVersionsOutput {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *ListObjectVersionsOutput) SetName(v string) *ListObjectVersionsOutput {
+ s.Name = &v
+ return s
+}
+
+// SetNextKeyMarker sets the NextKeyMarker field's value.
+func (s *ListObjectVersionsOutput) SetNextKeyMarker(v string) *ListObjectVersionsOutput {
+ s.NextKeyMarker = &v
+ return s
+}
+
+// SetNextVersionIdMarker sets the NextVersionIdMarker field's value.
+func (s *ListObjectVersionsOutput) SetNextVersionIdMarker(v string) *ListObjectVersionsOutput {
+ s.NextVersionIdMarker = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectVersionsOutput) SetPrefix(v string) *ListObjectVersionsOutput {
+ s.Prefix = &v
+ return s
+}
+
+// SetVersionIdMarker sets the VersionIdMarker field's value.
+func (s *ListObjectVersionsOutput) SetVersionIdMarker(v string) *ListObjectVersionsOutput {
+ s.VersionIdMarker = &v
+ return s
+}
+
+// SetVersions sets the Versions field's value.
+func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVersionsOutput {
+ s.Versions = v
+ return s
+}
+
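+// exampleListAllVersions is an illustrative sketch, not generated API code:
+// it pages through object versions by feeding NextKeyMarker and
+// NextVersionIdMarker back into the next request, as the field comments above
+// describe. It assumes svc is a *S3 client constructed elsewhere in this
+// package.
+func exampleListAllVersions(svc *S3, bucket string) ([]*ObjectVersion, error) {
+	var versions []*ObjectVersion
+	input := &ListObjectVersionsInput{}
+	input.SetBucket(bucket)
+	for {
+		out, err := svc.ListObjectVersions(input)
+		if err != nil {
+			return nil, err
+		}
+		versions = append(versions, out.Versions...)
+		if out.IsTruncated == nil || !*out.IsTruncated {
+			return versions, nil
+		}
+		// Resume from the first key/version not yet returned.
+		input.KeyMarker = out.NextKeyMarker
+		input.VersionIdMarker = out.NextVersionIdMarker
+	}
+}
+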
+type ListObjectsInput struct {
+ _ struct{} `locationName:"ListObjectsRequest" type:"structure"`
+
+ // The name of the bucket containing the objects.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this action using S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+ // Requests Amazon S3 to encode the object keys in the response and specifies
+ // the encoding method to use. An object key may contain any Unicode character;
+	// however, the XML 1.0 parser cannot parse some characters, such as characters
+ // with an ASCII value from 0 to 10. For characters that are not supported in
+ // XML 1.0, you can add this parameter to request that Amazon S3 encode the
+ // keys in the response.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Specifies the key to start with when listing objects in a bucket.
+ Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+ // Sets the maximum number of keys returned in the response. By default the
+ // action returns up to 1,000 key names. The response might contain fewer keys
+ // but will never contain more.
+ MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // list objects request. Bucket owners need not specify this parameter in their
+ // requests.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListObjectsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListObjectsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListObjectsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *ListObjectsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsInput) SetEncodingType(v string) *ListObjectsInput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *ListObjectsInput) SetExpectedBucketOwner(v string) *ListObjectsInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetMarker sets the Marker field's value.
+func (s *ListObjectsInput) SetMarker(v string) *ListObjectsInput {
+ s.Marker = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsInput) SetMaxKeys(v int64) *ListObjectsInput {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsInput) SetPrefix(v string) *ListObjectsInput {
+ s.Prefix = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput {
+ s.RequestPayer = &v
+ return s
+}
+
+type ListObjectsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // All of the keys (up to 1,000) rolled up in a common prefix count as a single
+ // return when calculating the number of returns.
+ //
+ // A response can contain CommonPrefixes only if you specify a delimiter.
+ //
+ // CommonPrefixes contains all (if there are any) keys between Prefix and the
+ // next occurrence of the string specified by the delimiter.
+ //
+ // CommonPrefixes lists keys that act like subdirectories in the directory specified
+ // by Prefix.
+ //
+ // For example, if the prefix is notes/ and the delimiter is a slash (/) as
+ // in notes/summer/july, the common prefix is notes/summer/. All of the keys
+ // that roll up into a common prefix count as a single return when calculating
+ // the number of returns.
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ // Metadata about each object returned.
+ Contents []*Object `type:"list" flattened:"true"`
+
+ // Causes keys that contain the same string between the prefix and the first
+ // occurrence of the delimiter to be rolled up into a single result element
+ // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere
+ // in the response. Each rolled-up result counts as only one return against
+ // the MaxKeys value.
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // The root key used by Key Protect to encrypt this bucket. This value must
+ // be the full CRN of the root key.
+ IBMSSEKPCrkId *string `location:"header" locationName:"ibm-sse-kp-customer-root-key-crn" type:"string"`
+
+ // Specifies whether the Bucket has Key Protect enabled.
+ IBMSSEKPEnabled *bool `location:"header" locationName:"ibm-sse-kp-enabled" type:"boolean"`
+
+ // A flag that indicates whether Amazon S3 returned all of the results that
+ // satisfied the search criteria.
+ IsTruncated *bool `type:"boolean"`
+
+ // Indicates where in the bucket listing begins. Marker is included in the response
+ // if it was sent with the request.
+ Marker *string `type:"string"`
+
+ // The maximum number of keys returned in the response body.
+ MaxKeys *int64 `type:"integer"`
+
+ // The bucket name.
+ Name *string `type:"string"`
+
+	// When the response is truncated (the IsTruncated element value in the response
+	// is true), you can use the key name in this field as the marker in a subsequent
+	// request to get the next set of objects. Amazon S3 lists objects in alphabetical
+	// order. Note: this element is returned only if you have the delimiter request
+	// parameter specified. If the response does not include the NextMarker and it
+	// is truncated, you can use the value of the last Key in the response as the
+	// marker in a subsequent request to get the next set of object keys.
+ NextMarker *string `type:"string"`
+
+ // Keys that begin with the indicated prefix.
+ Prefix *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListObjectsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListObjectsOutput) GoString() string {
+ return s.String()
+}
+
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListObjectsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsOutput {
+ s.CommonPrefixes = v
+ return s
+}
+
+// SetContents sets the Contents field's value.
+func (s *ListObjectsOutput) SetContents(v []*Object) *ListObjectsOutput {
+ s.Contents = v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsOutput) SetDelimiter(v string) *ListObjectsOutput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsOutput) SetEncodingType(v string) *ListObjectsOutput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetIBMSSEKPCrkId sets the IBMSSEKPCrkId field's value.
+func (s *ListObjectsOutput) SetIBMSSEKPCrkId(v string) *ListObjectsOutput {
+ s.IBMSSEKPCrkId = &v
+ return s
+}
+
+// SetIBMSSEKPEnabled sets the IBMSSEKPEnabled field's value.
+func (s *ListObjectsOutput) SetIBMSSEKPEnabled(v bool) *ListObjectsOutput {
+ s.IBMSSEKPEnabled = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListObjectsOutput) SetIsTruncated(v bool) *ListObjectsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetMarker sets the Marker field's value.
+func (s *ListObjectsOutput) SetMarker(v string) *ListObjectsOutput {
+ s.Marker = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsOutput) SetMaxKeys(v int64) *ListObjectsOutput {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *ListObjectsOutput) SetName(v string) *ListObjectsOutput {
+ s.Name = &v
+ return s
+}
+
+// SetNextMarker sets the NextMarker field's value.
+func (s *ListObjectsOutput) SetNextMarker(v string) *ListObjectsOutput {
+ s.NextMarker = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput {
+ s.Prefix = &v
+ return s
+}
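+
+// exampleListObjectsPages is an illustrative sketch, not part of the generated
+// API: it shows Marker/NextMarker pagination and Delimiter grouping with the
+// ListObjects (v1) call documented above. It assumes a constructed *S3 client
+// (svc) and the aws helper package this file already imports; names prefixed
+// with "example" are hypothetical.
+func exampleListObjectsPages(svc *S3, bucket string) ([]string, error) {
+	var keys []string
+	input := &ListObjectsInput{
+		Bucket:    aws.String(bucket),
+		Delimiter: aws.String("/"), // group keys into CommonPrefixes, like subdirectories
+		MaxKeys:   aws.Int64(1000),
+	}
+	for {
+		out, err := svc.ListObjects(input)
+		if err != nil {
+			return nil, err
+		}
+		for _, obj := range out.Contents {
+			keys = append(keys, aws.StringValue(obj.Key))
+		}
+		if !aws.BoolValue(out.IsTruncated) {
+			return keys, nil
+		}
+		// NextMarker is only returned when a delimiter is specified; otherwise
+		// fall back to the last Key of the page, as the NextMarker doc notes.
+		if out.NextMarker != nil {
+			input.Marker = out.NextMarker
+		} else if n := len(out.Contents); n > 0 {
+			input.Marker = out.Contents[n-1].Key
+		} else {
+			return keys, nil
+		}
+	}
+}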
+
+type ListObjectsV2Input struct {
+ _ struct{} `locationName:"ListObjectsV2Request" type:"structure"`
+
+ // Bucket name to list.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this action using S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// ContinuationToken indicates to Amazon S3 that the list is being continued
+	// on this bucket with a token. ContinuationToken is obfuscated and is not
+	// a real key.
+ ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // The owner field is not present in listV2 by default, if you want to return
+ // owner field with each key in the result then set the fetch owner field to
+ // true.
+ FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"`
+
+ // Sets the maximum number of keys returned in the response. By default the
+ // action returns up to 1,000 key names. The response might contain fewer keys
+ // but will never contain more.
+ MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // list objects request in V2 style. Bucket owners need not specify this parameter
+ // in their requests.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
+ // listing after this specified key. StartAfter can be any key in the bucket.
+ StartAfter *string `location:"querystring" locationName:"start-after" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListObjectsV2Input) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListObjectsV2Input) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListObjectsV2Input) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input {
+ s.Bucket = &v
+ return s
+}
+
+func (s *ListObjectsV2Input) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input {
+ s.ContinuationToken = &v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsV2Input) SetDelimiter(v string) *ListObjectsV2Input {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsV2Input) SetEncodingType(v string) *ListObjectsV2Input {
+ s.EncodingType = &v
+ return s
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *ListObjectsV2Input) SetExpectedBucketOwner(v string) *ListObjectsV2Input {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetFetchOwner sets the FetchOwner field's value.
+func (s *ListObjectsV2Input) SetFetchOwner(v bool) *ListObjectsV2Input {
+ s.FetchOwner = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsV2Input) SetMaxKeys(v int64) *ListObjectsV2Input {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsV2Input) SetPrefix(v string) *ListObjectsV2Input {
+ s.Prefix = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *ListObjectsV2Input) SetRequestPayer(v string) *ListObjectsV2Input {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetStartAfter sets the StartAfter field's value.
+func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input {
+ s.StartAfter = &v
+ return s
+}
+
+type ListObjectsV2Output struct {
+ _ struct{} `type:"structure"`
+
+ // All of the keys (up to 1,000) rolled up into a common prefix count as a single
+ // return when calculating the number of returns.
+ //
+ // A response can contain CommonPrefixes only if you specify a delimiter.
+ //
+ // CommonPrefixes contains all (if there are any) keys between Prefix and the
+ // next occurrence of the string specified by a delimiter.
+ //
+ // CommonPrefixes lists keys that act like subdirectories in the directory specified
+ // by Prefix.
+ //
+ // For example, if the prefix is notes/ and the delimiter is a slash (/) as
+ // in notes/summer/july, the common prefix is notes/summer/. All of the keys
+ // that roll up into a common prefix count as a single return when calculating
+ // the number of returns.
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ // Metadata about each object returned.
+ Contents []*Object `type:"list" flattened:"true"`
+
+ // If ContinuationToken was sent with the request, it is included in the response.
+ ContinuationToken *string `type:"string"`
+
+ // Causes keys that contain the same string between the prefix and the first
+ // occurrence of the delimiter to be rolled up into a single result element
+ // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere
+ // in the response. Each rolled-up result counts as only one return against
+ // the MaxKeys value.
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object key names in the XML response.
+ //
+ // If you specify the encoding-type request parameter, Amazon S3 includes this
+ // element in the response, and returns encoded key name values in the following
+ // response elements:
+ //
+ // Delimiter, Prefix, Key, and StartAfter.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // Set to false if all of the results were returned. Set to true if more keys
+ // are available to return. If the number of results exceeds that specified
+ // by MaxKeys, all of the results might not be returned.
+ IsTruncated *bool `type:"boolean"`
+
+	// KeyCount is the number of keys returned with this request. KeyCount will
+	// always be less than or equal to the MaxKeys field. For example, if you ask
+	// for 50 keys, your result will include 50 keys or fewer.
+ KeyCount *int64 `type:"integer"`
+
+ // Sets the maximum number of keys returned in the response. By default the
+ // action returns up to 1,000 key names. The response might contain fewer keys
+ // but will never contain more.
+ MaxKeys *int64 `type:"integer"`
+
+ // The bucket name.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this action using S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ Name *string `type:"string"`
+
+	// NextContinuationToken is sent when IsTruncated is true, which means there
+	// are more keys in the bucket that can be listed. Subsequent list requests to
+	// Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken
+	// is obfuscated and is not a real key.
+ NextContinuationToken *string `type:"string"`
+
+ // Keys that begin with the indicated prefix.
+ Prefix *string `type:"string"`
+
+ // If StartAfter was sent with the request, it is included in the response.
+ StartAfter *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListObjectsV2Output) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListObjectsV2Output) GoString() string {
+ return s.String()
+}
+
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListObjectsV2Output) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsV2Output {
+ s.CommonPrefixes = v
+ return s
+}
+
+// SetContents sets the Contents field's value.
+func (s *ListObjectsV2Output) SetContents(v []*Object) *ListObjectsV2Output {
+ s.Contents = v
+ return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListObjectsV2Output) SetContinuationToken(v string) *ListObjectsV2Output {
+ s.ContinuationToken = &v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsV2Output) SetDelimiter(v string) *ListObjectsV2Output {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsV2Output) SetEncodingType(v string) *ListObjectsV2Output {
+ s.EncodingType = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListObjectsV2Output) SetIsTruncated(v bool) *ListObjectsV2Output {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetKeyCount sets the KeyCount field's value.
+func (s *ListObjectsV2Output) SetKeyCount(v int64) *ListObjectsV2Output {
+ s.KeyCount = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsV2Output) SetMaxKeys(v int64) *ListObjectsV2Output {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *ListObjectsV2Output) SetName(v string) *ListObjectsV2Output {
+ s.Name = &v
+ return s
+}
+
+// SetNextContinuationToken sets the NextContinuationToken field's value.
+func (s *ListObjectsV2Output) SetNextContinuationToken(v string) *ListObjectsV2Output {
+ s.NextContinuationToken = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsV2Output) SetPrefix(v string) *ListObjectsV2Output {
+ s.Prefix = &v
+ return s
+}
+
+// SetStartAfter sets the StartAfter field's value.
+func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output {
+ s.StartAfter = &v
+ return s
+}
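+
+// exampleListObjectsV2Pages is an illustrative sketch, not part of the
+// generated API: it shows ContinuationToken pagination with ListObjectsV2 as
+// described above, calling until IsTruncated is false and feeding each
+// NextContinuationToken back in. It assumes a constructed *S3 client (svc).
+func exampleListObjectsV2Pages(svc *S3, bucket, prefix string) ([]string, error) {
+	var keys []string
+	input := &ListObjectsV2Input{
+		Bucket: aws.String(bucket),
+		Prefix: aws.String(prefix),
+	}
+	for {
+		out, err := svc.ListObjectsV2(input)
+		if err != nil {
+			return nil, err
+		}
+		for _, obj := range out.Contents {
+			keys = append(keys, aws.StringValue(obj.Key))
+		}
+		if !aws.BoolValue(out.IsTruncated) {
+			return keys, nil
+		}
+		// The token is opaque: "obfuscated and is not a real key".
+		input.ContinuationToken = out.NextContinuationToken
+	}
+}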
+
+type ListPartsInput struct {
+ _ struct{} `locationName:"ListPartsRequest" type:"structure"`
+
+ // The name of the bucket to which the parts are being uploaded.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this action using S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Object key for which the multipart upload was initiated.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Sets the maximum number of parts to return.
+ MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"`
+
+ // Specifies the part after which listing should begin. Only parts with higher
+ // part numbers will be listed.
+ PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Upload ID identifying the multipart upload whose parts are being listed.
+ //
+ // UploadId is a required field
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListPartsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListPartsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListPartsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListPartsInput) SetBucket(v string) *ListPartsInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *ListPartsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *ListPartsInput) SetExpectedBucketOwner(v string) *ListPartsInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *ListPartsInput) SetKey(v string) *ListPartsInput {
+ s.Key = &v
+ return s
+}
+
+// SetMaxParts sets the MaxParts field's value.
+func (s *ListPartsInput) SetMaxParts(v int64) *ListPartsInput {
+ s.MaxParts = &v
+ return s
+}
+
+// SetPartNumberMarker sets the PartNumberMarker field's value.
+func (s *ListPartsInput) SetPartNumberMarker(v int64) *ListPartsInput {
+ s.PartNumberMarker = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *ListPartsInput) SetRequestPayer(v string) *ListPartsInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput {
+ s.UploadId = &v
+ return s
+}
+
+type ListPartsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If the bucket has a lifecycle rule configured with an action to abort incomplete
+ // multipart uploads and the prefix in the lifecycle rule matches the object
+ // name in the request, then the response includes this header indicating when
+ // the initiated multipart upload will become eligible for abort operation.
+ // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+ // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config).
+ //
+ // The response will also include the x-amz-abort-rule-id header that will provide
+ // the ID of the lifecycle configuration rule that defines this action.
+ AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"`
+
+ // This header is returned along with the x-amz-abort-date header. It identifies
+ // applicable lifecycle configuration rule that defines the action to abort
+ // incomplete multipart uploads.
+ AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
+
+ // The name of the bucket to which the multipart upload was initiated.
+ Bucket *string `type:"string"`
+
+ // Container element that identifies who initiated the multipart upload. If
+ // the initiator is an AWS account, this element provides the same information
+ // as the Owner element. If the initiator is an IAM User, this element provides
+ // the user ARN and display name.
+ Initiator *Initiator `type:"structure"`
+
+ // Indicates whether the returned list of parts is truncated. A true value indicates
+ // that the list was truncated. A list can be truncated if the number of parts
+ // exceeds the limit returned in the MaxParts element.
+ IsTruncated *bool `type:"boolean"`
+
+ // Object key for which the multipart upload was initiated.
+ Key *string `min:"1" type:"string"`
+
+ // Maximum number of parts that were allowed in the response.
+ MaxParts *int64 `type:"integer"`
+
+ // When a list is truncated, this element specifies the last part in the list,
+ // as well as the value to use for the part-number-marker request parameter
+ // in a subsequent request.
+ NextPartNumberMarker *int64 `type:"integer"`
+
+	// Container element that identifies the object owner, after the object is created.
+	// If the multipart upload is initiated by an IAM user, this element provides
+	// the parent account ID and display name.
+ Owner *Owner `type:"structure"`
+
+	// Specifies the part after which this listing began; it echoes the
+	// part-number-marker value sent in the request.
+ PartNumberMarker *int64 `type:"integer"`
+
+ // Container for elements related to a particular part. A response can contain
+ // zero or more Part elements.
+ Parts []*Part `locationName:"Part" type:"list" flattened:"true"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded
+ // object.
+ StorageClass *string `type:"string" enum:"StorageClass"`
+
+ // Upload ID identifying the multipart upload whose parts are being listed.
+ UploadId *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListPartsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListPartsOutput) GoString() string {
+ return s.String()
+}
+
+// SetAbortDate sets the AbortDate field's value.
+func (s *ListPartsOutput) SetAbortDate(v time.Time) *ListPartsOutput {
+ s.AbortDate = &v
+ return s
+}
+
+// SetAbortRuleId sets the AbortRuleId field's value.
+func (s *ListPartsOutput) SetAbortRuleId(v string) *ListPartsOutput {
+ s.AbortRuleId = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *ListPartsOutput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetInitiator sets the Initiator field's value.
+func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput {
+ s.Initiator = v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListPartsOutput) SetIsTruncated(v bool) *ListPartsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *ListPartsOutput) SetKey(v string) *ListPartsOutput {
+ s.Key = &v
+ return s
+}
+
+// SetMaxParts sets the MaxParts field's value.
+func (s *ListPartsOutput) SetMaxParts(v int64) *ListPartsOutput {
+ s.MaxParts = &v
+ return s
+}
+
+// SetNextPartNumberMarker sets the NextPartNumberMarker field's value.
+func (s *ListPartsOutput) SetNextPartNumberMarker(v int64) *ListPartsOutput {
+ s.NextPartNumberMarker = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *ListPartsOutput) SetOwner(v *Owner) *ListPartsOutput {
+ s.Owner = v
+ return s
+}
+
+// SetPartNumberMarker sets the PartNumberMarker field's value.
+func (s *ListPartsOutput) SetPartNumberMarker(v int64) *ListPartsOutput {
+ s.PartNumberMarker = &v
+ return s
+}
+
+// SetParts sets the Parts field's value.
+func (s *ListPartsOutput) SetParts(v []*Part) *ListPartsOutput {
+ s.Parts = v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *ListPartsOutput) SetRequestCharged(v string) *ListPartsOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *ListPartsOutput) SetStorageClass(v string) *ListPartsOutput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput {
+ s.UploadId = &v
+ return s
+}
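+
+// exampleListAllParts is an illustrative sketch, not part of the generated
+// API: it shows PartNumberMarker pagination with ListParts as described above.
+// It assumes a constructed *S3 client (svc) and an in-progress multipart
+// upload identified by uploadID.
+func exampleListAllParts(svc *S3, bucket, key, uploadID string) ([]*Part, error) {
+	var parts []*Part
+	input := &ListPartsInput{
+		Bucket:   aws.String(bucket),
+		Key:      aws.String(key),
+		UploadId: aws.String(uploadID),
+	}
+	for {
+		out, err := svc.ListParts(input)
+		if err != nil {
+			return nil, err
+		}
+		parts = append(parts, out.Parts...)
+		if !aws.BoolValue(out.IsTruncated) {
+			return parts, nil
+		}
+		// Resume listing after the last part returned on this page.
+		input.PartNumberMarker = out.NextPartNumberMarker
+	}
+}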
+
+// Describes where logs are stored and the prefix that Amazon S3 assigns to
+// all log object keys for a bucket. For more information, see PUT Bucket logging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html)
+// in the Amazon Simple Storage Service API Reference.
+type LoggingEnabled struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the bucket where you want Amazon S3 to store server access logs.
+ // You can have your logs delivered to any bucket that you own, including the
+ // same bucket that is being logged. You can also configure multiple buckets
+ // to deliver their logs to the same target bucket. In this case, you should
+ // choose a different TargetPrefix for each source bucket so that the delivered
+ // log files can be distinguished by key.
+ //
+ // TargetBucket is a required field
+ TargetBucket *string `type:"string" required:"true"`
+
+ // Container for granting information.
+ TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"`
+
+ // A prefix for all log object keys. If you store log files from multiple Amazon
+ // S3 buckets in a single bucket, you can use a prefix to distinguish which
+ // log files came from which bucket.
+ //
+ // TargetPrefix is a required field
+ TargetPrefix *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LoggingEnabled) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LoggingEnabled) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LoggingEnabled) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"}
+ if s.TargetBucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("TargetBucket"))
+ }
+ if s.TargetPrefix == nil {
+ invalidParams.Add(request.NewErrParamRequired("TargetPrefix"))
+ }
+ if s.TargetGrants != nil {
+ for i, v := range s.TargetGrants {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetTargetBucket sets the TargetBucket field's value.
+func (s *LoggingEnabled) SetTargetBucket(v string) *LoggingEnabled {
+ s.TargetBucket = &v
+ return s
+}
+
+// SetTargetGrants sets the TargetGrants field's value.
+func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled {
+ s.TargetGrants = v
+ return s
+}
+
+// SetTargetPrefix sets the TargetPrefix field's value.
+func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled {
+ s.TargetPrefix = &v
+ return s
+}
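+
+// examplePutBucketLogging is an illustrative sketch, not part of the generated
+// API: it wires the LoggingEnabled structure above into a PutBucketLogging
+// call, using a per-source TargetPrefix so delivered log keys stay
+// distinguishable. The BucketLoggingStatus and PutBucketLoggingInput shapes
+// are assumed to be defined elsewhere in this package; svc is a constructed
+// *S3 client.
+func examplePutBucketLogging(svc *S3, sourceBucket, targetBucket string) error {
+	_, err := svc.PutBucketLogging(&PutBucketLoggingInput{
+		Bucket: aws.String(sourceBucket),
+		BucketLoggingStatus: &BucketLoggingStatus{
+			LoggingEnabled: &LoggingEnabled{
+				TargetBucket: aws.String(targetBucket),
+				TargetPrefix: aws.String(sourceBucket + "/"),
+			},
+		},
+	})
+	return err
+}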
+
+// Container for the MultipartUpload for the Amazon S3 object.
+type MultipartUpload struct {
+ _ struct{} `type:"structure"`
+
+ // Date and time at which the multipart upload was initiated.
+ Initiated *time.Time `type:"timestamp"`
+
+ // Identifies who initiated the multipart upload.
+ Initiator *Initiator `type:"structure"`
+
+ // Key of the object for which the multipart upload was initiated.
+ Key *string `min:"1" type:"string"`
+
+ // Specifies the owner of the object that is part of the multipart upload.
+ Owner *Owner `type:"structure"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"StorageClass"`
+
+ // Upload ID that identifies the multipart upload.
+ UploadId *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s MultipartUpload) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s MultipartUpload) GoString() string {
+ return s.String()
+}
+
+// SetInitiated sets the Initiated field's value.
+func (s *MultipartUpload) SetInitiated(v time.Time) *MultipartUpload {
+ s.Initiated = &v
+ return s
+}
+
+// SetInitiator sets the Initiator field's value.
+func (s *MultipartUpload) SetInitiator(v *Initiator) *MultipartUpload {
+ s.Initiator = v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *MultipartUpload) SetKey(v string) *MultipartUpload {
+ s.Key = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *MultipartUpload) SetOwner(v *Owner) *MultipartUpload {
+ s.Owner = v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *MultipartUpload) SetStorageClass(v string) *MultipartUpload {
+ s.StorageClass = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload {
+ s.UploadId = &v
+ return s
+}
+
+// Specifies when noncurrent object versions expire. Upon expiration, Amazon
+// S3 permanently deletes the noncurrent object versions. You set this lifecycle
+// configuration action on a bucket that has versioning enabled (or suspended)
+// to request that Amazon S3 delete noncurrent object versions at a specific
+// period in the object's lifetime.
+type NoncurrentVersionExpiration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the number of days an object is noncurrent before Amazon S3 can
+ // perform the associated action. For information about the noncurrent days
+ // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations)
+ // in the Amazon Simple Storage Service Developer Guide.
+ NoncurrentDays *int64 `type:"integer"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s NoncurrentVersionExpiration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s NoncurrentVersionExpiration) GoString() string {
+ return s.String()
+}
+
+// SetNoncurrentDays sets the NoncurrentDays field's value.
+func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVersionExpiration {
+ s.NoncurrentDays = &v
+ return s
+}
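+
+// exampleNoncurrentExpiration is an illustrative sketch, not part of the
+// generated API: it builds a NoncurrentVersionExpiration that would retire
+// noncurrent object versions 30 days after they become noncurrent. Attaching
+// it to a lifecycle rule is assumed to follow the lifecycle shapes defined
+// elsewhere in this package.
+func exampleNoncurrentExpiration() *NoncurrentVersionExpiration {
+	return (&NoncurrentVersionExpiration{}).SetNoncurrentDays(30)
+}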
+
+// An object consists of data and its descriptive metadata.
+type Object struct {
+ _ struct{} `type:"structure"`
+
+ // The entity tag is a hash of the object. The ETag reflects changes only to
+ // the contents of an object, not its metadata. The ETag may or may not be an
+ // MD5 digest of the object data. Whether or not it is depends on how the object
+ // was created and how it is encrypted as described below:
+ //
+ // * Objects created by the PUT Object, POST Object, or Copy operation, or
+ // through the AWS Management Console, and are encrypted by SSE-S3 or plaintext,
+ // have ETags that are an MD5 digest of their object data.
+ //
+ // * Objects created by the PUT Object, POST Object, or Copy operation, or
+ // through the AWS Management Console, and are encrypted by SSE-C or SSE-KMS,
+ // have ETags that are not an MD5 digest of their object data.
+ //
+ // * If an object is created by either the Multipart Upload or Part Copy
+ // operation, the ETag is not an MD5 digest, regardless of the method of
+ // encryption.
+ ETag *string `type:"string"`
+
+ // The name that you assign to an object. You use the object key to retrieve
+ // the object.
+ Key *string `min:"1" type:"string"`
+
+ // Creation date of the object.
+ LastModified *time.Time `type:"timestamp"`
+
+ // The owner of the object
+ Owner *Owner `type:"structure"`
+
+ // Size in bytes of the object
+ Size *int64 `type:"integer"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"ObjectStorageClass"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Object) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Object) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *Object) SetETag(v string) *Object {
+ s.ETag = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *Object) SetKey(v string) *Object {
+ s.Key = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *Object) SetLastModified(v time.Time) *Object {
+ s.LastModified = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *Object) SetOwner(v *Owner) *Object {
+ s.Owner = v
+ return s
+}
+
+// SetSize sets the Size field's value.
+func (s *Object) SetSize(v int64) *Object {
+ s.Size = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *Object) SetStorageClass(v string) *Object {
+ s.StorageClass = &v
+ return s
+}
+
+// Object Identifier is unique value to identify objects.
+type ObjectIdentifier struct {
+ _ struct{} `type:"structure"`
+
+ // Key name of the object.
+ //
+ // Replacement must be made for object keys containing special characters (such
+ // as carriage returns) when using XML requests. For more information, see XML
+ // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ //
+ // Key is a required field
+ Key *string `min:"1" type:"string" required:"true"`
+
+ // VersionId for the specific version of the object to delete.
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ObjectIdentifier) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ObjectIdentifier) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ObjectIdentifier) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier {
+ s.Key = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier {
+ s.VersionId = &v
+ return s
+}
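+
+// exampleDeleteKeys is an illustrative sketch, not part of the generated API:
+// it shows ObjectIdentifier values feeding a batch DeleteObjects call (the
+// Delete and DeleteObjectsInput shapes are defined elsewhere in this package).
+// It assumes a constructed *S3 client (svc).
+func exampleDeleteKeys(svc *S3, bucket string, keys []string) error {
+	objects := make([]*ObjectIdentifier, 0, len(keys))
+	for _, k := range keys {
+		objects = append(objects, &ObjectIdentifier{Key: aws.String(k)})
+	}
+	_, err := svc.DeleteObjects(&DeleteObjectsInput{
+		Bucket: aws.String(bucket),
+		Delete: &Delete{Objects: objects},
+	})
+	return err
+}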
+
+// The version of an object.
+type ObjectVersion struct {
+ _ struct{} `type:"structure"`
+
+ // The entity tag is an MD5 hash of that version of the object.
+ ETag *string `type:"string"`
+
+ // Specifies whether the object is (true) or is not (false) the latest version
+ // of an object.
+ IsLatest *bool `type:"boolean"`
+
+ // The object key.
+ Key *string `min:"1" type:"string"`
+
+ // Date and time the object was last modified.
+ LastModified *time.Time `type:"timestamp"`
+
+ // Specifies the owner of the object.
+ Owner *Owner `type:"structure"`
+
+ // Size in bytes of the object.
+ Size *int64 `type:"integer"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"`
+
+ // Version ID of an object.
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ObjectVersion) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ObjectVersion) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *ObjectVersion) SetETag(v string) *ObjectVersion {
+ s.ETag = &v
+ return s
+}
+
+// SetIsLatest sets the IsLatest field's value.
+func (s *ObjectVersion) SetIsLatest(v bool) *ObjectVersion {
+ s.IsLatest = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *ObjectVersion) SetKey(v string) *ObjectVersion {
+ s.Key = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *ObjectVersion) SetLastModified(v time.Time) *ObjectVersion {
+ s.LastModified = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *ObjectVersion) SetOwner(v *Owner) *ObjectVersion {
+ s.Owner = v
+ return s
+}
+
+// SetSize sets the Size field's value.
+func (s *ObjectVersion) SetSize(v int64) *ObjectVersion {
+ s.Size = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *ObjectVersion) SetStorageClass(v string) *ObjectVersion {
+ s.StorageClass = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion {
+ s.VersionId = &v
+ return s
+}
+
+// Container for the owner's display name and ID.
+type Owner struct {
+ _ struct{} `type:"structure"`
+
+ // Container for the display name of the owner.
+ DisplayName *string `type:"string"`
+
+ // Container for the ID of the owner.
+ ID *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Owner) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Owner) GoString() string {
+ return s.String()
+}
+
+// SetDisplayName sets the DisplayName field's value.
+func (s *Owner) SetDisplayName(v string) *Owner {
+ s.DisplayName = &v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *Owner) SetID(v string) *Owner {
+ s.ID = &v
+ return s
+}
+
+// Container for elements related to a part.
+type Part struct {
+ _ struct{} `type:"structure"`
+
+ // Entity tag returned when the part was uploaded.
+ ETag *string `type:"string"`
+
+ // Date and time at which the part was uploaded.
+ LastModified *time.Time `type:"timestamp"`
+
+ // Part number identifying the part. This is a positive integer between 1 and
+ // 10,000.
+ PartNumber *int64 `type:"integer"`
+
+ // Size in bytes of the uploaded part data.
+ Size *int64 `type:"integer"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Part) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Part) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *Part) SetETag(v string) *Part {
+ s.ETag = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *Part) SetLastModified(v time.Time) *Part {
+ s.LastModified = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *Part) SetPartNumber(v int64) *Part {
+ s.PartNumber = &v
+ return s
+}
+
+// SetSize sets the Size field's value.
+func (s *Part) SetSize(v int64) *Part {
+ s.Size = &v
+ return s
+}
+
+type ProtectionConfiguration struct {
+ _ struct{} `type:"structure"`
+
+	// Default retention period for an object. If a PUT of an object does not
+	// specify a retention period, this value will be converted to seconds and used.
+ //
+ // DefaultRetention is a required field
+ DefaultRetention *BucketProtectionDefaultRetention `type:"structure" required:"true"`
+
+ // Enable permanent retention for an object.
+ EnablePermanentRetention *bool `type:"boolean"`
+
+	// Maximum retention period for an object. If a PUT of an object specifies a
+	// longer retention period, the PUT operation will fail.
+ //
+ // MaximumRetention is a required field
+ MaximumRetention *BucketProtectionMaximumRetention `type:"structure" required:"true"`
+
+	// Minimum retention period for an object. If a PUT of an object specifies a
+	// shorter retention period, the PUT operation will fail.
+ //
+ // MinimumRetention is a required field
+ MinimumRetention *BucketProtectionMinimumRetention `type:"structure" required:"true"`
+
+ // Retention status of a bucket.
+ //
+ // Status is a required field
+ Status *string `type:"string" required:"true" enum:"BucketProtectionStatus"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ProtectionConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ProtectionConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ProtectionConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ProtectionConfiguration"}
+ if s.DefaultRetention == nil {
+ invalidParams.Add(request.NewErrParamRequired("DefaultRetention"))
+ }
+ if s.MaximumRetention == nil {
+ invalidParams.Add(request.NewErrParamRequired("MaximumRetention"))
+ }
+ if s.MinimumRetention == nil {
+ invalidParams.Add(request.NewErrParamRequired("MinimumRetention"))
+ }
+ if s.Status == nil {
+ invalidParams.Add(request.NewErrParamRequired("Status"))
+ }
+ if s.DefaultRetention != nil {
+ if err := s.DefaultRetention.Validate(); err != nil {
+ invalidParams.AddNested("DefaultRetention", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.MaximumRetention != nil {
+ if err := s.MaximumRetention.Validate(); err != nil {
+ invalidParams.AddNested("MaximumRetention", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.MinimumRetention != nil {
+ if err := s.MinimumRetention.Validate(); err != nil {
+ invalidParams.AddNested("MinimumRetention", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDefaultRetention sets the DefaultRetention field's value.
+func (s *ProtectionConfiguration) SetDefaultRetention(v *BucketProtectionDefaultRetention) *ProtectionConfiguration {
+ s.DefaultRetention = v
+ return s
+}
+
+// SetEnablePermanentRetention sets the EnablePermanentRetention field's value.
+func (s *ProtectionConfiguration) SetEnablePermanentRetention(v bool) *ProtectionConfiguration {
+ s.EnablePermanentRetention = &v
+ return s
+}
+
+// SetMaximumRetention sets the MaximumRetention field's value.
+func (s *ProtectionConfiguration) SetMaximumRetention(v *BucketProtectionMaximumRetention) *ProtectionConfiguration {
+ s.MaximumRetention = v
+ return s
+}
+
+// SetMinimumRetention sets the MinimumRetention field's value.
+func (s *ProtectionConfiguration) SetMinimumRetention(v *BucketProtectionMinimumRetention) *ProtectionConfiguration {
+ s.MinimumRetention = v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *ProtectionConfiguration) SetStatus(v string) *ProtectionConfiguration {
+ s.Status = &v
+ return s
+}
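+
+// exampleProtectionConfiguration is an illustrative sketch, not part of the
+// generated API: it assembles the required retention fields described above.
+// The Days field name on the BucketProtection* retention types and the
+// "Retention" status value are assumptions based on their definitions
+// elsewhere in this file.
+func exampleProtectionConfiguration() *ProtectionConfiguration {
+	return &ProtectionConfiguration{
+		Status:           aws.String("Retention"),
+		DefaultRetention: &BucketProtectionDefaultRetention{Days: aws.Int64(30)},
+		MinimumRetention: &BucketProtectionMinimumRetention{Days: aws.Int64(1)},
+		MaximumRetention: &BucketProtectionMaximumRetention{Days: aws.Int64(365)},
+	}
+}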
+
+// The PublicAccessBlock configuration that you want to apply to this Amazon
+// S3 bucket. You can enable the configuration options in any combination. For
+// more information about when Amazon S3 considers a bucket or object public,
+// see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status)
+// in the Amazon S3 User Guide.
+type PublicAccessBlockConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether Amazon S3 should block public access control lists (ACLs)
+ // for this bucket and objects in this bucket. Setting this element to TRUE
+ // causes the following behavior:
+ //
+ // * PUT Bucket acl and PUT Object acl calls fail if the specified ACL is
+ // public.
+ //
+ // * PUT Object calls fail if the request includes a public ACL.
+ //
+ // * PUT Bucket calls fail if the request includes a public ACL.
+ //
+ // Enabling this setting doesn't affect existing policies or ACLs.
+ BlockPublicAcls *bool `locationName:"BlockPublicAcls" type:"boolean"`
+
+ // Specifies whether Amazon S3 should ignore public ACLs for this bucket and
+ // objects in this bucket. Setting this element to TRUE causes Amazon S3 to
+ // ignore all public ACLs on this bucket and objects in this bucket.
+ //
+ // Enabling this setting doesn't affect the persistence of any existing ACLs
+ // and doesn't prevent new public ACLs from being set.
+ IgnorePublicAcls *bool `locationName:"IgnorePublicAcls" type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PublicAccessBlockConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PublicAccessBlockConfiguration) GoString() string {
+ return s.String()
+}
+
+// SetBlockPublicAcls sets the BlockPublicAcls field's value.
+func (s *PublicAccessBlockConfiguration) SetBlockPublicAcls(v bool) *PublicAccessBlockConfiguration {
+ s.BlockPublicAcls = &v
+ return s
+}
+
+// SetIgnorePublicAcls sets the IgnorePublicAcls field's value.
+func (s *PublicAccessBlockConfiguration) SetIgnorePublicAcls(v bool) *PublicAccessBlockConfiguration {
+ s.IgnorePublicAcls = &v
+ return s
+}
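+
+// exampleBlockPublicAcls is an illustrative sketch, not part of the generated
+// API: it applies the PublicAccessBlockConfiguration above so public ACLs are
+// both rejected on write and ignored on read. The PutPublicAccessBlockInput
+// shape is assumed to be defined elsewhere in this package; svc is a
+// constructed *S3 client.
+func exampleBlockPublicAcls(svc *S3, bucket string) error {
+	_, err := svc.PutPublicAccessBlock(&PutPublicAccessBlockInput{
+		Bucket: aws.String(bucket),
+		PublicAccessBlockConfiguration: &PublicAccessBlockConfiguration{
+			BlockPublicAcls:  aws.Bool(true),
+			IgnorePublicAcls: aws.Bool(true),
+		},
+	})
+	return err
+}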
+
+type PutBucketAclInput struct {
+ _ struct{} `locationName:"PutBucketAclRequest" type:"structure" payload:"AccessControlPolicy"`
+
+ // The canned ACL to apply to the bucket.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
+
+ // Contains the elements that set the ACL permissions for an object per grantee.
+ AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // The bucket to which to apply the ACL.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Allows grantee the read, write, read ACP, and write ACP permissions on the
+ // bucket.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to list the objects in the bucket.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the bucket ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to create, overwrite, and delete any object in the bucket.
+ GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable bucket.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketAclInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketAclInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketAclInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.AccessControlPolicy != nil {
+ if err := s.AccessControlPolicy.Validate(); err != nil {
+ invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *PutBucketAclInput) SetACL(v string) *PutBucketAclInput {
+ s.ACL = &v
+ return s
+}
+
+// SetAccessControlPolicy sets the AccessControlPolicy field's value.
+func (s *PutBucketAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutBucketAclInput {
+ s.AccessControlPolicy = v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketAclInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *PutBucketAclInput) SetExpectedBucketOwner(v string) *PutBucketAclInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *PutBucketAclInput) SetGrantRead(v string) *PutBucketAclInput {
+ s.GrantRead = &v
+ return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *PutBucketAclInput) SetGrantReadACP(v string) *PutBucketAclInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWrite sets the GrantWrite field's value.
+func (s *PutBucketAclInput) SetGrantWrite(v string) *PutBucketAclInput {
+ s.GrantWrite = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
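+// Example (illustrative sketch): assembling a PutBucketAclInput with a canned
+// ACL using the setters above, then validating it before use. "svc" is an
+// assumed, already-initialized service client exposing PutBucketAcl; the
+// bucket name is hypothetical.
+//
+//	input := &PutBucketAclInput{}
+//	input.SetBucket("example-bucket").SetACL("public-read")
+//	if err := input.Validate(); err != nil {
+//		// handle invalid parameters before sending the request
+//	}
+//	// out, err := svc.PutBucketAcl(input)
+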
+type PutBucketAclOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketAclOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketAclOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketCorsInput struct {
+ _ struct{} `locationName:"PutBucketCorsRequest" type:"structure" payload:"CORSConfiguration"`
+
+	// Specifies the bucket impacted by the CORS configuration.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Describes the cross-origin access configuration for objects in an Amazon
+ // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon
+ // S3 User Guide.
+ //
+ // CORSConfiguration is a required field
+ CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketCorsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketCorsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketCorsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.CORSConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("CORSConfiguration"))
+ }
+ if s.CORSConfiguration != nil {
+ if err := s.CORSConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketCorsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetCORSConfiguration sets the CORSConfiguration field's value.
+func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput {
+ s.CORSConfiguration = v
+ return s
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *PutBucketCorsInput) SetExpectedBucketOwner(v string) *PutBucketCorsInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
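+// Example (illustrative sketch): wiring a pre-built CORS configuration into a
+// PutBucketCorsInput. "corsCfg" is an assumed *CORSConfiguration constructed
+// elsewhere (the type is defined in this file); the bucket name is
+// hypothetical.
+//
+//	input := (&PutBucketCorsInput{}).
+//		SetBucket("example-bucket").
+//		SetCORSConfiguration(corsCfg)
+//	if err := input.Validate(); err != nil {
+//		// both Bucket and CORSConfiguration are required
+//	}
+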
+type PutBucketCorsOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketCorsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketCorsOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketLifecycleConfigurationInput struct {
+ _ struct{} `locationName:"PutBucketLifecycleConfigurationRequest" type:"structure" payload:"LifecycleConfiguration"`
+
+ // The name of the bucket for which to set the configuration.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// Container for lifecycle rules. You can add as many as 1,000 rules.
+ LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketLifecycleConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketLifecycleConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketLifecycleConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.LifecycleConfiguration != nil {
+ if err := s.LifecycleConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLifecycleConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketLifecycleConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *PutBucketLifecycleConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketLifecycleConfigurationInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetLifecycleConfiguration sets the LifecycleConfiguration field's value.
+func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleConfigurationInput {
+ s.LifecycleConfiguration = v
+ return s
+}
+
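+// Example (illustrative sketch): attaching a lifecycle configuration. Here
+// "lifecycleCfg" is an assumed *LifecycleConfiguration built elsewhere (the
+// type is defined in this file); the bucket name is hypothetical.
+//
+//	input := (&PutBucketLifecycleConfigurationInput{}).
+//		SetBucket("example-bucket").
+//		SetLifecycleConfiguration(lifecycleCfg)
+//	// Validate checks the bucket name and recursively validates the rules.
+//	err := input.Validate()
+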
+type PutBucketLifecycleConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketLifecycleConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketLifecycleConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketLoggingInput struct {
+ _ struct{} `locationName:"PutBucketLoggingRequest" type:"structure" payload:"BucketLoggingStatus"`
+
+ // The name of the bucket for which to set the logging parameters.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Container for logging status information.
+ //
+ // BucketLoggingStatus is a required field
+ BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketLoggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketLoggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketLoggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.BucketLoggingStatus == nil {
+ invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus"))
+ }
+ if s.BucketLoggingStatus != nil {
+ if err := s.BucketLoggingStatus.Validate(); err != nil {
+ invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketLoggingInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetBucketLoggingStatus sets the BucketLoggingStatus field's value.
+func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput {
+ s.BucketLoggingStatus = v
+ return s
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *PutBucketLoggingInput) SetExpectedBucketOwner(v string) *PutBucketLoggingInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
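+// Example (illustrative sketch): enabling bucket access logging. The
+// BucketLoggingStatus value ("loggingStatus") is assumed to be built elsewhere
+// with a logging target; the bucket name is hypothetical.
+//
+//	input := (&PutBucketLoggingInput{}).
+//		SetBucket("example-bucket").
+//		SetBucketLoggingStatus(loggingStatus)
+//	err := input.Validate() // BucketLoggingStatus is required here
+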
+type PutBucketLoggingOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketLoggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketLoggingOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketProtectionConfigurationInput struct {
+ _ struct{} `locationName:"PutBucketProtectionConfigurationRequest" type:"structure" payload:"ProtectionConfiguration"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // ProtectionConfiguration is a required field
+ ProtectionConfiguration *ProtectionConfiguration `locationName:"ProtectionConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketProtectionConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketProtectionConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketProtectionConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketProtectionConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.ProtectionConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("ProtectionConfiguration"))
+ }
+ if s.ProtectionConfiguration != nil {
+ if err := s.ProtectionConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("ProtectionConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketProtectionConfigurationInput) SetBucket(v string) *PutBucketProtectionConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketProtectionConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetProtectionConfiguration sets the ProtectionConfiguration field's value.
+func (s *PutBucketProtectionConfigurationInput) SetProtectionConfiguration(v *ProtectionConfiguration) *PutBucketProtectionConfigurationInput {
+ s.ProtectionConfiguration = v
+ return s
+}
+
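+// Example (illustrative sketch): applying a COS retention (protection)
+// configuration. "protectionCfg" is an assumed *ProtectionConfiguration built
+// elsewhere (the type is defined in this file); the bucket name is
+// hypothetical.
+//
+//	input := (&PutBucketProtectionConfigurationInput{}).
+//		SetBucket("example-bucket").
+//		SetProtectionConfiguration(protectionCfg)
+//	err := input.Validate() // ProtectionConfiguration is required
+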
+type PutBucketProtectionConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketProtectionConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketProtectionConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketReplicationInput struct {
+ _ struct{} `locationName:"PutBucketReplicationRequest" type:"structure" payload:"ReplicationConfiguration"`
+
+	// The name of the bucket.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // A container for replication rules. You can add up to 1,000 rules. The maximum
+ // size of a replication configuration is 2 MB.
+ //
+ // ReplicationConfiguration is a required field
+ ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketReplicationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketReplicationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketReplicationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.ReplicationConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration"))
+ }
+ if s.ReplicationConfiguration != nil {
+ if err := s.ReplicationConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketReplicationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *PutBucketReplicationInput) SetExpectedBucketOwner(v string) *PutBucketReplicationInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetReplicationConfiguration sets the ReplicationConfiguration field's value.
+func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput {
+ s.ReplicationConfiguration = v
+ return s
+}
+
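+// Example (illustrative sketch): installing a replication configuration.
+// "replicationCfg" is an assumed *ReplicationConfiguration built elsewhere
+// (the type is defined in this file); the bucket name is hypothetical.
+//
+//	input := (&PutBucketReplicationInput{}).
+//		SetBucket("example-bucket").
+//		SetReplicationConfiguration(replicationCfg)
+//	err := input.Validate() // ReplicationConfiguration is required
+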
+type PutBucketReplicationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketReplicationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketReplicationOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketVersioningInput struct {
+ _ struct{} `locationName:"PutBucketVersioningRequest" type:"structure" payload:"VersioningConfiguration"`
+
+ // The bucket name.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // The concatenation of the authentication device's serial number, a space,
+ // and the value that is displayed on your authentication device.
+ MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+ // Container for setting the versioning state.
+ //
+ // VersioningConfiguration is a required field
+ VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketVersioningInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketVersioningInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketVersioningInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.VersioningConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketVersioningInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *PutBucketVersioningInput) SetExpectedBucketOwner(v string) *PutBucketVersioningInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetMFA sets the MFA field's value.
+func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput {
+ s.MFA = &v
+ return s
+}
+
+// SetVersioningConfiguration sets the VersioningConfiguration field's value.
+func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfiguration) *PutBucketVersioningInput {
+ s.VersioningConfiguration = v
+ return s
+}
+
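+// Example (illustrative sketch): enabling versioning on a bucket. This assumes
+// VersioningConfiguration (defined in this file) exposes a SetStatus setter
+// and that "Enabled" is a valid status value; the bucket name is hypothetical.
+//
+//	input := (&PutBucketVersioningInput{}).
+//		SetBucket("example-bucket").
+//		SetVersioningConfiguration((&VersioningConfiguration{}).SetStatus("Enabled"))
+//	err := input.Validate() // VersioningConfiguration is required
+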
+type PutBucketVersioningOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketVersioningOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketVersioningOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketWebsiteInput struct {
+ _ struct{} `locationName:"PutBucketWebsiteRequest" type:"structure" payload:"WebsiteConfiguration"`
+
+ // The bucket name.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Container for the request.
+ //
+ // WebsiteConfiguration is a required field
+ WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketWebsiteInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketWebsiteInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketWebsiteInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.WebsiteConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration"))
+ }
+ if s.WebsiteConfiguration != nil {
+ if err := s.WebsiteConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketWebsiteInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *PutBucketWebsiteInput) SetExpectedBucketOwner(v string) *PutBucketWebsiteInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetWebsiteConfiguration sets the WebsiteConfiguration field's value.
+func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput {
+ s.WebsiteConfiguration = v
+ return s
+}
+
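+// Example (illustrative sketch): configuring static website hosting.
+// "websiteCfg" is an assumed *WebsiteConfiguration built elsewhere (the type
+// is defined in this file); the bucket name is hypothetical.
+//
+//	input := (&PutBucketWebsiteInput{}).
+//		SetBucket("example-bucket").
+//		SetWebsiteConfiguration(websiteCfg)
+//	err := input.Validate() // WebsiteConfiguration is required
+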
+type PutBucketWebsiteOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketWebsiteOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketWebsiteOutput) GoString() string {
+ return s.String()
+}
+
+type PutObjectAclInput struct {
+ _ struct{} `locationName:"PutObjectAclRequest" type:"structure" payload:"AccessControlPolicy"`
+
+ // The canned ACL to apply to the object. For more information, see Canned ACL
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+ // Contains the elements that set the ACL permissions for an object per grantee.
+ AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // The bucket name that contains the object to which you want to attach the
+ // ACL.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// Allows grantee the read, read ACP, and write ACP permissions on the
+	// object.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+	// Allows grantee to read the object data and its metadata.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+	// Allows grantee to read the object ACL.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to create, overwrite, and delete any object in the bucket.
+ GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+
+	// Allows grantee to write the ACL for the applicable object.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Key for which the PUT action was initiated.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this action using S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectAclInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectAclInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectAclInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.AccessControlPolicy != nil {
+ if err := s.AccessControlPolicy.Validate(); err != nil {
+ invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *PutObjectAclInput) SetACL(v string) *PutObjectAclInput {
+ s.ACL = &v
+ return s
+}
+
+// SetAccessControlPolicy sets the AccessControlPolicy field's value.
+func (s *PutObjectAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutObjectAclInput {
+ s.AccessControlPolicy = v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutObjectAclInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *PutObjectAclInput) SetExpectedBucketOwner(v string) *PutObjectAclInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *PutObjectAclInput) SetGrantRead(v string) *PutObjectAclInput {
+ s.GrantRead = &v
+ return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *PutObjectAclInput) SetGrantReadACP(v string) *PutObjectAclInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWrite sets the GrantWrite field's value.
+func (s *PutObjectAclInput) SetGrantWrite(v string) *PutObjectAclInput {
+ s.GrantWrite = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *PutObjectAclInput) SetGrantWriteACP(v string) *PutObjectAclInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *PutObjectAclInput) SetKey(v string) *PutObjectAclInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *PutObjectAclInput) SetRequestPayer(v string) *PutObjectAclInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput {
+ s.VersionId = &v
+ return s
+}
+
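+// Example (illustrative sketch): granting a canned ACL on a single object,
+// optionally pinned to a version. "svc" is an assumed, already-initialized
+// service client exposing PutObjectAcl; bucket, key, and version ID are
+// hypothetical.
+//
+//	input := (&PutObjectAclInput{}).
+//		SetBucket("example-bucket").
+//		SetKey("path/to/object").
+//		SetACL("public-read").
+//		SetVersionId("example-version-id")
+//	// out, err := svc.PutObjectAcl(input)
+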
+type PutObjectAclOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectAclOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectAclOutput) GoString() string {
+ return s.String()
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+type PutObjectInput struct {
+ _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"`
+
+ // The canned ACL to apply to the object. For more information, see Canned ACL
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+ // Object data.
+ Body io.ReadSeeker `type:"blob"`
+
+ // The bucket name to which the PUT action was initiated.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this action using S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Can be used to specify caching behavior along the request/reply chain. For
+ // more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9
+ // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9).
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object. For more information,
+ // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1).
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
+ // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11).
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // Size of the body in bytes. This parameter is useful when the size of the
+ // body cannot be determined automatically. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13
+ // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13).
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+ // The base64-encoded 128-bit MD5 digest of the message (without the headers)
+ // according to RFC 1864. This header can be used as a message integrity check
+ // to verify that the data is the same data that was originally sent. Although
+ // it is optional, we recommend using the Content-MD5 mechanism as an end-to-end
+ // integrity check. For more information about REST request authentication,
+ // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
+ ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
+
+ // A standard MIME type describing the format of the contents. For more information,
+ // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17).
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // The date and time at which the object is no longer cacheable. For more information,
+ // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21).
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ //
+ // This action is not supported by Amazon S3 on Outposts.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Object key for which the PUT action was initiated.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Date on which it will be legal to delete or modify the object. This field
+ // can only be specified if Retention-Directive is REPLACE. You can only specify
+	// this or the Retention-Period header. If both are specified, a 400 error is
+	// returned. If neither is specified, the bucket's DefaultRetention period is
+	// used.
+ RetentionExpirationDate *time.Time `location:"header" locationName:"Retention-Expiration-Date" type:"timestamp"`
+
+	// A single legal hold to apply to the object. This field can only be specified
+	// if Retention-Directive is REPLACE. A legal hold is a string of at most 64
+	// characters. The object cannot be overwritten or deleted until all legal
+	// holds associated with the object are removed.
+ RetentionLegalHoldId *string `location:"header" locationName:"Retention-Legal-Hold-ID" type:"string"`
+
+ // Retention period to store on the object in seconds. If this field and Retention-Expiration-Date
+	// are specified, a 400 error is returned. If neither is specified, the bucket's
+	// DefaultRetention period is used. 0 is a legal value provided the bucket's
+	// minimum retention period is also 0.
+ RetentionPeriod *int64 `location:"header" locationName:"Retention-Period" type:"integer"`
+
+	// Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // S3 does not store the encryption key. The key must be appropriate for use
+ // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ //
+ // SSECustomerKey is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by PutObjectInput's
+ // String and GoString methods.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If x-amz-server-side-encryption is present and has the value of aws:kms,
+ // this header specifies the ID of the AWS Key Management Service (AWS KMS)
+	// symmetric customer managed customer master key (CMK) that was used for
+ // the object.
+ //
+ // If the value of x-amz-server-side-encryption is aws:kms, this header specifies
+ // the ID of the symmetric customer managed AWS KMS CMK that will be used for
+ // the object. If you specify x-amz-server-side-encryption:aws:kms, but do not
+	// provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS
+ // managed CMK in AWS to protect the data.
+ //
+ // SSEKMSKeyId is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by PutObjectInput's
+ // String and GoString methods.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // By default, Amazon S3 uses the STANDARD Storage Class to store newly created
+ // objects. The STANDARD storage class provides high durability and high availability.
+ // Depending on performance needs, you can specify a different Storage Class.
+ // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
+ // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+ // in the Amazon S3 Service Developer Guide.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // The tag-set for the object. The tag-set must be encoded as URL Query parameters.
+ // (For example, "Key1=Value1")
+ Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata. For information about object
+ // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html).
+ //
+ // In the following example, the request header sets the redirect to an object
+ // (anotherPage.html) in the same bucket:
+ //
+ // x-amz-website-redirect-location: /anotherPage.html
+ //
+ // In the following example, the request header sets the object redirect to
+ // another website:
+ //
+ // x-amz-website-redirect-location: http://www.example.com/
+ //
+ // For more information about website hosting in Amazon S3, see Hosting Websites
+ // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html)
+ // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *PutObjectInput) SetACL(v string) *PutObjectInput {
+ s.ACL = &v
+ return s
+}
+
+// SetBody sets the Body field's value.
+func (s *PutObjectInput) SetBody(v io.ReadSeeker) *PutObjectInput {
+ s.Body = v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectInput) SetBucket(v string) *PutObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutObjectInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput {
+ s.CacheControl = &v
+ return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *PutObjectInput) SetContentDisposition(v string) *PutObjectInput {
+ s.ContentDisposition = &v
+ return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *PutObjectInput) SetContentEncoding(v string) *PutObjectInput {
+ s.ContentEncoding = &v
+ return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *PutObjectInput) SetContentLanguage(v string) *PutObjectInput {
+ s.ContentLanguage = &v
+ return s
+}
+
+// SetContentLength sets the ContentLength field's value.
+func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput {
+ s.ContentLength = &v
+ return s
+}
+
+// SetContentMD5 sets the ContentMD5 field's value.
+func (s *PutObjectInput) SetContentMD5(v string) *PutObjectInput {
+ s.ContentMD5 = &v
+ return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *PutObjectInput) SetContentType(v string) *PutObjectInput {
+ s.ContentType = &v
+ return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *PutObjectInput) SetExpires(v time.Time) *PutObjectInput {
+ s.Expires = &v
+ return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *PutObjectInput) SetGrantFullControl(v string) *PutObjectInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput {
+ s.GrantRead = &v
+ return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *PutObjectInput) SetKey(v string) *PutObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput {
+ s.Metadata = v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetRetentionExpirationDate sets the RetentionExpirationDate field's value.
+func (s *PutObjectInput) SetRetentionExpirationDate(v time.Time) *PutObjectInput {
+ s.RetentionExpirationDate = &v
+ return s
+}
+
+// SetRetentionLegalHoldId sets the RetentionLegalHoldId field's value.
+func (s *PutObjectInput) SetRetentionLegalHoldId(v string) *PutObjectInput {
+ s.RetentionLegalHoldId = &v
+ return s
+}
+
+// SetRetentionPeriod sets the RetentionPeriod field's value.
+func (s *PutObjectInput) SetRetentionPeriod(v int64) *PutObjectInput {
+ s.RetentionPeriod = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+func (s *PutObjectInput) getSSECustomerKey() (v string) {
+ if s.SSECustomerKey == nil {
+ return v
+ }
+ return *s.SSECustomerKey
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *PutObjectInput) SetTagging(v string) *PutObjectInput {
+ s.Tagging = &v
+ return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput {
+ s.WebsiteRedirectLocation = &v
+ return s
+}
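+
+// Example (illustrative sketch, not part of the generated API): the setters
+// above return the receiver, so a PutObjectInput is typically built by
+// chaining them. The bucket and key names below are hypothetical
+// placeholders.
+//
+//	input := (&PutObjectInput{}).
+//		SetBucket("example-bucket").
+//		SetKey("docs/report.txt").
+//		SetContentType("text/plain")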
+
+type PutObjectOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Entity tag for the uploaded object.
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If the expiration is configured for the object (see PutBucketLifecycleConfiguration
+ // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)),
+ // the response includes this header. It includes the expiry-date and rule-id
+ // key-value pairs that provide information about object expiration. The value
+ // of the rule-id is URL encoded.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round-trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If x-amz-server-side-encryption is present and has the value of aws:kms,
+ // this header specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for the
+ // object.
+ //
+ // SSEKMSKeyId is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by PutObjectOutput's
+ // String and GoString methods.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // If you specified server-side encryption either with an AWS KMS customer master
+ // key (CMK) or Amazon S3-managed encryption key in your PUT request, the response
+ // includes this header. It confirms the encryption algorithm that Amazon S3
+ // used to encrypt the object.
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // Version of the object.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectOutput) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput {
+ s.ETag = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *PutObjectOutput) SetExpiration(v string) *PutObjectOutput {
+ s.Expiration = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *PutObjectOutput) SetRequestCharged(v string) *PutObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *PutObjectOutput) SetSSECustomerAlgorithm(v string) *PutObjectOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput {
+ s.VersionId = &v
+ return s
+}
+
+type PutObjectTaggingInput struct {
+ _ struct{} `locationName:"PutObjectTaggingRequest" type:"structure" payload:"Tagging"`
+
+ // The bucket name containing the object.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this action using S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Name of the object key.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Container for the TagSet and Tag elements
+ //
+ // Tagging is a required field
+ Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // The versionId of the object that the tag-set will be added to.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectTaggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectTaggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectTaggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.Tagging == nil {
+ invalidParams.Add(request.NewErrParamRequired("Tagging"))
+ }
+ if s.Tagging != nil {
+ if err := s.Tagging.Validate(); err != nil {
+ invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutObjectTaggingInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *PutObjectTaggingInput) SetExpectedBucketOwner(v string) *PutObjectTaggingInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *PutObjectTaggingInput) SetRequestPayer(v string) *PutObjectTaggingInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput {
+ s.Tagging = v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput {
+ s.VersionId = &v
+ return s
+}
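+
+// Example (illustrative sketch, not part of the generated API): a tag-set is
+// wrapped in a Tagging value and attached with the setters above. The bucket,
+// key, and tag values below are hypothetical placeholders.
+//
+//	tagging := (&Tagging{}).SetTagSet([]*Tag{
+//		(&Tag{}).SetKey("env").SetValue("prod"),
+//	})
+//	input := (&PutObjectTaggingInput{}).
+//		SetBucket("example-bucket").
+//		SetKey("docs/report.txt").
+//		SetTagging(tagging)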
+
+type PutObjectTaggingOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The versionId of the object the tag-set was added to.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectTaggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectTaggingOutput) GoString() string {
+ return s.String()
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput {
+ s.VersionId = &v
+ return s
+}
+
+type PutPublicAccessBlockInput struct {
+ _ struct{} `locationName:"PutPublicAccessBlockRequest" type:"structure" payload:"PublicAccessBlockConfiguration"`
+
+ // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you
+ // want to set.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // The PublicAccessBlock configuration that you want to apply to this Amazon
+ // S3 bucket. You can enable the configuration options in any combination. For
+ // more information about when Amazon S3 considers a bucket or object public,
+ // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status)
+ // in the Amazon S3 User Guide.
+ //
+ // PublicAccessBlockConfiguration is a required field
+ PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `locationName:"PublicAccessBlockConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutPublicAccessBlockInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutPublicAccessBlockInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutPublicAccessBlockInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutPublicAccessBlockInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.PublicAccessBlockConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("PublicAccessBlockConfiguration"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutPublicAccessBlockInput) SetBucket(v string) *PutPublicAccessBlockInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutPublicAccessBlockInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *PutPublicAccessBlockInput) SetExpectedBucketOwner(v string) *PutPublicAccessBlockInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value.
+func (s *PutPublicAccessBlockInput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *PutPublicAccessBlockInput {
+ s.PublicAccessBlockConfiguration = v
+ return s
+}
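+
+// Example (illustrative sketch, not part of the generated API): building the
+// request with the setters above. The PublicAccessBlockConfiguration setters
+// shown here are assumed from the companion type defined elsewhere in this
+// package; the bucket name is a hypothetical placeholder.
+//
+//	cfg := (&PublicAccessBlockConfiguration{}).
+//		SetBlockPublicAcls(true).
+//		SetIgnorePublicAcls(true)
+//	input := (&PutPublicAccessBlockInput{}).
+//		SetBucket("example-bucket").
+//		SetPublicAccessBlockConfiguration(cfg)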
+
+type PutPublicAccessBlockOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutPublicAccessBlockOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutPublicAccessBlockOutput) GoString() string {
+ return s.String()
+}
+
+// Specifies how requests are redirected. In the event of an error, you can
+// specify a different error code to return.
+type Redirect struct {
+ _ struct{} `type:"structure"`
+
+ // The host name to use in the redirect request.
+ HostName *string `type:"string"`
+
+ // The HTTP redirect code to use on the response. Not required if one of the
+ // siblings is present.
+ HttpRedirectCode *string `type:"string"`
+
+ // Protocol to use when redirecting requests. The default is the protocol that
+ // is used in the original request.
+ Protocol *string `type:"string" enum:"Protocol"`
+
+ // The object key prefix to use in the redirect request. For example, to redirect
+ // requests for all pages with prefix docs/ (objects in the docs/ folder) to
+ // documents/, you can set a condition block with KeyPrefixEquals set to docs/
+ // and in the Redirect set ReplaceKeyPrefixWith to documents/. Not required

+ // if one of the siblings is present. Can be present only if ReplaceKeyWith
+ // is not provided.
+ //
+ // Replacement must be made for object keys containing special characters (such
+ // as carriage returns) when using XML requests. For more information, see XML
+ // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ ReplaceKeyPrefixWith *string `type:"string"`
+
+ // The specific object key to use in the redirect request. For example, redirect
+ // request to error.html. Not required if one of the siblings is present. Can
+ // be present only if ReplaceKeyPrefixWith is not provided.
+ //
+ // Replacement must be made for object keys containing special characters (such
+ // as carriage returns) when using XML requests. For more information, see XML
+ // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ ReplaceKeyWith *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Redirect) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Redirect) GoString() string {
+ return s.String()
+}
+
+// SetHostName sets the HostName field's value.
+func (s *Redirect) SetHostName(v string) *Redirect {
+ s.HostName = &v
+ return s
+}
+
+// SetHttpRedirectCode sets the HttpRedirectCode field's value.
+func (s *Redirect) SetHttpRedirectCode(v string) *Redirect {
+ s.HttpRedirectCode = &v
+ return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *Redirect) SetProtocol(v string) *Redirect {
+ s.Protocol = &v
+ return s
+}
+
+// SetReplaceKeyPrefixWith sets the ReplaceKeyPrefixWith field's value.
+func (s *Redirect) SetReplaceKeyPrefixWith(v string) *Redirect {
+ s.ReplaceKeyPrefixWith = &v
+ return s
+}
+
+// SetReplaceKeyWith sets the ReplaceKeyWith field's value.
+func (s *Redirect) SetReplaceKeyWith(v string) *Redirect {
+ s.ReplaceKeyWith = &v
+ return s
+}
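+
+// Example (illustrative sketch, not part of the generated API): a Redirect
+// that rewrites the docs/ prefix to documents/, matching the
+// ReplaceKeyPrefixWith documentation above. The host name is a hypothetical
+// placeholder.
+//
+//	redirect := (&Redirect{}).
+//		SetHostName("example.com").
+//		SetReplaceKeyPrefixWith("documents/")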
+
+// Specifies the redirect behavior of all requests to a website endpoint of
+// an Amazon S3 bucket.
+type RedirectAllRequestsTo struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the host where requests are redirected.
+ //
+ // HostName is a required field
+ HostName *string `type:"string" required:"true"`
+
+ // Protocol to use when redirecting requests. The default is the protocol that
+ // is used in the original request.
+ Protocol *string `type:"string" enum:"Protocol"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RedirectAllRequestsTo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RedirectAllRequestsTo) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RedirectAllRequestsTo) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"}
+ if s.HostName == nil {
+ invalidParams.Add(request.NewErrParamRequired("HostName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetHostName sets the HostName field's value.
+func (s *RedirectAllRequestsTo) SetHostName(v string) *RedirectAllRequestsTo {
+ s.HostName = &v
+ return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo {
+ s.Protocol = &v
+ return s
+}
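+
+// Example (illustrative sketch, not part of the generated API): redirecting
+// every request for the website endpoint to a single host over HTTPS. The
+// host name is a hypothetical placeholder.
+//
+//	all := (&RedirectAllRequestsTo{}).
+//		SetHostName("docs.example.com").
+//		SetProtocol("https")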
+
+// A container for replication rules. You can add up to 1,000 rules. The maximum
+// size of a replication configuration is 2 MB.
+type ReplicationConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the AWS Identity and Access Management
+ // (IAM) role that Amazon S3 assumes when replicating objects. For more information,
+ // see How to Set Up Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html)
+ // in the Amazon S3 User Guide.
+ Role *string `type:"string"`
+
+ // A container for one or more replication rules. A replication configuration
+ // must have at least one rule and can contain a maximum of 1,000 rules.
+ //
+ // Rules is a required field
+ Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReplicationConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReplicationConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicationConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"}
+ if s.Rules == nil {
+ invalidParams.Add(request.NewErrParamRequired("Rules"))
+ }
+ if s.Rules != nil {
+ for i, v := range s.Rules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRole sets the Role field's value.
+func (s *ReplicationConfiguration) SetRole(v string) *ReplicationConfiguration {
+ s.Role = &v
+ return s
+}
+
+// SetRules sets the Rules field's value.
+func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationConfiguration {
+ s.Rules = v
+ return s
+}
+
+// Specifies which Amazon S3 objects to replicate and where to store the replicas.
+type ReplicationRule struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether Amazon S3 replicates delete markers. If you specify a Filter
+ // in your replication configuration, you must also include a DeleteMarkerReplication
+ // element. If your Filter includes a Tag element, the DeleteMarkerReplication
+ // Status must be set to Disabled, because Amazon S3 does not support replicating
+ // delete markers for tag-based rules. For an example configuration, see Basic
+ // Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config).
+ //
+ // For more information about delete marker replication, see Basic Rule Configuration
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html).
+ //
+ // If you are using an earlier version of the replication configuration, Amazon
+ // S3 handles replication of delete markers differently. For more information,
+ // see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations).
+ //
+ // DeleteMarkerReplication is a required field
+ DeleteMarkerReplication *DeleteMarkerReplication `type:"structure" required:"true"`
+
+ // A container for information about the replication destination and its configurations
+ // including enabling the S3 Replication Time Control (S3 RTC).
+ //
+ // Destination is a required field
+ Destination *Destination `type:"structure" required:"true"`
+
+ // A filter that identifies the subset of objects to which the replication rule
+ // applies. A Filter must specify exactly one Prefix, Tag, or an And child element.
+ //
+ // Filter is a required field
+ Filter *ReplicationRuleFilter `type:"structure" required:"true"`
+
+ // A unique identifier for the rule. The maximum value is 255 characters.
+ ID *string `type:"string"`
+
+ // An object key name prefix that identifies the object or objects to which
+ // the rule applies. The maximum prefix length is 1,024 characters. To include
+ // all objects in a bucket, specify an empty string.
+ //
+ // Replacement must be made for object keys containing special characters (such
+ // as carriage returns) when using XML requests. For more information, see XML
+ // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ //
+ // Deprecated: Prefix has been deprecated
+ Prefix *string `deprecated:"true" type:"string"`
+
+ // The priority indicates which rule has precedence whenever two or more replication
+ // rules conflict. Amazon S3 will attempt to replicate objects according to
+ // all replication rules. However, if there are two or more rules with the same
+ // destination bucket, then objects will be replicated according to the rule
+ // with the highest priority. The higher the number, the higher the priority.
+ //
+ // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Priority is a required field
+ Priority *int64 `type:"integer" required:"true"`
+
+ // Specifies whether the rule is enabled.
+ //
+ // Status is a required field
+ Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReplicationRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReplicationRule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicationRule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"}
+ if s.DeleteMarkerReplication == nil {
+ invalidParams.Add(request.NewErrParamRequired("DeleteMarkerReplication"))
+ }
+ if s.Destination == nil {
+ invalidParams.Add(request.NewErrParamRequired("Destination"))
+ }
+ if s.Filter == nil {
+ invalidParams.Add(request.NewErrParamRequired("Filter"))
+ }
+ if s.Priority == nil {
+ invalidParams.Add(request.NewErrParamRequired("Priority"))
+ }
+ if s.Status == nil {
+ invalidParams.Add(request.NewErrParamRequired("Status"))
+ }
+ if s.DeleteMarkerReplication != nil {
+ if err := s.DeleteMarkerReplication.Validate(); err != nil {
+ invalidParams.AddNested("DeleteMarkerReplication", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Destination != nil {
+ if err := s.Destination.Validate(); err != nil {
+ invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Filter != nil {
+ if err := s.Filter.Validate(); err != nil {
+ invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDeleteMarkerReplication sets the DeleteMarkerReplication field's value.
+func (s *ReplicationRule) SetDeleteMarkerReplication(v *DeleteMarkerReplication) *ReplicationRule {
+ s.DeleteMarkerReplication = v
+ return s
+}
+
+// SetDestination sets the Destination field's value.
+func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule {
+ s.Destination = v
+ return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *ReplicationRule) SetFilter(v *ReplicationRuleFilter) *ReplicationRule {
+ s.Filter = v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *ReplicationRule) SetID(v string) *ReplicationRule {
+ s.ID = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule {
+ s.Prefix = &v
+ return s
+}
+
+// SetPriority sets the Priority field's value.
+func (s *ReplicationRule) SetPriority(v int64) *ReplicationRule {
+ s.Priority = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *ReplicationRule) SetStatus(v string) *ReplicationRule {
+ s.Status = &v
+ return s
+}
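+
+// Example (illustrative sketch, not part of the generated API): a minimal
+// rule built with the setters above. The DeleteMarkerReplication and
+// Destination setters are assumed from their companion types defined
+// elsewhere in this package; the bucket ARN is a hypothetical placeholder.
+//
+//	rule := (&ReplicationRule{}).
+//		SetID("replicate-docs").
+//		SetPriority(1).
+//		SetStatus("Enabled").
+//		SetFilter((&ReplicationRuleFilter{}).SetPrefix("docs/")).
+//		SetDeleteMarkerReplication((&DeleteMarkerReplication{}).SetStatus("Disabled")).
+//		SetDestination((&Destination{}).SetBucket("arn:aws:s3:::example-destination"))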
+
+// A container for specifying rule filters. The filters determine the subset
+// of objects to which the rule applies. This element is required only if you
+// specify more than one filter.
+//
+// For example:
+//
+// * If you specify both a Prefix and a Tag filter, wrap these filters in
+// an And tag.
+//
+// * If you specify a filter based on multiple tags, wrap the Tag elements
+// in an And tag.
+type ReplicationRuleAndOperator struct {
+ _ struct{} `type:"structure"`
+
+ // An object key name prefix that identifies the subset of objects to which
+ // the rule applies.
+ Prefix *string `type:"string"`
+
+ // An array of tags containing key and value pairs.
+ Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReplicationRuleAndOperator) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReplicationRuleAndOperator) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicationRuleAndOperator) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleAndOperator"}
+ if s.Tags != nil {
+ for i, v := range s.Tags {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ReplicationRuleAndOperator) SetPrefix(v string) *ReplicationRuleAndOperator {
+ s.Prefix = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *ReplicationRuleAndOperator) SetTags(v []*Tag) *ReplicationRuleAndOperator {
+ s.Tags = v
+ return s
+}
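+
+// Example (illustrative sketch, not part of the generated API): combining a
+// Prefix and a Tag filter by wrapping them in an And element, as the
+// documentation above requires. The prefix and tag values are hypothetical
+// placeholders.
+//
+//	and := (&ReplicationRuleAndOperator{}).
+//		SetPrefix("logs/").
+//		SetTags([]*Tag{
+//			(&Tag{}).SetKey("replicate").SetValue("true"),
+//		})
+//	filter := (&ReplicationRuleFilter{}).SetAnd(and)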
+
+// A filter that identifies the subset of objects to which the replication rule
+// applies. A Filter must specify exactly one Prefix, Tag, or an And child element.
+type ReplicationRuleFilter struct {
+ _ struct{} `type:"structure"`
+
+ // A container for specifying rule filters. The filters determine the subset
+ // of objects to which the rule applies. This element is required only if you
+ // specify more than one filter. For example:
+ //
+ // * If you specify both a Prefix and a Tag filter, wrap these filters in
+ // an And tag.
+ //
+ // * If you specify a filter based on multiple tags, wrap the Tag elements
+ // in an And tag.
+ And *ReplicationRuleAndOperator `type:"structure"`
+
+ // An object key name prefix that identifies the subset of objects to which
+ // the rule applies.
+ //
+ // Replacement must be made for object keys containing special characters (such
+ // as carriage returns) when using XML requests. For more information, see XML
+ // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ Prefix *string `type:"string"`
+
+ // A container for specifying a tag key and value.
+ //
+ // The rule applies only to objects that have the tag in their tag set.
+ Tag *Tag `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReplicationRuleFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReplicationRuleFilter) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicationRuleFilter) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleFilter"}
+ if s.And != nil {
+ if err := s.And.Validate(); err != nil {
+ invalidParams.AddNested("And", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Tag != nil {
+ if err := s.Tag.Validate(); err != nil {
+ invalidParams.AddNested("Tag", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAnd sets the And field's value.
+func (s *ReplicationRuleFilter) SetAnd(v *ReplicationRuleAndOperator) *ReplicationRuleFilter {
+ s.And = v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ReplicationRuleFilter) SetPrefix(v string) *ReplicationRuleFilter {
+ s.Prefix = &v
+ return s
+}
+
+// SetTag sets the Tag field's value.
+func (s *ReplicationRuleFilter) SetTag(v *Tag) *ReplicationRuleFilter {
+ s.Tag = v
+ return s
+}
+
+type RestoreObjectInput struct {
+ _ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"`
+
+ // The bucket name containing the object to restore.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this action using S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Object key for which the action was initiated.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Container for restore job parameters.
+ RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RestoreObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RestoreObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.RestoreRequest != nil {
+ if err := s.RestoreRequest.Validate(); err != nil {
+ invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *RestoreObjectInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *RestoreObjectInput) SetExpectedBucketOwner(v string) *RestoreObjectInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetRestoreRequest sets the RestoreRequest field's value.
+func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput {
+ s.RestoreRequest = v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput {
+ s.VersionId = &v
+ return s
+}
+
+type RestoreObjectOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RestoreObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RestoreObjectOutput) GoString() string {
+ return s.String()
+}
+
+// Container for restore job parameters.
+type RestoreRequest struct {
+ _ struct{} `type:"structure"`
+
+ // Lifetime of the active copy in days. Do not use with restores that specify
+ // OutputLocation.
+ //
+ // The Days element is required for regular restores, and must not be provided
+ // for select requests.
+ //
+ // Days is a required field
+ Days *int64 `type:"integer" required:"true"`
+
+ // S3 Glacier related parameters pertaining to this job. Do not use with restores
+ // that specify OutputLocation.
+ GlacierJobParameters *GlacierJobParameters `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RestoreRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RestoreRequest) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreRequest) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"}
+ if s.Days == nil {
+ invalidParams.Add(request.NewErrParamRequired("Days"))
+ }
+ if s.GlacierJobParameters != nil {
+ if err := s.GlacierJobParameters.Validate(); err != nil {
+ invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDays sets the Days field's value.
+func (s *RestoreRequest) SetDays(v int64) *RestoreRequest {
+ s.Days = &v
+ return s
+}
+
+// SetGlacierJobParameters sets the GlacierJobParameters field's value.
+func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest {
+ s.GlacierJobParameters = v
+ return s
+}
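+
+// Example (illustrative sketch, not part of the generated API): a regular
+// restore that keeps the active copy for seven days. The GlacierJobParameters
+// setter and the "Bulk" tier value are assumed from the companion type
+// defined elsewhere in this package.
+//
+//	req := (&RestoreRequest{}).
+//		SetDays(7).
+//		SetGlacierJobParameters((&GlacierJobParameters{}).SetTier("Bulk"))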
+
+// Specifies the redirect behavior and when a redirect is applied. For more
+// information about routing rules, see Configuring advanced conditional redirects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects)
+// in the Amazon S3 User Guide.
+type RoutingRule struct {
+ _ struct{} `type:"structure"`
+
+ // A container for describing a condition that must be met for the specified
+ // redirect to apply. For example: (1) if the request is for pages in the /docs
+ // folder, redirect to the /documents folder; (2) if the request results in an
+ // HTTP 4xx error, redirect the request to another host where you might process
+ // the error.
+ Condition *Condition `type:"structure"`
+
+ // Container for redirect information. You can redirect requests to another
+ // host, to another page, or with another protocol. In the event of an error,
+ // you can specify a different error code to return.
+ //
+ // Redirect is a required field
+ Redirect *Redirect `type:"structure" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RoutingRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RoutingRule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RoutingRule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RoutingRule"}
+ if s.Redirect == nil {
+ invalidParams.Add(request.NewErrParamRequired("Redirect"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCondition sets the Condition field's value.
+func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule {
+ s.Condition = v
+ return s
+}
+
+// SetRedirect sets the Redirect field's value.
+func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule {
+ s.Redirect = v
+ return s
+}
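+
+// Example (illustrative sketch, not part of the generated API): a rule that
+// redirects requests for the docs/ prefix to documents/. The Condition setter
+// shown here is assumed from its companion type defined elsewhere in this
+// package.
+//
+//	rule := (&RoutingRule{}).
+//		SetCondition((&Condition{}).SetKeyPrefixEquals("docs/")).
+//		SetRedirect((&Redirect{}).SetReplaceKeyPrefixWith("documents/"))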
+
+// A container for a key-value pair (a tag).
+type Tag struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the tag key.
+ //
+ // Key is a required field
+ Key *string `min:"1" type:"string" required:"true"`
+
+ // Value of the tag.
+ //
+ // Value is a required field
+ Value *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Tag) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Tag) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Tag) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Tag"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.Value == nil {
+ invalidParams.Add(request.NewErrParamRequired("Value"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *Tag) SetKey(v string) *Tag {
+ s.Key = &v
+ return s
+}
+
+// SetValue sets the Value field's value.
+func (s *Tag) SetValue(v string) *Tag {
+ s.Value = &v
+ return s
+}
+
+// Container for TagSet elements.
+type Tagging struct {
+ _ struct{} `type:"structure"`
+
+ // A collection for a set of tags
+ //
+ // TagSet is a required field
+ TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Tagging) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Tagging) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Tagging) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Tagging"}
+ if s.TagSet == nil {
+ invalidParams.Add(request.NewErrParamRequired("TagSet"))
+ }
+ if s.TagSet != nil {
+ for i, v := range s.TagSet {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagSet", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetTagSet sets the TagSet field's value.
+func (s *Tagging) SetTagSet(v []*Tag) *Tagging {
+ s.TagSet = v
+ return s
+}
+
+// Container for granting information.
+type TargetGrant struct {
+ _ struct{} `type:"structure"`
+
+ // Container for the person being granted permissions.
+ Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
+
+ // Logging permissions assigned to the grantee for the bucket.
+ Permission *string `type:"string" enum:"BucketLogsPermission"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TargetGrant) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TargetGrant) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TargetGrant) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "TargetGrant"}
+ if s.Grantee != nil {
+ if err := s.Grantee.Validate(); err != nil {
+ invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetGrantee sets the Grantee field's value.
+func (s *TargetGrant) SetGrantee(v *Grantee) *TargetGrant {
+ s.Grantee = v
+ return s
+}
+
+// SetPermission sets the Permission field's value.
+func (s *TargetGrant) SetPermission(v string) *TargetGrant {
+ s.Permission = &v
+ return s
+}
+
+// Specifies when an object transitions to a specified storage class. For more
+// information about Amazon S3 lifecycle configuration rules, see Transitioning
+// Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html)
+// in the Amazon S3 User Guide.
+type Transition struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates when objects are transitioned to the specified storage class. The
+ // date value must be in ISO 8601 format. The time is always midnight UTC.
+ Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // Indicates the number of days after creation when objects are transitioned
+ // to the specified storage class. The value must be a positive integer.
+ Days *int64 `type:"integer"`
+
+ // The storage class to which you want the object to transition.
+ StorageClass *string `type:"string" enum:"TransitionStorageClass"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Transition) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Transition) GoString() string {
+ return s.String()
+}
+
+// SetDate sets the Date field's value.
+func (s *Transition) SetDate(v time.Time) *Transition {
+ s.Date = &v
+ return s
+}
+
+// SetDays sets the Days field's value.
+func (s *Transition) SetDays(v int64) *Transition {
+ s.Days = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *Transition) SetStorageClass(v string) *Transition {
+ s.StorageClass = &v
+ return s
+}
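+
+// Example (illustrative sketch, not part of the generated API): transition
+// objects to a colder storage class 30 days after creation. The storage class
+// string is a hypothetical example; valid values come from the
+// TransitionStorageClass enum.
+//
+//	t := (&Transition{}).
+//		SetDays(30).
+//		SetStorageClass("GLACIER")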
+
+type UploadPartCopyInput struct {
+ _ struct{} `locationName:"UploadPartCopyRequest" type:"structure"`
+
+ // The bucket name.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this action using S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies the source object for the copy operation. You specify the value
+ // in one of two formats, depending on whether you want to access the source
+ // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html):
+ //
+ // * For objects not accessed through an access point, specify the name of
+ // the source bucket and key of the source object, separated by a slash (/).
+ // For example, to copy the object reports/january.pdf from the bucket awsexamplebucket,
+ // use awsexamplebucket/reports/january.pdf. The value must be URL encoded.
+ //
+ // * For objects accessed through access points, specify the Amazon Resource
+ // Name (ARN) of the object as accessed through the access point, in the
+ // format arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>.
+ // For example, to copy the object reports/january.pdf through access point
+ // my-access-point owned by account 123456789012 in Region us-west-2, use
+ // the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf.
+ // The value must be URL encoded. Amazon S3 supports copy operations using
+ // access points only when the source and destination buckets are in the
+ // same AWS Region. Alternatively, for objects accessed through Amazon S3
+ // on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>.
+ // For example, to copy the object reports/january.pdf through outpost my-outpost
+ // owned by account 123456789012 in Region us-west-2, use the URL encoding
+ // of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf.
+ // The value must be URL encoded.
+ //
+ // To copy a specific version of an object, append ?versionId=<version-id> to
+ // the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).
+ // If you don't specify a version ID, Amazon S3 copies the latest version of
+ // the source object.
+ //
+ // CopySource is a required field
+ CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
+
+ // Copies the object if its entity tag (ETag) matches the specified tag.
+ CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
+
+ // Copies the object if it has been modified since the specified time.
+ CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"`
+
+ // Copies the object if its entity tag (ETag) is different than the specified
+ // ETag.
+ CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
+
+ // Copies the object if it hasn't been modified since the specified time.
+ CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"`
+
+ // The range of bytes to copy from the source object. The range value must use
+ // the form bytes=first-last, where the first and last are the zero-based byte
+ // offsets to copy. For example, bytes=0-9 indicates that you want to copy the
+ // first 10 bytes of the source. You can copy a range only if the source object
+ // is greater than 5 MB.
+ CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"`
+
+ // Specifies the algorithm to use when decrypting the source object (for example,
+ // AES256).
+ CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+ // the source object. The encryption key provided in this header must be one
+ // that was used when the source object was created.
+ //
+ // CopySourceSSECustomerKey is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by UploadPartCopyInput's
+ // String and GoString methods.
+ CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // The account ID of the expected source bucket owner. If the source bucket
+ // is owned by a different account, the request will fail with an HTTP 403 (Access
+ // Denied) error.
+ ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"`
+
+ // Object key for which the multipart upload was initiated.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of part being copied. This is a positive integer between 1 and
+ // 10,000.
+ //
+ // PartNumber is a required field
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // S3 does not store the encryption key. The key must be appropriate for use
+ // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header. This must be the same encryption key specified in the initiate multipart
+ // upload request.
+ //
+ // SSECustomerKey is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by UploadPartCopyInput's
+ // String and GoString methods.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Upload ID identifying the multipart upload whose part is being copied.
+ //
+ // UploadId is a required field
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UploadPartCopyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UploadPartCopyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UploadPartCopyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UploadPartCopyInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.CopySource == nil {
+ invalidParams.Add(request.NewErrParamRequired("CopySource"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.PartNumber == nil {
+ invalidParams.Add(request.NewErrParamRequired("PartNumber"))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *UploadPartCopyInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetCopySource sets the CopySource field's value.
+func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput {
+ s.CopySource = &v
+ return s
+}
+
+// SetCopySourceIfMatch sets the CopySourceIfMatch field's value.
+func (s *UploadPartCopyInput) SetCopySourceIfMatch(v string) *UploadPartCopyInput {
+ s.CopySourceIfMatch = &v
+ return s
+}
+
+// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value.
+func (s *UploadPartCopyInput) SetCopySourceIfModifiedSince(v time.Time) *UploadPartCopyInput {
+ s.CopySourceIfModifiedSince = &v
+ return s
+}
+
+// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value.
+func (s *UploadPartCopyInput) SetCopySourceIfNoneMatch(v string) *UploadPartCopyInput {
+ s.CopySourceIfNoneMatch = &v
+ return s
+}
+
+// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value.
+func (s *UploadPartCopyInput) SetCopySourceIfUnmodifiedSince(v time.Time) *UploadPartCopyInput {
+ s.CopySourceIfUnmodifiedSince = &v
+ return s
+}
+
+// SetCopySourceRange sets the CopySourceRange field's value.
+func (s *UploadPartCopyInput) SetCopySourceRange(v string) *UploadPartCopyInput {
+ s.CopySourceRange = &v
+ return s
+}
+
+// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value.
+func (s *UploadPartCopyInput) SetCopySourceSSECustomerAlgorithm(v string) *UploadPartCopyInput {
+ s.CopySourceSSECustomerAlgorithm = &v
+ return s
+}
+
+// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value.
+func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartCopyInput {
+ s.CopySourceSSECustomerKey = &v
+ return s
+}
+
+func (s *UploadPartCopyInput) getCopySourceSSECustomerKey() (v string) {
+ if s.CopySourceSSECustomerKey == nil {
+ return v
+ }
+ return *s.CopySourceSSECustomerKey
+}
+
+// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value.
+func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput {
+ s.CopySourceSSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *UploadPartCopyInput) SetExpectedBucketOwner(v string) *UploadPartCopyInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetExpectedSourceBucketOwner sets the ExpectedSourceBucketOwner field's value.
+func (s *UploadPartCopyInput) SetExpectedSourceBucketOwner(v string) *UploadPartCopyInput {
+ s.ExpectedSourceBucketOwner = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *UploadPartCopyInput) SetKey(v string) *UploadPartCopyInput {
+ s.Key = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *UploadPartCopyInput) SetPartNumber(v int64) *UploadPartCopyInput {
+ s.PartNumber = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *UploadPartCopyInput) SetRequestPayer(v string) *UploadPartCopyInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *UploadPartCopyInput) SetSSECustomerAlgorithm(v string) *UploadPartCopyInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+func (s *UploadPartCopyInput) getSSECustomerKey() (v string) {
+ if s.SSECustomerKey == nil {
+ return v
+ }
+ return *s.SSECustomerKey
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput {
+ s.UploadId = &v
+ return s
+}
+
+type UploadPartCopyOutput struct {
+ _ struct{} `type:"structure" payload:"CopyPartResult"`
+
+ // Container for all response elements.
+ CopyPartResult *CopyPartResult `type:"structure"`
+
+ // The version of the source object that was copied, if you have enabled versioning
+ // on the source bucket.
+ CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round-trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for the
+ // object.
+ //
+ // SSEKMSKeyId is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by UploadPartCopyOutput's
+ // String and GoString methods.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UploadPartCopyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UploadPartCopyOutput) GoString() string {
+ return s.String()
+}
+
+// SetCopyPartResult sets the CopyPartResult field's value.
+func (s *UploadPartCopyOutput) SetCopyPartResult(v *CopyPartResult) *UploadPartCopyOutput {
+ s.CopyPartResult = v
+ return s
+}
+
+// SetCopySourceVersionId sets the CopySourceVersionId field's value.
+func (s *UploadPartCopyOutput) SetCopySourceVersionId(v string) *UploadPartCopyOutput {
+ s.CopySourceVersionId = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *UploadPartCopyOutput) SetRequestCharged(v string) *UploadPartCopyOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *UploadPartCopyOutput) SetSSECustomerAlgorithm(v string) *UploadPartCopyOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *UploadPartCopyOutput) SetSSECustomerKeyMD5(v string) *UploadPartCopyOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *UploadPartCopyOutput) SetSSEKMSKeyId(v string) *UploadPartCopyOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopyOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+type UploadPartInput struct {
+ _ struct{} `locationName:"UploadPartRequest" type:"structure" payload:"Body"`
+
+ // Object data.
+ Body io.ReadSeeker `type:"blob"`
+
+ // The name of the bucket to which the multipart upload was initiated.
+ //
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // When using this action with Amazon S3 on Outposts, you must direct requests
+ // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+ // using this action with S3 on Outposts through the AWS SDKs, you provide
+ // the Outposts bucket ARN in place of the bucket name. For more information
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Size of the body in bytes. This parameter is useful when the size of the
+ // body cannot be determined automatically.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+ // The base64-encoded 128-bit MD5 digest of the part data. This parameter is
+ // auto-populated when using the command from the CLI. This parameter is required
+ // if object lock parameters are specified.
+ ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
+
+ // Ignored by COS.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Object key for which the multipart upload was initiated.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of part being uploaded. This is a positive integer between 1
+ // and 10,000.
+ //
+ // PartNumber is a required field
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // S3 does not store the encryption key. The key must be appropriate for use
+ // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header. This must be the same encryption key specified in the initiate multipart
+ // upload request.
+ //
+ // SSECustomerKey is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by UploadPartInput's
+ // String and GoString methods.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Upload ID identifying the multipart upload whose part is being uploaded.
+ //
+ // UploadId is a required field
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UploadPartInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UploadPartInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UploadPartInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UploadPartInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.PartNumber == nil {
+ invalidParams.Add(request.NewErrParamRequired("PartNumber"))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBody sets the Body field's value.
+func (s *UploadPartInput) SetBody(v io.ReadSeeker) *UploadPartInput {
+ s.Body = v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *UploadPartInput) SetBucket(v string) *UploadPartInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *UploadPartInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetContentLength sets the ContentLength field's value.
+func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput {
+ s.ContentLength = &v
+ return s
+}
+
+// SetContentMD5 sets the ContentMD5 field's value.
+func (s *UploadPartInput) SetContentMD5(v string) *UploadPartInput {
+ s.ContentMD5 = &v
+ return s
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *UploadPartInput) SetExpectedBucketOwner(v string) *UploadPartInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *UploadPartInput) SetKey(v string) *UploadPartInput {
+ s.Key = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *UploadPartInput) SetPartNumber(v int64) *UploadPartInput {
+ s.PartNumber = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *UploadPartInput) SetRequestPayer(v string) *UploadPartInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *UploadPartInput) SetSSECustomerAlgorithm(v string) *UploadPartInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+func (s *UploadPartInput) getSSECustomerKey() (v string) {
+ if s.SSECustomerKey == nil {
+ return v
+ }
+ return *s.SSECustomerKey
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput {
+ s.UploadId = &v
+ return s
+}
+
+type UploadPartOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Entity tag for the uploaded object.
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round-trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetric customer managed customer master key (CMK) that was used for the object.
+ //
+ // SSEKMSKeyId is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by UploadPartOutput's
+ // String and GoString methods.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UploadPartOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UploadPartOutput) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *UploadPartOutput) SetETag(v string) *UploadPartOutput {
+ s.ETag = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *UploadPartOutput) SetRequestCharged(v string) *UploadPartOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *UploadPartOutput) SetSSECustomerAlgorithm(v string) *UploadPartOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *UploadPartOutput) SetSSECustomerKeyMD5(v string) *UploadPartOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *UploadPartOutput) SetSSEKMSKeyId(v string) *UploadPartOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// Describes the versioning state of an Amazon S3 bucket. For more information,
+// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html)
+// in the Amazon Simple Storage Service API Reference.
+type VersioningConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether MFA delete is enabled in the bucket versioning configuration.
+ // This element is only returned if the bucket has been configured with MFA
+ // delete. If the bucket has never been so configured, this element is not returned.
+ MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"`
+
+ // The versioning state of the bucket.
+ Status *string `type:"string" enum:"BucketVersioningStatus"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VersioningConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VersioningConfiguration) GoString() string {
+ return s.String()
+}
+
+// SetMFADelete sets the MFADelete field's value.
+func (s *VersioningConfiguration) SetMFADelete(v string) *VersioningConfiguration {
+ s.MFADelete = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration {
+ s.Status = &v
+ return s
+}
+
+// Specifies website configuration parameters for an Amazon S3 bucket.
+type WebsiteConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the error document for the website.
+ ErrorDocument *ErrorDocument `type:"structure"`
+
+ // The name of the index document for the website.
+ IndexDocument *IndexDocument `type:"structure"`
+
+ // The redirect behavior for every request to this bucket's website endpoint.
+ //
+ // If you specify this property, you can't specify any other property.
+ RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`
+
+ // Rules that define when a redirect is applied and the redirect behavior.
+ RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s WebsiteConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s WebsiteConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *WebsiteConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "WebsiteConfiguration"}
+ if s.ErrorDocument != nil {
+ if err := s.ErrorDocument.Validate(); err != nil {
+ invalidParams.AddNested("ErrorDocument", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.IndexDocument != nil {
+ if err := s.IndexDocument.Validate(); err != nil {
+ invalidParams.AddNested("IndexDocument", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.RedirectAllRequestsTo != nil {
+ if err := s.RedirectAllRequestsTo.Validate(); err != nil {
+ invalidParams.AddNested("RedirectAllRequestsTo", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.RoutingRules != nil {
+ for i, v := range s.RoutingRules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RoutingRules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetErrorDocument sets the ErrorDocument field's value.
+func (s *WebsiteConfiguration) SetErrorDocument(v *ErrorDocument) *WebsiteConfiguration {
+ s.ErrorDocument = v
+ return s
+}
+
+// SetIndexDocument sets the IndexDocument field's value.
+func (s *WebsiteConfiguration) SetIndexDocument(v *IndexDocument) *WebsiteConfiguration {
+ s.IndexDocument = v
+ return s
+}
+
+// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value.
+func (s *WebsiteConfiguration) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *WebsiteConfiguration {
+ s.RedirectAllRequestsTo = v
+ return s
+}
+
+// SetRoutingRules sets the RoutingRules field's value.
+func (s *WebsiteConfiguration) SetRoutingRules(v []*RoutingRule) *WebsiteConfiguration {
+ s.RoutingRules = v
+ return s
+}
+
+const (
+ // BucketCannedACLPrivate is a BucketCannedACL enum value
+ BucketCannedACLPrivate = "private"
+
+ // BucketCannedACLPublicRead is a BucketCannedACL enum value
+ BucketCannedACLPublicRead = "public-read"
+
+ // BucketCannedACLPublicReadWrite is a BucketCannedACL enum value
+ BucketCannedACLPublicReadWrite = "public-read-write"
+
+ // BucketCannedACLAuthenticatedRead is a BucketCannedACL enum value
+ BucketCannedACLAuthenticatedRead = "authenticated-read"
+)
+
+// BucketCannedACL_Values returns all elements of the BucketCannedACL enum
+func BucketCannedACL_Values() []string {
+ return []string{
+ BucketCannedACLPrivate,
+ BucketCannedACLPublicRead,
+ BucketCannedACLPublicReadWrite,
+ BucketCannedACLAuthenticatedRead,
+ }
+}
+
+const (
+ // BucketLocationConstraintEu is a BucketLocationConstraint enum value
+ BucketLocationConstraintEu = "EU"
+
+ // BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintEuWest1 = "eu-west-1"
+
+ // BucketLocationConstraintUsWest1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintUsWest1 = "us-west-1"
+
+ // BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value
+ BucketLocationConstraintUsWest2 = "us-west-2"
+
+ // BucketLocationConstraintApSouth1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintApSouth1 = "ap-south-1"
+
+ // BucketLocationConstraintApSoutheast1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintApSoutheast1 = "ap-southeast-1"
+
+ // BucketLocationConstraintApSoutheast2 is a BucketLocationConstraint enum value
+ BucketLocationConstraintApSoutheast2 = "ap-southeast-2"
+
+ // BucketLocationConstraintApNortheast1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintApNortheast1 = "ap-northeast-1"
+
+ // BucketLocationConstraintSaEast1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintSaEast1 = "sa-east-1"
+
+ // BucketLocationConstraintCnNorth1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintCnNorth1 = "cn-north-1"
+
+ // BucketLocationConstraintEuCentral1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintEuCentral1 = "eu-central-1"
+)
+
+// BucketLocationConstraint_Values returns all elements of the BucketLocationConstraint enum
+func BucketLocationConstraint_Values() []string {
+ return []string{
+ BucketLocationConstraintEu,
+ BucketLocationConstraintEuWest1,
+ BucketLocationConstraintUsWest1,
+ BucketLocationConstraintUsWest2,
+ BucketLocationConstraintApSouth1,
+ BucketLocationConstraintApSoutheast1,
+ BucketLocationConstraintApSoutheast2,
+ BucketLocationConstraintApNortheast1,
+ BucketLocationConstraintSaEast1,
+ BucketLocationConstraintCnNorth1,
+ BucketLocationConstraintEuCentral1,
+ }
+}
+
+const (
+ // BucketLogsPermissionFullControl is a BucketLogsPermission enum value
+ BucketLogsPermissionFullControl = "FULL_CONTROL"
+
+ // BucketLogsPermissionRead is a BucketLogsPermission enum value
+ BucketLogsPermissionRead = "READ"
+
+ // BucketLogsPermissionWrite is a BucketLogsPermission enum value
+ BucketLogsPermissionWrite = "WRITE"
+)
+
+// BucketLogsPermission_Values returns all elements of the BucketLogsPermission enum
+func BucketLogsPermission_Values() []string {
+ return []string{
+ BucketLogsPermissionFullControl,
+ BucketLogsPermissionRead,
+ BucketLogsPermissionWrite,
+ }
+}
+
+const (
+ // BucketProtectionStatusRetention is a BucketProtectionStatus enum value
+ BucketProtectionStatusRetention = "Retention"
+)
+
+// BucketProtectionStatus_Values returns all elements of the BucketProtectionStatus enum
+func BucketProtectionStatus_Values() []string {
+ return []string{
+ BucketProtectionStatusRetention,
+ }
+}
+
+const (
+ // BucketVersioningStatusEnabled is a BucketVersioningStatus enum value
+ BucketVersioningStatusEnabled = "Enabled"
+
+ // BucketVersioningStatusSuspended is a BucketVersioningStatus enum value
+ BucketVersioningStatusSuspended = "Suspended"
+)
+
+// BucketVersioningStatus_Values returns all elements of the BucketVersioningStatus enum
+func BucketVersioningStatus_Values() []string {
+ return []string{
+ BucketVersioningStatusEnabled,
+ BucketVersioningStatusSuspended,
+ }
+}
+
+const (
+ // DeleteMarkerReplicationStatusEnabled is a DeleteMarkerReplicationStatus enum value
+ DeleteMarkerReplicationStatusEnabled = "Enabled"
+
+ // DeleteMarkerReplicationStatusDisabled is a DeleteMarkerReplicationStatus enum value
+ DeleteMarkerReplicationStatusDisabled = "Disabled"
+)
+
+// DeleteMarkerReplicationStatus_Values returns all elements of the DeleteMarkerReplicationStatus enum
+func DeleteMarkerReplicationStatus_Values() []string {
+ return []string{
+ DeleteMarkerReplicationStatusEnabled,
+ DeleteMarkerReplicationStatusDisabled,
+ }
+}
+
+// Requests Amazon S3 to encode the object keys in the response and specifies
+// the encoding method to use. An object key may contain any Unicode character;
+ // however, an XML 1.0 parser cannot parse some characters, such as characters
+// with an ASCII value from 0 to 10. For characters that are not supported in
+// XML 1.0, you can add this parameter to request that Amazon S3 encode the
+// keys in the response.
+const (
+ // EncodingTypeUrl is a EncodingType enum value
+ EncodingTypeUrl = "url"
+)
+
+// EncodingType_Values returns all elements of the EncodingType enum
+func EncodingType_Values() []string {
+ return []string{
+ EncodingTypeUrl,
+ }
+}
+
+const (
+ // ExpirationStatusEnabled is a ExpirationStatus enum value
+ ExpirationStatusEnabled = "Enabled"
+
+ // ExpirationStatusDisabled is a ExpirationStatus enum value
+ ExpirationStatusDisabled = "Disabled"
+)
+
+// ExpirationStatus_Values returns all elements of the ExpirationStatus enum
+func ExpirationStatus_Values() []string {
+ return []string{
+ ExpirationStatusEnabled,
+ ExpirationStatusDisabled,
+ }
+}
+
+const (
+ // MFADeleteEnabled is a MFADelete enum value
+ MFADeleteEnabled = "Enabled"
+
+ // MFADeleteDisabled is a MFADelete enum value
+ MFADeleteDisabled = "Disabled"
+)
+
+// MFADelete_Values returns all elements of the MFADelete enum
+func MFADelete_Values() []string {
+ return []string{
+ MFADeleteEnabled,
+ MFADeleteDisabled,
+ }
+}
+
+const (
+ // MFADeleteStatusEnabled is a MFADeleteStatus enum value
+ MFADeleteStatusEnabled = "Enabled"
+
+ // MFADeleteStatusDisabled is a MFADeleteStatus enum value
+ MFADeleteStatusDisabled = "Disabled"
+)
+
+// MFADeleteStatus_Values returns all elements of the MFADeleteStatus enum
+func MFADeleteStatus_Values() []string {
+ return []string{
+ MFADeleteStatusEnabled,
+ MFADeleteStatusDisabled,
+ }
+}
+
+const (
+ // MetadataDirectiveCopy is a MetadataDirective enum value
+ MetadataDirectiveCopy = "COPY"
+
+ // MetadataDirectiveReplace is a MetadataDirective enum value
+ MetadataDirectiveReplace = "REPLACE"
+)
+
+// MetadataDirective_Values returns all elements of the MetadataDirective enum
+func MetadataDirective_Values() []string {
+ return []string{
+ MetadataDirectiveCopy,
+ MetadataDirectiveReplace,
+ }
+}
+
+const (
+ // ObjectCannedACLPrivate is a ObjectCannedACL enum value
+ ObjectCannedACLPrivate = "private"
+
+ // ObjectCannedACLPublicRead is a ObjectCannedACL enum value
+ ObjectCannedACLPublicRead = "public-read"
+
+ // ObjectCannedACLPublicReadWrite is a ObjectCannedACL enum value
+ ObjectCannedACLPublicReadWrite = "public-read-write"
+
+ // ObjectCannedACLAuthenticatedRead is a ObjectCannedACL enum value
+ ObjectCannedACLAuthenticatedRead = "authenticated-read"
+
+ // ObjectCannedACLAwsExecRead is a ObjectCannedACL enum value
+ ObjectCannedACLAwsExecRead = "aws-exec-read"
+
+ // ObjectCannedACLBucketOwnerRead is a ObjectCannedACL enum value
+ ObjectCannedACLBucketOwnerRead = "bucket-owner-read"
+
+ // ObjectCannedACLBucketOwnerFullControl is a ObjectCannedACL enum value
+ ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control"
+)
+
+// ObjectCannedACL_Values returns all elements of the ObjectCannedACL enum
+func ObjectCannedACL_Values() []string {
+ return []string{
+ ObjectCannedACLPrivate,
+ ObjectCannedACLPublicRead,
+ ObjectCannedACLPublicReadWrite,
+ ObjectCannedACLAuthenticatedRead,
+ ObjectCannedACLAwsExecRead,
+ ObjectCannedACLBucketOwnerRead,
+ ObjectCannedACLBucketOwnerFullControl,
+ }
+}
+
+const (
+ // ObjectStorageClassStandard is a ObjectStorageClass enum value
+ ObjectStorageClassStandard = "STANDARD"
+
+ // ObjectStorageClassReducedRedundancy is a ObjectStorageClass enum value
+ ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY"
+
+ // ObjectStorageClassGlacier is a ObjectStorageClass enum value
+ ObjectStorageClassGlacier = "GLACIER"
+
+ // ObjectStorageClassAccelerated is a ObjectStorageClass enum value
+ ObjectStorageClassAccelerated = "ACCELERATED"
+
+ // ObjectStorageClassStandardIa is a ObjectStorageClass enum value
+ ObjectStorageClassStandardIa = "STANDARD_IA"
+
+ // ObjectStorageClassOnezoneIa is a ObjectStorageClass enum value
+ ObjectStorageClassOnezoneIa = "ONEZONE_IA"
+
+ // ObjectStorageClassIntelligentTiering is a ObjectStorageClass enum value
+ ObjectStorageClassIntelligentTiering = "INTELLIGENT_TIERING"
+
+ // ObjectStorageClassDeepArchive is a ObjectStorageClass enum value
+ ObjectStorageClassDeepArchive = "DEEP_ARCHIVE"
+)
+
+// ObjectStorageClass_Values returns all elements of the ObjectStorageClass enum
+func ObjectStorageClass_Values() []string {
+ return []string{
+ ObjectStorageClassStandard,
+ ObjectStorageClassReducedRedundancy,
+ ObjectStorageClassGlacier,
+ ObjectStorageClassAccelerated,
+ ObjectStorageClassStandardIa,
+ ObjectStorageClassOnezoneIa,
+ ObjectStorageClassIntelligentTiering,
+ ObjectStorageClassDeepArchive,
+ }
+}
+
+const (
+ // ObjectVersionStorageClassStandard is a ObjectVersionStorageClass enum value
+ ObjectVersionStorageClassStandard = "STANDARD"
+)
+
+// ObjectVersionStorageClass_Values returns all elements of the ObjectVersionStorageClass enum
+func ObjectVersionStorageClass_Values() []string {
+ return []string{
+ ObjectVersionStorageClassStandard,
+ }
+}
+
+const (
+ // PermissionFullControl is a Permission enum value
+ PermissionFullControl = "FULL_CONTROL"
+
+ // PermissionWrite is a Permission enum value
+ PermissionWrite = "WRITE"
+
+ // PermissionWriteAcp is a Permission enum value
+ PermissionWriteAcp = "WRITE_ACP"
+
+ // PermissionRead is a Permission enum value
+ PermissionRead = "READ"
+
+ // PermissionReadAcp is a Permission enum value
+ PermissionReadAcp = "READ_ACP"
+)
+
+// Permission_Values returns all elements of the Permission enum
+func Permission_Values() []string {
+ return []string{
+ PermissionFullControl,
+ PermissionWrite,
+ PermissionWriteAcp,
+ PermissionRead,
+ PermissionReadAcp,
+ }
+}
+
+const (
+ // ProtocolHttp is a Protocol enum value
+ ProtocolHttp = "http"
+
+ // ProtocolHttps is a Protocol enum value
+ ProtocolHttps = "https"
+)
+
+// Protocol_Values returns all elements of the Protocol enum
+func Protocol_Values() []string {
+ return []string{
+ ProtocolHttp,
+ ProtocolHttps,
+ }
+}
+
+const (
+ // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value
+ ReplicationRuleStatusEnabled = "Enabled"
+
+ // ReplicationRuleStatusDisabled is a ReplicationRuleStatus enum value
+ ReplicationRuleStatusDisabled = "Disabled"
+)
+
+// ReplicationRuleStatus_Values returns all elements of the ReplicationRuleStatus enum
+func ReplicationRuleStatus_Values() []string {
+ return []string{
+ ReplicationRuleStatusEnabled,
+ ReplicationRuleStatusDisabled,
+ }
+}
+
+const (
+ // ReplicationStatusComplete is a ReplicationStatus enum value
+ ReplicationStatusComplete = "COMPLETE"
+
+ // ReplicationStatusPending is a ReplicationStatus enum value
+ ReplicationStatusPending = "PENDING"
+
+ // ReplicationStatusFailed is a ReplicationStatus enum value
+ ReplicationStatusFailed = "FAILED"
+
+ // ReplicationStatusReplica is a ReplicationStatus enum value
+ ReplicationStatusReplica = "REPLICA"
+)
+
+// ReplicationStatus_Values returns all elements of the ReplicationStatus enum
+func ReplicationStatus_Values() []string {
+ return []string{
+ ReplicationStatusComplete,
+ ReplicationStatusPending,
+ ReplicationStatusFailed,
+ ReplicationStatusReplica,
+ }
+}
+
+// If present, indicates that the requester was successfully charged for the
+// request.
+const (
+ // RequestChargedRequester is a RequestCharged enum value
+ RequestChargedRequester = "requester"
+)
+
+// RequestCharged_Values returns all elements of the RequestCharged enum
+func RequestCharged_Values() []string {
+ return []string{
+ RequestChargedRequester,
+ }
+}
+
+// Confirms that the requester knows that they will be charged for the request.
+// Bucket owners need not specify this parameter in their requests. For information
+// about downloading objects from requester pays buckets, see Downloading Objects
+// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+// in the Amazon S3 Developer Guide.
+const (
+ // RequestPayerRequester is a RequestPayer enum value
+ RequestPayerRequester = "requester"
+)
+
+// RequestPayer_Values returns all elements of the RequestPayer enum
+func RequestPayer_Values() []string {
+ return []string{
+ RequestPayerRequester,
+ }
+}
+
+const (
+ // RetentionDirectiveCopy is a RetentionDirective enum value
+ RetentionDirectiveCopy = "COPY"
+
+ // RetentionDirectiveReplace is a RetentionDirective enum value
+ RetentionDirectiveReplace = "REPLACE"
+)
+
+// RetentionDirective_Values returns all elements of the RetentionDirective enum
+func RetentionDirective_Values() []string {
+ return []string{
+ RetentionDirectiveCopy,
+ RetentionDirectiveReplace,
+ }
+}
+
+const (
+ // ServerSideEncryptionAes256 is a ServerSideEncryption enum value
+ ServerSideEncryptionAes256 = "AES256"
+
+ // ServerSideEncryptionAwsKms is a ServerSideEncryption enum value
+ ServerSideEncryptionAwsKms = "aws:kms"
+)
+
+// ServerSideEncryption_Values returns all elements of the ServerSideEncryption enum
+func ServerSideEncryption_Values() []string {
+ return []string{
+ ServerSideEncryptionAes256,
+ ServerSideEncryptionAwsKms,
+ }
+}
+
+const (
+ // StorageClassStandard is a StorageClass enum value
+ StorageClassStandard = "STANDARD"
+
+ // StorageClassReducedRedundancy is a StorageClass enum value
+ StorageClassReducedRedundancy = "REDUCED_REDUNDANCY"
+
+ // StorageClassStandardIa is a StorageClass enum value
+ StorageClassStandardIa = "STANDARD_IA"
+
+ // StorageClassOnezoneIa is a StorageClass enum value
+ StorageClassOnezoneIa = "ONEZONE_IA"
+
+ // StorageClassIntelligentTiering is a StorageClass enum value
+ StorageClassIntelligentTiering = "INTELLIGENT_TIERING"
+
+ // StorageClassGlacier is a StorageClass enum value
+ StorageClassGlacier = "GLACIER"
+
+ // StorageClassAccelerated is a StorageClass enum value
+ StorageClassAccelerated = "ACCELERATED"
+
+ // StorageClassDeepArchive is a StorageClass enum value
+ StorageClassDeepArchive = "DEEP_ARCHIVE"
+)
+
+// StorageClass_Values returns all elements of the StorageClass enum
+func StorageClass_Values() []string {
+ return []string{
+ StorageClassStandard,
+ StorageClassReducedRedundancy,
+ StorageClassStandardIa,
+ StorageClassOnezoneIa,
+ StorageClassIntelligentTiering,
+ StorageClassGlacier,
+ StorageClassAccelerated,
+ StorageClassDeepArchive,
+ }
+}
+
+const (
+ // TaggingDirectiveCopy is a TaggingDirective enum value
+ TaggingDirectiveCopy = "COPY"
+
+ // TaggingDirectiveReplace is a TaggingDirective enum value
+ TaggingDirectiveReplace = "REPLACE"
+)
+
+// TaggingDirective_Values returns all elements of the TaggingDirective enum
+func TaggingDirective_Values() []string {
+ return []string{
+ TaggingDirectiveCopy,
+ TaggingDirectiveReplace,
+ }
+}
+
+const (
+ // TierAccelerated is a Tier enum value
+ TierAccelerated = "Accelerated"
+
+ // TierStandard is a Tier enum value
+ TierStandard = "Standard"
+
+ // TierBulk is a Tier enum value
+ TierBulk = "Bulk"
+
+ // TierExpedited is a Tier enum value
+ TierExpedited = "Expedited"
+)
+
+// Tier_Values returns all elements of the Tier enum
+func Tier_Values() []string {
+ return []string{
+ TierAccelerated,
+ TierStandard,
+ TierBulk,
+ TierExpedited,
+ }
+}
+
+const (
+ // TransitionStorageClassGlacier is a TransitionStorageClass enum value
+ TransitionStorageClassGlacier = "GLACIER"
+
+ // TransitionStorageClassAccelerated is a TransitionStorageClass enum value
+ TransitionStorageClassAccelerated = "ACCELERATED"
+
+ // TransitionStorageClassStandardIa is a TransitionStorageClass enum value
+ TransitionStorageClassStandardIa = "STANDARD_IA"
+
+ // TransitionStorageClassOnezoneIa is a TransitionStorageClass enum value
+ TransitionStorageClassOnezoneIa = "ONEZONE_IA"
+
+ // TransitionStorageClassIntelligentTiering is a TransitionStorageClass enum value
+ TransitionStorageClassIntelligentTiering = "INTELLIGENT_TIERING"
+
+ // TransitionStorageClassDeepArchive is a TransitionStorageClass enum value
+ TransitionStorageClassDeepArchive = "DEEP_ARCHIVE"
+)
+
+// TransitionStorageClass_Values returns all elements of the TransitionStorageClass enum
+func TransitionStorageClass_Values() []string {
+ return []string{
+ TransitionStorageClassGlacier,
+ TransitionStorageClassAccelerated,
+ TransitionStorageClassStandardIa,
+ TransitionStorageClassOnezoneIa,
+ TransitionStorageClassIntelligentTiering,
+ TransitionStorageClassDeepArchive,
+ }
+}
+
+const (
+ // TypeCanonicalUser is a Type enum value
+ TypeCanonicalUser = "CanonicalUser"
+
+ // TypeAmazonCustomerByEmail is a Type enum value
+ TypeAmazonCustomerByEmail = "AmazonCustomerByEmail"
+
+ // TypeGroup is a Type enum value
+ TypeGroup = "Group"
+)
+
+// Type_Values returns all elements of the Type enum
+func Type_Values() []string {
+ return []string{
+ TypeCanonicalUser,
+ TypeAmazonCustomerByEmail,
+ TypeGroup,
+ }
+}
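
The UploadPart and UploadPartCopy types above are normally driven through the generated client. Below is a minimal sketch of a single-part server-side copy using this vendored SDK; the bucket and key names are placeholders, and credentials/endpoint configuration are assumed to come from the environment rather than shown here:

```go
package main

import (
	"fmt"
	"log"

	"github.com/IBM/ibm-cos-sdk-go/aws"
	"github.com/IBM/ibm-cos-sdk-go/aws/session"
	"github.com/IBM/ibm-cos-sdk-go/service/s3"
)

func main() {
	// Placeholder setup: region, credentials, and the COS endpoint are
	// assumed to be supplied via the environment or shared config.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := s3.New(sess)

	// Start a multipart upload on the destination object.
	mpu, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket: aws.String("dest-bucket"),
		Key:    aws.String("dest-key"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Copy the first 5 MiB of the source object as part 1. CopySource is
	// "bucket/key" and CopySourceRange uses the bytes=first-last form
	// described in the field documentation above.
	part, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
		Bucket:          aws.String("dest-bucket"),
		Key:             aws.String("dest-key"),
		UploadId:        mpu.UploadId,
		PartNumber:      aws.Int64(1),
		CopySource:      aws.String("source-bucket/source-key"),
		CopySourceRange: aws.String("bytes=0-5242879"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Complete the upload with the ETag returned in CopyPartResult,
	// mirroring the UploadPartCopyOutput structure defined above.
	_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:   aws.String("dest-bucket"),
		Key:      aws.String("dest-key"),
		UploadId: mpu.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{{
				ETag:       part.CopyPartResult.ETag,
				PartNumber: aws.Int64(1),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("copy complete")
}
```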
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/body_hash.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/body_hash.go
new file mode 100644
index 0000000000000..f6b387e85339f
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/body_hash.go
@@ -0,0 +1,202 @@
+package s3
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+ "hash"
+ "io"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+const (
+ contentMD5Header = "Content-Md5"
+ contentSha256Header = "X-Amz-Content-Sha256"
+ amzTeHeader = "X-Amz-Te"
+ amzTxEncodingHeader = "X-Amz-Transfer-Encoding"
+
+ appendMD5TxEncoding = "append-md5"
+)
+
+// computeBodyHashes will add Content MD5 and Content Sha256 hashes to the
+ // request. If the body is not seekable or S3DisableContentMD5Validation is
+ // set, this handler will be ignored.
+func computeBodyHashes(r *request.Request) {
+ if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
+ return
+ }
+ if r.IsPresigned() {
+ return
+ }
+ if r.Error != nil || !aws.IsReaderSeekable(r.Body) {
+ return
+ }
+
+ var md5Hash, sha256Hash hash.Hash
+ hashers := make([]io.Writer, 0, 2)
+
+ // Determine upfront which hashes can be set without overriding
+ // user-provided header data.
+ if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) == 0 {
+ md5Hash = md5.New()
+ hashers = append(hashers, md5Hash)
+ }
+
+ if v := r.HTTPRequest.Header.Get(contentSha256Header); len(v) == 0 {
+ sha256Hash = sha256.New()
+ hashers = append(hashers, sha256Hash)
+ }
+
+ // Create the destination writer based on the hashes that are not already
+ // provided by the user.
+ var dst io.Writer
+ switch len(hashers) {
+ case 0:
+ return
+ case 1:
+ dst = hashers[0]
+ default:
+ dst = io.MultiWriter(hashers...)
+ }
+
+ if _, err := aws.CopySeekableBody(dst, r.Body); err != nil {
+ r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err)
+ return
+ }
+
+ // For the hashes created, set the associated headers that the user did not
+ // already provide.
+ if md5Hash != nil {
+ sum := make([]byte, md5.Size)
+ encoded := make([]byte, md5Base64EncLen)
+
+ base64.StdEncoding.Encode(encoded, md5Hash.Sum(sum[0:0]))
+ r.HTTPRequest.Header[contentMD5Header] = []string{string(encoded)}
+ }
+
+ if sha256Hash != nil {
+ encoded := make([]byte, sha256HexEncLen)
+ sum := make([]byte, sha256.Size)
+
+ hex.Encode(encoded, sha256Hash.Sum(sum[0:0]))
+ r.HTTPRequest.Header[contentSha256Header] = []string{string(encoded)}
+ }
+}
+
+const (
+ md5Base64EncLen = (md5.Size + 2) / 3 * 4 // base64.StdEncoding.EncodedLen
+ sha256HexEncLen = sha256.Size * 2 // hex.EncodedLen
+)
+
+// Adds the x-amz-te: append-md5 header to the request. This requests that the
+// service respond with a trailing MD5 checksum.
+//
+// Will not ask for append MD5 if it is disabled, the request is presigned,
+// or the API operation does not support content MD5 validation.
+func askForTxEncodingAppendMD5(r *request.Request) {
+ if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
+ return
+ }
+ if r.IsPresigned() {
+ return
+ }
+ r.HTTPRequest.Header.Set(amzTeHeader, appendMD5TxEncoding)
+}
+
+func useMD5ValidationReader(r *request.Request) {
+ if r.Error != nil {
+ return
+ }
+
+ if v := r.HTTPResponse.Header.Get(amzTxEncodingHeader); v != appendMD5TxEncoding {
+ return
+ }
+
+ var bodyReader *io.ReadCloser
+ var contentLen int64
+ switch tv := r.Data.(type) {
+ case *GetObjectOutput:
+ bodyReader = &tv.Body
+ contentLen = aws.Int64Value(tv.ContentLength)
+ // Update ContentLength to hide the trailing MD5 checksum.
+ tv.ContentLength = aws.Int64(contentLen - md5.Size)
+ tv.ContentRange = aws.String(r.HTTPResponse.Header.Get("X-Amz-Content-Range"))
+ default:
+ r.Error = awserr.New("ChecksumValidationError",
+ fmt.Sprintf("%s: %s header received on unsupported API, %s",
+ amzTxEncodingHeader, appendMD5TxEncoding, r.Operation.Name,
+ ), nil)
+ return
+ }
+
+ if contentLen < md5.Size {
+ r.Error = awserr.New("ChecksumValidationError",
+ fmt.Sprintf("invalid Content-Length %d for %s %s",
+ contentLen, appendMD5TxEncoding, amzTxEncodingHeader,
+ ), nil)
+ return
+ }
+
+ // Wrap and swap the response body reader with the validation reader.
+ *bodyReader = newMD5ValidationReader(*bodyReader, contentLen-md5.Size)
+}
+
+type md5ValidationReader struct {
+ rawReader io.ReadCloser
+ payload io.Reader
+ hash hash.Hash
+
+ payloadLen int64
+ read int64
+}
+
+func newMD5ValidationReader(reader io.ReadCloser, payloadLen int64) *md5ValidationReader {
+ h := md5.New()
+ return &md5ValidationReader{
+ rawReader: reader,
+ payload: io.TeeReader(&io.LimitedReader{R: reader, N: payloadLen}, h),
+ hash: h,
+ payloadLen: payloadLen,
+ }
+}
+
+func (v *md5ValidationReader) Read(p []byte) (n int, err error) {
+ n, err = v.payload.Read(p)
+ if err != nil && err != io.EOF {
+ return n, err
+ }
+
+ v.read += int64(n)
+
+ if err == io.EOF {
+ if v.read != v.payloadLen {
+ return n, io.ErrUnexpectedEOF
+ }
+ expectSum := make([]byte, md5.Size)
+ actualSum := make([]byte, md5.Size)
+ if _, sumReadErr := io.ReadFull(v.rawReader, expectSum); sumReadErr != nil {
+ return n, sumReadErr
+ }
+ actualSum = v.hash.Sum(actualSum[0:0])
+ if !bytes.Equal(expectSum, actualSum) {
+ return n, awserr.New("InvalidChecksum",
+ fmt.Sprintf("expected MD5 checksum %s, got %s",
+ hex.EncodeToString(expectSum),
+ hex.EncodeToString(actualSum),
+ ),
+ nil)
+ }
+ }
+
+ return n, err
+}
+
+func (v *md5ValidationReader) Close() error {
+ return v.rawReader.Close()
+}
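
The handlers above implement a trailing-checksum scheme: when a response carries `X-Amz-Transfer-Encoding: append-md5`, the body is the payload followed by its raw 16-byte MD5 digest, and md5ValidationReader strips and verifies that trailer against Content-Length minus md5.Size. Because the reader is unexported, the sketch below re-derives the same framing check locally over an in-memory body rather than calling into the package:

```go
package main

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"io"
)

func main() {
	payload := []byte("hello, trailing checksum")
	sum := md5.Sum(payload)

	// Wire format as sent with X-Amz-Transfer-Encoding: append-md5:
	// payload || MD5(payload).
	body := append(append([]byte{}, payload...), sum[:]...)

	// Split payload and trailer the same way md5ValidationReader does,
	// using the total length minus md5.Size.
	payloadLen := int64(len(body)) - md5.Size
	r := bytes.NewReader(body)

	// Hash exactly payloadLen bytes, then read the 16-byte trailer.
	h := md5.New()
	if _, err := io.Copy(h, io.LimitReader(r, payloadLen)); err != nil {
		panic(err)
	}
	trailer := make([]byte, md5.Size)
	if _, err := io.ReadFull(r, trailer); err != nil {
		panic(err)
	}
	fmt.Println("checksum ok:", bytes.Equal(h.Sum(nil), trailer))
}
```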
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/bucket_location.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/bucket_location.go
new file mode 100644
index 0000000000000..410fca1061186
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/bucket_location.go
@@ -0,0 +1,108 @@
+package s3
+
+import (
+ "io/ioutil"
+ "regexp"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`)
+
+// NormalizeBucketLocation is a utility function which will update the
+// passed in value to always be a region ID. Generally this would be used
+// with GetBucketLocation API operation.
+//
+// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
+//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
+// for more information on the values that can be returned.
+func NormalizeBucketLocation(loc string) string {
+ switch loc {
+ case "":
+ loc = "us-east-1"
+ case "EU":
+ loc = "eu-west-1"
+ }
+
+ return loc
+}
+
+// NormalizeBucketLocationHandler is a request handler which will update the
+// GetBucketLocation's result LocationConstraint value to always be a region ID.
+//
+// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
+//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
+// for more information on the values that can be returned.
+//
+// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{
+// Bucket: aws.String(bucket),
+// })
+// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
+// err := req.Send()
+var NormalizeBucketLocationHandler = request.NamedHandler{
+ Name: "awssdk.s3.NormalizeBucketLocation",
+ Fn: func(req *request.Request) {
+ if req.Error != nil {
+ return
+ }
+
+ out := req.Data.(*GetBucketLocationOutput)
+ loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint))
+ out.LocationConstraint = aws.String(loc)
+ },
+}
+
+// WithNormalizeBucketLocation is a request option which will update the
+// GetBucketLocation's result LocationConstraint value to always be a region ID.
+//
+// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
+//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
+// for more information on the values that can be returned.
+//
+// result, err := svc.GetBucketLocationWithContext(ctx,
+// &s3.GetBucketLocationInput{
+// Bucket: aws.String(bucket),
+// },
+// s3.WithNormalizeBucketLocation,
+// )
+func WithNormalizeBucketLocation(r *request.Request) {
+ r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
+}
+
+func buildGetBucketLocation(r *request.Request) {
+ if r.DataFilled() {
+ out := r.Data.(*GetBucketLocationOutput)
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization,
+ "failed reading response body", err)
+ return
+ }
+
+ match := reBucketLocation.FindSubmatch(b)
+ if len(match) > 1 {
+ loc := string(match[1])
+ out.LocationConstraint = aws.String(loc)
+ }
+ }
+}
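+
+// Illustrative sketch of the parsing above (response shape assumed from the
+// regular expression): a GetBucketLocation body such as
+//
+//    <LocationConstraint xmlns="...">eu-west-1</LocationConstraint>
+//
+// is captured by reBucketLocation, yielding LocationConstraint = "eu-west-1".
+// An empty element produces no match, leaving LocationConstraint unset so
+// NormalizeBucketLocation can map it to "us-east-1".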
+
+// IBM COS SDK Code -- START
+// func populateLocationConstraint(r *request.Request) {
+// if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" {
+// in := r.Params.(*CreateBucketInput)
+// if in.CreateBucketConfiguration == nil {
+// r.Params = awsutil.CopyOf(r.Params)
+// in = r.Params.(*CreateBucketInput)
+// in.CreateBucketConfiguration = &CreateBucketConfiguration{
+// LocationConstraint: r.Config.Region,
+// }
+// }
+// }
+// }
+// IBM COS SDK Code -- END
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/customizations.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/customizations.go
new file mode 100644
index 0000000000000..4cc55c42b19b2
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/customizations.go
@@ -0,0 +1,87 @@
+package s3
+
+import (
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/client"
+ "github.com/IBM/ibm-cos-sdk-go/aws/endpoints"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+ "github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn"
+ "github.com/IBM/ibm-cos-sdk-go/internal/s3shared/s3err"
+)
+
+func init() {
+ initClient = defaultInitClientFn
+ initRequest = defaultInitRequestFn
+}
+
+func defaultInitClientFn(c *client.Client) {
+ if c.Config.UseDualStackEndpoint == endpoints.DualStackEndpointStateUnset {
+ if aws.BoolValue(c.Config.UseDualStack) {
+ c.Config.UseDualStackEndpoint = endpoints.DualStackEndpointStateEnabled
+ } else {
+ c.Config.UseDualStackEndpoint = endpoints.DualStackEndpointStateDisabled
+ }
+ }
+
+ // Support building custom endpoints based on config
+ c.Handlers.Build.PushFront(endpointHandler)
+
+ // Require SSL when using SSE keys
+ c.Handlers.Validate.PushBack(validateSSERequiresSSL)
+ c.Handlers.Build.PushBack(computeSSEKeyMD5)
+ c.Handlers.Build.PushBack(computeCopySourceSSEKeyMD5)
+
+ // S3 uses custom error unmarshaling logic
+ c.Handlers.UnmarshalError.Clear()
+ c.Handlers.UnmarshalError.PushBack(unmarshalError)
+ c.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler())
+}
+
+func defaultInitRequestFn(r *request.Request) {
+ // Add request handlers for specific platforms.
+ // e.g. 100-continue support for PUT requests using Go 1.6
+ platformRequestHandlers(r)
+
+ switch r.Operation.Name {
+ case opGetBucketLocation:
+ // GetBucketLocation has custom parsing logic
+ r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
+ // IBM COS SDK Code -- START
+ // IBM does not populate opCreateBucket LocationConstraint
+ // IBM COS SDK Code -- END
+ case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
+ r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarshalError)
+ r.Handlers.Unmarshal.PushBackNamed(s3err.RequestFailureWrapperHandler())
+ case opPutObject, opUploadPart:
+ r.Handlers.Build.PushBack(computeBodyHashes)
+ // Disabled until #1837 root issue is resolved.
+ // case opGetObject:
+ // r.Handlers.Build.PushBack(askForTxEncodingAppendMD5)
+ // r.Handlers.Unmarshal.PushBack(useMD5ValidationReader)
+ }
+}
+
+// bucketGetter is an accessor interface to grab the "Bucket" field from
+// an S3 type.
+type bucketGetter interface {
+ getBucket() string
+}
+
+// sseCustomerKeyGetter is an accessor interface to grab the "SSECustomerKey"
+// field from an S3 type.
+type sseCustomerKeyGetter interface {
+ getSSECustomerKey() string
+}
+
+// copySourceSSECustomerKeyGetter is an accessor interface to grab the
+// "CopySourceSSECustomerKey" field from an S3 type.
+type copySourceSSECustomerKeyGetter interface {
+ getCopySourceSSECustomerKey() string
+}
+
+// endpointARNGetter is an accessor interface to grab the
+// the field corresponding to an endpoint ARN input.
+type endpointARNGetter interface {
+ getEndpointARN() (arn.Resource, error)
+ hasEndpointARN() bool
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/doc.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/doc.go
new file mode 100644
index 0000000000000..0def02255ac8b
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/doc.go
@@ -0,0 +1,26 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package s3 provides the client and types for making API
+// requests to Amazon Simple Storage Service.
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service.
+//
+// See s3 package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/
+//
+// Using the Client
+//
+// To contact Amazon Simple Storage Service with the SDK use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the Amazon Simple Storage Service client S3 for more
+// information on creating client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New
+package s3
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/doc_custom.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/doc_custom.go
new file mode 100644
index 0000000000000..7f7aca2085948
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/doc_custom.go
@@ -0,0 +1,110 @@
+// Upload Managers
+//
+// The s3manager package's Uploader provides concurrent upload of content to S3
+// by taking advantage of S3's Multipart APIs. The Uploader supports both
+// io.Reader for streaming uploads, and will take advantage of io.ReadSeeker
+// for optimizations if the Body satisfies that type. Once the Uploader instance
+// is created you can call Upload concurrently from multiple goroutines safely.
+//
+// // The session the S3 Uploader will use
+// sess := session.Must(session.NewSession())
+//
+// // Create an uploader with the session and default options
+// uploader := s3manager.NewUploader(sess)
+//
+// f, err := os.Open(filename)
+// if err != nil {
+// return fmt.Errorf("failed to open file %q, %v", filename, err)
+// }
+//
+// // Upload the file to S3.
+// result, err := uploader.Upload(&s3manager.UploadInput{
+// Bucket: aws.String(myBucket),
+// Key: aws.String(myString),
+// Body: f,
+// })
+// if err != nil {
+// return fmt.Errorf("failed to upload file, %v", err)
+// }
+// fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location))
+//
+// See the s3manager package's Uploader type documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader
+//
+// Download Manager
+//
+// The s3manager package's Downloader provides concurrent downloading of Objects
+// from S3. The Downloader will write S3 Object content with an io.WriterAt.
+// Once the Downloader instance is created you can call Download concurrently from
+// multiple goroutines safely.
+//
+// // The session the S3 Downloader will use
+// sess := session.Must(session.NewSession())
+//
+// // Create a downloader with the session and default options
+// downloader := s3manager.NewDownloader(sess)
+//
+// // Create a file to write the S3 Object contents to.
+// f, err := os.Create(filename)
+// if err != nil {
+// return fmt.Errorf("failed to create file %q, %v", filename, err)
+// }
+//
+// // Write the contents of S3 Object to the file
+// n, err := downloader.Download(f, &s3.GetObjectInput{
+// Bucket: aws.String(myBucket),
+// Key: aws.String(myString),
+// })
+// if err != nil {
+// return fmt.Errorf("failed to download file, %v", err)
+// }
+// fmt.Printf("file downloaded, %d bytes\n", n)
+//
+// See the s3manager package's Downloader type documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader
+//
+// Automatic URI cleaning
+//
+// Interacting with objects whose keys contain adjacent slashes (e.g. bucketname/foo//bar/objectname)
+// requires setting DisableRestProtocolURICleaning to true in the aws.Config struct
+// used by the service client.
+//
+// svc := s3.New(sess, &aws.Config{
+// DisableRestProtocolURICleaning: aws.Bool(true),
+// })
+// out, err := svc.GetObject(&s3.GetObjectInput{
+// Bucket: aws.String("bucketname"),
+// Key: aws.String("//foo//bar//moo"),
+// })
+//
+// Get Bucket Region
+//
+// GetBucketRegion will attempt to get the region for a bucket using a region
+// hint to determine which AWS partition to perform the query on. Use this utility
+// to determine the region a bucket is in.
+//
+// sess := session.Must(session.NewSession())
+//
+// bucket := "my-bucket"
+// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
+// if err != nil {
+// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
+// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
+// }
+// return err
+// }
+// fmt.Printf("Bucket %s is in %s region\n", bucket, region)
+//
+// See the s3manager package's GetBucketRegion function documentation for more information
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion
+//
+// S3 Crypto Client
+//
+// The s3crypto package provides the tools to upload and download encrypted
+// content from S3. The Encryption and Decryption clients can be used concurrently
+// once the client is created.
+//
+// See the s3crypto package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/
+//
+package s3
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/endpoint.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/endpoint.go
new file mode 100644
index 0000000000000..cb8e54e05cb19
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/endpoint.go
@@ -0,0 +1,298 @@
+package s3
+
+import (
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ awsarn "github.com/IBM/ibm-cos-sdk-go/aws/arn"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/endpoints"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+ "github.com/IBM/ibm-cos-sdk-go/internal/s3shared"
+ "github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn"
+)
+
+const (
+ s3Namespace = "s3"
+ s3AccessPointNamespace = "s3-accesspoint"
+ s3ObjectsLambdaNamespace = "s3-object-lambda"
+ s3OutpostsNamespace = "s3-outposts"
+)
+
+// Used by shapes with members decorated as endpoint ARN.
+func parseEndpointARN(v string) (arn.Resource, error) {
+ return arn.ParseResource(v, accessPointResourceParser)
+}
+
+func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) {
+ resParts := arn.SplitResource(a.Resource)
+ switch resParts[0] {
+ case "accesspoint":
+ switch a.Service {
+ case s3Namespace:
+ return arn.ParseAccessPointResource(a, resParts[1:])
+ case s3ObjectsLambdaNamespace:
+ return parseS3ObjectLambdaAccessPointResource(a, resParts)
+ default:
+ return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s or %s", s3Namespace, s3ObjectsLambdaNamespace)}
+ }
+ case "outpost":
+ if a.Service != "s3-outposts" {
+ return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"}
+ }
+ return parseOutpostAccessPointResource(a, resParts[1:])
+ default:
+ return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"}
+ }
+}
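+
+// Illustrative ARNs (hypothetical account and names) and how the switch above
+// routes them:
+//
+//    arn:aws:s3:us-west-2:012345678901:accesspoint/myendpoint
+//    routes to arn.AccessPointARN;
+//
+//    arn:aws:s3-object-lambda:us-west-2:012345678901:accesspoint/myendpoint
+//    routes to arn.S3ObjectLambdaAccessPointARN;
+//
+//    arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint
+//    routes to arn.OutpostAccessPointARN.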
+
+// parseOutpostAccessPointResource attempts to parse the ARN's resource as an
+// outpost access-point resource.
+//
+// Supported Outpost AccessPoint ARN format:
+// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName}
+// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint
+//
+func parseOutpostAccessPointResource(a awsarn.ARN, resParts []string) (arn.OutpostAccessPointARN, error) {
+ // outpost accesspoint arn is only valid if service is s3-outposts
+ if a.Service != "s3-outposts" {
+ return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"}
+ }
+
+ if len(resParts) == 0 {
+ return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"}
+ }
+
+ if len(resParts) < 3 {
+ return arn.OutpostAccessPointARN{}, arn.InvalidARNError{
+ ARN: a, Reason: "access-point resource not set in Outpost ARN",
+ }
+ }
+
+ resID := strings.TrimSpace(resParts[0])
+ if len(resID) == 0 {
+ return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"}
+ }
+
+ var outpostAccessPointARN = arn.OutpostAccessPointARN{}
+ switch resParts[1] {
+ case "accesspoint":
+ accessPointARN, err := arn.ParseAccessPointResource(a, resParts[2:])
+ if err != nil {
+ return arn.OutpostAccessPointARN{}, err
+ }
+ // set access-point arn
+ outpostAccessPointARN.AccessPointARN = accessPointARN
+ default:
+ return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "access-point resource not set in Outpost ARN"}
+ }
+
+ // set outpost id
+ outpostAccessPointARN.OutpostID = resID
+ return outpostAccessPointARN, nil
+}
+
+func parseS3ObjectLambdaAccessPointResource(a awsarn.ARN, resParts []string) (arn.S3ObjectLambdaAccessPointARN, error) {
+ if a.Service != s3ObjectsLambdaNamespace {
+ return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s", s3ObjectsLambdaNamespace)}
+ }
+
+ accessPointARN, err := arn.ParseAccessPointResource(a, resParts[1:])
+ if err != nil {
+ return arn.S3ObjectLambdaAccessPointARN{}, err
+ }
+
+ if len(accessPointARN.Region) == 0 {
+ return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("%s region not set", s3ObjectsLambdaNamespace)}
+ }
+
+ return arn.S3ObjectLambdaAccessPointARN{
+ AccessPointARN: accessPointARN,
+ }, nil
+}
+
+func endpointHandler(req *request.Request) {
+ endpoint, ok := req.Params.(endpointARNGetter)
+ if !ok || !endpoint.hasEndpointARN() {
+ updateBucketEndpointFromParams(req)
+ return
+ }
+
+ resource, err := endpoint.getEndpointARN()
+ if err != nil {
+ req.Error = s3shared.NewInvalidARNError(nil, err)
+ return
+ }
+
+ resReq := s3shared.ResourceRequest{
+ Resource: resource,
+ Request: req,
+ }
+
+ if len(resReq.Request.ClientInfo.PartitionID) != 0 && resReq.IsCrossPartition() {
+ req.Error = s3shared.NewClientPartitionMismatchError(resource,
+ req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
+ return
+ }
+
+ if !resReq.AllowCrossRegion() && resReq.IsCrossRegion() {
+ req.Error = s3shared.NewClientRegionMismatchError(resource,
+ req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
+ return
+ }
+
+ switch tv := resource.(type) {
+ case arn.AccessPointARN:
+ err = updateRequestAccessPointEndpoint(req, tv)
+ if err != nil {
+ req.Error = err
+ }
+ case arn.S3ObjectLambdaAccessPointARN:
+ err = updateRequestS3ObjectLambdaAccessPointEndpoint(req, tv)
+ if err != nil {
+ req.Error = err
+ }
+ case arn.OutpostAccessPointARN:
+ // outposts does not support FIPS regions
+ if resReq.ResourceConfiguredForFIPS() {
+ req.Error = s3shared.NewInvalidARNWithFIPSError(resource, nil)
+ return
+ }
+
+ err = updateRequestOutpostAccessPointEndpoint(req, tv)
+ if err != nil {
+ req.Error = err
+ }
+ default:
+ req.Error = s3shared.NewInvalidARNError(resource, nil)
+ }
+}
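+
+// Illustrative flow of the dispatch above: a request whose Bucket parameter is
+// a plain bucket name takes the updateBucketEndpointFromParams path, while a
+// Bucket of access point ARN form is parsed by getEndpointARN and routed to
+// the matching updateRequest*Endpoint helper after the partition and region
+// checks pass.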
+
+func updateBucketEndpointFromParams(r *request.Request) {
+ bucket, ok := bucketNameFromReqParams(r.Params)
+ if !ok {
+ // Ignore operation requests if the bucket name was not provided
+ // if this is an input validation error the validation handler
+ // will report it.
+ return
+ }
+ updateEndpointForS3Config(r, bucket)
+}
+
+func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.AccessPointARN) error {
+ // Accelerate not supported
+ if aws.BoolValue(req.Config.S3UseAccelerate) {
+ return s3shared.NewClientConfiguredForAccelerateError(accessPoint,
+ req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
+ }
+
+ // Ignore the disable host prefix for access points
+ req.Config.DisableEndpointHostPrefix = aws.Bool(false)
+
+ if err := accessPointEndpointBuilder(accessPoint).build(req); err != nil {
+ return err
+ }
+
+ removeBucketFromPath(req.HTTPRequest.URL)
+
+ return nil
+}
+
+func updateRequestS3ObjectLambdaAccessPointEndpoint(req *request.Request, accessPoint arn.S3ObjectLambdaAccessPointARN) error {
+ // DualStack not supported
+ if isUseDualStackEndpoint(req) {
+ return s3shared.NewClientConfiguredForDualStackError(accessPoint,
+ req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
+ }
+
+ // Accelerate not supported
+ if aws.BoolValue(req.Config.S3UseAccelerate) {
+ return s3shared.NewClientConfiguredForAccelerateError(accessPoint,
+ req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
+ }
+
+ // Ignore the disable host prefix for access points
+ req.Config.DisableEndpointHostPrefix = aws.Bool(false)
+
+ if err := s3ObjectLambdaAccessPointEndpointBuilder(accessPoint).build(req); err != nil {
+ return err
+ }
+
+ removeBucketFromPath(req.HTTPRequest.URL)
+
+ return nil
+}
+
+func updateRequestOutpostAccessPointEndpoint(req *request.Request, accessPoint arn.OutpostAccessPointARN) error {
+ // Accelerate not supported
+ if aws.BoolValue(req.Config.S3UseAccelerate) {
+ return s3shared.NewClientConfiguredForAccelerateError(accessPoint,
+ req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
+ }
+
+ // Dualstack not supported
+ if isUseDualStackEndpoint(req) {
+ return s3shared.NewClientConfiguredForDualStackError(accessPoint,
+ req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
+ }
+
+ // Ignore the disable host prefix for access points
+ req.Config.DisableEndpointHostPrefix = aws.Bool(false)
+
+ if err := outpostAccessPointEndpointBuilder(accessPoint).build(req); err != nil {
+ return err
+ }
+
+ removeBucketFromPath(req.HTTPRequest.URL)
+ return nil
+}
+
+func removeBucketFromPath(u *url.URL) {
+ u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
+ if u.Path == "" {
+ u.Path = "/"
+ }
+}
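+
+// Illustrative behavior of removeBucketFromPath:
+//
+//    /{Bucket}/my/key -> /my/key
+//    /{Bucket}        -> /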
+
+func buildWriteGetObjectResponseEndpoint(req *request.Request) {
+ // DualStack not supported
+ if isUseDualStackEndpoint(req) {
+ req.Error = awserr.New("ConfigurationError", "client configured for dualstack but not supported for operation", nil)
+ return
+ }
+
+ // Accelerate not supported
+ if aws.BoolValue(req.Config.S3UseAccelerate) {
+ req.Error = awserr.New("ConfigurationError", "client configured for accelerate but not supported for operation", nil)
+ return
+ }
+
+ signingName := s3ObjectsLambdaNamespace
+ signingRegion := req.ClientInfo.SigningRegion
+
+ if !hasCustomEndpoint(req) {
+ endpoint, err := resolveRegionalEndpoint(req, aws.StringValue(req.Config.Region), req.ClientInfo.ResolvedRegion, EndpointsID)
+ if err != nil {
+ req.Error = awserr.New(request.ErrCodeSerialization, "failed to resolve endpoint", err)
+ return
+ }
+ signingRegion = endpoint.SigningRegion
+
+ if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
+ req.Error = err
+ return
+ }
+ updateS3HostPrefixForS3ObjectLambda(req)
+ }
+
+ redirectSigner(req, signingName, signingRegion)
+}
+
+func isUseDualStackEndpoint(req *request.Request) bool {
+ if req.Config.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset {
+ return req.Config.UseDualStackEndpoint == endpoints.DualStackEndpointStateEnabled
+ }
+ return aws.BoolValue(req.Config.UseDualStack)
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/endpoint_builder.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/endpoint_builder.go
new file mode 100644
index 0000000000000..f6ccb122637d0
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/endpoint_builder.go
@@ -0,0 +1,241 @@
+package s3
+
+import (
+ "net/url"
+ "strings"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/endpoints"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+ "github.com/IBM/ibm-cos-sdk-go/internal/s3shared"
+ "github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn"
+ "github.com/IBM/ibm-cos-sdk-go/private/protocol"
+)
+
+const (
+ accessPointPrefixLabel = "accesspoint"
+ accountIDPrefixLabel = "accountID"
+ accessPointPrefixTemplate = "{" + accessPointPrefixLabel + "}-{" + accountIDPrefixLabel + "}."
+
+ outpostPrefixLabel = "outpost"
+ outpostAccessPointPrefixTemplate = accessPointPrefixTemplate + "{" + outpostPrefixLabel + "}."
+)
+
+// hasCustomEndpoint returns true if endpoint is a custom endpoint
+func hasCustomEndpoint(r *request.Request) bool {
+ return len(aws.StringValue(r.Config.Endpoint)) > 0
+}
+
+// accessPointEndpointBuilder represents the endpoint builder for access point arn
+type accessPointEndpointBuilder arn.AccessPointARN
+
+// build builds the endpoint for corresponding access point arn
+//
+// For building an endpoint from an access point ARN, the format used is:
+// - Access point endpoint format : {accesspointName}-{accountId}.s3-accesspoint.{region}.{dnsSuffix}
+// - example : myaccesspoint-012345678901.s3-accesspoint.us-west-2.amazonaws.com
+//
+// Access Point Endpoint requests are signed using "s3" as signing name.
+//
+func (a accessPointEndpointBuilder) build(req *request.Request) error {
+ resolveService := arn.AccessPointARN(a).Service
+ resolveRegion := arn.AccessPointARN(a).Region
+
+ endpoint, err := resolveRegionalEndpoint(req, resolveRegion, "", resolveService)
+ if err != nil {
+ return s3shared.NewFailedToResolveEndpointError(arn.AccessPointARN(a),
+ req.ClientInfo.PartitionID, resolveRegion, err)
+ }
+
+ endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL))
+
+ if !hasCustomEndpoint(req) {
+ if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
+ return err
+ }
+
+ // dual stack provided by endpoint resolver
+ updateS3HostForS3AccessPoint(req)
+ }
+
+ protocol.HostPrefixBuilder{
+ Prefix: accessPointPrefixTemplate,
+ LabelsFn: a.hostPrefixLabelValues,
+ }.Build(req)
+
+ // signer redirection
+ redirectSigner(req, endpoint.SigningName, endpoint.SigningRegion)
+
+ err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host)
+ if err != nil {
+ return s3shared.NewInvalidARNError(arn.AccessPointARN(a), err)
+ }
+
+ return nil
+}
+
+func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string {
+ return map[string]string{
+ accessPointPrefixLabel: arn.AccessPointARN(a).AccessPointName,
+ accountIDPrefixLabel: arn.AccessPointARN(a).AccountID,
+ }
+}
+
+// s3ObjectLambdaAccessPointEndpointBuilder represents the endpoint builder for an s3 object lambda access point arn
+type s3ObjectLambdaAccessPointEndpointBuilder arn.S3ObjectLambdaAccessPointARN
+
+// build builds the endpoint for corresponding access point arn
+//
+// For building an endpoint from an access point ARN, the format used is:
+// - Access point endpoint format : {accesspointName}-{accountId}.s3-object-lambda.{region}.{dnsSuffix}
+// - example : myaccesspoint-012345678901.s3-object-lambda.us-west-2.amazonaws.com
+//
+// Access Point Endpoint requests are signed using "s3-object-lambda" as signing name.
+//
+func (a s3ObjectLambdaAccessPointEndpointBuilder) build(req *request.Request) error {
+ resolveRegion := arn.S3ObjectLambdaAccessPointARN(a).Region
+
+ endpoint, err := resolveRegionalEndpoint(req, resolveRegion, "", EndpointsID)
+ if err != nil {
+ return s3shared.NewFailedToResolveEndpointError(arn.S3ObjectLambdaAccessPointARN(a),
+ req.ClientInfo.PartitionID, resolveRegion, err)
+ }
+
+ endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL))
+
+ endpoint.SigningName = s3ObjectsLambdaNamespace
+
+ if !hasCustomEndpoint(req) {
+ if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
+ return err
+ }
+
+ updateS3HostPrefixForS3ObjectLambda(req)
+ }
+
+ protocol.HostPrefixBuilder{
+ Prefix: accessPointPrefixTemplate,
+ LabelsFn: a.hostPrefixLabelValues,
+ }.Build(req)
+
+ // signer redirection
+ redirectSigner(req, endpoint.SigningName, endpoint.SigningRegion)
+
+ err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host)
+ if err != nil {
+ return s3shared.NewInvalidARNError(arn.S3ObjectLambdaAccessPointARN(a), err)
+ }
+
+ return nil
+}
+
+func (a s3ObjectLambdaAccessPointEndpointBuilder) hostPrefixLabelValues() map[string]string {
+ return map[string]string{
+ accessPointPrefixLabel: arn.S3ObjectLambdaAccessPointARN(a).AccessPointName,
+ accountIDPrefixLabel: arn.S3ObjectLambdaAccessPointARN(a).AccountID,
+ }
+}
+
+// outpostAccessPointEndpointBuilder represents the Endpoint builder for outpost access point arn.
+type outpostAccessPointEndpointBuilder arn.OutpostAccessPointARN
+
+// build builds an endpoint corresponding to the outpost access point arn.
+//
+// For building an endpoint from an outpost access point ARN, the format used is:
+// - Outpost access point endpoint format : {accesspointName}-{accountId}.{outpostId}.s3-outposts.{region}.{dnsSuffix}
+// - example : myaccesspoint-012345678901.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com
+//
+// Outpost AccessPoint Endpoint requests are signed using "s3-outposts" as the signing name.
+//
+func (o outpostAccessPointEndpointBuilder) build(req *request.Request) error {
+ resolveRegion := o.Region
+ resolveService := o.Service
+
+ endpointsID := resolveService
+ if resolveService == s3OutpostsNamespace {
+ endpointsID = "s3"
+ }
+
+ endpoint, err := resolveRegionalEndpoint(req, resolveRegion, "", endpointsID)
+ if err != nil {
+ return s3shared.NewFailedToResolveEndpointError(o,
+ req.ClientInfo.PartitionID, resolveRegion, err)
+ }
+
+ endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL))
+
+ if !hasCustomEndpoint(req) {
+ if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
+ return err
+ }
+ updateHostPrefix(req, endpointsID, resolveService)
+ }
+
+ protocol.HostPrefixBuilder{
+ Prefix: outpostAccessPointPrefixTemplate,
+ LabelsFn: o.hostPrefixLabelValues,
+ }.Build(req)
+
+ // set the signing region, name to resolved names from ARN
+ redirectSigner(req, resolveService, resolveRegion)
+
+ err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host)
+ if err != nil {
+ return s3shared.NewInvalidARNError(o, err)
+ }
+
+ return nil
+}
+
+func (o outpostAccessPointEndpointBuilder) hostPrefixLabelValues() map[string]string {
+ return map[string]string{
+ accessPointPrefixLabel: o.AccessPointName,
+ accountIDPrefixLabel: o.AccountID,
+ outpostPrefixLabel: o.OutpostID,
+ }
+}
+
+func resolveRegionalEndpoint(r *request.Request, region, resolvedRegion, endpointsID string) (endpoints.ResolvedEndpoint, error) {
+ return r.Config.EndpointResolver.EndpointFor(endpointsID, region, func(opts *endpoints.Options) {
+ opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL)
+ opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack)
+ opts.UseDualStackEndpoint = r.Config.UseDualStackEndpoint
+ opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint
+ opts.ResolvedRegion = resolvedRegion
+ opts.Logger = r.Config.Logger
+ opts.LogDeprecated = r.Config.LogLevel.Matches(aws.LogDebugWithDeprecated)
+ })
+}
+
+func updateRequestEndpoint(r *request.Request, endpoint string) (err error) {
+ r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath)
+ if err != nil {
+ return awserr.New(request.ErrCodeSerialization,
+ "failed to parse endpoint URL", err)
+ }
+
+ return nil
+}
+
+// redirectSigner sets signing name, signing region for a request
+func redirectSigner(req *request.Request, signingName string, signingRegion string) {
+ req.ClientInfo.SigningName = signingName
+ req.ClientInfo.SigningRegion = signingRegion
+}
+
+func updateS3HostForS3AccessPoint(req *request.Request) {
+ updateHostPrefix(req, "s3", s3AccessPointNamespace)
+}
+
+func updateS3HostPrefixForS3ObjectLambda(req *request.Request) {
+ updateHostPrefix(req, "s3", s3ObjectsLambdaNamespace)
+}
+
+func updateHostPrefix(req *request.Request, oldEndpointPrefix, newEndpointPrefix string) {
+ host := req.HTTPRequest.URL.Host
+ if strings.HasPrefix(host, oldEndpointPrefix) {
+ // replace service hostlabel oldEndpointPrefix to newEndpointPrefix
+ req.HTTPRequest.URL.Host = newEndpointPrefix + host[len(oldEndpointPrefix):]
+ }
+}
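+
+// Illustrative example (hypothetical host): with oldEndpointPrefix "s3" and
+// newEndpointPrefix "s3-accesspoint",
+//
+//    s3.us-west-2.amazonaws.com -> s3-accesspoint.us-west-2.amazonaws.com
+//
+// A host that does not start with the old prefix is left unchanged.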
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/errors.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/errors.go
new file mode 100644
index 0000000000000..6d3e726cf516b
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/errors.go
@@ -0,0 +1,60 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+const (
+
+ // ErrCodeBucketAlreadyExists for service response error code
+ // "BucketAlreadyExists".
+ //
+ // The requested bucket name is not available. The bucket namespace is shared
+ // by all users of the system. Select a different name and try again.
+ ErrCodeBucketAlreadyExists = "BucketAlreadyExists"
+
+ // ErrCodeBucketAlreadyOwnedByYou for service response error code
+ // "BucketAlreadyOwnedByYou".
+ //
+ // The bucket you tried to create already exists, and you own it. Amazon S3
+ // returns this error in all AWS Regions except in the North Virginia Region.
+ // For legacy compatibility, if you re-create an existing bucket that you already
+ // own in the North Virginia Region, Amazon S3 returns 200 OK and resets the
+ // bucket access control lists (ACLs).
+ ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"
+
+ // ErrCodeInvalidObjectState for service response error code
+ // "InvalidObjectState".
+ //
+ // Object is archived and inaccessible until restored.
+ ErrCodeInvalidObjectState = "InvalidObjectState"
+
+ // ErrCodeNoSuchBucket for service response error code
+ // "NoSuchBucket".
+ //
+ // The specified bucket does not exist.
+ ErrCodeNoSuchBucket = "NoSuchBucket"
+
+ // ErrCodeNoSuchKey for service response error code
+ // "NoSuchKey".
+ //
+ // The specified key does not exist.
+ ErrCodeNoSuchKey = "NoSuchKey"
+
+ // ErrCodeNoSuchUpload for service response error code
+ // "NoSuchUpload".
+ //
+ // The specified multipart upload does not exist.
+ ErrCodeNoSuchUpload = "NoSuchUpload"
+
+ // ErrCodeObjectAlreadyInActiveTierError for service response error code
+ // "ObjectAlreadyInActiveTierError".
+ //
+ // This action is not allowed against this storage tier.
+ ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError"
+
+ // ErrCodeObjectNotInActiveTierError for service response error code
+ // "ObjectNotInActiveTierError".
+ //
+ // The source object of the COPY action is not in the active tier and is only
+ // stored in Amazon S3 Glacier.
+ ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError"
+)
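+
+// Illustrative sketch (caller code assumed, following the awserr pattern used
+// elsewhere in this package): the codes above are matched against the
+// awserr.Error returned by API calls.
+//
+//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey {
+//        // the object does not exist; treat as a miss rather than a failure
+//    }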
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/host_style_bucket.go
new file mode 100644
index 0000000000000..68094cd741d95
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/host_style_bucket.go
@@ -0,0 +1,136 @@
+package s3
+
+import (
+ "fmt"
+ "net/url"
+ "regexp"
+ "strings"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// an operationBlacklist is a list of operation names that a request
+// handler should not be executed with.
+type operationBlacklist []string
+
+// Continue will return true if the Request's operation name is not
+// in the blacklist. False otherwise.
+func (b operationBlacklist) Continue(r *request.Request) bool {
+ for i := 0; i < len(b); i++ {
+ if b[i] == r.Operation.Name {
+ return false
+ }
+ }
+ return true
+}
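+
+// For example, accelerateOpBlacklist below causes Continue to report false for
+// ListBuckets, CreateBucket, and DeleteBucket, so those operations skip the
+// accelerate endpoint rewrite in updateEndpointForS3Config.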
+
+var accelerateOpBlacklist = operationBlacklist{
+ opListBuckets, opCreateBucket, opDeleteBucket,
+}
+
+// Automatically add the bucket name to the endpoint domain
+// if possible. This host-style addressing is valid for all bucket names that
+// are DNS compatible and do not contain ".".
+func updateEndpointForS3Config(r *request.Request, bucketName string) {
+ forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle)
+ accelerate := aws.BoolValue(r.Config.S3UseAccelerate)
+
+ if accelerate && accelerateOpBlacklist.Continue(r) {
+ if forceHostStyle {
+ if r.Config.Logger != nil {
+ r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.")
+ }
+ }
+ updateEndpointForAccelerate(r, bucketName)
+ } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation {
+ updateEndpointForHostStyle(r, bucketName)
+ }
+}
+
+func updateEndpointForHostStyle(r *request.Request, bucketName string) {
+ if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) {
+ // bucket name must be valid to put into the host
+ return
+ }
+
+ moveBucketToHost(r.HTTPRequest.URL, bucketName)
+}
+
+var (
+ accelElem = []byte("s3-accelerate.dualstack.")
+)
+
+func updateEndpointForAccelerate(r *request.Request, bucketName string) {
+ if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) {
+ r.Error = awserr.New("InvalidParameterException",
+ fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucketName),
+ nil)
+ return
+ }
+
+ parts := strings.Split(r.HTTPRequest.URL.Host, ".")
+ if len(parts) < 3 {
+ r.Error = awserr.New("InvalidParameterExecption",
+ fmt.Sprintf("unable to update endpoint host for S3 accelerate, hostname invalid, %s",
+ r.HTTPRequest.URL.Host), nil)
+ return
+ }
+
+ if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") {
+ parts[0] = "s3-accelerate"
+ }
+ for i := 1; i+1 < len(parts); i++ {
+ if parts[i] == aws.StringValue(r.Config.Region) {
+ parts = append(parts[:i], parts[i+1:]...)
+ break
+ }
+ }
+
+ r.HTTPRequest.URL.Host = strings.Join(parts, ".")
+
+ moveBucketToHost(r.HTTPRequest.URL, bucketName)
+}
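+
+// Illustrative example (hypothetical bucket and host): for bucket "mybucket"
+// and host "s3.us-west-2.amazonaws.com", the code above rewrites the leading
+// label to "s3-accelerate", drops the region label, and then moves the bucket
+// into the host:
+//
+//    s3.us-west-2.amazonaws.com -> mybucket.s3-accelerate.amazonaws.com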
+
+// Attempts to retrieve the bucket name from the request input parameters.
+// If no bucket is found, or the field is empty, false will be returned.
+func bucketNameFromReqParams(params interface{}) (string, bool) {
+ if iface, ok := params.(bucketGetter); ok {
+ b := iface.getBucket()
+ return b, len(b) > 0
+ }
+
+ return "", false
+}
+
+// hostCompatibleBucketName returns true if the request should
+// put the bucket in the host. This is false if S3ForcePathStyle is
+// explicitly set or if the bucket is not DNS compatible.
+func hostCompatibleBucketName(u *url.URL, bucket string) bool {
+ // Bucket might be DNS compatible but dots in the hostname will fail
+ // certificate validation, so do not use host-style.
+ if u.Scheme == "https" && strings.Contains(bucket, ".") {
+ return false
+ }
+
+ // if the bucket is DNS compatible
+ return dnsCompatibleBucketName(bucket)
+}
+
+var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
+var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
+
+// dnsCompatibleBucketName returns true if the bucket name is DNS compatible.
+// Buckets created outside of the classic region MUST be DNS compatible.
+func dnsCompatibleBucketName(bucket string) bool {
+ return reDomain.MatchString(bucket) &&
+ !reIPAddress.MatchString(bucket) &&
+ !strings.Contains(bucket, "..")
+}
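+
+// Illustrative results for the rules above (hypothetical names):
+//
+//    dnsCompatibleBucketName("my-bucket")   // true
+//    dnsCompatibleBucketName("My_Bucket")   // false: uppercase and underscore
+//    dnsCompatibleBucketName("my..bucket")  // false: adjacent dots
+//    dnsCompatibleBucketName("192.168.0.1") // false: formatted like an IP address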
+
+// moveBucketToHost moves the bucket name from the URI path to URL host.
+func moveBucketToHost(u *url.URL, bucket string) {
+ u.Host = bucket + "." + u.Host
+ removeBucketFromPath(u)
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/platform_handlers.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/platform_handlers.go
new file mode 100644
index 0000000000000..5942304a6e023
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/platform_handlers.go
@@ -0,0 +1,9 @@
+//go:build !go1.6
+// +build !go1.6
+
+package s3
+
+import "github.com/IBM/ibm-cos-sdk-go/aws/request"
+
+func platformRequestHandlers(r *request.Request) {
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/platform_handlers_go1.6.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/platform_handlers_go1.6.go
new file mode 100644
index 0000000000000..a2c81ef0cc83e
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/platform_handlers_go1.6.go
@@ -0,0 +1,29 @@
+//go:build go1.6
+// +build go1.6
+
+package s3
+
+import (
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+func platformRequestHandlers(r *request.Request) {
+ if r.Operation.HTTPMethod == "PUT" {
+ // 100-Continue should only be used on put requests.
+ r.Handlers.Sign.PushBack(add100Continue)
+ }
+}
+
+func add100Continue(r *request.Request) {
+ if aws.BoolValue(r.Config.S3Disable100Continue) {
+ return
+ }
+ if r.HTTPRequest.ContentLength < 1024*1024*2 {
+ // Ignore requests smaller than 2MB. This helps prevent delaying
+ // requests unnecessarily.
+ return
+ }
+
+ r.HTTPRequest.Header.Set("Expect", "100-Continue")
+}
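+
+// Illustrative opt-out (a sketch; sess stands for a configured session as in
+// the package examples): setting the flag checked above disables the header
+// for every PUT, regardless of body size.
+//
+//    svc := s3.New(sess, &aws.Config{
+//        S3Disable100Continue: aws.Bool(true),
+//    })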
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/s3iface/interface.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/s3iface/interface.go
new file mode 100644
index 0000000000000..c83fc9600796c
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/s3iface/interface.go
@@ -0,0 +1,316 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package s3iface provides an interface to enable mocking the Amazon Simple Storage Service service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package s3iface
+
+import (
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+ "github.com/IBM/ibm-cos-sdk-go/service/s3"
+)
+
+// S3API provides an interface to enable mocking the
+// s3.S3 service client's API operations,
+// paginators, and waiters. This makes it easier to unit test code that
+// calls out to the SDK's service client.
+//
+// The best way to use this interface is to stub out the SDK's service client
+// calls when unit testing your code, without needing to inject custom request
+// handlers into the SDK's request pipeline.
+//
+// // myFunc uses an SDK service client to make a request to
+// // Amazon Simple Storage Service.
+// func myFunc(svc s3iface.S3API) bool {
+// // Make svc.AbortMultipartUpload request
+// }
+//
+// IBM COS SDK Code -- START
+// func main() {
+// sess := session.Must(session.NewSession())
+// svc := s3.New(sess)
+//
+// myFunc(svc)
+// }
+// IBM COS SDK Code -- END
+//
+// In your _test.go file:
+//
+// // Define a mock struct to be used in your unit tests of myFunc.
+// type mockS3Client struct {
+// s3iface.S3API
+// }
+// func (m *mockS3Client) AbortMultipartUpload(input *s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) {
+// // mock response/functionality
+// }
+//
+// func TestMyFunc(t *testing.T) {
+// // Setup Test
+// mockSvc := &mockS3Client{}
+//
+// myFunc(mockSvc)
+//
+// // Verify myFunc's functionality
+// }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or to use
+// tooling to generate mocks to satisfy the interfaces.
+type S3API interface {
+ AbortMultipartUpload(*s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error)
+ AbortMultipartUploadWithContext(aws.Context, *s3.AbortMultipartUploadInput, ...request.Option) (*s3.AbortMultipartUploadOutput, error)
+ AbortMultipartUploadRequest(*s3.AbortMultipartUploadInput) (*request.Request, *s3.AbortMultipartUploadOutput)
+
+ AddLegalHold(*s3.AddLegalHoldInput) (*s3.AddLegalHoldOutput, error)
+ AddLegalHoldWithContext(aws.Context, *s3.AddLegalHoldInput, ...request.Option) (*s3.AddLegalHoldOutput, error)
+ AddLegalHoldRequest(*s3.AddLegalHoldInput) (*request.Request, *s3.AddLegalHoldOutput)
+
+ CompleteMultipartUpload(*s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
+ CompleteMultipartUploadWithContext(aws.Context, *s3.CompleteMultipartUploadInput, ...request.Option) (*s3.CompleteMultipartUploadOutput, error)
+ CompleteMultipartUploadRequest(*s3.CompleteMultipartUploadInput) (*request.Request, *s3.CompleteMultipartUploadOutput)
+
+ CopyObject(*s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
+ CopyObjectWithContext(aws.Context, *s3.CopyObjectInput, ...request.Option) (*s3.CopyObjectOutput, error)
+ CopyObjectRequest(*s3.CopyObjectInput) (*request.Request, *s3.CopyObjectOutput)
+
+ CreateBucket(*s3.CreateBucketInput) (*s3.CreateBucketOutput, error)
+ CreateBucketWithContext(aws.Context, *s3.CreateBucketInput, ...request.Option) (*s3.CreateBucketOutput, error)
+ CreateBucketRequest(*s3.CreateBucketInput) (*request.Request, *s3.CreateBucketOutput)
+
+ CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)
+ CreateMultipartUploadWithContext(aws.Context, *s3.CreateMultipartUploadInput, ...request.Option) (*s3.CreateMultipartUploadOutput, error)
+ CreateMultipartUploadRequest(*s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput)
+
+ DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error)
+ DeleteBucketWithContext(aws.Context, *s3.DeleteBucketInput, ...request.Option) (*s3.DeleteBucketOutput, error)
+ DeleteBucketRequest(*s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput)
+
+ DeleteBucketCors(*s3.DeleteBucketCorsInput) (*s3.DeleteBucketCorsOutput, error)
+ DeleteBucketCorsWithContext(aws.Context, *s3.DeleteBucketCorsInput, ...request.Option) (*s3.DeleteBucketCorsOutput, error)
+ DeleteBucketCorsRequest(*s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput)
+
+ DeleteBucketLifecycle(*s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error)
+ DeleteBucketLifecycleWithContext(aws.Context, *s3.DeleteBucketLifecycleInput, ...request.Option) (*s3.DeleteBucketLifecycleOutput, error)
+ DeleteBucketLifecycleRequest(*s3.DeleteBucketLifecycleInput) (*request.Request, *s3.DeleteBucketLifecycleOutput)
+
+ DeleteBucketReplication(*s3.DeleteBucketReplicationInput) (*s3.DeleteBucketReplicationOutput, error)
+ DeleteBucketReplicationWithContext(aws.Context, *s3.DeleteBucketReplicationInput, ...request.Option) (*s3.DeleteBucketReplicationOutput, error)
+ DeleteBucketReplicationRequest(*s3.DeleteBucketReplicationInput) (*request.Request, *s3.DeleteBucketReplicationOutput)
+
+ DeleteBucketWebsite(*s3.DeleteBucketWebsiteInput) (*s3.DeleteBucketWebsiteOutput, error)
+ DeleteBucketWebsiteWithContext(aws.Context, *s3.DeleteBucketWebsiteInput, ...request.Option) (*s3.DeleteBucketWebsiteOutput, error)
+ DeleteBucketWebsiteRequest(*s3.DeleteBucketWebsiteInput) (*request.Request, *s3.DeleteBucketWebsiteOutput)
+
+ DeleteLegalHold(*s3.DeleteLegalHoldInput) (*s3.DeleteLegalHoldOutput, error)
+ DeleteLegalHoldWithContext(aws.Context, *s3.DeleteLegalHoldInput, ...request.Option) (*s3.DeleteLegalHoldOutput, error)
+ DeleteLegalHoldRequest(*s3.DeleteLegalHoldInput) (*request.Request, *s3.DeleteLegalHoldOutput)
+
+ DeleteObject(*s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
+ DeleteObjectWithContext(aws.Context, *s3.DeleteObjectInput, ...request.Option) (*s3.DeleteObjectOutput, error)
+ DeleteObjectRequest(*s3.DeleteObjectInput) (*request.Request, *s3.DeleteObjectOutput)
+
+ DeleteObjectTagging(*s3.DeleteObjectTaggingInput) (*s3.DeleteObjectTaggingOutput, error)
+ DeleteObjectTaggingWithContext(aws.Context, *s3.DeleteObjectTaggingInput, ...request.Option) (*s3.DeleteObjectTaggingOutput, error)
+ DeleteObjectTaggingRequest(*s3.DeleteObjectTaggingInput) (*request.Request, *s3.DeleteObjectTaggingOutput)
+
+ DeleteObjects(*s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error)
+ DeleteObjectsWithContext(aws.Context, *s3.DeleteObjectsInput, ...request.Option) (*s3.DeleteObjectsOutput, error)
+ DeleteObjectsRequest(*s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput)
+
+ DeletePublicAccessBlock(*s3.DeletePublicAccessBlockInput) (*s3.DeletePublicAccessBlockOutput, error)
+ DeletePublicAccessBlockWithContext(aws.Context, *s3.DeletePublicAccessBlockInput, ...request.Option) (*s3.DeletePublicAccessBlockOutput, error)
+ DeletePublicAccessBlockRequest(*s3.DeletePublicAccessBlockInput) (*request.Request, *s3.DeletePublicAccessBlockOutput)
+
+ ExtendObjectRetention(*s3.ExtendObjectRetentionInput) (*s3.ExtendObjectRetentionOutput, error)
+ ExtendObjectRetentionWithContext(aws.Context, *s3.ExtendObjectRetentionInput, ...request.Option) (*s3.ExtendObjectRetentionOutput, error)
+ ExtendObjectRetentionRequest(*s3.ExtendObjectRetentionInput) (*request.Request, *s3.ExtendObjectRetentionOutput)
+
+ GetBucketAcl(*s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error)
+ GetBucketAclWithContext(aws.Context, *s3.GetBucketAclInput, ...request.Option) (*s3.GetBucketAclOutput, error)
+ GetBucketAclRequest(*s3.GetBucketAclInput) (*request.Request, *s3.GetBucketAclOutput)
+
+ GetBucketCors(*s3.GetBucketCorsInput) (*s3.GetBucketCorsOutput, error)
+ GetBucketCorsWithContext(aws.Context, *s3.GetBucketCorsInput, ...request.Option) (*s3.GetBucketCorsOutput, error)
+ GetBucketCorsRequest(*s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput)
+
+ GetBucketLifecycleConfiguration(*s3.GetBucketLifecycleConfigurationInput) (*s3.GetBucketLifecycleConfigurationOutput, error)
+ GetBucketLifecycleConfigurationWithContext(aws.Context, *s3.GetBucketLifecycleConfigurationInput, ...request.Option) (*s3.GetBucketLifecycleConfigurationOutput, error)
+ GetBucketLifecycleConfigurationRequest(*s3.GetBucketLifecycleConfigurationInput) (*request.Request, *s3.GetBucketLifecycleConfigurationOutput)
+
+ GetBucketLocation(*s3.GetBucketLocationInput) (*s3.GetBucketLocationOutput, error)
+ GetBucketLocationWithContext(aws.Context, *s3.GetBucketLocationInput, ...request.Option) (*s3.GetBucketLocationOutput, error)
+ GetBucketLocationRequest(*s3.GetBucketLocationInput) (*request.Request, *s3.GetBucketLocationOutput)
+
+ GetBucketLogging(*s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error)
+ GetBucketLoggingWithContext(aws.Context, *s3.GetBucketLoggingInput, ...request.Option) (*s3.GetBucketLoggingOutput, error)
+ GetBucketLoggingRequest(*s3.GetBucketLoggingInput) (*request.Request, *s3.GetBucketLoggingOutput)
+
+ GetBucketProtectionConfiguration(*s3.GetBucketProtectionConfigurationInput) (*s3.GetBucketProtectionConfigurationOutput, error)
+ GetBucketProtectionConfigurationWithContext(aws.Context, *s3.GetBucketProtectionConfigurationInput, ...request.Option) (*s3.GetBucketProtectionConfigurationOutput, error)
+ GetBucketProtectionConfigurationRequest(*s3.GetBucketProtectionConfigurationInput) (*request.Request, *s3.GetBucketProtectionConfigurationOutput)
+
+ GetBucketReplication(*s3.GetBucketReplicationInput) (*s3.GetBucketReplicationOutput, error)
+ GetBucketReplicationWithContext(aws.Context, *s3.GetBucketReplicationInput, ...request.Option) (*s3.GetBucketReplicationOutput, error)
+ GetBucketReplicationRequest(*s3.GetBucketReplicationInput) (*request.Request, *s3.GetBucketReplicationOutput)
+
+ GetBucketVersioning(*s3.GetBucketVersioningInput) (*s3.GetBucketVersioningOutput, error)
+ GetBucketVersioningWithContext(aws.Context, *s3.GetBucketVersioningInput, ...request.Option) (*s3.GetBucketVersioningOutput, error)
+ GetBucketVersioningRequest(*s3.GetBucketVersioningInput) (*request.Request, *s3.GetBucketVersioningOutput)
+
+ GetBucketWebsite(*s3.GetBucketWebsiteInput) (*s3.GetBucketWebsiteOutput, error)
+ GetBucketWebsiteWithContext(aws.Context, *s3.GetBucketWebsiteInput, ...request.Option) (*s3.GetBucketWebsiteOutput, error)
+ GetBucketWebsiteRequest(*s3.GetBucketWebsiteInput) (*request.Request, *s3.GetBucketWebsiteOutput)
+
+ GetObject(*s3.GetObjectInput) (*s3.GetObjectOutput, error)
+ GetObjectWithContext(aws.Context, *s3.GetObjectInput, ...request.Option) (*s3.GetObjectOutput, error)
+ GetObjectRequest(*s3.GetObjectInput) (*request.Request, *s3.GetObjectOutput)
+
+ GetObjectAcl(*s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
+ GetObjectAclWithContext(aws.Context, *s3.GetObjectAclInput, ...request.Option) (*s3.GetObjectAclOutput, error)
+ GetObjectAclRequest(*s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput)
+
+ GetObjectTagging(*s3.GetObjectTaggingInput) (*s3.GetObjectTaggingOutput, error)
+ GetObjectTaggingWithContext(aws.Context, *s3.GetObjectTaggingInput, ...request.Option) (*s3.GetObjectTaggingOutput, error)
+ GetObjectTaggingRequest(*s3.GetObjectTaggingInput) (*request.Request, *s3.GetObjectTaggingOutput)
+
+ GetPublicAccessBlock(*s3.GetPublicAccessBlockInput) (*s3.GetPublicAccessBlockOutput, error)
+ GetPublicAccessBlockWithContext(aws.Context, *s3.GetPublicAccessBlockInput, ...request.Option) (*s3.GetPublicAccessBlockOutput, error)
+ GetPublicAccessBlockRequest(*s3.GetPublicAccessBlockInput) (*request.Request, *s3.GetPublicAccessBlockOutput)
+
+ HeadBucket(*s3.HeadBucketInput) (*s3.HeadBucketOutput, error)
+ HeadBucketWithContext(aws.Context, *s3.HeadBucketInput, ...request.Option) (*s3.HeadBucketOutput, error)
+ HeadBucketRequest(*s3.HeadBucketInput) (*request.Request, *s3.HeadBucketOutput)
+
+ HeadObject(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
+ HeadObjectWithContext(aws.Context, *s3.HeadObjectInput, ...request.Option) (*s3.HeadObjectOutput, error)
+ HeadObjectRequest(*s3.HeadObjectInput) (*request.Request, *s3.HeadObjectOutput)
+
+ ListBuckets(*s3.ListBucketsInput) (*s3.ListBucketsOutput, error)
+ ListBucketsWithContext(aws.Context, *s3.ListBucketsInput, ...request.Option) (*s3.ListBucketsOutput, error)
+ ListBucketsRequest(*s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput)
+
+ ListBucketsExtended(*s3.ListBucketsExtendedInput) (*s3.ListBucketsExtendedOutput, error)
+ ListBucketsExtendedWithContext(aws.Context, *s3.ListBucketsExtendedInput, ...request.Option) (*s3.ListBucketsExtendedOutput, error)
+ ListBucketsExtendedRequest(*s3.ListBucketsExtendedInput) (*request.Request, *s3.ListBucketsExtendedOutput)
+
+ ListBucketsExtendedPages(*s3.ListBucketsExtendedInput, func(*s3.ListBucketsExtendedOutput, bool) bool) error
+ ListBucketsExtendedPagesWithContext(aws.Context, *s3.ListBucketsExtendedInput, func(*s3.ListBucketsExtendedOutput, bool) bool, ...request.Option) error
+
+ ListLegalHolds(*s3.ListLegalHoldsInput) (*s3.ListLegalHoldsOutput, error)
+ ListLegalHoldsWithContext(aws.Context, *s3.ListLegalHoldsInput, ...request.Option) (*s3.ListLegalHoldsOutput, error)
+ ListLegalHoldsRequest(*s3.ListLegalHoldsInput) (*request.Request, *s3.ListLegalHoldsOutput)
+
+ ListMultipartUploads(*s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error)
+ ListMultipartUploadsWithContext(aws.Context, *s3.ListMultipartUploadsInput, ...request.Option) (*s3.ListMultipartUploadsOutput, error)
+ ListMultipartUploadsRequest(*s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput)
+
+ ListMultipartUploadsPages(*s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool) error
+ ListMultipartUploadsPagesWithContext(aws.Context, *s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool, ...request.Option) error
+
+ ListObjectVersions(*s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error)
+ ListObjectVersionsWithContext(aws.Context, *s3.ListObjectVersionsInput, ...request.Option) (*s3.ListObjectVersionsOutput, error)
+ ListObjectVersionsRequest(*s3.ListObjectVersionsInput) (*request.Request, *s3.ListObjectVersionsOutput)
+
+ ListObjectVersionsPages(*s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool) error
+ ListObjectVersionsPagesWithContext(aws.Context, *s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool, ...request.Option) error
+
+ ListObjects(*s3.ListObjectsInput) (*s3.ListObjectsOutput, error)
+ ListObjectsWithContext(aws.Context, *s3.ListObjectsInput, ...request.Option) (*s3.ListObjectsOutput, error)
+ ListObjectsRequest(*s3.ListObjectsInput) (*request.Request, *s3.ListObjectsOutput)
+
+ ListObjectsPages(*s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool) error
+ ListObjectsPagesWithContext(aws.Context, *s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool, ...request.Option) error
+
+ ListObjectsV2(*s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error)
+ ListObjectsV2WithContext(aws.Context, *s3.ListObjectsV2Input, ...request.Option) (*s3.ListObjectsV2Output, error)
+ ListObjectsV2Request(*s3.ListObjectsV2Input) (*request.Request, *s3.ListObjectsV2Output)
+
+ ListObjectsV2Pages(*s3.ListObjectsV2Input, func(*s3.ListObjectsV2Output, bool) bool) error
+ ListObjectsV2PagesWithContext(aws.Context, *s3.ListObjectsV2Input, func(*s3.ListObjectsV2Output, bool) bool, ...request.Option) error
+
+ ListParts(*s3.ListPartsInput) (*s3.ListPartsOutput, error)
+ ListPartsWithContext(aws.Context, *s3.ListPartsInput, ...request.Option) (*s3.ListPartsOutput, error)
+ ListPartsRequest(*s3.ListPartsInput) (*request.Request, *s3.ListPartsOutput)
+
+ ListPartsPages(*s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool) error
+ ListPartsPagesWithContext(aws.Context, *s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool, ...request.Option) error
+
+ PutBucketAcl(*s3.PutBucketAclInput) (*s3.PutBucketAclOutput, error)
+ PutBucketAclWithContext(aws.Context, *s3.PutBucketAclInput, ...request.Option) (*s3.PutBucketAclOutput, error)
+ PutBucketAclRequest(*s3.PutBucketAclInput) (*request.Request, *s3.PutBucketAclOutput)
+
+ PutBucketCors(*s3.PutBucketCorsInput) (*s3.PutBucketCorsOutput, error)
+ PutBucketCorsWithContext(aws.Context, *s3.PutBucketCorsInput, ...request.Option) (*s3.PutBucketCorsOutput, error)
+ PutBucketCorsRequest(*s3.PutBucketCorsInput) (*request.Request, *s3.PutBucketCorsOutput)
+
+ PutBucketLifecycleConfiguration(*s3.PutBucketLifecycleConfigurationInput) (*s3.PutBucketLifecycleConfigurationOutput, error)
+ PutBucketLifecycleConfigurationWithContext(aws.Context, *s3.PutBucketLifecycleConfigurationInput, ...request.Option) (*s3.PutBucketLifecycleConfigurationOutput, error)
+ PutBucketLifecycleConfigurationRequest(*s3.PutBucketLifecycleConfigurationInput) (*request.Request, *s3.PutBucketLifecycleConfigurationOutput)
+
+ PutBucketLogging(*s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error)
+ PutBucketLoggingWithContext(aws.Context, *s3.PutBucketLoggingInput, ...request.Option) (*s3.PutBucketLoggingOutput, error)
+ PutBucketLoggingRequest(*s3.PutBucketLoggingInput) (*request.Request, *s3.PutBucketLoggingOutput)
+
+ PutBucketProtectionConfiguration(*s3.PutBucketProtectionConfigurationInput) (*s3.PutBucketProtectionConfigurationOutput, error)
+ PutBucketProtectionConfigurationWithContext(aws.Context, *s3.PutBucketProtectionConfigurationInput, ...request.Option) (*s3.PutBucketProtectionConfigurationOutput, error)
+ PutBucketProtectionConfigurationRequest(*s3.PutBucketProtectionConfigurationInput) (*request.Request, *s3.PutBucketProtectionConfigurationOutput)
+
+ PutBucketReplication(*s3.PutBucketReplicationInput) (*s3.PutBucketReplicationOutput, error)
+ PutBucketReplicationWithContext(aws.Context, *s3.PutBucketReplicationInput, ...request.Option) (*s3.PutBucketReplicationOutput, error)
+ PutBucketReplicationRequest(*s3.PutBucketReplicationInput) (*request.Request, *s3.PutBucketReplicationOutput)
+
+ PutBucketVersioning(*s3.PutBucketVersioningInput) (*s3.PutBucketVersioningOutput, error)
+ PutBucketVersioningWithContext(aws.Context, *s3.PutBucketVersioningInput, ...request.Option) (*s3.PutBucketVersioningOutput, error)
+ PutBucketVersioningRequest(*s3.PutBucketVersioningInput) (*request.Request, *s3.PutBucketVersioningOutput)
+
+ PutBucketWebsite(*s3.PutBucketWebsiteInput) (*s3.PutBucketWebsiteOutput, error)
+ PutBucketWebsiteWithContext(aws.Context, *s3.PutBucketWebsiteInput, ...request.Option) (*s3.PutBucketWebsiteOutput, error)
+ PutBucketWebsiteRequest(*s3.PutBucketWebsiteInput) (*request.Request, *s3.PutBucketWebsiteOutput)
+
+ PutObject(*s3.PutObjectInput) (*s3.PutObjectOutput, error)
+ PutObjectWithContext(aws.Context, *s3.PutObjectInput, ...request.Option) (*s3.PutObjectOutput, error)
+ PutObjectRequest(*s3.PutObjectInput) (*request.Request, *s3.PutObjectOutput)
+
+ PutObjectAcl(*s3.PutObjectAclInput) (*s3.PutObjectAclOutput, error)
+ PutObjectAclWithContext(aws.Context, *s3.PutObjectAclInput, ...request.Option) (*s3.PutObjectAclOutput, error)
+ PutObjectAclRequest(*s3.PutObjectAclInput) (*request.Request, *s3.PutObjectAclOutput)
+
+ PutObjectTagging(*s3.PutObjectTaggingInput) (*s3.PutObjectTaggingOutput, error)
+ PutObjectTaggingWithContext(aws.Context, *s3.PutObjectTaggingInput, ...request.Option) (*s3.PutObjectTaggingOutput, error)
+ PutObjectTaggingRequest(*s3.PutObjectTaggingInput) (*request.Request, *s3.PutObjectTaggingOutput)
+
+ PutPublicAccessBlock(*s3.PutPublicAccessBlockInput) (*s3.PutPublicAccessBlockOutput, error)
+ PutPublicAccessBlockWithContext(aws.Context, *s3.PutPublicAccessBlockInput, ...request.Option) (*s3.PutPublicAccessBlockOutput, error)
+ PutPublicAccessBlockRequest(*s3.PutPublicAccessBlockInput) (*request.Request, *s3.PutPublicAccessBlockOutput)
+
+ RestoreObject(*s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error)
+ RestoreObjectWithContext(aws.Context, *s3.RestoreObjectInput, ...request.Option) (*s3.RestoreObjectOutput, error)
+ RestoreObjectRequest(*s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput)
+
+ UploadPart(*s3.UploadPartInput) (*s3.UploadPartOutput, error)
+ UploadPartWithContext(aws.Context, *s3.UploadPartInput, ...request.Option) (*s3.UploadPartOutput, error)
+ UploadPartRequest(*s3.UploadPartInput) (*request.Request, *s3.UploadPartOutput)
+
+ UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error)
+ UploadPartCopyWithContext(aws.Context, *s3.UploadPartCopyInput, ...request.Option) (*s3.UploadPartCopyOutput, error)
+ UploadPartCopyRequest(*s3.UploadPartCopyInput) (*request.Request, *s3.UploadPartCopyOutput)
+
+ WaitUntilBucketExists(*s3.HeadBucketInput) error
+ WaitUntilBucketExistsWithContext(aws.Context, *s3.HeadBucketInput, ...request.WaiterOption) error
+
+ WaitUntilBucketNotExists(*s3.HeadBucketInput) error
+ WaitUntilBucketNotExistsWithContext(aws.Context, *s3.HeadBucketInput, ...request.WaiterOption) error
+
+ WaitUntilObjectExists(*s3.HeadObjectInput) error
+ WaitUntilObjectExistsWithContext(aws.Context, *s3.HeadObjectInput, ...request.WaiterOption) error
+
+ WaitUntilObjectNotExists(*s3.HeadObjectInput) error
+ WaitUntilObjectNotExistsWithContext(aws.Context, *s3.HeadObjectInput, ...request.WaiterOption) error
+}
+
+var _ S3API = (*s3.S3)(nil)
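The interface above mirrors every operation on `*s3.S3` so callers can depend on an abstraction instead of the concrete client. A minimal testing sketch under stated assumptions — the `mockS3` type, the `listKeys` helper, and the bucket name are illustrative, not part of the vendored SDK:

```go
package main

import (
	"fmt"

	"github.com/IBM/ibm-cos-sdk-go/aws"
	"github.com/IBM/ibm-cos-sdk-go/service/s3"
	"github.com/IBM/ibm-cos-sdk-go/service/s3/s3iface"
)

// mockS3 embeds the interface so only the method under test needs overriding;
// calling any other method on it would panic at runtime.
type mockS3 struct {
	s3iface.S3API
}

func (m *mockS3) ListObjectsV2(in *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) {
	return &s3.ListObjectsV2Output{
		Contents: []*s3.Object{{Key: aws.String("example-key")}},
	}, nil
}

// listKeys accepts the interface, so tests can pass the mock while production
// code passes a real *s3.S3.
func listKeys(api s3iface.S3API, bucket string) ([]string, error) {
	out, err := api.ListObjectsV2(&s3.ListObjectsV2Input{Bucket: aws.String(bucket)})
	if err != nil {
		return nil, err
	}
	keys := make([]string, 0, len(out.Contents))
	for _, o := range out.Contents {
		keys = append(keys, aws.StringValue(o.Key))
	}
	return keys, nil
}

func main() {
	keys, _ := listKeys(&mockS3{}, "example-bucket")
	fmt.Println(keys) // [example-key]
}
```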
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/service.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/service.go
new file mode 100644
index 0000000000000..5a108580f0243
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/service.go
@@ -0,0 +1,105 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/client"
+ "github.com/IBM/ibm-cos-sdk-go/aws/client/metadata"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+ "github.com/IBM/ibm-cos-sdk-go/aws/signer"
+ "github.com/IBM/ibm-cos-sdk-go/aws/signer/v4"
+ "github.com/IBM/ibm-cos-sdk-go/private/protocol/restxml"
+)
+
+// S3 provides the API operation methods for making requests to
+// Amazon Simple Storage Service. See this package's package overview docs
+// for details on the service.
+//
+// S3 methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type S3 struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+ ServiceName = "s3" // Name of service.
+ EndpointsID = ServiceName // ID to lookup a service endpoint with.
+ ServiceID = "S3" // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the S3 client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// mySession := session.Must(session.NewSession())
+//
+// // Create a S3 client from just a session.
+// svc := s3.New(mySession)
+//
+// // Create a S3 client with additional configuration
+// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
+ c := p.ClientConfig(EndpointsID, cfgs...)
+ if c.SigningNameDerived || len(c.SigningName) == 0 {
+ c.SigningName = "s3"
+ }
+ return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *S3 {
+ svc := &S3{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ ServiceID: ServiceID,
+ SigningName: signingName,
+ SigningRegion: signingRegion,
+ PartitionID: partitionID,
+ Endpoint: endpoint,
+ APIVersion: "2006-03-01",
+ ResolvedRegion: resolvedRegion,
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(signer.CustomRequestSignerRouter(func(s *v4.Signer) {
+ s.DisableURIPathEscaping = true
+ }))
+ svc.Handlers.Build.PushBackNamed(restxml.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a S3 operation and runs any
+// custom request initialization.
+func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
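A minimal construction sketch for the client defined above; the region and endpoint strings are placeholders, not guaranteed IBM COS values:

```go
package main

import (
	"github.com/IBM/ibm-cos-sdk-go/aws"
	"github.com/IBM/ibm-cos-sdk-go/aws/session"
	"github.com/IBM/ibm-cos-sdk-go/service/s3"
)

func main() {
	// New() resolves the endpoint/signing config from the session and then
	// calls newClient, which installs the REST-XML build/unmarshal handlers.
	sess := session.Must(session.NewSession(&aws.Config{
		Region:   aws.String("us-south"),                // illustrative region
		Endpoint: aws.String("https://cos.example.com"), // illustrative endpoint
	}))
	svc := s3.New(sess)
	_ = svc
}
```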
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/sse.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/sse.go
new file mode 100644
index 0000000000000..f364a139562a0
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/sse.go
@@ -0,0 +1,84 @@
+package s3
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+ "net/http"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil)
+
+func validateSSERequiresSSL(r *request.Request) {
+ if r.HTTPRequest.URL.Scheme == "https" {
+ return
+ }
+
+ if iface, ok := r.Params.(sseCustomerKeyGetter); ok {
+ if len(iface.getSSECustomerKey()) > 0 {
+ r.Error = errSSERequiresSSL
+ return
+ }
+ }
+
+ if iface, ok := r.Params.(copySourceSSECustomerKeyGetter); ok {
+ if len(iface.getCopySourceSSECustomerKey()) > 0 {
+ r.Error = errSSERequiresSSL
+ return
+ }
+ }
+}
+
+const (
+ sseKeyHeader = "x-amz-server-side-encryption-customer-key"
+ sseKeyMD5Header = sseKeyHeader + "-md5"
+)
+
+func computeSSEKeyMD5(r *request.Request) {
+ var key string
+ if g, ok := r.Params.(sseCustomerKeyGetter); ok {
+ key = g.getSSECustomerKey()
+ }
+
+ computeKeyMD5(sseKeyHeader, sseKeyMD5Header, key, r.HTTPRequest)
+}
+
+const (
+ copySrcSSEKeyHeader = "x-amz-copy-source-server-side-encryption-customer-key"
+ copySrcSSEKeyMD5Header = copySrcSSEKeyHeader + "-md5"
+)
+
+func computeCopySourceSSEKeyMD5(r *request.Request) {
+ var key string
+ if g, ok := r.Params.(copySourceSSECustomerKeyGetter); ok {
+ key = g.getCopySourceSSECustomerKey()
+ }
+
+ computeKeyMD5(copySrcSSEKeyHeader, copySrcSSEKeyMD5Header, key, r.HTTPRequest)
+}
+
+func computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) {
+ if len(key) == 0 {
+	// Backwards compatibility where the user just set the header value instead
+ // of using the API parameter, or setting the header value for an
+ // operation without the parameters modeled.
+ key = r.Header.Get(keyHeader)
+ if len(key) == 0 {
+ return
+ }
+
+	// In the backwards-compatible case, the header's value is not base64
+	// encoded and needs to be encoded and updated by the SDK's customizations.
+ b64Key := base64.StdEncoding.EncodeToString([]byte(key))
+ r.Header.Set(keyHeader, b64Key)
+ }
+
+ // Only update Key's MD5 if not already set.
+ if len(r.Header.Get(keyMD5Header)) == 0 {
+ sum := md5.Sum([]byte(key))
+ keyMD5 := base64.StdEncoding.EncodeToString(sum[:])
+ r.Header.Set(keyMD5Header, keyMD5)
+ }
+}
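A standalone sketch of the header pair that `computeKeyMD5` derives, assuming a raw 32-byte customer key (the key bytes below are illustrative only):

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	key := []byte("0123456789abcdef0123456789abcdef") // example 32-byte key
	sum := md5.Sum(key)
	// Value for x-amz-server-side-encryption-customer-key:
	fmt.Println(base64.StdEncoding.EncodeToString(key))
	// Value for the matching ...-key-md5 header (base64 of the raw key's MD5):
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
}
```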
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/statusok_error.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/statusok_error.go
new file mode 100644
index 0000000000000..f7d625c50a9f6
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/statusok_error.go
@@ -0,0 +1,47 @@
+package s3
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+ "github.com/IBM/ibm-cos-sdk-go/internal/sdkio"
+)
+
+func copyMultipartStatusOKUnmarshalError(r *request.Request) {
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ r.HTTPResponse.Body.Close()
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization, "unable to read response body", err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ // Note, some middleware later in the stack like restxml.Unmarshal expect a valid, non-closed Body
+ // even in case of an error, so we replace it with an empty Reader.
+ r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewBuffer(nil))
+ return
+ }
+
+ body := bytes.NewReader(b)
+ r.HTTPResponse.Body = ioutil.NopCloser(body)
+ defer body.Seek(0, sdkio.SeekStart)
+
+ unmarshalError(r)
+ if err, ok := r.Error.(awserr.Error); ok && err != nil {
+ if err.Code() == request.ErrCodeSerialization &&
+ err.OrigErr() != io.EOF {
+ r.Error = nil
+ return
+ }
+ // if empty payload
+ if err.OrigErr() == io.EOF {
+ r.HTTPResponse.StatusCode = http.StatusInternalServerError
+ } else {
+ r.HTTPResponse.StatusCode = http.StatusServiceUnavailable
+ }
+ }
+}
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/unmarshal_error.go
new file mode 100644
index 0000000000000..782bcc33f4b3a
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/unmarshal_error.go
@@ -0,0 +1,114 @@
+package s3
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/awserr"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+ "github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil"
+)
+
+type xmlErrorResponse struct {
+ XMLName xml.Name `xml:"Error"`
+ Code string `xml:"Code"`
+ Message string `xml:"Message"`
+}
+
+func unmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ defer io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+
+	// Bucket exists in a different region, and the request needs
+ // to be made to the correct region.
+ if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
+ msg := fmt.Sprintf(
+ "incorrect region, the bucket is not in '%s' region at endpoint '%s'",
+ aws.StringValue(r.Config.Region),
+ aws.StringValue(r.Config.Endpoint),
+ )
+ if v := r.HTTPResponse.Header.Get("x-amz-bucket-region"); len(v) != 0 {
+ msg += fmt.Sprintf(", bucket is in '%s' region", v)
+ }
+ r.Error = awserr.NewRequestFailure(
+ awserr.New("BucketRegionError", msg, nil),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+
+ // Attempt to parse error from body if it is known
+ var errResp xmlErrorResponse
+ var err error
+ if r.HTTPResponse.StatusCode >= 200 && r.HTTPResponse.StatusCode < 300 {
+ err = s3unmarshalXMLError(&errResp, r.HTTPResponse.Body)
+ } else {
+ err = xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body)
+ }
+
+ if err != nil {
+ var errorMsg string
+ if err == io.EOF {
+ errorMsg = "empty response payload"
+ } else {
+ errorMsg = "failed to unmarshal error message"
+ }
+
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization,
+ errorMsg, err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+
+ // Fallback to status code converted to message if still no error code
+ if len(errResp.Code) == 0 {
+ statusText := http.StatusText(r.HTTPResponse.StatusCode)
+ errResp.Code = strings.Replace(statusText, " ", "", -1)
+ errResp.Message = statusText
+ }
+
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(errResp.Code, errResp.Message, err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+}
+
+// A RequestFailure provides access to the S3 Request ID and Host ID values
+// returned from API operation errors. Getting the error as a string will
+// return the formatted error with the same information as awserr.RequestFailure,
+// while also adding the HostID value from the response.
+type RequestFailure interface {
+ awserr.RequestFailure
+
+	// HostID is the S3 Host ID needed for debugging and for contacting support.
+ HostID() string
+}
+
+// s3unmarshalXMLError is an s3-specific XML error unmarshaler
+// for 200 OK errors and response payloads.
+// This function differs from the xmlutil.UnmarshalXMLError
+// func in that it does not ignore the EOF error and passes it up.
+// Related to the bug fix for `s3 200 OK response with empty payload`.
+func s3unmarshalXMLError(v interface{}, stream io.Reader) error {
+ var errBuf bytes.Buffer
+ body := io.TeeReader(stream, &errBuf)
+
+ err := xml.NewDecoder(body).Decode(v)
+ if err != nil && err != io.EOF {
+ return awserr.NewUnmarshalError(err,
+ "failed to unmarshal error message", errBuf.Bytes())
+ }
+
+ return err
+}
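A hedged sketch of consuming the error that `unmarshalError` shapes; the helper name and bucket are illustrative, and `svc` is assumed to be an initialized client:

```go
package cosutil

import (
	"fmt"

	"github.com/IBM/ibm-cos-sdk-go/aws"
	"github.com/IBM/ibm-cos-sdk-go/aws/awserr"
	"github.com/IBM/ibm-cos-sdk-go/service/s3"
)

func reportHeadBucketError(svc *s3.S3) {
	_, err := svc.HeadBucket(&s3.HeadBucketInput{Bucket: aws.String("missing-bucket")})
	if reqErr, ok := err.(awserr.RequestFailure); ok {
		// Code/Message come from the parsed <Error> XML, or from the HTTP
		// status-text fallback when the body carried no error code.
		fmt.Printf("code=%s status=%d request-id=%s\n",
			reqErr.Code(), reqErr.StatusCode(), reqErr.RequestID())
	}
}
```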
diff --git a/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/waiters.go b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/waiters.go
new file mode 100644
index 0000000000000..f6fa0cf607830
--- /dev/null
+++ b/vendor/github.com/IBM/ibm-cos-sdk-go/service/s3/waiters.go
@@ -0,0 +1,214 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+ "time"
+
+ "github.com/IBM/ibm-cos-sdk-go/aws"
+ "github.com/IBM/ibm-cos-sdk-go/aws/request"
+)
+
+// WaitUntilBucketExists uses the Amazon S3 API operation
+// HeadBucket to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
+func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
+ return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists.
+// With the support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
+ w := request.Waiter{
+ Name: "WaitUntilBucketExists",
+ MaxAttempts: 20,
+ Delay: request.ConstantWaiterDelay(5 * time.Second),
+ Acceptors: []request.WaiterAcceptor{
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 200,
+ },
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 301,
+ },
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 403,
+ },
+ {
+ State: request.RetryWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 404,
+ },
+ },
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []request.Option) (*request.Request, error) {
+ var inCpy *HeadBucketInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.HeadBucketRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+ w.ApplyOptions(opts...)
+
+ return w.WaitWithContext(ctx)
+}
+
+// WaitUntilBucketNotExists uses the Amazon S3 API operation
+// HeadBucket to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
+func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error {
+ return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists.
+// With the support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
+ w := request.Waiter{
+ Name: "WaitUntilBucketNotExists",
+ MaxAttempts: 20,
+ Delay: request.ConstantWaiterDelay(5 * time.Second),
+ Acceptors: []request.WaiterAcceptor{
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 404,
+ },
+ },
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []request.Option) (*request.Request, error) {
+ var inCpy *HeadBucketInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.HeadBucketRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+ w.ApplyOptions(opts...)
+
+ return w.WaitWithContext(ctx)
+}
+
+// WaitUntilObjectExists uses the Amazon S3 API operation
+// HeadObject to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
+func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error {
+ return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists.
+// With the support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error {
+ w := request.Waiter{
+ Name: "WaitUntilObjectExists",
+ MaxAttempts: 20,
+ Delay: request.ConstantWaiterDelay(5 * time.Second),
+ Acceptors: []request.WaiterAcceptor{
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 200,
+ },
+ {
+ State: request.RetryWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 404,
+ },
+ },
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []request.Option) (*request.Request, error) {
+ var inCpy *HeadObjectInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.HeadObjectRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+ w.ApplyOptions(opts...)
+
+ return w.WaitWithContext(ctx)
+}
+
+// WaitUntilObjectNotExists uses the Amazon S3 API operation
+// HeadObject to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
+func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error {
+ return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists.
+// With the support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error {
+ w := request.Waiter{
+ Name: "WaitUntilObjectNotExists",
+ MaxAttempts: 20,
+ Delay: request.ConstantWaiterDelay(5 * time.Second),
+ Acceptors: []request.WaiterAcceptor{
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 404,
+ },
+ },
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []request.Option) (*request.Request, error) {
+ var inCpy *HeadObjectInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.HeadObjectRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+ w.ApplyOptions(opts...)
+
+ return w.WaitWithContext(ctx)
+}
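A minimal usage sketch for the generated waiters; `svc` is assumed to be an initialized `*s3.S3` and the bucket name is illustrative:

```go
package cosutil

import (
	"context"
	"log"
	"time"

	"github.com/IBM/ibm-cos-sdk-go/aws"
	"github.com/IBM/ibm-cos-sdk-go/service/s3"
)

func waitForBucket(svc *s3.S3) {
	// Bound the default 20 attempts x 5s polling with a context deadline.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()
	if err := svc.WaitUntilBucketExistsWithContext(ctx, &s3.HeadBucketInput{
		Bucket: aws.String("example-bucket"), // illustrative name
	}); err != nil {
		log.Fatalf("bucket never reached an acceptable state: %v", err)
	}
}
```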
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 13aa8b57b7797..5302db50b1242 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -152,6 +152,48 @@ github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/o
github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared
github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version
github.com/AzureAD/microsoft-authentication-library-for-go/apps/public
+# github.com/IBM/ibm-cos-sdk-go v1.9.4
+## explicit; go 1.11
+github.com/IBM/ibm-cos-sdk-go/aws
+github.com/IBM/ibm-cos-sdk-go/aws/arn
+github.com/IBM/ibm-cos-sdk-go/aws/awserr
+github.com/IBM/ibm-cos-sdk-go/aws/awsutil
+github.com/IBM/ibm-cos-sdk-go/aws/client
+github.com/IBM/ibm-cos-sdk-go/aws/client/metadata
+github.com/IBM/ibm-cos-sdk-go/aws/corehandlers
+github.com/IBM/ibm-cos-sdk-go/aws/credentials
+github.com/IBM/ibm-cos-sdk-go/aws/credentials/endpointcreds
+github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam
+github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/token
+github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/tokenmanager
+github.com/IBM/ibm-cos-sdk-go/aws/credentials/processcreds
+github.com/IBM/ibm-cos-sdk-go/aws/defaults
+github.com/IBM/ibm-cos-sdk-go/aws/endpoints
+github.com/IBM/ibm-cos-sdk-go/aws/request
+github.com/IBM/ibm-cos-sdk-go/aws/session
+github.com/IBM/ibm-cos-sdk-go/aws/signer
+github.com/IBM/ibm-cos-sdk-go/aws/signer/ibmiam
+github.com/IBM/ibm-cos-sdk-go/aws/signer/v4
+github.com/IBM/ibm-cos-sdk-go/internal/ini
+github.com/IBM/ibm-cos-sdk-go/internal/s3shared
+github.com/IBM/ibm-cos-sdk-go/internal/s3shared/arn
+github.com/IBM/ibm-cos-sdk-go/internal/s3shared/s3err
+github.com/IBM/ibm-cos-sdk-go/internal/sdkio
+github.com/IBM/ibm-cos-sdk-go/internal/sdkmath
+github.com/IBM/ibm-cos-sdk-go/internal/sdkrand
+github.com/IBM/ibm-cos-sdk-go/internal/shareddefaults
+github.com/IBM/ibm-cos-sdk-go/internal/strings
+github.com/IBM/ibm-cos-sdk-go/internal/sync/singleflight
+github.com/IBM/ibm-cos-sdk-go/private/checksum
+github.com/IBM/ibm-cos-sdk-go/private/protocol
+github.com/IBM/ibm-cos-sdk-go/private/protocol/json/jsonutil
+github.com/IBM/ibm-cos-sdk-go/private/protocol/query
+github.com/IBM/ibm-cos-sdk-go/private/protocol/query/queryutil
+github.com/IBM/ibm-cos-sdk-go/private/protocol/rest
+github.com/IBM/ibm-cos-sdk-go/private/protocol/restxml
+github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil
+github.com/IBM/ibm-cos-sdk-go/service/s3
+github.com/IBM/ibm-cos-sdk-go/service/s3/s3iface
# github.com/Masterminds/goutils v1.1.1
## explicit
github.com/Masterminds/goutils
|
feat
|
add support for IBM cloud object storage as storage client (#8826)
|
e71964cca461c9da6515ce0b25467fa8d17b3598
|
2024-03-08 15:44:31
|
wellweek
|
docs: fix some typos (#12163)
| false
|
diff --git a/docs/sources/query/log_queries/_index.md b/docs/sources/query/log_queries/_index.md
index bc9a05f536f21..eb66dd0b1e5c5 100644
--- a/docs/sources/query/log_queries/_index.md
+++ b/docs/sources/query/log_queries/_index.md
@@ -230,7 +230,7 @@ String type work exactly like Prometheus label matchers use in [log stream selec
> The string type is the only one that can filter out a log line with a label `__error__`.
-Using Duration, Number and Bytes will convert the label value prior to comparision and support the following comparators:
+Using Duration, Number and Bytes will convert the label value prior to comparison and support the following comparators:
- `==` or `=` for equality.
- `!=` for inequality.
@@ -626,7 +626,7 @@ the result will be
{host="grafana.net", path="/", status="200"} {"level": "info", "method": "GET", "path": "/", "host": "grafana.net", "status": "200"}
```
-Similary, this expression can be used to drop `__error__` labels as well. For example, for the query `{job="varlogs"}|json|drop __error__`, with below log line
+Similarly, this expression can be used to drop `__error__` labels as well. For example, for the query `{job="varlogs"}|json|drop __error__`, with below log line
```
INFO GET / loki.net 200
diff --git a/docs/sources/query/template_functions.md b/docs/sources/query/template_functions.md
index 5b660fa786da8..0effe4c01ac75 100644
--- a/docs/sources/query/template_functions.md
+++ b/docs/sources/query/template_functions.md
@@ -367,7 +367,7 @@ Example:
## mul
-Mulitply numbers. Supports multiple numbers.
+Multiply numbers. Supports multiple numbers.
Signature: `func(a interface{}, v ...interface{}) int64`
@@ -415,7 +415,7 @@ Example:
## mulf
-Mulitply numbers. Supports multiple numbers
+Multiply numbers. Supports multiple numbers
Signature: `func(a interface{}, v ...interface{}) float64`
diff --git a/docs/sources/reference/api.md b/docs/sources/reference/api.md
index cf384859c6a71..afb3654dbedee 100644
--- a/docs/sources/reference/api.md
+++ b/docs/sources/reference/api.md
@@ -829,7 +829,7 @@ The `/loki/api/v1/index/volume` and `/loki/api/v1/index/volume_range` endpoints
The `query` should be a valid LogQL stream selector, for example `{job="foo", env=~".+"}`. By default, these endpoints will aggregate into series consisting of all matches for labels included in the query. For example, assuming you have the streams `{job="foo", env="prod", team="alpha"}`, `{job="bar", env="prod", team="beta"}`, `{job="foo", env="dev", team="alpha"}`, and `{job="bar", env="dev", team="beta"}` in your system. The query `{job="foo", env=~".+"}` would return the two metric series `{job="foo", env="dev"}` and `{job="foo", env="prod"}`, each with datapoints representing the accumulate values of chunks for the streams matching that selector, which in this case would be the streams `{job="foo", env="dev", team="alpha"}` and `{job="foo", env="prod", team="alpha"}`, respectively.
-There are two parameters which can affect the aggregation strategy. First, a comma-seperated list of `targetLabels` can be provided, allowing volumes to be aggregated by the speficied `targetLabels` only. This is useful for negations. For example, if you said `{team="alpha", env!="dev"}`, the default behavior would include `env` in the aggregation set. However, maybe you're looking for all non-dev jobs for team alpha, and you don't care which env those are in (other than caring that they're not dev jobs). To achieve this, you could specify `targetLabels=team,job`, resulting in a single metric series (in this case) of `{team="alpha", job="foo}`.
+There are two parameters which can affect the aggregation strategy. First, a comma-separated list of `targetLabels` can be provided, allowing volumes to be aggregated by the specified `targetLabels` only. This is useful for negations. For example, if you said `{team="alpha", env!="dev"}`, the default behavior would include `env` in the aggregation set. However, maybe you're looking for all non-dev jobs for team alpha, and you don't care which env those are in (other than caring that they're not dev jobs). To achieve this, you could specify `targetLabels=team,job`, resulting in a single metric series (in this case) of `{team="alpha", job="foo"}`.
The other way to change aggregations is with the `aggregateBy` parameter. The default value for this is `series`, which aggregates into combinations of matching key-value pairs. Alternately this can be specified as `labels`, which will aggregate into labels only. In this case, the response will have a metric series with a label name matching each label, and a label value of `""`. This is useful for exploring logs at a high level. For example, if you wanted to know what percentage of your logs had a `team` label, you could query your logs with `aggregateBy=labels` and a query with either an exact or regex match on `team`, or by including `team` in the list of `targetLabels`.
diff --git a/docs/sources/send-data/promtail/cloud/ecs/_index.md b/docs/sources/send-data/promtail/cloud/ecs/_index.md
index 5b933c2ec611c..c78d9699ffff4 100644
--- a/docs/sources/send-data/promtail/cloud/ecs/_index.md
+++ b/docs/sources/send-data/promtail/cloud/ecs/_index.md
@@ -153,7 +153,7 @@ Go ahead and replace the `Host` and `HTTP_User` property with your [GrafanaCloud
We include plain text credentials in `options` for simplicity. However, this exposes credentials in your ECS task definition and in any version-controlled configuration. Mitigate this issue by using a secret store such as [AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html), combined with the `secretOptions` configuration option for [injecting sensitive data in a log configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data-secrets.html#secrets-logconfig).
-All `options` of the `logConfiguration` will be automatically translated into [fluentbit ouput][fluentbit ouput]. For example, the above options will produce this fluent bit `OUTPUT` config section:
+All `options` of the `logConfiguration` will be automatically translated into [fluentbit output][fluentbit output]. For example, the above options will produce this fluent bit `OUTPUT` config section:
```conf
[OUTPUT]
@@ -238,7 +238,7 @@ That's it ! Make sure to checkout LogQL to learn more about Loki powerful query
[fluentbit loki image]: https://hub.docker.com/r/grafana/fluent-bit-plugin-loki
[logql]: https://grafana.com/docs/loki/latest/logql/
[alpine]:https://hub.docker.com/_/alpine
-[fluentbit ouput]: https://fluentbit.io/documentation/0.14/output/
+[fluentbit output]: https://fluentbit.io/documentation/0.14/output/
[routing]: https://fluentbit.io/documentation/0.13/getting_started/routing.html
[grafanacloud account]: https://grafana.com/login
[grafana logs firelens]: ./ecs-grafana.png
diff --git a/docs/sources/setup/install/helm/monitor-and-alert/with-local-monitoring.md b/docs/sources/setup/install/helm/monitor-and-alert/with-local-monitoring.md
index 1c4f97a1dece3..dfa491fe966fa 100644
--- a/docs/sources/setup/install/helm/monitor-and-alert/with-local-monitoring.md
+++ b/docs/sources/setup/install/helm/monitor-and-alert/with-local-monitoring.md
@@ -17,7 +17,7 @@ By default this Helm Chart configures meta-monitoring of metrics (service monito
The `ServiceMonitor` resource works with either the Prometheus Operator or the Grafana Agent Operator, and defines how Loki's metrics should be scraped. Scraping this Loki cluster using the scrape config defined in the `SerivceMonitor` resource is required for the included dashboards to work. A `MetricsInstance` can be configured to write the metrics to a remote Prometheus instance such as Grafana Cloud Metrics.
-_Self monitoring_ is enabled by default. This will deploy a `GrafanaAgent`, `LogsInstance`, and `PodLogs` resource which will instruct the Grafana Agent Operator (installed seperately) on how to scrape this Loki cluster's logs and send them back to itself. Scraping this Loki cluster using the scrape config defined in the `PodLogs` resource is required for the included dashboards to work.
+_Self monitoring_ is enabled by default. This will deploy a `GrafanaAgent`, `LogsInstance`, and `PodLogs` resource which will instruct the Grafana Agent Operator (installed separately) on how to scrape this Loki cluster's logs and send them back to itself. Scraping this Loki cluster using the scrape config defined in the `PodLogs` resource is required for the included dashboards to work.
Rules and alerts are automatically deployed.
diff --git a/docs/sources/setup/migrate/migrate-from-distributed/index.md b/docs/sources/setup/migrate/migrate-from-distributed/index.md
index 51c1476c58f22..1618716fd26e8 100644
--- a/docs/sources/setup/migrate/migrate-from-distributed/index.md
+++ b/docs/sources/setup/migrate/migrate-from-distributed/index.md
@@ -13,7 +13,7 @@ keywords:
# Migrate from `loki-distributed` Helm chart
-This guide will walk you through migrating to the `loki` Helm Chart, v3.0 or higher, from the `loki-distributed` Helm Chart (v0.63.2 at time of writing). The process consists of deploying the new `loki` Helm Chart alongside the existing `loki-distributed` installation. By joining the new cluster to the exsiting cluster's ring, you will create one large cluster. This will allow you to manually bring down the `loki-distributed` components in a safe way to avoid any data loss.
+This guide will walk you through migrating to the `loki` Helm Chart, v3.0 or higher, from the `loki-distributed` Helm Chart (v0.63.2 at time of writing). The process consists of deploying the new `loki` Helm Chart alongside the existing `loki-distributed` installation. By joining the new cluster to the existing cluster's ring, you will create one large cluster. This will allow you to manually bring down the `loki-distributed` components in a safe way to avoid any data loss.
**Before you begin:**
diff --git a/docs/sources/setup/migrate/migrate-to-tsdb/_index.md b/docs/sources/setup/migrate/migrate-to-tsdb/_index.md
index 59458e700af57..b9a1f478d359d 100644
--- a/docs/sources/setup/migrate/migrate-to-tsdb/_index.md
+++ b/docs/sources/setup/migrate/migrate-to-tsdb/_index.md
@@ -10,7 +10,7 @@ keywords:
# Migrate to TSDB
-[TSDB]({{< relref "../../../operations/storage/tsdb" >}}) is the recommeneded index type for Loki and is where the current development lies.
+[TSDB]({{< relref "../../../operations/storage/tsdb" >}}) is the recommended index type for Loki and is where the current development lies.
If you are running Loki with [boltb-shipper]({{< relref "../../../operations/storage/boltdb-shipper" >}}) or any of the [legacy index types]({{< relref "../../../storage#index-storage" >}}) that have been deprecated,
we strongly recommend migrating to TSDB.
diff --git a/docs/sources/storage/_index.md b/docs/sources/storage/_index.md
index bbbebf756fc73..e7517e5c00353 100644
--- a/docs/sources/storage/_index.md
+++ b/docs/sources/storage/_index.md
@@ -326,7 +326,7 @@ This guide assumes a provisioned EKS cluster.
export AWS_REGION=<region of EKS cluster>
```
-4. Save the OIDC provider in an enviroment variable:
+4. Save the OIDC provider in an environment variable:
```
oidc_provider=$(aws eks describe-cluster --name <EKS cluster> --query "cluster.identity.oidc.issuer" --output text | sed -e "s/^https:\/\///")
|
docs
|
fix some typos (#12163)
|
f6468f16816f3d6028f6d9ce4bbe42d73888745f
|
2019-09-10 01:25:46
|
sh0rez
|
chore(packaging): simplify tagging (#989)
| false
|
diff --git a/Makefile b/Makefile
index 3eb04077e4570..53a244b8adea9 100644
--- a/Makefile
+++ b/Makefile
@@ -325,7 +325,9 @@ helm-clean:
# Docker Driver #
#################
+# optionally set the tag or the arch suffix (-arm64)
PLUGIN_TAG ?= $(IMAGE_TAG)
+PLUGIN_ARCH ?=
docker-driver: docker-driver-clean
mkdir cmd/docker-driver/rootfs
@@ -334,21 +336,24 @@ docker-driver: docker-driver-clean
(docker export $$ID | tar -x -C cmd/docker-driver/rootfs) && \
docker rm -vf $$ID
docker rmi rootfsimage -f
- docker plugin create grafana/loki-docker-driver:$(PLUGIN_TAG) cmd/docker-driver
+ docker plugin create grafana/loki-docker-driver:$(PLUGIN_TAG)$(PLUGIN_ARCH) cmd/docker-driver
+ docker plugin create grafana/loki-docker-driver:latest$(PLUGIN_ARCH) cmd/docker-driver
cmd/docker-driver/docker-driver: $(APP_GO_FILES)
CGO_ENABLED=0 go build $(GO_FLAGS) -o $@ ./$(@D)
$(NETGO_CHECK)
docker-driver-push: docker-driver
- docker plugin push grafana/loki-docker-driver:$(PLUGIN_TAG)
+ docker plugin push grafana/loki-docker-driver:$(PLUGIN_TAG)$(PLUGIN_ARCH)
+ docker plugin push grafana/loki-docker-driver:latest$(PLUGIN_ARCH)
docker-driver-enable:
- docker plugin enable grafana/loki-docker-driver:$(PLUGIN_TAG)
+ docker plugin enable grafana/loki-docker-driver:$(PLUGIN_TAG)$(PLUGIN_ARCH)
docker-driver-clean:
- -docker plugin disable grafana/loki-docker-driver:$(IMAGE_TAG)
- -docker plugin rm grafana/loki-docker-driver:$(IMAGE_TAG)
+ -docker plugin disable grafana/loki-docker-driver:$(PLUGIN_TAG)$(PLUGIN_ARCH)
+ -docker plugin rm grafana/loki-docker-driver:$(PLUGIN_TAG)$(PLUGIN_ARCH)
+ -docker plugin rm grafana/loki-docker-driver:latest$(PLUGIN_ARCH)
rm -rf cmd/docker-driver/rootfs
########################
@@ -380,16 +385,16 @@ IMAGE_NAMES := grafana/loki grafana/promtail grafana/loki-canary
# push(app, optional tag)
# pushes the app, optionally tagging it differently before
define push
- $(SUDO) $(TAG_OCI) $(IMAGE_PREFIX)/$(1):$(IMAGE_TAG) $(IMAGE_PREFIX)/$(1):$(if $(2),$(2),$(IMAGE_TAG))
- $(SUDO) $(PUSH_OCI) $(IMAGE_PREFIX)/$(1):$(if $(2),$(2),$(IMAGE_TAG))
+ $(SUDO) $(TAG_OCI) $(IMAGE_PREFIX)/$(1):$(IMAGE_TAG) $(IMAGE_PREFIX)/$(1):$(2)
+ $(SUDO) $(PUSH_OCI) $(IMAGE_PREFIX)/$(1):$(2)
endef
# push-image(app)
-# pushes the app, if branch==master also as :latest and :master
+# pushes the app, also as :latest and :master
define push-image
- $(call push,$(1))
- $(if $(filter $(GIT_BRANCH),master), $(call push,$(1),master))
- $(if $(filter $(GIT_BRANCH),master), $(call push,$(1),latest))
+ $(call push,$(1),$(IMAGE_TAG))
+ $(call push,$(1),master)
+ $(call push,$(1),latest)
endef
# promtail
|
chore
|
simplify tagging (#989)
|
d451e23225047a11b4d5d82900cec4a46d6e7b39
|
2024-07-03 11:07:25
|
Jatin Suri
|
feat(operator): Add support for the volume API (#13369)
| false
|
diff --git a/operator/apis/loki/v1/lokistack_types.go b/operator/apis/loki/v1/lokistack_types.go
index 185f6bb1d8461..90cee75d94475 100644
--- a/operator/apis/loki/v1/lokistack_types.go
+++ b/operator/apis/loki/v1/lokistack_types.go
@@ -643,6 +643,13 @@ type QueryLimitSpec struct {
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Cardinality Limit"
CardinalityLimit int32 `json:"cardinalityLimit,omitempty"`
+
+ // MaxVolumeSeries defines the maximum number of aggregated series in a log-volume response
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Volume Series"
+ MaxVolumeSeries int32 `json:"maxVolumeSeries,omitempty"`
}
// BlockedQueryType defines which type of query a blocked query should apply to.
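A hedged Go sketch of where the new field sits next to the existing query limits; the numbers are illustrative, not recommended values, and the import path is an assumption based on the operator's repo layout:

```go
package main

import (
	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" // assumed module path
)

func main() {
	limits := lokiv1.QueryLimitSpec{
		MaxQuerySeries:   500,
		CardinalityLimit: 100000,
		MaxVolumeSeries:  1000, // new: caps aggregated series in a log-volume response
	}
	_ = limits
}
```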
diff --git a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml
index bd28187a5f013..e91e10a64442d 100644
--- a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml
@@ -150,7 +150,7 @@ metadata:
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: docker.io/grafana/loki-operator:0.6.1
- createdAt: "2024-06-12T17:07:27Z"
+ createdAt: "2024-07-02T16:13:52Z"
description: The Community Loki Operator provides Kubernetes native deployment
and management of Loki and related logging components.
features.operators.openshift.io/disconnected: "true"
@@ -384,6 +384,12 @@ spec:
path: limits.global.queries.maxQuerySeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
+ - description: MaxVolumeSeries defines the maximum number of aggregated series
+ in a log-volume response
+ displayName: Max Volume Series
+ path: limits.global.queries.maxVolumeSeries
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
- description: Timeout when querying ingesters or storage during the execution
of a query request.
displayName: Query Timeout
@@ -495,6 +501,12 @@ spec:
path: limits.tenants.queries.maxQuerySeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
+ - description: MaxVolumeSeries defines the maximum number of aggregated series
+ in a log-volume response
+ displayName: Max Volume Series
+ path: limits.tenants.queries.maxVolumeSeries
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
- description: Timeout when querying ingesters or storage during the execution
of a query request.
displayName: Query Timeout
diff --git a/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml
index df256e183a01e..7cfeec6d074f6 100644
--- a/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml
+++ b/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml
@@ -191,6 +191,11 @@ spec:
that is returned by a metric query.
format: int32
type: integer
+ maxVolumeSeries:
+ description: MaxVolumeSeries defines the maximum number
+ of aggregated series in a log-volume response
+ format: int32
+ type: integer
queryTimeout:
default: 3m
description: Timeout when querying ingesters or storage
@@ -366,6 +371,11 @@ spec:
that is returned by a metric query.
format: int32
type: integer
+ maxVolumeSeries:
+ description: MaxVolumeSeries defines the maximum number
+ of aggregated series in a log-volume response
+ format: int32
+ type: integer
queryTimeout:
default: 3m
description: Timeout when querying ingesters or storage
diff --git a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml
index 42d3b88c6d79e..03195bfe7ccec 100644
--- a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml
@@ -150,7 +150,7 @@ metadata:
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: docker.io/grafana/loki-operator:0.6.1
- createdAt: "2024-06-12T17:07:25Z"
+ createdAt: "2024-07-02T16:13:50Z"
description: The Community Loki Operator provides Kubernetes native deployment
and management of Loki and related logging components.
operators.operatorframework.io/builder: operator-sdk-unknown
@@ -377,6 +377,12 @@ spec:
path: limits.global.queries.maxQuerySeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
+ - description: MaxVolumeSeries defines the maximum number of aggregated series
+ in a log-volume response
+ displayName: Max Volume Series
+ path: limits.global.queries.maxVolumeSeries
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
- description: Timeout when querying ingesters or storage during the execution
of a query request.
displayName: Query Timeout
@@ -488,6 +494,12 @@ spec:
path: limits.tenants.queries.maxQuerySeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
+ - description: MaxVolumeSeries defines the maximum number of aggregated series
+ in a log-volume response
+ displayName: Max Volume Series
+ path: limits.tenants.queries.maxVolumeSeries
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
- description: Timeout when querying ingesters or storage during the execution
of a query request.
displayName: Query Timeout
diff --git a/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml
index b349edbecec95..234ec782eb1bf 100644
--- a/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml
+++ b/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml
@@ -191,6 +191,11 @@ spec:
that is returned by a metric query.
format: int32
type: integer
+ maxVolumeSeries:
+ description: MaxVolumeSeries defines the maximum number
+ of aggregated series in a log-volume response
+ format: int32
+ type: integer
queryTimeout:
default: 3m
description: Timeout when querying ingesters or storage
@@ -366,6 +371,11 @@ spec:
that is returned by a metric query.
format: int32
type: integer
+ maxVolumeSeries:
+ description: MaxVolumeSeries defines the maximum number
+ of aggregated series in a log-volume response
+ format: int32
+ type: integer
queryTimeout:
default: 3m
description: Timeout when querying ingesters or storage
diff --git a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
index b270d82272608..d21f1adbb7c4d 100644
--- a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
@@ -150,7 +150,7 @@ metadata:
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: quay.io/openshift-logging/loki-operator:0.1.0
- createdAt: "2024-06-12T17:07:29Z"
+ createdAt: "2024-07-02T16:13:54Z"
description: |
The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging.
## Prerequisites and Requirements
@@ -397,6 +397,12 @@ spec:
path: limits.global.queries.maxQuerySeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
+ - description: MaxVolumeSeries defines the maximum number of aggregated series
+ in a log-volume response
+ displayName: Max Volume Series
+ path: limits.global.queries.maxVolumeSeries
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
- description: Timeout when querying ingesters or storage during the execution
of a query request.
displayName: Query Timeout
@@ -508,6 +514,12 @@ spec:
path: limits.tenants.queries.maxQuerySeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
+ - description: MaxVolumeSeries defines the maximum number of aggregated series
+ in a log-volume response
+ displayName: Max Volume Series
+ path: limits.tenants.queries.maxVolumeSeries
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
- description: Timeout when querying ingesters or storage during the execution
of a query request.
displayName: Query Timeout
diff --git a/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml
index fe4d81f5646f5..4ab2f8aaba2ad 100644
--- a/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml
+++ b/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml
@@ -191,6 +191,11 @@ spec:
that is returned by a metric query.
format: int32
type: integer
+ maxVolumeSeries:
+ description: MaxVolumeSeries defines the maximum number
+ of aggregated series in a log-volume response
+ format: int32
+ type: integer
queryTimeout:
default: 3m
description: Timeout when querying ingesters or storage
@@ -366,6 +371,11 @@ spec:
that is returned by a metric query.
format: int32
type: integer
+ maxVolumeSeries:
+ description: MaxVolumeSeries defines the maximum number
+ of aggregated series in a log-volume response
+ format: int32
+ type: integer
queryTimeout:
default: 3m
description: Timeout when querying ingesters or storage
diff --git a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml
index cc971fd0c562c..2429338bd3a60 100644
--- a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml
+++ b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml
@@ -173,6 +173,11 @@ spec:
that is returned by a metric query.
format: int32
type: integer
+ maxVolumeSeries:
+ description: MaxVolumeSeries defines the maximum number
+ of aggregated series in a log-volume response
+ format: int32
+ type: integer
queryTimeout:
default: 3m
description: Timeout when querying ingesters or storage
@@ -348,6 +353,11 @@ spec:
that is returned by a metric query.
format: int32
type: integer
+ maxVolumeSeries:
+ description: MaxVolumeSeries defines the maximum number
+ of aggregated series in a log-volume response
+ format: int32
+ type: integer
queryTimeout:
default: 3m
description: Timeout when querying ingesters or storage
diff --git a/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml
index 3627c03d58577..b655b250aea30 100644
--- a/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml
+++ b/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml
@@ -297,6 +297,12 @@ spec:
path: limits.global.queries.maxQuerySeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
+ - description: MaxVolumeSeries defines the maximum number of aggregated series
+ in a log-volume response
+ displayName: Max Volume Series
+ path: limits.global.queries.maxVolumeSeries
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
- description: Timeout when querying ingesters or storage during the execution
of a query request.
displayName: Query Timeout
@@ -408,6 +414,12 @@ spec:
path: limits.tenants.queries.maxQuerySeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
+ - description: MaxVolumeSeries defines the maximum number of aggregated series
+ in a log-volume response
+ displayName: Max Volume Series
+ path: limits.tenants.queries.maxVolumeSeries
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
- description: Timeout when querying ingesters or storage during the execution
of a query request.
displayName: Query Timeout
diff --git a/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml
index 0aefa95fc2807..7d12fc8ddaad8 100644
--- a/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml
+++ b/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml
@@ -290,6 +290,12 @@ spec:
path: limits.global.queries.maxQuerySeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
+ - description: MaxVolumeSeries defines the maximum number of aggregated series
+ in a log-volume response
+ displayName: Max Volume Series
+ path: limits.global.queries.maxVolumeSeries
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
- description: Timeout when querying ingesters or storage during the execution
of a query request.
displayName: Query Timeout
@@ -401,6 +407,12 @@ spec:
path: limits.tenants.queries.maxQuerySeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
+ - description: MaxVolumeSeries defines the maximum number of aggregated series
+ in a log-volume response
+ displayName: Max Volume Series
+ path: limits.tenants.queries.maxVolumeSeries
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
- description: Timeout when querying ingesters or storage during the execution
of a query request.
displayName: Query Timeout
diff --git a/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml
index 77bb3bff6fcd1..d55686c3addc3 100644
--- a/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml
+++ b/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml
@@ -309,6 +309,12 @@ spec:
path: limits.global.queries.maxQuerySeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
+ - description: MaxVolumeSeries defines the maximum number of aggregated series
+ in a log-volume response
+ displayName: Max Volume Series
+ path: limits.global.queries.maxVolumeSeries
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
- description: Timeout when querying ingesters or storage during the execution
of a query request.
displayName: Query Timeout
@@ -420,6 +426,12 @@ spec:
path: limits.tenants.queries.maxQuerySeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
+ - description: MaxVolumeSeries defines the maximum number of aggregated series
+ in a log-volume response
+ displayName: Max Volume Series
+ path: limits.tenants.queries.maxVolumeSeries
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
- description: Timeout when querying ingesters or storage during the execution
of a query request.
displayName: Query Timeout
diff --git a/operator/docs/operator/api.md b/operator/docs/operator/api.md
index ca71ecee6ce3a..3e2cb6a850ac5 100644
--- a/operator/docs/operator/api.md
+++ b/operator/docs/operator/api.md
@@ -3165,6 +3165,18 @@ int32
<p>CardinalityLimit defines the cardinality limit for index queries.</p>
</td>
</tr>
+<tr>
+<td>
+<code>maxVolumeSeries</code><br/>
+<em>
+int32
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>MaxVolumeSeries defines the maximum number of aggregated series in a log-volume response</p>
+</td>
+</tr>
</tbody>
</table>
diff --git a/operator/internal/manifests/internal/config/build_test.go b/operator/internal/manifests/internal/config/build_test.go
index 9486771f2611b..7a9479c34dd48 100644
--- a/operator/internal/manifests/internal/config/build_test.go
+++ b/operator/internal/manifests/internal/config/build_test.go
@@ -110,6 +110,8 @@ limits_config:
max_cache_freshness_per_query: 10m
split_queries_by_interval: 30m
query_timeout: 1m
+ volume_enabled: true
+ volume_max_series: 1000
per_stream_rate_limit: 5MB
per_stream_rate_limit_burst: 15MB
shard_streams:
@@ -206,6 +208,7 @@ overrides:
MaxQuerySeries: 500,
QueryTimeout: "1m",
CardinalityLimit: 100000,
+ MaxVolumeSeries: 1000,
},
},
},
@@ -368,6 +371,8 @@ limits_config:
max_cache_freshness_per_query: 10m
split_queries_by_interval: 30m
query_timeout: 1m
+ volume_enabled: true
+ volume_max_series: 1000
per_stream_rate_limit: 5MB
per_stream_rate_limit_burst: 15MB
shard_streams:
@@ -480,6 +485,7 @@ overrides:
MaxQuerySeries: 500,
QueryTimeout: "1m",
CardinalityLimit: 100000,
+ MaxVolumeSeries: 1000,
},
},
Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
@@ -795,6 +801,8 @@ limits_config:
max_cache_freshness_per_query: 10m
split_queries_by_interval: 30m
query_timeout: 1m
+ volume_enabled: true
+ volume_max_series: 1000
per_stream_rate_limit: 5MB
per_stream_rate_limit_burst: 15MB
shard_streams:
@@ -945,6 +953,7 @@ overrides:
MaxQuerySeries: 500,
QueryTimeout: "1m",
CardinalityLimit: 100000,
+ MaxVolumeSeries: 1000,
},
},
},
@@ -1154,6 +1163,8 @@ limits_config:
max_cache_freshness_per_query: 10m
split_queries_by_interval: 30m
query_timeout: 1m
+ volume_enabled: true
+ volume_max_series: 1000
per_stream_rate_limit: 5MB
per_stream_rate_limit_burst: 15MB
shard_streams:
@@ -1304,6 +1315,7 @@ overrides:
MaxQuerySeries: 500,
QueryTimeout: "1m",
CardinalityLimit: 100000,
+ MaxVolumeSeries: 1000,
},
},
},
@@ -1514,6 +1526,8 @@ limits_config:
max_cache_freshness_per_query: 10m
split_queries_by_interval: 30m
query_timeout: 1m
+ volume_enabled: true
+ volume_max_series: 1000
per_stream_rate_limit: 5MB
per_stream_rate_limit_burst: 15MB
shard_streams:
@@ -1677,6 +1691,7 @@ overrides:
MaxQuerySeries: 500,
QueryTimeout: "1m",
CardinalityLimit: 100000,
+ MaxVolumeSeries: 1000,
},
},
},
@@ -1912,6 +1927,8 @@ limits_config:
max_cache_freshness_per_query: 10m
split_queries_by_interval: 30m
query_timeout: 1m
+ volume_enabled: true
+ volume_max_series: 1000
per_stream_rate_limit: 5MB
per_stream_rate_limit_burst: 15MB
shard_streams:
@@ -2018,6 +2035,7 @@ overrides:
MaxQuerySeries: 500,
QueryTimeout: "1m",
CardinalityLimit: 100000,
+ MaxVolumeSeries: 1000,
},
Retention: &lokiv1.RetentionLimitSpec{
Days: 15,
@@ -2243,6 +2261,8 @@ limits_config:
max_cache_freshness_per_query: 10m
split_queries_by_interval: 30m
query_timeout: 2m
+ volume_enabled: true
+ volume_max_series: 1000
per_stream_rate_limit: 5MB
per_stream_rate_limit_burst: 15MB
shard_streams:
@@ -2419,6 +2439,7 @@ overrides:
MaxQuerySeries: 500,
QueryTimeout: "2m",
CardinalityLimit: 100000,
+ MaxVolumeSeries: 1000,
},
},
},
@@ -2683,6 +2704,8 @@ limits_config:
max_cache_freshness_per_query: 10m
split_queries_by_interval: 30m
query_timeout: 1m
+ volume_enabled: true
+ volume_max_series: 1000
per_stream_rate_limit: 5MB
per_stream_rate_limit_burst: 15MB
shard_streams:
@@ -2816,6 +2839,7 @@ overrides:
MaxQuerySeries: 500,
QueryTimeout: "1m",
CardinalityLimit: 100000,
+ MaxVolumeSeries: 1000,
},
},
},
@@ -3008,6 +3032,8 @@ limits_config:
max_cache_freshness_per_query: 10m
split_queries_by_interval: 30m
query_timeout: 2m
+ volume_enabled: true
+ volume_max_series: 1000
per_stream_rate_limit: 5MB
per_stream_rate_limit_burst: 15MB
shard_streams:
@@ -3212,6 +3238,7 @@ overrides:
MaxQuerySeries: 500,
QueryTimeout: "2m",
CardinalityLimit: 100000,
+ MaxVolumeSeries: 1000,
},
},
},
@@ -3506,6 +3533,8 @@ limits_config:
max_cache_freshness_per_query: 10m
split_queries_by_interval: 30m
query_timeout: 1m
+ volume_enabled: true
+ volume_max_series: 1000
per_stream_rate_limit: 5MB
per_stream_rate_limit_burst: 15MB
shard_streams:
@@ -3603,6 +3632,7 @@ overrides:
MaxQuerySeries: 500,
QueryTimeout: "1m",
CardinalityLimit: 100000,
+ MaxVolumeSeries: 1000,
},
},
},
@@ -3768,6 +3798,8 @@ limits_config:
max_cache_freshness_per_query: 10m
split_queries_by_interval: 30m
query_timeout: 1m
+ volume_enabled: true
+ volume_max_series: 1000
per_stream_rate_limit: 5MB
per_stream_rate_limit_burst: 15MB
shard_streams:
@@ -3865,6 +3897,7 @@ overrides:
MaxQuerySeries: 500,
QueryTimeout: "1m",
CardinalityLimit: 100000,
+ MaxVolumeSeries: 1000,
},
},
},
@@ -4031,6 +4064,8 @@ limits_config:
max_cache_freshness_per_query: 10m
split_queries_by_interval: 30m
query_timeout: 1m
+ volume_enabled: true
+ volume_max_series: 1000
per_stream_rate_limit: 5MB
per_stream_rate_limit_burst: 15MB
shard_streams:
@@ -4127,6 +4162,7 @@ overrides:
MaxQuerySeries: 500,
QueryTimeout: "1m",
CardinalityLimit: 100000,
+ MaxVolumeSeries: 1000,
},
},
},
@@ -4295,6 +4331,8 @@ limits_config:
max_cache_freshness_per_query: 10m
split_queries_by_interval: 30m
query_timeout: 1m
+ volume_enabled: true
+ volume_max_series: 1000
per_stream_rate_limit: 5MB
per_stream_rate_limit_burst: 15MB
shard_streams:
@@ -4396,6 +4434,7 @@ overrides:
MaxQuerySeries: 500,
QueryTimeout: "1m",
CardinalityLimit: 100000,
+ MaxVolumeSeries: 1000,
},
},
Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
@@ -4595,6 +4634,8 @@ limits_config:
max_cache_freshness_per_query: 10m
split_queries_by_interval: 30m
query_timeout: 1m
+ volume_enabled: true
+ volume_max_series: 1000
per_stream_rate_limit: 5MB
per_stream_rate_limit_burst: 15MB
shard_streams:
@@ -4696,6 +4737,7 @@ overrides:
MaxQuerySeries: 500,
QueryTimeout: "1m",
CardinalityLimit: 100000,
+ MaxVolumeSeries: 1000,
},
},
Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
@@ -4895,6 +4937,8 @@ limits_config:
split_queries_by_interval: 30m
tsdb_max_query_parallelism: 512
query_timeout: 1m
+ volume_enabled: true
+ volume_max_series: 1000
allow_structured_metadata: true
memberlist:
abort_if_cluster_join_fails: true
@@ -4985,6 +5029,7 @@ overrides:
MaxQuerySeries: 500,
QueryTimeout: "1m",
CardinalityLimit: 100000,
+ MaxVolumeSeries: 1000,
},
},
},
@@ -5075,6 +5120,7 @@ func defaultOptions() Options {
MaxQuerySeries: 500,
QueryTimeout: "1m",
CardinalityLimit: 100000,
+ MaxVolumeSeries: 1000,
},
},
},
@@ -5386,6 +5432,8 @@ limits_config:
per_stream_rate_limit_burst: 15MB
split_queries_by_interval: 30m
query_timeout: 1m
+ volume_enabled: true
+ volume_max_series: 1000
allow_structured_metadata: true
memberlist:
abort_if_cluster_join_fails: true
@@ -5561,6 +5609,8 @@ limits_config:
per_stream_rate_limit_burst: 15MB
split_queries_by_interval: 30m
query_timeout: 1m
+ volume_enabled: true
+ volume_max_series: 1000
allow_structured_metadata: true
memberlist:
abort_if_cluster_join_fails: true
@@ -5730,6 +5780,8 @@ limits_config:
max_cache_freshness_per_query: 10m
split_queries_by_interval: 30m
query_timeout: 1m
+ volume_enabled: true
+ volume_max_series: 1000
per_stream_rate_limit: 5MB
per_stream_rate_limit_burst: 15MB
shard_streams:
@@ -5891,6 +5943,7 @@ overrides:
MaxQuerySeries: 500,
QueryTimeout: "1m",
CardinalityLimit: 100000,
+ MaxVolumeSeries: 1000,
},
},
},
diff --git a/operator/internal/manifests/internal/config/loki-config.yaml b/operator/internal/manifests/internal/config/loki-config.yaml
index 38326157f2937..7db4597f1c62d 100644
--- a/operator/internal/manifests/internal/config/loki-config.yaml
+++ b/operator/internal/manifests/internal/config/loki-config.yaml
@@ -201,6 +201,8 @@ limits_config:
cardinality_limit: {{ .Stack.Limits.Global.QueryLimits.CardinalityLimit }}
max_streams_matchers_per_query: 1000
query_timeout: {{ .Stack.Limits.Global.QueryLimits.QueryTimeout }}
+ volume_enabled: true
+ volume_max_series: {{ .Stack.Limits.Global.QueryLimits.MaxVolumeSeries }}
{{- if .Retention.Enabled }}{{- with .Stack.Limits.Global.Retention }}
retention_period: {{.Days}}d
{{- with .Streams }}
diff --git a/operator/internal/manifests/internal/config/loki-runtime-config.yaml b/operator/internal/manifests/internal/config/loki-runtime-config.yaml
index 935c53f5692dc..7d5b5e2421085 100644
--- a/operator/internal/manifests/internal/config/loki-runtime-config.yaml
+++ b/operator/internal/manifests/internal/config/loki-runtime-config.yaml
@@ -54,6 +54,9 @@ overrides:
{{- if $spec.QueryLimits.CardinalityLimit }}
cardinality_limit: {{ $spec.QueryLimits.CardinalityLimit }}
{{- end }}
+ {{- if $spec.QueryLimits.MaxVolumeSeries }}
+ max_volume_series: {{ $spec.QueryLimits.MaxVolumeSeries }}
+ {{- end }}
{{- with $l.Blocked }}
blocked_queries:
{{- range $blockedQuery := . }}
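
Both templates above lean on Go text/template zero-value semantics: `{{- if $spec.QueryLimits.MaxVolumeSeries }}` emits the override only when the limit is non-zero. A minimal standalone sketch of this behaviour (the struct and field names here are assumptions for illustration, not operator code):

```go
package main

import (
	"os"
	"text/template"
)

// queryLimits mirrors the shape the template dereferences; the field name
// is an assumption for illustration only.
type queryLimits struct {
	MaxVolumeSeries int32
}

const overridesTmpl = `overrides:
  tenant-a:
{{- if .MaxVolumeSeries }}
    max_volume_series: {{ .MaxVolumeSeries }}
{{- end }}
`

func main() {
	t := template.Must(template.New("runtime").Parse(overridesTmpl))
	// A non-zero limit renders the max_volume_series line ...
	_ = t.Execute(os.Stdout, queryLimits{MaxVolumeSeries: 1000})
	// ... while the zero value makes the {{- if }} guard drop it entirely.
	_ = t.Execute(os.Stdout, queryLimits{MaxVolumeSeries: 0})
}
```
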
diff --git a/operator/internal/manifests/internal/sizes.go b/operator/internal/manifests/internal/sizes.go
index be5ac2eefb018..4962e4b3e762c 100644
--- a/operator/internal/manifests/internal/sizes.go
+++ b/operator/internal/manifests/internal/sizes.go
@@ -252,6 +252,7 @@ var StackSizeTable = map[lokiv1.LokiStackSizeType]lokiv1.LokiStackSpec{
MaxQuerySeries: 500,
QueryTimeout: "3m",
CardinalityLimit: 100000,
+ MaxVolumeSeries: 1000,
},
},
},
@@ -308,6 +309,7 @@ var StackSizeTable = map[lokiv1.LokiStackSizeType]lokiv1.LokiStackSpec{
MaxQuerySeries: 500,
QueryTimeout: "3m",
CardinalityLimit: 100000,
+ MaxVolumeSeries: 1000,
},
},
},
@@ -367,6 +369,7 @@ var StackSizeTable = map[lokiv1.LokiStackSizeType]lokiv1.LokiStackSpec{
MaxQuerySeries: 500,
QueryTimeout: "3m",
CardinalityLimit: 100000,
+ MaxVolumeSeries: 1000,
},
},
},
@@ -426,6 +429,7 @@ var StackSizeTable = map[lokiv1.LokiStackSizeType]lokiv1.LokiStackSpec{
MaxQuerySeries: 500,
QueryTimeout: "3m",
CardinalityLimit: 100000,
+ MaxVolumeSeries: 1000,
},
},
},
|
feat
|
Add support for the volume API (#13369)
|
cc3694eecddaab579d08328cdab78a7d8a7bd720
|
2024-05-29 16:42:51
|
George Robinson
|
feat: Add ingester_chunks_flush_failures_total (#12925)
| false
|
diff --git a/pkg/ingester/flush.go b/pkg/ingester/flush.go
index f9904ca8409e5..00aad05475495 100644
--- a/pkg/ingester/flush.go
+++ b/pkg/ingester/flush.go
@@ -372,6 +372,7 @@ func (i *Ingester) encodeChunk(ctx context.Context, ch *chunk.Chunk, desc *chunk
// chunk to have another opportunity to be flushed.
func (i *Ingester) flushChunk(ctx context.Context, ch *chunk.Chunk) error {
if err := i.store.Put(ctx, []chunk.Chunk{*ch}); err != nil {
+ i.metrics.chunksFlushFailures.Inc()
return fmt.Errorf("store put chunk: %w", err)
}
i.metrics.flushedChunksStats.Inc(1)
diff --git a/pkg/ingester/metrics.go b/pkg/ingester/metrics.go
index 8b005860555f1..756eba0ebea74 100644
--- a/pkg/ingester/metrics.go
+++ b/pkg/ingester/metrics.go
@@ -47,6 +47,7 @@ type ingesterMetrics struct {
chunkSizePerTenant *prometheus.CounterVec
chunkAge prometheus.Histogram
chunkEncodeTime prometheus.Histogram
+ chunksFlushFailures prometheus.Counter
chunksFlushedPerReason *prometheus.CounterVec
chunkLifespan prometheus.Histogram
flushedChunksStats *analytics.Counter
@@ -232,6 +233,11 @@ func newIngesterMetrics(r prometheus.Registerer, metricsNamespace string) *inges
// 10ms to 10s.
Buckets: prometheus.ExponentialBuckets(0.01, 4, 6),
}),
+ chunksFlushFailures: promauto.With(r).NewCounter(prometheus.CounterOpts{
+ Namespace: constants.Loki,
+ Name: "ingester_chunks_flush_failures_total",
+ Help: "Total number of flush failures.",
+ }),
chunksFlushedPerReason: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
Namespace: constants.Loki,
Name: "ingester_chunks_flushed_total",
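
The counter above follows the stock client_golang pattern; as a hedged sketch (standalone code, not Loki's ingester), this is how such a counter can be exercised and read back in a test via testutil:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	reg := prometheus.NewRegistry()
	flushFailures := prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: "loki",
		Name:      "ingester_chunks_flush_failures_total",
		Help:      "Total number of flush failures.",
	})
	reg.MustRegister(flushFailures)

	// flushChunk increments the counter before wrapping the store error,
	// so one failed Put shows up as exactly one increment.
	flushFailures.Inc()

	fmt.Println(testutil.ToFloat64(flushFailures)) // 1
}
```
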
|
feat
|
Add ingester_chunks_flush_failures_total (#12925)
|
79c45880cd32b22e6b4cc66e204ae77d93fec7ed
|
2021-03-25 02:25:39
|
Brett Jones
|
ci: add build targets for broker and operator (#9)
| false
|
diff --git a/.github/workflows/go.yaml b/.github/workflows/go.yaml
index b9a5fc8de382a..29cde1c45df33 100644
--- a/.github/workflows/go.yaml
+++ b/.github/workflows/go.yaml
@@ -29,6 +29,46 @@ jobs:
only-new-issues: true
args: --timeout=2m
+ build-controller:
+ name: Build Controller
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ go: ['1.16']
+ steps:
+ - name: Install make
+ run: sudo apt-get install make
+ - name: Set up Go 1.x
+ uses: actions/setup-go@v2
+ with:
+ go-version: ${{ matrix.go }}
+ id: go
+ - uses: actions/checkout@v2
+ - name: Build operator
+ run: |-
+ make manager
+
+ build-loki-broker:
+ name: Build Loki Broker
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ go: ['1.16']
+ steps:
+ - name: Install make
+ run: sudo apt-get install make
+ - name: Set up Go 1.x
+ uses: actions/setup-go@v2
+ with:
+ go-version: ${{ matrix.go }}
+ id: go
+ - uses: actions/checkout@v2
+ - name: Build loki-broker
+ run: |-
+ make bin/loki-broker
+
test:
name: test
runs-on: ubuntu-latest
diff --git a/Makefile b/Makefile
index 41b3eaeb2fd07..4874a3e5bc153 100644
--- a/Makefile
+++ b/Makefile
@@ -29,6 +29,8 @@ BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL)
# You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=<some-registry>/<project-name-bundle>:<tag>)
BUNDLE_IMG ?= quay.io/blockloop/loki-controller-bundle:$(VERSION)
+GO_FILES := $(shell find . -type f -name '*.go')
+
# Image URL to use all building/pushing image targets
IMG ?= quay.io/blockloop/loki-operator:latest
# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
@@ -151,5 +153,5 @@ bundle-build:
cli: bin/loki-broker
-bin/loki-broker:
+bin/loki-broker: $(GO_FILES) | generate lint
go build -o $@ ./cmd/loki-broker/
|
ci
|
add build targets for broker and operator (#9)
|
9191eafc735b1f80bf3a7520abd0670aea7fc398
|
2025-01-06 23:04:58
|
Louis Dutoit
|
fix(typo): Change "did received" to "did receive" in NOTES.txt (#15584)
| false
|
diff --git a/production/helm/loki/templates/NOTES.txt b/production/helm/loki/templates/NOTES.txt
index 622b1a8c26160..f770c5bc724e2 100644
--- a/production/helm/loki/templates/NOTES.txt
+++ b/production/helm/loki/templates/NOTES.txt
@@ -123,7 +123,7 @@ curl -H "Content-Type: application/json" -XPOST -s "http://127.0.0.1:3100/loki/a
{{- end}}
```
-Then verify that Loki did received the data using the following command:
+Then verify that Loki did receive the data using the following command:
```
curl "http://127.0.0.1:3100/loki/api/v1/query_range" --data-urlencode 'query={job="test"}' {{- if .Values.loki.auth_enabled }} -H X-Scope-OrgId:foo {{- end}} | jq .data.result
|
fix
|
Change "did received" to "did receive" in NOTES.txt (#15584)
|
3941767c6fba6b090c3ff6c23160d835c63e297b
|
2023-05-31 01:17:12
|
Dylan Guedes
|
loki: Introduce new `limited_log_push_errors`. (#9568)
| false
|
diff --git a/pkg/runtime/config.go b/pkg/runtime/config.go
index 110a9fce2a8df..86926cc714a16 100644
--- a/pkg/runtime/config.go
+++ b/pkg/runtime/config.go
@@ -4,6 +4,10 @@ type Config struct {
LogStreamCreation bool `yaml:"log_stream_creation"`
LogPushRequest bool `yaml:"log_push_request"`
LogPushRequestStreams bool `yaml:"log_push_request_streams"`
+
+ // LimitedLogPushErrors is to be implemented and will allow logging push failures at a controlled pace.
+ // TODO(dylanguedes): implement it.
+ LimitedLogPushErrors bool `yaml:"limited_log_push_errors"`
}
// TenantConfig is a function that returns configs for given tenant, or
@@ -54,3 +58,7 @@ func (o *TenantConfigs) LogPushRequest(userID string) bool {
func (o *TenantConfigs) LogPushRequestStreams(userID string) bool {
return o.getOverridesForUser(userID).LogPushRequestStreams
}
+
+func (o *TenantConfigs) LimitedLogPushErrors(userID string) bool {
+ return o.getOverridesForUser(userID).LimitedLogPushErrors
+}
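
The new accessor delegates to `getOverridesForUser`, the usual per-tenant fallback pattern; a minimal sketch of that pattern under assumed types (the map-based lookup here is hypothetical, not the real implementation):

```go
package main

import "fmt"

type Config struct {
	LimitedLogPushErrors bool
}

// TenantConfigs returns a tenant-specific Config when one exists and the
// default Config otherwise.
type TenantConfigs struct {
	defaults Config
	perUser  map[string]Config
}

func (o *TenantConfigs) getOverridesForUser(userID string) Config {
	if c, ok := o.perUser[userID]; ok {
		return c
	}
	return o.defaults
}

func main() {
	tc := &TenantConfigs{
		defaults: Config{LimitedLogPushErrors: false},
		perUser:  map[string]Config{"tenant-a": {LimitedLogPushErrors: true}},
	}
	fmt.Println(tc.getOverridesForUser("tenant-a").LimitedLogPushErrors) // true
	fmt.Println(tc.getOverridesForUser("tenant-b").LimitedLogPushErrors) // false
}
```
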
|
loki
|
Introduce new `limited_log_push_errors`. (#9568)
|
91974e0cfe82664f2e59fa336aa1e2064403e073
|
2021-02-01 16:15:27
|
Chance Zibolski
|
logcli: Fix handling of logcli query using --since/--from and --tail (#3270)
| false
|
diff --git a/pkg/logcli/client/client.go b/pkg/logcli/client/client.go
index ec8cd02effbd5..34bba5b0229e1 100644
--- a/pkg/logcli/client/client.go
+++ b/pkg/logcli/client/client.go
@@ -37,11 +37,11 @@ var (
// Client contains all the methods to query a Loki instance, it's an interface to allow multiple implementations.
type Client interface {
Query(queryStr string, limit int, time time.Time, direction logproto.Direction, quiet bool) (*loghttp.QueryResponse, error)
- QueryRange(queryStr string, limit int, from, through time.Time, direction logproto.Direction, step, interval time.Duration, quiet bool) (*loghttp.QueryResponse, error)
- ListLabelNames(quiet bool, from, through time.Time) (*loghttp.LabelResponse, error)
- ListLabelValues(name string, quiet bool, from, through time.Time) (*loghttp.LabelResponse, error)
- Series(matchers []string, from, through time.Time, quiet bool) (*loghttp.SeriesResponse, error)
- LiveTailQueryConn(queryStr string, delayFor int, limit int, from int64, quiet bool) (*websocket.Conn, error)
+ QueryRange(queryStr string, limit int, start, end time.Time, direction logproto.Direction, step, interval time.Duration, quiet bool) (*loghttp.QueryResponse, error)
+ ListLabelNames(quiet bool, start, end time.Time) (*loghttp.LabelResponse, error)
+ ListLabelValues(name string, quiet bool, start, end time.Time) (*loghttp.LabelResponse, error)
+ Series(matchers []string, start, end time.Time, quiet bool) (*loghttp.SeriesResponse, error)
+ LiveTailQueryConn(queryStr string, delayFor int, limit int, start int64, quiet bool) (*websocket.Conn, error)
GetOrgID() string
}
@@ -70,12 +70,12 @@ func (c *DefaultClient) Query(queryStr string, limit int, time time.Time, direct
// QueryRange uses the /api/v1/query_range endpoint to execute a range query
// excluding interfacer b/c it suggests taking the interface promql.Node instead of logproto.Direction b/c it happens to have a String() method
// nolint:interfacer
-func (c *DefaultClient) QueryRange(queryStr string, limit int, from, through time.Time, direction logproto.Direction, step, interval time.Duration, quiet bool) (*loghttp.QueryResponse, error) {
+func (c *DefaultClient) QueryRange(queryStr string, limit int, start, end time.Time, direction logproto.Direction, step, interval time.Duration, quiet bool) (*loghttp.QueryResponse, error) {
params := util.NewQueryStringBuilder()
params.SetString("query", queryStr)
params.SetInt32("limit", limit)
- params.SetInt("start", from.UnixNano())
- params.SetInt("end", through.UnixNano())
+ params.SetInt("start", start.UnixNano())
+ params.SetInt("end", end.UnixNano())
params.SetString("direction", direction.String())
// The step is optional, so we do set it only if provided,
@@ -92,11 +92,11 @@ func (c *DefaultClient) QueryRange(queryStr string, limit int, from, through tim
}
// ListLabelNames uses the /api/v1/label endpoint to list label names
-func (c *DefaultClient) ListLabelNames(quiet bool, from, through time.Time) (*loghttp.LabelResponse, error) {
+func (c *DefaultClient) ListLabelNames(quiet bool, start, end time.Time) (*loghttp.LabelResponse, error) {
var labelResponse loghttp.LabelResponse
params := util.NewQueryStringBuilder()
- params.SetInt("start", from.UnixNano())
- params.SetInt("end", through.UnixNano())
+ params.SetInt("start", start.UnixNano())
+ params.SetInt("end", end.UnixNano())
if err := c.doRequest(labelsPath, params.Encode(), quiet, &labelResponse); err != nil {
return nil, err
@@ -105,22 +105,22 @@ func (c *DefaultClient) ListLabelNames(quiet bool, from, through time.Time) (*lo
}
// ListLabelValues uses the /api/v1/label endpoint to list label values
-func (c *DefaultClient) ListLabelValues(name string, quiet bool, from, through time.Time) (*loghttp.LabelResponse, error) {
+func (c *DefaultClient) ListLabelValues(name string, quiet bool, start, end time.Time) (*loghttp.LabelResponse, error) {
path := fmt.Sprintf(labelValuesPath, url.PathEscape(name))
var labelResponse loghttp.LabelResponse
params := util.NewQueryStringBuilder()
- params.SetInt("start", from.UnixNano())
- params.SetInt("end", through.UnixNano())
+ params.SetInt("start", start.UnixNano())
+ params.SetInt("end", end.UnixNano())
if err := c.doRequest(path, params.Encode(), quiet, &labelResponse); err != nil {
return nil, err
}
return &labelResponse, nil
}
-func (c *DefaultClient) Series(matchers []string, from, through time.Time, quiet bool) (*loghttp.SeriesResponse, error) {
+func (c *DefaultClient) Series(matchers []string, start, end time.Time, quiet bool) (*loghttp.SeriesResponse, error) {
params := util.NewQueryStringBuilder()
- params.SetInt("start", from.UnixNano())
- params.SetInt("end", through.UnixNano())
+ params.SetInt("start", start.UnixNano())
+ params.SetInt("end", end.UnixNano())
params.SetStringArray("match", matchers)
var seriesResponse loghttp.SeriesResponse
@@ -131,12 +131,12 @@ func (c *DefaultClient) Series(matchers []string, from, through time.Time, quiet
}
// LiveTailQueryConn uses /api/prom/tail to set up a websocket connection and returns it
-func (c *DefaultClient) LiveTailQueryConn(queryStr string, delayFor int, limit int, from int64, quiet bool) (*websocket.Conn, error) {
+func (c *DefaultClient) LiveTailQueryConn(queryStr string, delayFor int, limit int, start int64, quiet bool) (*websocket.Conn, error) {
qsb := util.NewQueryStringBuilder()
qsb.SetString("query", queryStr)
qsb.SetInt("delay_for", int64(delayFor))
qsb.SetInt("limit", int64(limit))
- qsb.SetInt("from", from)
+ qsb.SetInt("start", start)
return c.wsConnect(tailPath, qsb.Encode(), quiet)
}
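
All of the renamed parameters feed the same query-string-builder pattern: typed setters over `url.Values`, with timestamps encoded as nanoseconds. A standalone sketch (a hypothetical builder, not logcli's `util` package):

```go
package main

import (
	"fmt"
	"net/url"
	"strconv"
	"time"
)

// queryStringBuilder is a hypothetical stand-in for the util helper used above.
type queryStringBuilder struct{ values url.Values }

func newQueryStringBuilder() *queryStringBuilder {
	return &queryStringBuilder{values: url.Values{}}
}

func (b *queryStringBuilder) SetString(k, v string) { b.values.Set(k, v) }
func (b *queryStringBuilder) SetInt(k string, v int64) {
	b.values.Set(k, strconv.FormatInt(v, 10))
}
func (b *queryStringBuilder) Encode() string { return b.values.Encode() }

func main() {
	start := time.Now().Add(-time.Hour)
	end := time.Now()

	b := newQueryStringBuilder()
	b.SetString("query", `{job="test"}`)
	b.SetInt("start", start.UnixNano()) // start/end travel as nanosecond ints
	b.SetInt("end", end.UnixNano())
	fmt.Println(b.Encode())
}
```
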
|
logcli
|
Fix handling of logcli query using --since/--from and --tail (#3270)
|
ba7550a0345e9a90052efdd33dad5669386a0c97
|
2025-03-18 23:44:52
|
George Robinson
|
fix: fix a panic in ServeHTTP where stream was nil (#16818)
| false
|
diff --git a/pkg/limits/frontend/http.go b/pkg/limits/frontend/http.go
index 49cecaa8f454c..5d19a1c63ecf3 100644
--- a/pkg/limits/frontend/http.go
+++ b/pkg/limits/frontend/http.go
@@ -33,7 +33,7 @@ func (f *Frontend) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
- streams := make([]*logproto.StreamMetadata, len(req.StreamHashes))
+ streams := make([]*logproto.StreamMetadata, 0, len(req.StreamHashes))
for _, streamHash := range req.StreamHashes {
streams = append(streams, &logproto.StreamMetadata{
StreamHash: streamHash,
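
The one-character fix above addresses a classic Go pitfall: `make([]T, n)` allocates n zero values, so appending on top grows the slice past n and leaves nil pointers at the front that panic when dereferenced. A minimal standalone reproduction (assumed values, not Loki code):

```go
package main

import "fmt"

func main() {
	hashes := []uint64{1, 2, 3}

	buggy := make([]*uint64, len(hashes)) // length 3, all nil
	for i := range hashes {
		buggy = append(buggy, &hashes[i]) // length becomes 6; first 3 stay nil
	}
	fmt.Println(len(buggy), buggy[0] == nil) // 6 true -> later nil dereference

	fixed := make([]*uint64, 0, len(hashes)) // length 0, capacity 3
	for i := range hashes {
		fixed = append(fixed, &hashes[i])
	}
	fmt.Println(len(fixed), *fixed[0]) // 3 1
}
```
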
diff --git a/pkg/limits/frontend/http_test.go b/pkg/limits/frontend/http_test.go
new file mode 100644
index 0000000000000..9756bfd3ef0ec
--- /dev/null
+++ b/pkg/limits/frontend/http_test.go
@@ -0,0 +1,109 @@
+package frontend
+
+import (
+ "bytes"
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/grafana/dskit/limiter"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/v3/pkg/logproto"
+)
+
+func TestFrontend_ServeHTTP(t *testing.T) {
+ tests := []struct {
+ name string
+ limits Limits
+ expectedGetStreamUsageRequest *GetStreamUsageRequest
+ getStreamUsageResponses []GetStreamUsageResponse
+ request exceedsLimitsRequest
+ expected exceedsLimitsResponse
+ }{{
+ name: "within limits",
+ limits: &mockLimits{
+ maxGlobalStreams: 1,
+ ingestionRate: 100,
+ },
+ expectedGetStreamUsageRequest: &GetStreamUsageRequest{
+ Tenant: "test",
+ StreamHashes: []uint64{0x1},
+ },
+ getStreamUsageResponses: []GetStreamUsageResponse{{
+ Response: &logproto.GetStreamUsageResponse{
+ Tenant: "test",
+ ActiveStreams: 1,
+ Rate: 10,
+ },
+ }},
+ request: exceedsLimitsRequest{
+ TenantID: "test",
+ StreamHashes: []uint64{0x1},
+ },
+ // expected should be default value (no rejected streams).
+ }, {
+ name: "exceeds limits",
+ limits: &mockLimits{
+ maxGlobalStreams: 1,
+ ingestionRate: 100,
+ },
+ expectedGetStreamUsageRequest: &GetStreamUsageRequest{
+ Tenant: "test",
+ StreamHashes: []uint64{0x1},
+ },
+ getStreamUsageResponses: []GetStreamUsageResponse{{
+ Response: &logproto.GetStreamUsageResponse{
+ Tenant: "test",
+ ActiveStreams: 2,
+ Rate: 200,
+ },
+ }},
+ request: exceedsLimitsRequest{
+ TenantID: "test",
+ StreamHashes: []uint64{0x1},
+ },
+ expected: exceedsLimitsResponse{
+ RejectedStreams: []*logproto.RejectedStream{{
+ StreamHash: 0x1,
+ Reason: "exceeds_rate_limit",
+ }},
+ },
+ }}
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ f := Frontend{
+ limits: test.limits,
+ rateLimiter: limiter.NewRateLimiter(newRateLimitsAdapter(test.limits), time.Second),
+ streamUsage: &mockStreamUsageGatherer{
+ t: t,
+ expectedRequest: test.expectedGetStreamUsageRequest,
+ responses: test.getStreamUsageResponses,
+ },
+ metrics: newMetrics(prometheus.NewRegistry()),
+ }
+ ts := httptest.NewServer(&f)
+ defer ts.Close()
+
+ b, err := json.Marshal(test.request)
+ require.NoError(t, err)
+
+ resp, err := http.Post(ts.URL, "application/json", bytes.NewReader(b))
+ require.NoError(t, err)
+ require.Equal(t, http.StatusOK, resp.StatusCode)
+
+ defer resp.Body.Close()
+ b, err = io.ReadAll(resp.Body)
+ require.NoError(t, err)
+
+ var actual exceedsLimitsResponse
+ require.NoError(t, json.Unmarshal(b, &actual))
+ require.Equal(t, test.expected, actual)
+ })
+ }
+}
diff --git a/pkg/limits/frontend/mock_test.go b/pkg/limits/frontend/mock_test.go
index 41a7ed74bc252..07cba88c9f51d 100644
--- a/pkg/limits/frontend/mock_test.go
+++ b/pkg/limits/frontend/mock_test.go
@@ -16,6 +16,22 @@ import (
"github.com/grafana/loki/v3/pkg/logproto"
)
+// mockStreamUsageGatherer mocks a StreamUsageGatherer. It avoids having to
+// set up a mock ring to test the frontend.
+type mockStreamUsageGatherer struct {
+ t *testing.T
+
+ expectedRequest *GetStreamUsageRequest
+ responses []GetStreamUsageResponse
+}
+
+func (g *mockStreamUsageGatherer) GetStreamUsage(_ context.Context, r GetStreamUsageRequest) ([]GetStreamUsageResponse, error) {
+ if expected := g.expectedRequest; expected != nil {
+ require.Equal(g.t, *expected, r)
+ }
+ return g.responses, nil
+}
+
// mockIngestLimitsClient mocks logproto.IngestLimitsClient.
type mockIngestLimitsClient struct {
logproto.IngestLimitsClient
|
fix
|
fix a panic in ServeHTTP where stream was nil (#16818)
|
40cf074fba0ed0016a8ca64bed554f3d628e7ec6
|
2025-02-11 00:53:52
|
Katuya Kawakami
|
fix(operator): Fix minimum available ingesters for 1x.pico size (#16035)
| false
|
diff --git a/operator/internal/manifests/internal/sizes.go b/operator/internal/manifests/internal/sizes.go
index 28ffe4fb3c319..e4a5c4d10f5f1 100644
--- a/operator/internal/manifests/internal/sizes.go
+++ b/operator/internal/manifests/internal/sizes.go
@@ -68,7 +68,7 @@ var ResourceRequirementsTable = map[lokiv1.LokiStackSizeType]ComponentResources{
corev1.ResourceCPU: resource.MustParse("500m"),
corev1.ResourceMemory: resource.MustParse("3Gi"),
},
- PDBMinAvailable: 1,
+ PDBMinAvailable: 2,
},
Distributor: corev1.ResourceRequirements{
Requests: map[corev1.ResourceName]resource.Quantity{
|
fix
|
Fix minimum available ingesters for 1x.pico size (#16035)
|
d1e0fa75976745b4277033d4500be089e4ced404
|
2025-02-14 16:54:47
|
Ashwanth
|
docs: release notes for v3.4.2 (#16278)
| false
|
diff --git a/docs/sources/release-notes/v3-4.md b/docs/sources/release-notes/v3-4.md
index e017c8a97ce33..10db501a8b8f2 100644
--- a/docs/sources/release-notes/v3-4.md
+++ b/docs/sources/release-notes/v3-4.md
@@ -89,6 +89,10 @@ For important upgrade guidance, refer to the [Upgrade Guide](https://grafana.com
## Bug fixes
+### 3.4.2 (2025-02-14)
+- **blooms:** Initialize bloom gateway client only once (backport release-3.4.x) ([#16268](https://github.com/grafana/loki/issues/16268)) ([1b9829b](https://github.com/grafana/loki/commit/1b9829b48935ffc9f99d741cab11864ac9d0f35c))
+- **ci:** do not disable CGO in Makefile (backport release-3.4.x) ([#16272](https://github.com/grafana/loki/issues/16272)) ([4fa045d](https://github.com/grafana/loki/commit/4fa045d3807f4de0543b06e6ce79b89afb741adc))
+
### 3.4.1 (2025-02-12)
- **docker:** Update the build info in docker images ([#16225](https://github.com/grafana/loki/issues/16225)) ([4484080](https://github.com/grafana/loki/commit/4484080c7662817e945c0276ba15d05315e93194)).
|
docs
|
release notes for v3.4.2 (#16278)
|
515e13cc6c92b08968bc87e220b8bca64683fd05
|
2024-07-18 01:50:08
|
Quentin Bisson
|
fix: incorrect pod matcher for compactor in mixin when using ssd mode (#12846)
| false
|
diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-deletion.json b/production/loki-mixin-compiled-ssd/dashboards/loki-deletion.json
index 0718507d941fd..e630e9b9f5c0b 100644
--- a/production/loki-mixin-compiled-ssd/dashboards/loki-deletion.json
+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-deletion.json
@@ -379,7 +379,7 @@
"span": 4,
"targets": [
{
- "expr": "node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki.*|enterprise-logs)-backend.*\"}",
+ "expr": "node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{cluster=~\"$cluster\", namespace=~\"$namespace\", pod=~\"(compactor|(loki.*|enterprise-logs)-backend.*|loki-single-binary)\"}",
"format": "time_series",
"legendFormat": "{{pod}}",
"legendLink": null
@@ -426,7 +426,7 @@
"span": 4,
"targets": [
{
- "expr": "go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\"} / 1024 / 1024 ",
+ "expr": "go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", pod=~\"(compactor|(loki.*|enterprise-logs)-backend.*|loki-single-binary)\"} / 1024 / 1024 ",
"format": "time_series",
"legendFormat": " {{pod}} ",
"legendLink": null
@@ -579,7 +579,7 @@
"span": 6,
"targets": [
{
- "expr": "sum(rate(loki_compactor_deleted_lines{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki.*|enterprise-logs)-backend.*\"}[$__rate_interval])) by (user)",
+ "expr": "sum(rate(loki_compactor_deleted_lines{cluster=~\"$cluster\", namespace=~\"$namespace\", pod=~\"(compactor|(loki.*|enterprise-logs)-backend.*|loki-single-binary)\"}[$__rate_interval])) by (user)",
"format": "time_series",
"legendFormat": "{{user}}",
"legendLink": null
@@ -606,7 +606,7 @@
"span": 6,
"targets": [
{
- "expr": "{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki.*|enterprise-logs)-backend.*\"} |~ \"Started processing delete request|delete request for user marked as processed\" | logfmt | line_format \"{{.ts}} user={{.user}} delete_request_id={{.delete_request_id}} msg={{.msg}}\" ",
+ "expr": "{cluster=~\"$cluster\", namespace=~\"$namespace\", pod=~\"(compactor|(loki.*|enterprise-logs)-backend.*|loki-single-binary)\"} |~ \"Started processing delete request|delete request for user marked as processed\" | logfmt | line_format \"{{.ts}} user={{.user}} delete_request_id={{.delete_request_id}} msg={{.msg}}\" ",
"refId": "A"
}
],
@@ -619,7 +619,7 @@
"span": 6,
"targets": [
{
- "expr": "{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki.*|enterprise-logs)-backend.*\"} |~ \"delete request for user added\" | logfmt | line_format \"{{.ts}} user={{.user}} query='{{.query}}'\"",
+ "expr": "{cluster=~\"$cluster\", namespace=~\"$namespace\", pod=~\"(compactor|(loki.*|enterprise-logs)-backend.*|loki-single-binary)\"} |~ \"delete request for user added\" | logfmt | line_format \"{{.ts}} user={{.user}} query='{{.query}}'\"",
"refId": "A"
}
],
diff --git a/production/loki-mixin-compiled/dashboards/loki-deletion.json b/production/loki-mixin-compiled/dashboards/loki-deletion.json
index 7b048f729e2b3..e630e9b9f5c0b 100644
--- a/production/loki-mixin-compiled/dashboards/loki-deletion.json
+++ b/production/loki-mixin-compiled/dashboards/loki-deletion.json
@@ -379,7 +379,7 @@
"span": 4,
"targets": [
{
- "expr": "node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\"}",
+ "expr": "node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{cluster=~\"$cluster\", namespace=~\"$namespace\", pod=~\"(compactor|(loki.*|enterprise-logs)-backend.*|loki-single-binary)\"}",
"format": "time_series",
"legendFormat": "{{pod}}",
"legendLink": null
@@ -426,7 +426,7 @@
"span": 4,
"targets": [
{
- "expr": "go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\"} / 1024 / 1024 ",
+ "expr": "go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", pod=~\"(compactor|(loki.*|enterprise-logs)-backend.*|loki-single-binary)\"} / 1024 / 1024 ",
"format": "time_series",
"legendFormat": " {{pod}} ",
"legendLink": null
@@ -579,7 +579,7 @@
"span": 6,
"targets": [
{
- "expr": "sum(rate(loki_compactor_deleted_lines{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\"}[$__rate_interval])) by (user)",
+ "expr": "sum(rate(loki_compactor_deleted_lines{cluster=~\"$cluster\", namespace=~\"$namespace\", pod=~\"(compactor|(loki.*|enterprise-logs)-backend.*|loki-single-binary)\"}[$__rate_interval])) by (user)",
"format": "time_series",
"legendFormat": "{{user}}",
"legendLink": null
@@ -606,7 +606,7 @@
"span": 6,
"targets": [
{
- "expr": "{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\"} |~ \"Started processing delete request|delete request for user marked as processed\" | logfmt | line_format \"{{.ts}} user={{.user}} delete_request_id={{.delete_request_id}} msg={{.msg}}\" ",
+ "expr": "{cluster=~\"$cluster\", namespace=~\"$namespace\", pod=~\"(compactor|(loki.*|enterprise-logs)-backend.*|loki-single-binary)\"} |~ \"Started processing delete request|delete request for user marked as processed\" | logfmt | line_format \"{{.ts}} user={{.user}} delete_request_id={{.delete_request_id}} msg={{.msg}}\" ",
"refId": "A"
}
],
@@ -619,7 +619,7 @@
"span": 6,
"targets": [
{
- "expr": "{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\"} |~ \"delete request for user added\" | logfmt | line_format \"{{.ts}} user={{.user}} query='{{.query}}'\"",
+ "expr": "{cluster=~\"$cluster\", namespace=~\"$namespace\", pod=~\"(compactor|(loki.*|enterprise-logs)-backend.*|loki-single-binary)\"} |~ \"delete request for user added\" | logfmt | line_format \"{{.ts}} user={{.user}} query='{{.query}}'\"",
"refId": "A"
}
],
diff --git a/production/loki-mixin/dashboards/loki-deletion.libsonnet b/production/loki-mixin/dashboards/loki-deletion.libsonnet
index a1c50ecfa6911..a4c4ae779e701 100644
--- a/production/loki-mixin/dashboards/loki-deletion.libsonnet
+++ b/production/loki-mixin/dashboards/loki-deletion.libsonnet
@@ -2,9 +2,7 @@ local g = import 'grafana-builder/grafana.libsonnet';
local utils = import 'mixin-utils/utils.libsonnet';
(import 'dashboard-utils.libsonnet') {
- local compactor_matcher = if $._config.meta_monitoring.enabled
- then 'pod=~"(compactor|%s-backend.*|loki-single-binary)"' % $._config.ssd.pod_prefix_matcher
- else if $._config.ssd.enabled then 'container="loki", pod=~"%s-backend.*"' % $._config.ssd.pod_prefix_matcher else 'container="compactor"',
+ local compactor_matcher = 'pod=~"(compactor|%s-backend.*|loki-single-binary)"' % $._config.ssd.pod_prefix_matcher,
grafanaDashboards+::
{
'loki-deletion.json':
@@ -50,7 +48,7 @@ local utils = import 'mixin-utils/utils.libsonnet';
)
.addPanel(
$.newQueryPanel('Compactor memory usage (MiB)') +
- g.queryPanel('go_memstats_heap_inuse_bytes{%s, container="compactor"} / 1024 / 1024 ' % $.namespaceMatcher(), ' {{pod}} '),
+ g.queryPanel('go_memstats_heap_inuse_bytes{%s, %s} / 1024 / 1024 ' % [$.namespaceMatcher(), compactor_matcher], ' {{pod}} '),
)
.addPanel(
$.newQueryPanel('Compaction run duration (seconds)') +
|
fix
|
incorrect pod matcher for compactor in mixin when using ssd mode (#12846)
|
9691e94f540f75fc78e75d11097c351250e7ccc9
|
2025-02-25 02:55:56
|
renovate[bot]
|
fix(deps): update dependency react-code-block to v1.1.3 (main) (#16428)
| false
|
diff --git a/pkg/ui/frontend/package-lock.json b/pkg/ui/frontend/package-lock.json
index f765cfd3a0d62..ddbb6cc3773b1 100644
--- a/pkg/ui/frontend/package-lock.json
+++ b/pkg/ui/frontend/package-lock.json
@@ -5614,14 +5614,14 @@
}
},
"node_modules/react-code-block": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/react-code-block/-/react-code-block-1.1.1.tgz",
- "integrity": "sha512-32FWIZe5YBN7if9VP4+R2IFOWCzko2/OBGoTZE3Wjn9G0W8kMhN2CvwXcCKjw6DCKLwtrVmoCQCywPAlV+P9pg==",
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/react-code-block/-/react-code-block-1.1.3.tgz",
+ "integrity": "sha512-XfSb9BkJWt4j7FvcVMmgQxIs61g81R0y0bxoemrVPo6/v0kS61PL3S1MWWtHl2g67aRPJeoTqPa4D91v/DzMdQ==",
"license": "MIT",
"peerDependencies": {
"prism-react-renderer": "^2",
- "react": "^18",
- "react-dom": "^18"
+ "react": ">=18.0.0",
+ "react-dom": ">=18.0.0"
}
},
"node_modules/react-datepicker": {
|
fix
|
update dependency react-code-block to v1.1.3 (main) (#16428)
|
38ee57c7d56756c02ff559e24f9cc0a29fd7c4f3
|
2020-06-01 21:05:27
|
Aditya C S
|
feature: Replace stage in pipeline (#2060)
| false
|
diff --git a/docs/clients/promtail/stages/replace.md b/docs/clients/promtail/stages/replace.md
new file mode 100644
index 0000000000000..ba2ea3b89a4d2
--- /dev/null
+++ b/docs/clients/promtail/stages/replace.md
@@ -0,0 +1,196 @@
+# `replace` stage
+
+The `replace` stage is a parsing stage that parses a log line using a regular
+expression and replaces matched capture groups in the log line. Named capture groups in the regex support adding data into the
+extracted map.
+
+## Schema
+
+```yaml
+replace:
+ # The RE2 regular expression. Each named capture group will be added to extracted.
+ # Each capture group and named capture group will be replaced with the value given in `replace`
+ expression: <string>
+
+ # Name from extracted data to parse. If empty, uses the log message.
+ # The replaced value will be assigned back to soure key
+ # The replaced value will be assigned back to the source key
+
+ # Value with which the captured groups will be replaced. Every captured group, named
+ # or not, is replaced with this value, producing the new log line
+ [replace: <string>]
+```
+
+`expression` needs to be a [Go RE2 regex
+string](https://github.com/google/re2/wiki/Syntax). Every named capture group `(?P<name>re)`
+will be set into the `extracted` map. The name of the capture group will be used as the key in the
+extracted map.
+
+Because of how YAML treats backslashes in double-quoted strings, note that all
+backslashes in a regex expression must be escaped when using double quotes. For
+example, all of these are valid:
+
+- `expression: \w*`
+- `expression: '\w*'`
+- `expression: "\\w*"`
+
+But these are not:
+
+- `expression: \\w*` (only escape backslashes when using double quotes)
+- `expression: '\\w*'` (only escape backslashes when using double quotes)
+- `expression: "\w*"` (backslash must be escaped)
+
+## Example
+
+### Without `source`
+
+Given the pipeline:
+
+```yaml
+- replace:
+ expression: "password (\\S+)"
+ replace: "****"
+```
+
+And the log line:
+
+```
+2019-01-01T01:00:00.000000001Z stderr P i'm a log message who has sensitive information with password xyz!
+```
+
+The log line becomes
+
+```
+2019-01-01T01:00:00.000000001Z stderr P i'm a log message who has sensitive information with password ****!
+```
+
+
+### With `source`
+
+Given the pipeline:
+
+```yaml
+- json:
+ expressions:
+ level:
+ msg:
+- replace:
+ expression: "\\S+ - \"POST (\\S+) .*"
+ source: "msg"
+ replace: "/loki/api/v1/push"
+```
+
+And the log line:
+
+```
+{"time":"2019-01-01T01:00:00.000000001Z", "level": "info", "msg":"11.11.11.11 - "POST /loki/api/push/ HTTP/1.1" 200 932 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6"}
+```
+
+The first stage would add the following key-value pairs into the `extracted`
+map:
+
+- `time`: `2019-01-01T01:00:00.000000001Z`
+- `level`: `info`
+- `msg`: `11.11.11.11 - "POST /loki/api/push/ HTTP/1.1" 200 932 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6"`
+
+The replace stage would then parse the value for `msg` in the extracted map
+and replace it. `msg` in the extracted map will now become
+
+- `msg`: `11.11.11.11 - "POST /loki/api/v1/push/ HTTP/1.1" 200 932 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6"`
+
+### With `replace` value in `template` format
+
+Given the pipeline:
+
+```yaml
+- replace:
+ expression: "^(\\S+) (\\S+) (\\S+) \\[([\\w:/]+\\s[+\\-]\\d{4})\\] \"(\\S+)\\s?(\\S+)?\\s?(\\S+)?\" (\\d{3}|-) (\\d+|-)\\s?\"?([^\"]*)\"?\\s?\"?([^\"]*)?\"?$"
+ replace: '{{ if eq .Value "200" }}{{ Replace .Value "200" "HttpStatusOk" -1 }}{{ else }}{{ .Value | ToUpper }}{{ end }}'
+```
+
+And the log line:
+
+```
+11.11.11.11 - frank [25/Jan/2000:14:00:01 -0500] "GET /1986.js HTTP/1.1" 200 932 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6"
+```
+
+The replace stage parses the log line and, if the captured group has the value `200`, replaces it with `HttpStatusOk`.
+
+The log line would become
+
+```
+11.11.11.11 - frank [25/Jan/2000:14:00:01 -0500] "GET /1986.js HTTP/1.1" HttpStatusOk 932 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6"
+```
+
+### `replace` with named captured group
+
+Given the pipeline:
+
+```yaml
+- replace:
+ expression: "^(?P<ip>\\S+) (?P<identd>\\S+) (?P<user>\\S+) \\[(?P<timestamp>[\\w:/]+\\s[+\\-]\\d{4})\\] \"(?P<action>\\S+)\\s?(?P<path>\\S+)?\\s?(?P<protocol>\\S+)?\" (?P<status>\\d{3}|-) (?P<size>\\d+|-)\\s?\"?(?P<referer>[^\"]*)\"?\\s?\"?(?P<useragent>[^\"]*)?\"?$"
+ replace: '{{ .Value | ToUpper }}'
+```
+
+And the log line:
+
+```
+11.11.11.11 - frank [25/Jan/2000:14:00:01 -0500] "GET /1986.js HTTP/1.1" 200 932 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6"
+```
+
+The replace stage parses the log line and converts the value of every named captured group to upper case. The
+named captured groups will be extracted as
+
+- `ip`: `11.11.11.11`
+- `identd`: `-`
+- `user`: `FRANK`
+- `timestamp`: `25/JAN/2000:14:00:01 -0500`
+- `action`: `GET`
+- `path`: `/1986.JS`
+- `protocol`: `HTTP/1.1`
+- `status`: `200`
+- `size`: `932`
+- `referer`: `-`
+- `useragent`: `MOZILLA/5.0 (WINDOWS; U; WINDOWS NT 5.1; DE; RV:1.9.1.7) GECKO/20091221 FIREFOX/3.5.7 GTB6"`
+
+The log line would become
+
+```
+11.11.11.11 - FRANK [25/JAN/2000:14:00:01 -0500] "GET /1986.JS HTTP/1.1" 200 932 "-" "MOZILLA/5.0 (WINDOWS; U; WINDOWS NT 5.1; DE; RV:1.9.1.7) GECKO/20091221 FIREFOX/3.5.7 GTB6"
+```
+
+### `replace` with both named captured group and only captured group
+
+Given the pipeline:
+
+```yaml
+- replace:
+ expression: "^(?P<ip>\\S+) (?P<identd>\\S+) (\\S+) \\[(?P<timestamp>[\\w:/]+\\s[+\\-]\\d{4})\\] \"(?P<action>\\S+)\\s?(?P<path>\\S+)?\\s?(?P<protocol>\\S+)?\" (?P<status>\\d{3}|-) (?P<size>\\d+|-)\\s?\"?(?P<referer>[^\"]*)\"?\\s?\"?(?P<useragent>[^\"]*)?\"?$"
+ replace: '{{ .Value | ToUpper }}'
+```
+
+And the log line:
+
+```
+11.11.11.11 - frank [25/Jan/2000:14:00:01 -0500] "GET /1986.js HTTP/1.1" 200 932 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6"
+```
+
+The replace stage parses the log line and converts the value of every named captured group to upper case. The
+named captured groups will be extracted as shown below. Observe here that `user` is not extracted since it was captured by the plain group `(\\S+)` rather than a named captured group such as `(?P<user>\\S+)`
+
+- `ip`: `11.11.11.11`
+- `identd`: `-`
+- `timestamp`: `25/JAN/2000:14:00:01 -0500`
+- `action`: `GET`
+- `path`: `/1986.JS`
+- `protocol`: `HTTP/1.1`
+- `status`: `200`
+- `size`: `932`
+- `referer`: `-`
+- `useragent`: `MOZILLA/5.0 (WINDOWS; U; WINDOWS NT 5.1; DE; RV:1.9.1.7) GECKO/20091221 FIREFOX/3.5.7 GTB6"`
+
+The log line would become
+
+```
+11.11.11.11 - FRANK [25/JAN/2000:14:00:01 -0500] "GET /1986.JS HTTP/1.1" 200 932 "-" "MOZILLA/5.0 (WINDOWS; U; WINDOWS NT 5.1; DE; RV:1.9.1.7) GECKO/20091221 FIREFOX/3.5.7 GTB6"
+```
diff --git a/pkg/logentry/stages/replace.go b/pkg/logentry/stages/replace.go
new file mode 100644
index 0000000000000..97db956fc7be7
--- /dev/null
+++ b/pkg/logentry/stages/replace.go
@@ -0,0 +1,218 @@
+package stages
+
+import (
+ "bytes"
+ "reflect"
+ "regexp"
+ "text/template"
+ "time"
+
+ "github.com/go-kit/kit/log"
+ "github.com/go-kit/kit/log/level"
+ "github.com/mitchellh/mapstructure"
+ "github.com/pkg/errors"
+ "github.com/prometheus/common/model"
+)
+
+// Config Errors
+const (
+ ErrEmptyReplaceStageConfig = "empty replace stage configuration"
+ ErrEmptyReplaceStageSource = "empty source in replace stage"
+)
+
+// ReplaceConfig contains a replaceStage configuration
+type ReplaceConfig struct {
+ Expression string `mapstructure:"expression"`
+ Source *string `mapstructure:"source"`
+ Replace string `mapstructure:"replace"`
+}
+
+// validateReplaceConfig validates the config and returns the compiled regex
+func validateReplaceConfig(c *ReplaceConfig) (*regexp.Regexp, error) {
+ if c == nil {
+ return nil, errors.New(ErrEmptyReplaceStageConfig)
+ }
+
+ if c.Expression == "" {
+ return nil, errors.New(ErrExpressionRequired)
+ }
+
+ if c.Source != nil && *c.Source == "" {
+ return nil, errors.New(ErrEmptyReplaceStageSource)
+ }
+
+ if c.Replace == "" {
+ return nil, errors.New(ErrEmptyReplaceStageConfig)
+ }
+
+ expr, err := regexp.Compile(c.Expression)
+ if err != nil {
+ return nil, errors.Wrap(err, ErrCouldNotCompileRegex)
+ }
+ return expr, nil
+}
+
+// replaceStage rewrites the log line or extracted data using a regular expression
+type replaceStage struct {
+ cfg *ReplaceConfig
+ expression *regexp.Regexp
+ logger log.Logger
+}
+
+// newReplaceStage creates a new replaceStage
+func newReplaceStage(logger log.Logger, config interface{}) (Stage, error) {
+ cfg, err := parseReplaceConfig(config)
+ if err != nil {
+ return nil, err
+ }
+ expression, err := validateReplaceConfig(cfg)
+ if err != nil {
+ return nil, err
+ }
+
+ return &replaceStage{
+ cfg: cfg,
+ expression: expression,
+ logger: log.With(logger, "component", "stage", "type", "replace"),
+ }, nil
+}
+
+// parseReplaceConfig processes an incoming configuration into a ReplaceConfig
+func parseReplaceConfig(config interface{}) (*ReplaceConfig, error) {
+ cfg := &ReplaceConfig{}
+ err := mapstructure.Decode(config, cfg)
+ if err != nil {
+ return nil, err
+ }
+ return cfg, nil
+}
+
+// Process implements Stage
+func (r *replaceStage) Process(labels model.LabelSet, extracted map[string]interface{}, t *time.Time, entry *string) {
+ // If a source key is provided, the replace stage should process it
+ // from the extracted map, otherwise should fallback to the entry
+ input := entry
+
+ if r.cfg.Source != nil {
+ if _, ok := extracted[*r.cfg.Source]; !ok {
+ if Debug {
+ level.Debug(r.logger).Log("msg", "source does not exist in the set of extracted values", "source", *r.cfg.Source)
+ }
+ return
+ }
+
+ value, err := getString(extracted[*r.cfg.Source])
+ if err != nil {
+ if Debug {
+ level.Debug(r.logger).Log("msg", "failed to convert source value to string", "source", *r.cfg.Source, "err", err, "type", reflect.TypeOf(extracted[*r.cfg.Source]))
+ }
+ return
+ }
+
+ input = &value
+ }
+
+ if input == nil {
+ if Debug {
+ level.Debug(r.logger).Log("msg", "cannot parse a nil entry")
+ }
+ return
+ }
+
+ // Get string of matched captured groups. We will use this to extract all named captured groups
+ match := r.expression.FindStringSubmatch(*input)
+
+ matchAllIndex := r.expression.FindAllStringSubmatchIndex(*input, -1)
+
+ if matchAllIndex == nil {
+ if Debug {
+ level.Debug(r.logger).Log("msg", "regex did not match", "input", *input, "regex", r.expression)
+ }
+ return
+ }
+
+ // All extracted values will be available for templating
+ td := r.getTemplateData(extracted)
+
+ // Initialize the template with the "replace" string defined by user
+ templ, err := template.New("pipeline_template").Funcs(functionMap).Parse(r.cfg.Replace)
+ if err != nil {
+ if Debug {
+ level.Debug(r.logger).Log("msg", "template initialization error", "err", err)
+ }
+ return
+ }
+
+ result, capturedMap, err := r.getReplacedEntry(matchAllIndex, *input, td, templ)
+ if err != nil {
+ if Debug {
+ level.Debug(r.logger).Log("msg", "failed to execute template on extracted value", "err", err)
+ }
+ return
+ }
+
+ if r.cfg.Source != nil {
+ extracted[*r.cfg.Source] = result
+ } else {
+ *entry = result
+ }
+
+ // All the named captured group will be extracted
+ for i, name := range r.expression.SubexpNames() {
+ if i != 0 && name != "" {
+ if v, ok := capturedMap[match[i]]; ok {
+ extracted[name] = v
+ }
+ }
+ }
+}
+
+func (r *replaceStage) getReplacedEntry(matchAllIndex [][]int, input string, td map[string]string, templ *template.Template) (string, map[string]string, error) {
+ var result string
+ previousInputEndIndex := 0
+ capturedMap := make(map[string]string)
+
+ // For a simple string like `11.11.11.11 - frank 12.12.12.12 - frank`
+ // if the regex is "(\\d{2}.\\d{2}.\\d{2}.\\d{2}) - (\\S+)"
+ // FindAllStringSubmatchIndex would return [[0 19 0 11 14 19] [20 39 20 31 34 39]].
+ // Each inner array's first two values will be the start and end index of the entire
+ // matched string and the next values will be start and end index of the matched
+ // captured group. Here 0-19 is "11.11.11.11 - frank", 0-11 is "11.11.11.11" and
+ // 14-19 is "frank". So, we advance by 2 indexes to get the next captured group
+ for _, matchIndex := range matchAllIndex {
+ for i := 2; i < len(matchIndex); i += 2 {
+ capturedString := input[matchIndex[i]:matchIndex[i+1]]
+ buf := &bytes.Buffer{}
+ td["Value"] = capturedString
+ err := templ.Execute(buf, td)
+ if err != nil {
+ return "", nil, err
+ }
+ st := buf.String()
+ result += input[previousInputEndIndex:matchIndex[i]] + st
+ previousInputEndIndex = matchIndex[i+1]
+ capturedMap[capturedString] = st
+ }
+ }
+ return result + input[previousInputEndIndex:], capturedMap, nil
+}
+
+func (r *replaceStage) getTemplateData(extracted map[string]interface{}) map[string]string {
+ td := make(map[string]string)
+ for k, v := range extracted {
+ s, err := getString(v)
+ if err != nil {
+ if Debug {
+ level.Debug(r.logger).Log("msg", "extracted value could not be converted to a string", "err", err, "type", reflect.TypeOf(v))
+ }
+ continue
+ }
+ td[k] = s
+ }
+ return td
+}
+
+// Name implements Stage
+func (r *replaceStage) Name() string {
+ return StageTypeReplace
+}
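
The index arithmetic in `getReplacedEntry` is easier to follow in isolation; a standalone sketch of what `FindAllStringSubmatchIndex` returns for the example in the comment above:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`(\d{2}\.\d{2}\.\d{2}\.\d{2}) - (\S+)`)
	input := "11.11.11.11 - frank 12.12.12.12 - frank"

	// Each inner slice holds [matchStart matchEnd g1Start g1End g2Start g2End];
	// skipping the first pair walks only the captured groups, exactly as the
	// i := 2; i += 2 loop in getReplacedEntry does.
	for _, idx := range re.FindAllStringSubmatchIndex(input, -1) {
		fmt.Println(idx)
		for i := 2; i < len(idx); i += 2 {
			fmt.Printf("  group %d: %q\n", i/2, input[idx[i]:idx[i+1]])
		}
	}
}
```
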
diff --git a/pkg/logentry/stages/replace_test.go b/pkg/logentry/stages/replace_test.go
new file mode 100644
index 0000000000000..8e39328618b46
--- /dev/null
+++ b/pkg/logentry/stages/replace_test.go
@@ -0,0 +1,225 @@
+package stages
+
+import (
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/cortexproject/cortex/pkg/util"
+ "github.com/pkg/errors"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/model"
+ "github.com/stretchr/testify/assert"
+ "gopkg.in/yaml.v2"
+)
+
+var testReplaceYamlSingleStageWithoutSource = `
+pipeline_stages:
+- replace:
+ expression: "11.11.11.11 - (\\S+) .*"
+ replace: "dummy"
+`
+var testReplaceYamlMultiStageWithSource = `
+pipeline_stages:
+- json:
+ expressions:
+ level:
+ msg:
+- replace:
+ expression: "\\S+ - \"POST (\\S+) .*"
+ source: msg
+ replace: "/loki/api/v1/push/"
+`
+
+var testReplaceYamlWithNamedCapturedGroupWithTemplate = `
+---
+pipeline_stages:
+ -
+ replace:
+ expression: "^(?P<ip>\\S+) (?P<identd>\\S+) (?P<user>\\S+) \\[(?P<timestamp>[\\w:/]+\\s[+\\-]\\d{4})\\] \"(?P<action>\\S+)\\s?(?P<path>\\S+)?\\s?(?P<protocol>\\S+)?\" (?P<status>\\d{3}|-) (\\d+|-)\\s?\"?(?P<referer>[^\"]*)\"?\\s?\"?(?P<useragent>[^\"]*)?\"?$"
+ replace: '{{ if eq .Value "200" }}{{ Replace .Value "200" "HttpStatusOk" -1 }}{{ else }}{{ .Value | ToUpper }}{{ end }}'
+`
+
+var testReplaceYamlWithTemplate = `
+---
+pipeline_stages:
+ -
+ replace:
+ expression: "^(\\S+) (\\S+) (\\S+) \\[([\\w:/]+\\s[+\\-]\\d{4})\\] \"(\\S+)\\s?(\\S+)?\\s?(\\S+)?\" (\\d{3}|-) (\\d+|-)\\s?\"?([^\"]*)\"?\\s?\"?([^\"]*)?\"?$"
+ replace: '{{ if eq .Value "200" }}{{ Replace .Value "200" "HttpStatusOk" -1 }}{{ else }}{{ .Value | ToUpper }}{{ end }}'
+`
+
+var testReplaceLogLine = `11.11.11.11 - frank [25/Jan/2000:14:00:01 -0500] "GET /1986.js HTTP/1.1" 200 932 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6"`
+var testReplaceLogJSONLine = `{"time":"2019-01-01T01:00:00.000000001Z", "level": "info", "msg": "11.11.11.11 - \"POST /loki/api/push/ HTTP/1.1\" 200 932 \"-\" \"Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6\""}`
+
+func TestPipeline_Replace(t *testing.T) {
+ t.Parallel()
+
+ tests := map[string]struct {
+ config string
+ entry string
+ extracted map[string]interface{}
+ expectedEntry string
+ }{
+ "successfully run a pipeline with 1 regex stage without source": {
+ testReplaceYamlSingleStageWithoutSource,
+ testReplaceLogLine,
+ map[string]interface{}{},
+ `11.11.11.11 - dummy [25/Jan/2000:14:00:01 -0500] "GET /1986.js HTTP/1.1" 200 932 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6"`,
+ },
+ "successfully run a pipeline with multi stage with source": {
+ testReplaceYamlMultiStageWithSource,
+ testReplaceLogJSONLine,
+ map[string]interface{}{
+ "level": "info",
+ "msg": `11.11.11.11 - "POST /loki/api/v1/push/ HTTP/1.1" 200 932 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6"`,
+ },
+ `{"time":"2019-01-01T01:00:00.000000001Z", "level": "info", "msg": "11.11.11.11 - \"POST /loki/api/push/ HTTP/1.1\" 200 932 \"-\" \"Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6\""}`,
+ },
+ "successfully run a pipeline with 1 regex stage with named captured group and with template and without source": {
+ testReplaceYamlWithNamedCapturedGroupWithTemplate,
+ testReplaceLogLine,
+ map[string]interface{}{
+ "ip": "11.11.11.11",
+ "identd": "-",
+ "user": "FRANK",
+ "timestamp": "25/JAN/2000:14:00:01 -0500",
+ "action": "GET",
+ "path": "/1986.JS",
+ "protocol": "HTTP/1.1",
+ "status": "HttpStatusOk",
+ "referer": "-",
+ "useragent": "MOZILLA/5.0 (WINDOWS; U; WINDOWS NT 5.1; DE; RV:1.9.1.7) GECKO/20091221 FIREFOX/3.5.7 GTB6",
+ },
+ `11.11.11.11 - FRANK [25/JAN/2000:14:00:01 -0500] "GET /1986.JS HTTP/1.1" HttpStatusOk 932 "-" "MOZILLA/5.0 (WINDOWS; U; WINDOWS NT 5.1; DE; RV:1.9.1.7) GECKO/20091221 FIREFOX/3.5.7 GTB6"`,
+ },
+ "successfully run a pipeline with 1 regex stage with template and without source": {
+ testReplaceYamlWithTemplate,
+ testReplaceLogLine,
+ map[string]interface{}{},
+ `11.11.11.11 - FRANK [25/JAN/2000:14:00:01 -0500] "GET /1986.JS HTTP/1.1" HttpStatusOk 932 "-" "MOZILLA/5.0 (WINDOWS; U; WINDOWS NT 5.1; DE; RV:1.9.1.7) GECKO/20091221 FIREFOX/3.5.7 GTB6"`,
+ },
+ }
+
+ for testName, testData := range tests {
+ testData := testData
+
+ t.Run(testName, func(t *testing.T) {
+ t.Parallel()
+
+ pl, err := NewPipeline(util.Logger, loadConfig(testData.config), nil, prometheus.DefaultRegisterer)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ lbls := model.LabelSet{}
+ ts := time.Now()
+ entry := testData.entry
+ extracted := map[string]interface{}{}
+ pl.Process(lbls, extracted, &ts, &entry)
+ assert.Equal(t, testData.expectedEntry, entry)
+ assert.Equal(t, testData.extracted, extracted)
+ })
+ }
+}
+
+var replaceCfg = `
+replace:
+ expression: "regexexpression"
+ replace: "replace"`
+
+func TestReplaceMapStructure(t *testing.T) {
+ t.Parallel()
+
+ // testing that we can use yaml data with mapstructure.
+ var mapstruct map[interface{}]interface{}
+ if err := yaml.Unmarshal([]byte(replaceCfg), &mapstruct); err != nil {
+ t.Fatalf("error while un-marshalling config: %s", err)
+ }
+ p, ok := mapstruct["replace"].(map[interface{}]interface{})
+ if !ok {
+ t.Fatalf("could not read parser %+v", mapstruct["replace"])
+ }
+ got, err := parseReplaceConfig(p)
+ if err != nil {
+ t.Fatalf("could not create parser from yaml: %s", err)
+ }
+ want := &ReplaceConfig{
+ Expression: "regexexpression",
+ Replace: "replace",
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Fatalf("want: %+v got: %+v", want, got)
+ }
+}
+
+func TestReplaceConfig_validate(t *testing.T) {
+ t.Parallel()
+ tests := map[string]struct {
+ config interface{}
+ err error
+ }{
+ "empty config": {
+ nil,
+ errors.New(ErrExpressionRequired),
+ },
+ "missing regex_expression": {
+ map[string]interface{}{},
+ errors.New(ErrExpressionRequired),
+ },
+ "invalid regex_expression": {
+ map[string]interface{}{
+ "expression": "(?P<ts[0-9]+).*",
+ "replace": "test",
+ },
+ errors.New(ErrCouldNotCompileRegex + ": error parsing regexp: invalid named capture: `(?P<ts[0-9]+).*`"),
+ },
+ "empty source": {
+ map[string]interface{}{
+ "expression": "(?P<ts>[0-9]+).*",
+ "source": "",
+ },
+ errors.New(ErrEmptyReplaceStageSource),
+ },
+ "empty replace": {
+ map[string]interface{}{
+ "expression": "(?P<ts>[0-9]+).*",
+ "replace": "",
+ },
+ errors.New(ErrEmptyReplaceStageConfig),
+ },
+ "valid without source": {
+ map[string]interface{}{
+ "expression": "(?P<ts>[0-9]+).*",
+ "replace": "test",
+ },
+ nil,
+ },
+ "valid with source": {
+ map[string]interface{}{
+ "expression": "(?P<ts>[0-9]+).*",
+ "source": "log",
+ "replace": "test",
+ },
+ nil,
+ },
+ }
+ for tName, tt := range tests {
+ tt := tt
+ t.Run(tName, func(t *testing.T) {
+ c, err := parseReplaceConfig(tt.config)
+ if err != nil {
+ t.Fatalf("failed to create config: %s", err)
+ }
+ _, err = validateReplaceConfig(c)
+ if (err != nil) != (tt.err != nil) {
+ t.Errorf("ReplaceConfig.validate() expected error = %v, actual error = %v", tt.err, err)
+ return
+ }
+ if (err != nil) && (err.Error() != tt.err.Error()) {
+ t.Errorf("ReplaceConfig.validate() expected error = %v, actual error = %v", tt.err, err)
+ return
+ }
+ })
+ }
+}
diff --git a/pkg/logentry/stages/stage.go b/pkg/logentry/stages/stage.go
index 46ac43c55c350..16da6fd9f205d 100644
--- a/pkg/logentry/stages/stage.go
+++ b/pkg/logentry/stages/stage.go
@@ -12,6 +12,7 @@ import (
const (
StageTypeJSON = "json"
StageTypeRegex = "regex"
+ StageTypeReplace = "replace"
StageTypeMetric = "metrics"
StageTypeLabel = "labels"
StageTypeTimestamp = "timestamp"
@@ -100,6 +101,11 @@ func New(logger log.Logger, jobName *string, stageType string,
if err != nil {
return nil, err
}
+ case StageTypeReplace:
+ s, err = newReplaceStage(logger, cfg)
+ if err != nil {
+ return nil, err
+ }
default:
return nil, errors.Errorf("Unknown stage type: %s", stageType)
}
|
feature
|
Replace stage in pipeline (#2060)
|
42b8a6cbcaf8211c0344c0f8e7d00a1617c911bf
|
2023-08-16 18:55:32
|
Roman Danko
|
helm: Use "loki.clusterLabel" template for PodLogs cluster label (#10215)
| false
|
diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md
index 6465686364195..b5a0ca2b9bd48 100644
--- a/production/helm/loki/CHANGELOG.md
+++ b/production/helm/loki/CHANGELOG.md
@@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang
[//]: # (<AUTOMATED_UPDATES_LOCATOR> : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.)
+## 5.13.0
+
+- [ENHANCEMENT] Use "loki.clusterLabel" template for PodLogs cluster label
+
## 5.12.0
- [ENHANCEMENT] Use tpl function in ingress and gateway-ingress for hosts
@@ -21,12 +25,10 @@ Entries should include a reference to the pull request that introduced the chang
- [CHANGE] Changed version of Loki to 2.8.4
-
## 5.10.0
- [CHANGE] Changed version of Grafana Enterprise Logs to v1.7.3
-
## 5.9.2
- [ENHANCEMENT] Add custom labels value for loki ingress
diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml
index 0d2f3fda43cd0..3a01b28361933 100644
--- a/production/helm/loki/Chart.yaml
+++ b/production/helm/loki/Chart.yaml
@@ -3,7 +3,7 @@ name: loki
description: Helm chart for Grafana Loki in simple, scalable mode
type: application
appVersion: 2.8.4
-version: 5.12.0
+version: 5.13.0
home: https://grafana.github.io/helm-charts
sources:
- https://github.com/grafana/loki
diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md
index 0f9c0b8f1627e..b3a5e7479279c 100644
--- a/production/helm/loki/README.md
+++ b/production/helm/loki/README.md
@@ -1,6 +1,6 @@
# loki
-  
+  
Helm chart for Grafana Loki in simple, scalable mode
diff --git a/production/helm/loki/templates/monitoring/pod-logs.yaml b/production/helm/loki/templates/monitoring/pod-logs.yaml
index e9d66d64dcf8f..e8ced3dce6de2 100644
--- a/production/helm/loki/templates/monitoring/pod-logs.yaml
+++ b/production/helm/loki/templates/monitoring/pod-logs.yaml
@@ -41,7 +41,7 @@ spec:
sourceLabels:
- __meta_kubernetes_pod_container_name
targetLabel: container
- - replacement: "{{ include "loki.fullname" $ }}"
+ - replacement: "{{ include "loki.clusterLabel" $ }}"
targetLabel: cluster
{{- with .relabelings }}
{{- toYaml . | nindent 4 }}
|
helm
|
Use "loki.clusterLabel" template for PodLogs cluster label (#10215)
|
dbb1716ad2a2775a7f18922fa560e47bdc8d1937
|
2025-02-18 06:26:44
|
renovate[bot]
|
fix(deps): update dependency @tanstack/react-query-devtools to v5.66.5 (main) (#16331)
| false
|
diff --git a/pkg/ui/frontend/package-lock.json b/pkg/ui/frontend/package-lock.json
index bc76fbe14408a..a6310cf34aaa0 100644
--- a/pkg/ui/frontend/package-lock.json
+++ b/pkg/ui/frontend/package-lock.json
@@ -2466,9 +2466,9 @@
}
},
"node_modules/@tanstack/react-query-devtools": {
- "version": "5.66.0",
- "resolved": "https://registry.npmjs.org/@tanstack/react-query-devtools/-/react-query-devtools-5.66.0.tgz",
- "integrity": "sha512-uB57wA2YZaQ2fPcFW0E9O1zAGDGSbRKRx84uMk/86VyU9jWVxvJ3Uzp+zNm+nZJYsuekCIo2opTdgNuvM3cKgA==",
+ "version": "5.66.5",
+ "resolved": "https://registry.npmjs.org/@tanstack/react-query-devtools/-/react-query-devtools-5.66.5.tgz",
+ "integrity": "sha512-aj/+IuLdt3fbNweKo2XSK6OMUpOULDUO1MRDUtggRl8W6/EvD7FVvm0ydRrOMvS9KUqC25V88WPYaPj8SEVGXg==",
"license": "MIT",
"dependencies": {
"@tanstack/query-devtools": "5.65.0"
@@ -2478,7 +2478,7 @@
"url": "https://github.com/sponsors/tannerlinsley"
},
"peerDependencies": {
- "@tanstack/react-query": "^5.66.0",
+ "@tanstack/react-query": "^5.66.5",
"react": "^18 || ^19"
}
},
|
fix
|
update dependency @tanstack/react-query-devtools to v5.66.5 (main) (#16331)
|
9f28f6938d6c6484836b35765a95a778d47b3a4a
|
2025-01-03 22:14:57
|
renovate[bot]
|
fix(deps): update module cloud.google.com/go/bigtable to v1.34.0 (#15581)
| false
|
diff --git a/go.mod b/go.mod
index 6fdb7870a501f..25a6bdcbc249a 100644
--- a/go.mod
+++ b/go.mod
@@ -5,7 +5,7 @@ go 1.23.0
toolchain go1.23.1
require (
- cloud.google.com/go/bigtable v1.33.0
+ cloud.google.com/go/bigtable v1.34.0
cloud.google.com/go/pubsub v1.45.3
cloud.google.com/go/storage v1.49.0
dario.cat/mergo v1.0.1
@@ -203,7 +203,7 @@ require (
)
require (
- cloud.google.com/go v0.116.0 // indirect
+ cloud.google.com/go v0.117.0 // indirect
cloud.google.com/go/compute/metadata v0.6.0 // indirect
cloud.google.com/go/iam v1.2.2 // indirect
cloud.google.com/go/longrunning v0.6.2 // indirect
@@ -284,7 +284,6 @@ require (
github.com/gofrs/flock v0.8.1 // indirect
github.com/golang-jwt/jwt/v4 v4.5.1 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
- github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-querystring v1.1.0 // indirect
diff --git a/go.sum b/go.sum
index 91dd2119b9e98..c6265b0759094 100644
--- a/go.sum
+++ b/go.sum
@@ -15,8 +15,8 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE=
-cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U=
+cloud.google.com/go v0.117.0 h1:Z5TNFfQxj7WG2FgOGX1ekC5RiXrYgms6QscOm32M/4s=
+cloud.google.com/go v0.117.0/go.mod h1:ZbwhVTb1DBGt2Iwb3tNO6SEK4q+cplHZmLWH+DelYYc=
cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs=
cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q=
cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU=
@@ -27,8 +27,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/bigtable v1.33.0 h1:2BDaWLRAwXO14DJL/u8crbV2oUbMZkIa2eGq8Yao1bk=
-cloud.google.com/go/bigtable v1.33.0/go.mod h1:HtpnH4g25VT1pejHRtInlFPnN5sjTxbQlsYBjh9t5l0=
+cloud.google.com/go/bigtable v1.34.0 h1:eIgi3QLcN4aq8p6n9U/zPgmHeBP34sm9FiKq4ik/ZoY=
+cloud.google.com/go/bigtable v1.34.0/go.mod h1:p94uLf6cy6D73POkudMagaFF3x9c7ktZjRnOUVGjZAw=
cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
diff --git a/vendor/cloud.google.com/go/.release-please-manifest-individual.json b/vendor/cloud.google.com/go/.release-please-manifest-individual.json
index 39ed1f94745e7..0c4dc6cb5d918 100644
--- a/vendor/cloud.google.com/go/.release-please-manifest-individual.json
+++ b/vendor/cloud.google.com/go/.release-please-manifest-individual.json
@@ -1,18 +1,16 @@
{
- "ai": "0.8.2",
- "aiplatform": "1.68.0",
- "auth": "0.9.7",
- "auth/oauth2adapt": "0.2.4",
- "bigquery": "1.63.1",
+ "auth": "0.13.0",
+ "auth/oauth2adapt": "0.2.6",
+ "bigquery": "1.65.0",
"bigtable": "1.33.0",
- "datastore": "1.19.0",
- "errorreporting": "0.3.1",
+ "datastore": "1.20.0",
+ "errorreporting": "0.3.2",
"firestore": "1.17.0",
- "logging": "1.11.0",
- "profiler": "0.4.1",
- "pubsub": "1.44.0",
+ "logging": "1.12.0",
+ "profiler": "0.4.2",
+ "pubsub": "1.45.3",
"pubsublite": "1.8.2",
- "spanner": "1.69.0",
- "storage": "1.44.0",
- "vertexai": "0.13.1"
+ "spanner": "1.73.0",
+ "storage": "1.48.0",
+ "vertexai": "0.13.2"
}
diff --git a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json
index edbdcf47fd969..b70366d86907e 100644
--- a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json
+++ b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json
@@ -1,150 +1,153 @@
{
- "accessapproval": "1.8.1",
- "accesscontextmanager": "1.9.1",
- "advisorynotifications": "1.5.1",
- "alloydb": "1.12.1",
- "analytics": "0.25.1",
- "apigateway": "1.7.1",
- "apigeeconnect": "1.7.1",
- "apigeeregistry": "0.9.1",
- "apihub": "0.1.1",
- "apikeys": "1.2.1",
- "appengine": "1.9.1",
- "apphub": "0.2.1",
- "apps": "0.5.1",
- "area120": "0.9.1",
- "artifactregistry": "1.15.1",
- "asset": "1.20.2",
- "assuredworkloads": "1.12.1",
- "automl": "1.14.1",
- "backupdr": "1.1.1",
- "baremetalsolution": "1.3.1",
- "batch": "1.11.0",
- "beyondcorp": "1.1.1",
- "billing": "1.19.1",
- "binaryauthorization": "1.9.1",
- "certificatemanager": "1.9.1",
- "channel": "1.18.1",
- "chat": "0.6.0",
- "cloudbuild": "1.18.0",
- "cloudcontrolspartner": "1.2.0",
- "clouddms": "1.8.1",
- "cloudprofiler": "0.4.1",
- "cloudquotas": "1.1.1",
- "cloudtasks": "1.13.1",
- "commerce": "1.1.1",
- "compute": "1.28.1",
- "compute/metadata": "0.5.2",
- "confidentialcomputing": "1.7.1",
- "config": "1.1.1",
- "contactcenterinsights": "1.14.1",
- "container": "1.40.0",
- "containeranalysis": "0.13.1",
- "datacatalog": "1.22.1",
- "dataflow": "0.10.1",
- "dataform": "0.10.1",
- "datafusion": "1.8.1",
- "datalabeling": "0.9.1",
- "dataplex": "1.19.1",
- "dataproc": "2.9.0",
- "dataqna": "0.9.1",
- "datastream": "1.11.1",
- "deploy": "1.22.1",
- "developerconnect": "0.2.1",
- "dialogflow": "1.58.0",
- "discoveryengine": "1.14.0",
- "dlp": "1.19.0",
- "documentai": "1.34.0",
- "domains": "0.10.1",
- "edgecontainer": "1.3.1",
- "edgenetwork": "1.2.1",
- "essentialcontacts": "1.7.1",
- "eventarc": "1.14.1",
- "filestore": "1.9.1",
- "functions": "1.19.1",
- "gkebackup": "1.6.1",
- "gkeconnect": "0.11.1",
- "gkehub": "0.15.1",
- "gkemulticloud": "1.4.0",
- "grafeas": "0.3.11",
- "gsuiteaddons": "1.7.1",
- "iam": "1.2.1",
- "iap": "1.10.1",
- "identitytoolkit": "0.2.1",
- "ids": "1.5.1",
- "iot": "1.8.1",
- "kms": "1.20.0",
- "language": "1.14.1",
- "lifesciences": "0.10.1",
- "longrunning": "0.6.1",
- "managedidentities": "1.7.1",
- "managedkafka": "0.2.1",
- "maps": "1.14.0",
- "mediatranslation": "0.9.1",
- "memcache": "1.11.1",
- "metastore": "1.14.1",
- "migrationcenter": "1.1.1",
- "monitoring": "1.21.1",
- "netapp": "1.4.0",
- "networkconnectivity": "1.15.1",
- "networkmanagement": "1.14.1",
- "networksecurity": "0.10.1",
- "networkservices": "0.2.1",
- "notebooks": "1.12.1",
- "optimization": "1.7.1",
- "orchestration": "1.11.0",
- "orgpolicy": "1.14.0",
- "osconfig": "1.14.1",
- "oslogin": "1.14.1",
- "parallelstore": "0.6.1",
- "phishingprotection": "0.9.1",
- "policysimulator": "0.3.1",
- "policytroubleshooter": "1.11.1",
- "privatecatalog": "0.10.1",
- "privilegedaccessmanager": "0.2.1",
- "rapidmigrationassessment": "1.1.1",
- "recaptchaenterprise": "2.17.1",
- "recommendationengine": "0.9.1",
- "recommender": "1.13.1",
- "redis": "1.17.1",
- "resourcemanager": "1.10.1",
- "resourcesettings": "1.8.1",
- "retail": "1.18.1",
- "run": "1.5.1",
- "scheduler": "1.11.1",
- "secretmanager": "1.14.1",
- "securesourcemanager": "1.2.1",
- "security": "1.18.1",
- "securitycenter": "1.35.1",
- "securitycentermanagement": "1.1.1",
- "securityposture": "0.2.1",
- "servicecontrol": "1.14.1",
- "servicedirectory": "1.12.1",
- "servicehealth": "1.1.1",
- "servicemanagement": "1.10.1",
- "serviceusage": "1.9.1",
- "shell": "1.8.1",
- "shopping": "0.10.0",
- "speech": "1.25.1",
- "storageinsights": "1.1.1",
- "storagetransfer": "1.11.1",
- "streetview": "0.2.1",
- "support": "1.1.1",
- "talent": "1.7.1",
- "telcoautomation": "1.1.1",
- "texttospeech": "1.8.1",
- "tpu": "1.7.1",
- "trace": "1.11.1",
- "translate": "1.12.1",
- "video": "1.23.1",
- "videointelligence": "1.12.1",
- "vision": "2.9.1",
- "visionai": "0.4.1",
- "vmmigration": "1.8.1",
- "vmwareengine": "1.3.1",
- "vpcaccess": "1.8.1",
- "webrisk": "1.10.1",
- "websecurityscanner": "1.7.1",
- "workflows": "1.13.1",
- "workstations": "1.1.1"
+ "accessapproval": "1.8.2",
+ "accesscontextmanager": "1.9.2",
+ "advisorynotifications": "1.5.2",
+ "ai": "0.9.0",
+ "aiplatform": "1.69.0",
+ "alloydb": "1.14.0",
+ "analytics": "0.25.2",
+ "apigateway": "1.7.2",
+ "apigeeconnect": "1.7.2",
+ "apigeeregistry": "0.9.2",
+ "apihub": "0.1.2",
+ "apikeys": "1.2.2",
+ "appengine": "1.9.2",
+ "apphub": "0.2.2",
+ "apps": "0.5.2",
+ "area120": "0.9.2",
+ "artifactregistry": "1.16.0",
+ "asset": "1.20.3",
+ "assuredworkloads": "1.12.2",
+ "automl": "1.14.3",
+ "backupdr": "1.2.1",
+ "baremetalsolution": "1.3.2",
+ "batch": "1.11.4",
+ "beyondcorp": "1.1.2",
+ "billing": "1.20.0",
+ "binaryauthorization": "1.9.2",
+ "certificatemanager": "1.9.2",
+ "channel": "1.19.1",
+ "chat": "0.9.0",
+ "cloudbuild": "1.19.1",
+ "cloudcontrolspartner": "1.2.1",
+ "clouddms": "1.8.2",
+ "cloudprofiler": "0.4.2",
+ "cloudquotas": "1.2.0",
+ "cloudtasks": "1.13.2",
+ "commerce": "1.2.1",
+ "compute": "1.31.0",
+ "compute/metadata": "0.6.0",
+ "confidentialcomputing": "1.8.0",
+ "config": "1.2.0",
+ "contactcenterinsights": "1.16.0",
+ "container": "1.42.0",
+ "containeranalysis": "0.13.2",
+ "datacatalog": "1.24.0",
+ "dataflow": "0.10.2",
+ "dataform": "0.10.2",
+ "datafusion": "1.8.2",
+ "datalabeling": "0.9.2",
+ "dataplex": "1.20.0",
+ "dataproc": "2.10.0",
+ "dataqna": "0.9.2",
+ "datastream": "1.12.0",
+ "deploy": "1.26.0",
+ "developerconnect": "0.3.0",
+ "dialogflow": "1.63.0",
+ "discoveryengine": "1.16.0",
+ "dlp": "1.20.0",
+ "documentai": "1.35.0",
+ "domains": "0.10.2",
+ "edgecontainer": "1.4.0",
+ "edgenetwork": "1.2.2",
+ "essentialcontacts": "1.7.2",
+ "eventarc": "1.15.0",
+ "filestore": "1.9.2",
+ "functions": "1.19.2",
+ "gkebackup": "1.6.2",
+ "gkeconnect": "0.12.0",
+ "gkehub": "0.15.2",
+ "gkemulticloud": "1.4.1",
+ "grafeas": "0.3.12",
+ "gsuiteaddons": "1.7.2",
+ "iam": "1.3.0",
+ "iap": "1.10.2",
+ "identitytoolkit": "0.2.2",
+ "ids": "1.5.2",
+ "iot": "1.8.2",
+ "kms": "1.20.2",
+ "language": "1.14.2",
+ "lifesciences": "0.10.2",
+ "longrunning": "0.6.3",
+ "managedidentities": "1.7.2",
+ "managedkafka": "0.3.0",
+ "maps": "1.17.0",
+ "mediatranslation": "0.9.2",
+ "memcache": "1.11.2",
+ "metastore": "1.14.2",
+ "migrationcenter": "1.1.2",
+ "monitoring": "1.22.0",
+ "netapp": "1.5.0",
+ "networkconnectivity": "1.16.0",
+ "networkmanagement": "1.17.0",
+ "networksecurity": "0.10.2",
+ "networkservices": "0.2.2",
+ "notebooks": "1.12.2",
+ "optimization": "1.7.2",
+ "oracledatabase": "0.1.2",
+ "orchestration": "1.11.2",
+ "orgpolicy": "1.14.1",
+ "osconfig": "1.14.2",
+ "oslogin": "1.14.2",
+ "parallelstore": "0.9.0",
+ "phishingprotection": "0.9.2",
+ "policysimulator": "0.3.2",
+ "policytroubleshooter": "1.11.2",
+ "privatecatalog": "0.10.2",
+ "privilegedaccessmanager": "0.2.2",
+ "rapidmigrationassessment": "1.1.2",
+ "recaptchaenterprise": "2.19.1",
+ "recommendationengine": "0.9.2",
+ "recommender": "1.13.2",
+ "redis": "1.17.2",
+ "resourcemanager": "1.10.2",
+ "resourcesettings": "1.8.2",
+ "retail": "1.19.1",
+ "run": "1.8.0",
+ "scheduler": "1.11.2",
+ "secretmanager": "1.14.2",
+ "securesourcemanager": "1.3.0",
+ "security": "1.18.2",
+ "securitycenter": "1.35.2",
+ "securitycentermanagement": "1.1.2",
+ "securityposture": "0.2.2",
+ "servicecontrol": "1.14.2",
+ "servicedirectory": "1.12.2",
+ "servicehealth": "1.2.0",
+ "servicemanagement": "1.10.2",
+ "serviceusage": "1.9.2",
+ "shell": "1.8.2",
+ "shopping": "0.13.0",
+ "speech": "1.25.2",
+ "storageinsights": "1.1.2",
+ "storagetransfer": "1.11.2",
+ "streetview": "0.2.2",
+ "support": "1.1.2",
+ "talent": "1.7.2",
+ "telcoautomation": "1.1.2",
+ "texttospeech": "1.10.0",
+ "tpu": "1.7.2",
+ "trace": "1.11.2",
+ "translate": "1.12.2",
+ "video": "1.23.2",
+ "videointelligence": "1.12.2",
+ "vision": "2.9.2",
+ "visionai": "0.4.2",
+ "vmmigration": "1.8.2",
+ "vmwareengine": "1.3.2",
+ "vpcaccess": "1.8.2",
+ "webrisk": "1.10.2",
+ "websecurityscanner": "1.7.2",
+ "workflows": "1.13.2",
+ "workstations": "1.1.2"
}
diff --git a/vendor/cloud.google.com/go/.release-please-manifest.json b/vendor/cloud.google.com/go/.release-please-manifest.json
index c8f1da56d86db..3200815ccadaf 100644
--- a/vendor/cloud.google.com/go/.release-please-manifest.json
+++ b/vendor/cloud.google.com/go/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.116.0"
+ ".": "0.117.0"
}
diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md
index adc725ca1a72d..c8d214a8e30b1 100644
--- a/vendor/cloud.google.com/go/CHANGES.md
+++ b/vendor/cloud.google.com/go/CHANGES.md
@@ -1,5 +1,13 @@
# Changes
+## [0.117.0](https://github.com/googleapis/google-cloud-go/compare/v0.116.0...v0.117.0) (2024-12-16)
+
+
+### Features
+
+* **internal/trace:** Remove previously deprecated OpenCensus support ([#11230](https://github.com/googleapis/google-cloud-go/issues/11230)) ([40cf125](https://github.com/googleapis/google-cloud-go/commit/40cf1251c9d73be435585ce204a63588446c72b1)), refs [#10287](https://github.com/googleapis/google-cloud-go/issues/10287)
+* **transport:** Remove deprecated EXPERIMENTAL OpenCensus trace context propagation ([#11239](https://github.com/googleapis/google-cloud-go/issues/11239)) ([0d1ac87](https://github.com/googleapis/google-cloud-go/commit/0d1ac87174ed8526ea47d71a80e641ffbd687a6c)), refs [#10287](https://github.com/googleapis/google-cloud-go/issues/10287) [#11230](https://github.com/googleapis/google-cloud-go/issues/11230)
+
## [0.116.0](https://github.com/googleapis/google-cloud-go/compare/v0.115.1...v0.116.0) (2024-10-09)
diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md
index 63db0209c7dbe..7a6d74af1a3ff 100644
--- a/vendor/cloud.google.com/go/README.md
+++ b/vendor/cloud.google.com/go/README.md
@@ -4,18 +4,10 @@
Go packages for [Google Cloud Platform](https://cloud.google.com) services.
-``` go
-import "cloud.google.com/go"
-```
-
-To install the packages on your system, *do not clone the repo*. Instead:
-
-1. Change to your project directory: `cd /my/cloud/project`
-1. Get the package you want to use. Some products have their own module, so it's
- best to `go get` the package(s) you want to use:
+## Installation
```bash
-go get cloud.google.com/go/firestore # Replace with the package you want to use.
+go get cloud.google.com/go/firestore@latest # Replace firestore with the package you want to use.
```
**NOTE:** Some of these packages are under development, and may occasionally
diff --git a/vendor/cloud.google.com/go/bigtable/CHANGES.md b/vendor/cloud.google.com/go/bigtable/CHANGES.md
index 2c6917bbe2831..f66ddcd063614 100644
--- a/vendor/cloud.google.com/go/bigtable/CHANGES.md
+++ b/vendor/cloud.google.com/go/bigtable/CHANGES.md
@@ -1,5 +1,29 @@
# Changes
+## [1.34.0](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.33.0...bigtable/v1.34.0) (2025-01-02)
+
+
+### Features
+
+* **bigtable/admin:** Add support for Cloud Bigtable Node Scaling Factor for CBT Clusters ([7250d71](https://github.com/googleapis/google-cloud-go/commit/7250d714a638dcd5df3fbe0e91c5f1250c3f80f9))
+* **bigtable:** Add feature flags proto for Direct Access ([2c83297](https://github.com/googleapis/google-cloud-go/commit/2c83297a569117b0252b5b2edaecb09e4924d979))
+* **bigtable:** Async refresh dry run in parallel with sync refresh ([#11066](https://github.com/googleapis/google-cloud-go/issues/11066)) ([169e309](https://github.com/googleapis/google-cloud-go/commit/169e3096150599899788169368f96ce4470e5599))
+
+
+### Bug Fixes
+
+* **bigtable:** Correct the 'method' label value ([#11350](https://github.com/googleapis/google-cloud-go/issues/11350)) ([6aa27dc](https://github.com/googleapis/google-cloud-go/commit/6aa27dc79046df09e34e93044dbfe47cb3e9aa54))
+* **bigtable:** Resolve discrepancy between server and client qps ([#11224](https://github.com/googleapis/google-cloud-go/issues/11224)) ([c500179](https://github.com/googleapis/google-cloud-go/commit/c500179e771ac45ca3c2f5f7939444c8f65eafd3))
+* **bigtable:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))
+* **bigtable:** Update google.golang.org/api to v0.203.0 ([8bb87d5](https://github.com/googleapis/google-cloud-go/commit/8bb87d56af1cba736e0fe243979723e747e5e11e))
+* **bigtable:** WARNING: On approximately Dec 1, 2024, an update to Protobuf will change service registration function signatures to use an interface instead of a concrete type in generated .pb.go files. This change is expected to affect very few if any users of this client library. For more information, see https://togithub.com/googleapis/google-cloud-go/issues/11020. ([8bb87d5](https://github.com/googleapis/google-cloud-go/commit/8bb87d56af1cba736e0fe243979723e747e5e11e))
+
+
+### Documentation
+
+* **bigtable:** Add todos ([#11280](https://github.com/googleapis/google-cloud-go/issues/11280)) ([d4f2449](https://github.com/googleapis/google-cloud-go/commit/d4f2449c5c2192b49de2bb42c7027beffb7517a2))
+* **bigtable:** Adding shut down log ([#11293](https://github.com/googleapis/google-cloud-go/issues/11293)) ([6cf33a8](https://github.com/googleapis/google-cloud-go/commit/6cf33a8d5605f37c1666de2c4e49554ec8fcc1a1))
+
## [1.33.0](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.32.0...bigtable/v1.33.0) (2024-09-23)
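For callers, this v1.33.0 to v1.34.0 bump is source-compatible: the fixes listed above (qps accounting, the 'method' metric label, async refresh) are picked up without code changes. A minimal sketch, assuming a placeholder project ID, using the long-standing instance-admin surface:

```go
// Sketch: nothing here changes across the bigtable v1.33.0 -> v1.34.0
// bump; "my-project" is a placeholder project ID.
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/bigtable"
)

func main() {
	ctx := context.Background()
	admin, err := bigtable.NewInstanceAdminClient(ctx, "my-project")
	if err != nil {
		log.Fatalf("NewInstanceAdminClient: %v", err)
	}
	defer admin.Close()

	// Instances exists in both versions; only the internals changed.
	instances, err := admin.Instances(ctx)
	if err != nil {
		log.Fatalf("Instances: %v", err)
	}
	for _, in := range instances {
		fmt.Println(in.Name)
	}
}
```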
diff --git a/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/bigtable_instance_admin.pb.go b/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/bigtable_instance_admin.pb.go
index ca912acd58f63..c628fd2741acb 100644
--- a/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/bigtable_instance_admin.pb.go
+++ b/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/bigtable_instance_admin.pb.go
@@ -14,7 +14,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.2
+// protoc-gen-go v1.35.2
// protoc v4.25.3
// source: google/bigtable/admin/v2/bigtable_instance_admin.proto
@@ -132,11 +132,9 @@ type CreateInstanceRequest struct {
func (x *CreateInstanceRequest) Reset() {
*x = CreateInstanceRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CreateInstanceRequest) String() string {
@@ -147,7 +145,7 @@ func (*CreateInstanceRequest) ProtoMessage() {}
func (x *CreateInstanceRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -203,11 +201,9 @@ type GetInstanceRequest struct {
func (x *GetInstanceRequest) Reset() {
*x = GetInstanceRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GetInstanceRequest) String() string {
@@ -218,7 +214,7 @@ func (*GetInstanceRequest) ProtoMessage() {}
func (x *GetInstanceRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -255,11 +251,9 @@ type ListInstancesRequest struct {
func (x *ListInstancesRequest) Reset() {
*x = ListInstancesRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListInstancesRequest) String() string {
@@ -270,7 +264,7 @@ func (*ListInstancesRequest) ProtoMessage() {}
func (x *ListInstancesRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -320,11 +314,9 @@ type ListInstancesResponse struct {
func (x *ListInstancesResponse) Reset() {
*x = ListInstancesResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListInstancesResponse) String() string {
@@ -335,7 +327,7 @@ func (*ListInstancesResponse) ProtoMessage() {}
func (x *ListInstancesResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -386,11 +378,9 @@ type PartialUpdateInstanceRequest struct {
func (x *PartialUpdateInstanceRequest) Reset() {
*x = PartialUpdateInstanceRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *PartialUpdateInstanceRequest) String() string {
@@ -401,7 +391,7 @@ func (*PartialUpdateInstanceRequest) ProtoMessage() {}
func (x *PartialUpdateInstanceRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -443,11 +433,9 @@ type DeleteInstanceRequest struct {
func (x *DeleteInstanceRequest) Reset() {
*x = DeleteInstanceRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DeleteInstanceRequest) String() string {
@@ -458,7 +446,7 @@ func (*DeleteInstanceRequest) ProtoMessage() {}
func (x *DeleteInstanceRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -500,11 +488,9 @@ type CreateClusterRequest struct {
func (x *CreateClusterRequest) Reset() {
*x = CreateClusterRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CreateClusterRequest) String() string {
@@ -515,7 +501,7 @@ func (*CreateClusterRequest) ProtoMessage() {}
func (x *CreateClusterRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -564,11 +550,9 @@ type GetClusterRequest struct {
func (x *GetClusterRequest) Reset() {
*x = GetClusterRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GetClusterRequest) String() string {
@@ -579,7 +563,7 @@ func (*GetClusterRequest) ProtoMessage() {}
func (x *GetClusterRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -619,11 +603,9 @@ type ListClustersRequest struct {
func (x *ListClustersRequest) Reset() {
*x = ListClustersRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListClustersRequest) String() string {
@@ -634,7 +616,7 @@ func (*ListClustersRequest) ProtoMessage() {}
func (x *ListClustersRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -683,11 +665,9 @@ type ListClustersResponse struct {
func (x *ListClustersResponse) Reset() {
*x = ListClustersResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListClustersResponse) String() string {
@@ -698,7 +678,7 @@ func (*ListClustersResponse) ProtoMessage() {}
func (x *ListClustersResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -747,11 +727,9 @@ type DeleteClusterRequest struct {
func (x *DeleteClusterRequest) Reset() {
*x = DeleteClusterRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DeleteClusterRequest) String() string {
@@ -762,7 +740,7 @@ func (*DeleteClusterRequest) ProtoMessage() {}
func (x *DeleteClusterRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -800,11 +778,9 @@ type CreateInstanceMetadata struct {
func (x *CreateInstanceMetadata) Reset() {
*x = CreateInstanceMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CreateInstanceMetadata) String() string {
@@ -815,7 +791,7 @@ func (*CreateInstanceMetadata) ProtoMessage() {}
func (x *CreateInstanceMetadata) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -867,11 +843,9 @@ type UpdateInstanceMetadata struct {
func (x *UpdateInstanceMetadata) Reset() {
*x = UpdateInstanceMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UpdateInstanceMetadata) String() string {
@@ -882,7 +856,7 @@ func (*UpdateInstanceMetadata) ProtoMessage() {}
func (x *UpdateInstanceMetadata) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -943,11 +917,9 @@ type CreateClusterMetadata struct {
func (x *CreateClusterMetadata) Reset() {
*x = CreateClusterMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CreateClusterMetadata) String() string {
@@ -958,7 +930,7 @@ func (*CreateClusterMetadata) ProtoMessage() {}
func (x *CreateClusterMetadata) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1017,11 +989,9 @@ type UpdateClusterMetadata struct {
func (x *UpdateClusterMetadata) Reset() {
*x = UpdateClusterMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UpdateClusterMetadata) String() string {
@@ -1032,7 +1002,7 @@ func (*UpdateClusterMetadata) ProtoMessage() {}
func (x *UpdateClusterMetadata) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1084,11 +1054,9 @@ type PartialUpdateClusterMetadata struct {
func (x *PartialUpdateClusterMetadata) Reset() {
*x = PartialUpdateClusterMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *PartialUpdateClusterMetadata) String() string {
@@ -1099,7 +1067,7 @@ func (*PartialUpdateClusterMetadata) ProtoMessage() {}
func (x *PartialUpdateClusterMetadata) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1150,11 +1118,9 @@ type PartialUpdateClusterRequest struct {
func (x *PartialUpdateClusterRequest) Reset() {
*x = PartialUpdateClusterRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *PartialUpdateClusterRequest) String() string {
@@ -1165,7 +1131,7 @@ func (*PartialUpdateClusterRequest) ProtoMessage() {}
func (x *PartialUpdateClusterRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1216,11 +1182,9 @@ type CreateAppProfileRequest struct {
func (x *CreateAppProfileRequest) Reset() {
*x = CreateAppProfileRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CreateAppProfileRequest) String() string {
@@ -1231,7 +1195,7 @@ func (*CreateAppProfileRequest) ProtoMessage() {}
func (x *CreateAppProfileRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1287,11 +1251,9 @@ type GetAppProfileRequest struct {
func (x *GetAppProfileRequest) Reset() {
*x = GetAppProfileRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GetAppProfileRequest) String() string {
@@ -1302,7 +1264,7 @@ func (*GetAppProfileRequest) ProtoMessage() {}
func (x *GetAppProfileRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1352,11 +1314,9 @@ type ListAppProfilesRequest struct {
func (x *ListAppProfilesRequest) Reset() {
*x = ListAppProfilesRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListAppProfilesRequest) String() string {
@@ -1367,7 +1327,7 @@ func (*ListAppProfilesRequest) ProtoMessage() {}
func (x *ListAppProfilesRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1424,11 +1384,9 @@ type ListAppProfilesResponse struct {
func (x *ListAppProfilesResponse) Reset() {
*x = ListAppProfilesResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListAppProfilesResponse) String() string {
@@ -1439,7 +1397,7 @@ func (*ListAppProfilesResponse) ProtoMessage() {}
func (x *ListAppProfilesResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1492,11 +1450,9 @@ type UpdateAppProfileRequest struct {
func (x *UpdateAppProfileRequest) Reset() {
*x = UpdateAppProfileRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UpdateAppProfileRequest) String() string {
@@ -1507,7 +1463,7 @@ func (*UpdateAppProfileRequest) ProtoMessage() {}
func (x *UpdateAppProfileRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[21]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1559,11 +1515,9 @@ type DeleteAppProfileRequest struct {
func (x *DeleteAppProfileRequest) Reset() {
*x = DeleteAppProfileRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DeleteAppProfileRequest) String() string {
@@ -1574,7 +1528,7 @@ func (*DeleteAppProfileRequest) ProtoMessage() {}
func (x *DeleteAppProfileRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[22]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1612,11 +1566,9 @@ type UpdateAppProfileMetadata struct {
func (x *UpdateAppProfileMetadata) Reset() {
*x = UpdateAppProfileMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[23]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UpdateAppProfileMetadata) String() string {
@@ -1627,7 +1579,7 @@ func (*UpdateAppProfileMetadata) ProtoMessage() {}
func (x *UpdateAppProfileMetadata) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[23]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1677,11 +1629,9 @@ type ListHotTabletsRequest struct {
func (x *ListHotTabletsRequest) Reset() {
*x = ListHotTabletsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[24]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListHotTabletsRequest) String() string {
@@ -1692,7 +1642,7 @@ func (*ListHotTabletsRequest) ProtoMessage() {}
func (x *ListHotTabletsRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[24]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1762,11 +1712,9 @@ type ListHotTabletsResponse struct {
func (x *ListHotTabletsResponse) Reset() {
*x = ListHotTabletsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[25]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListHotTabletsResponse) String() string {
@@ -1777,7 +1725,7 @@ func (*ListHotTabletsResponse) ProtoMessage() {}
func (x *ListHotTabletsResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[25]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1823,11 +1771,9 @@ type CreateClusterMetadata_TableProgress struct {
func (x *CreateClusterMetadata_TableProgress) Reset() {
*x = CreateClusterMetadata_TableProgress{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[27]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CreateClusterMetadata_TableProgress) String() string {
@@ -1838,7 +1784,7 @@ func (*CreateClusterMetadata_TableProgress) ProtoMessage() {}
func (x *CreateClusterMetadata_TableProgress) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[27]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2659,332 +2605,6 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() {
return
}
file_google_bigtable_admin_v2_instance_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*CreateInstanceRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*GetInstanceRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*ListInstancesRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*ListInstancesResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[4].Exporter = func(v any, i int) any {
- switch v := v.(*PartialUpdateInstanceRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[5].Exporter = func(v any, i int) any {
- switch v := v.(*DeleteInstanceRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[6].Exporter = func(v any, i int) any {
- switch v := v.(*CreateClusterRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[7].Exporter = func(v any, i int) any {
- switch v := v.(*GetClusterRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[8].Exporter = func(v any, i int) any {
- switch v := v.(*ListClustersRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[9].Exporter = func(v any, i int) any {
- switch v := v.(*ListClustersResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[10].Exporter = func(v any, i int) any {
- switch v := v.(*DeleteClusterRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[11].Exporter = func(v any, i int) any {
- switch v := v.(*CreateInstanceMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[12].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateInstanceMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[13].Exporter = func(v any, i int) any {
- switch v := v.(*CreateClusterMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[14].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateClusterMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[15].Exporter = func(v any, i int) any {
- switch v := v.(*PartialUpdateClusterMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[16].Exporter = func(v any, i int) any {
- switch v := v.(*PartialUpdateClusterRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[17].Exporter = func(v any, i int) any {
- switch v := v.(*CreateAppProfileRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[18].Exporter = func(v any, i int) any {
- switch v := v.(*GetAppProfileRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[19].Exporter = func(v any, i int) any {
- switch v := v.(*ListAppProfilesRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[20].Exporter = func(v any, i int) any {
- switch v := v.(*ListAppProfilesResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[21].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateAppProfileRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[22].Exporter = func(v any, i int) any {
- switch v := v.(*DeleteAppProfileRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[23].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateAppProfileMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[24].Exporter = func(v any, i int) any {
- switch v := v.(*ListHotTabletsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[25].Exporter = func(v any, i int) any {
- switch v := v.(*ListHotTabletsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[27].Exporter = func(v any, i int) any {
- switch v := v.(*CreateClusterMetadata_TableProgress); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
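The churn in this generated file comes from regenerating with protoc-gen-go v1.35.2, which drops the `protoimpl.UnsafeEnabled` branches and the reflection `Exporter` closures: message state is now always initialized in `Reset` and `ProtoReflect`. A sketch of why this is behavior-neutral for callers — the field names come from the bigtable admin proto, and the project/instance values are placeholders:

```go
// Sketch: the public surface of the regenerated messages is unchanged;
// Reset and ProtoReflect behave the same before and after v1.35.2.
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/bigtable/admin/apiv2/adminpb"
	"google.golang.org/protobuf/proto"
)

func main() {
	req := &adminpb.CreateInstanceRequest{
		Parent:     "projects/my-project", // placeholder
		InstanceId: "my-instance",         // placeholder
	}
	b, err := proto.Marshal(req)
	if err != nil {
		log.Fatalf("marshal: %v", err)
	}
	fmt.Printf("encoded %d bytes\n", len(b))
	req.Reset()
	fmt.Println("after Reset:", req.GetParent() == "")
}
```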
diff --git a/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/bigtable_table_admin.pb.go b/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/bigtable_table_admin.pb.go
index f765d7acdeedb..d7775857b59a5 100644
--- a/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/bigtable_table_admin.pb.go
+++ b/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/bigtable_table_admin.pb.go
@@ -14,7 +14,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.2
+// protoc-gen-go v1.35.2
// protoc v4.25.3
// source: google/bigtable/admin/v2/bigtable_table_admin.proto
@@ -71,11 +71,9 @@ type RestoreTableRequest struct {
func (x *RestoreTableRequest) Reset() {
*x = RestoreTableRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *RestoreTableRequest) String() string {
@@ -86,7 +84,7 @@ func (*RestoreTableRequest) ProtoMessage() {}
func (x *RestoreTableRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -178,11 +176,9 @@ type RestoreTableMetadata struct {
func (x *RestoreTableMetadata) Reset() {
*x = RestoreTableMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *RestoreTableMetadata) String() string {
@@ -193,7 +189,7 @@ func (*RestoreTableMetadata) ProtoMessage() {}
func (x *RestoreTableMetadata) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -277,11 +273,9 @@ type OptimizeRestoredTableMetadata struct {
func (x *OptimizeRestoredTableMetadata) Reset() {
*x = OptimizeRestoredTableMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *OptimizeRestoredTableMetadata) String() string {
@@ -292,7 +286,7 @@ func (*OptimizeRestoredTableMetadata) ProtoMessage() {}
func (x *OptimizeRestoredTableMetadata) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -358,11 +352,9 @@ type CreateTableRequest struct {
func (x *CreateTableRequest) Reset() {
*x = CreateTableRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CreateTableRequest) String() string {
@@ -373,7 +365,7 @@ func (*CreateTableRequest) ProtoMessage() {}
func (x *CreateTableRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -443,11 +435,9 @@ type CreateTableFromSnapshotRequest struct {
func (x *CreateTableFromSnapshotRequest) Reset() {
*x = CreateTableFromSnapshotRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CreateTableFromSnapshotRequest) String() string {
@@ -458,7 +448,7 @@ func (*CreateTableFromSnapshotRequest) ProtoMessage() {}
func (x *CreateTableFromSnapshotRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -516,11 +506,9 @@ type DropRowRangeRequest struct {
func (x *DropRowRangeRequest) Reset() {
*x = DropRowRangeRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DropRowRangeRequest) String() string {
@@ -531,7 +519,7 @@ func (*DropRowRangeRequest) ProtoMessage() {}
func (x *DropRowRangeRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -622,11 +610,9 @@ type ListTablesRequest struct {
func (x *ListTablesRequest) Reset() {
*x = ListTablesRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListTablesRequest) String() string {
@@ -637,7 +623,7 @@ func (*ListTablesRequest) ProtoMessage() {}
func (x *ListTablesRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -697,11 +683,9 @@ type ListTablesResponse struct {
func (x *ListTablesResponse) Reset() {
*x = ListTablesResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListTablesResponse) String() string {
@@ -712,7 +696,7 @@ func (*ListTablesResponse) ProtoMessage() {}
func (x *ListTablesResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -759,11 +743,9 @@ type GetTableRequest struct {
func (x *GetTableRequest) Reset() {
*x = GetTableRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GetTableRequest) String() string {
@@ -774,7 +756,7 @@ func (*GetTableRequest) ProtoMessage() {}
func (x *GetTableRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -830,11 +812,9 @@ type UpdateTableRequest struct {
func (x *UpdateTableRequest) Reset() {
*x = UpdateTableRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UpdateTableRequest) String() string {
@@ -845,7 +825,7 @@ func (*UpdateTableRequest) ProtoMessage() {}
func (x *UpdateTableRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -891,11 +871,9 @@ type UpdateTableMetadata struct {
func (x *UpdateTableMetadata) Reset() {
*x = UpdateTableMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UpdateTableMetadata) String() string {
@@ -906,7 +884,7 @@ func (*UpdateTableMetadata) ProtoMessage() {}
func (x *UpdateTableMetadata) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -957,11 +935,9 @@ type DeleteTableRequest struct {
func (x *DeleteTableRequest) Reset() {
*x = DeleteTableRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DeleteTableRequest) String() string {
@@ -972,7 +948,7 @@ func (*DeleteTableRequest) ProtoMessage() {}
func (x *DeleteTableRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1009,11 +985,9 @@ type UndeleteTableRequest struct {
func (x *UndeleteTableRequest) Reset() {
*x = UndeleteTableRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UndeleteTableRequest) String() string {
@@ -1024,7 +998,7 @@ func (*UndeleteTableRequest) ProtoMessage() {}
func (x *UndeleteTableRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1063,11 +1037,9 @@ type UndeleteTableMetadata struct {
func (x *UndeleteTableMetadata) Reset() {
*x = UndeleteTableMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UndeleteTableMetadata) String() string {
@@ -1078,7 +1050,7 @@ func (*UndeleteTableMetadata) ProtoMessage() {}
func (x *UndeleteTableMetadata) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1136,11 +1108,9 @@ type ModifyColumnFamiliesRequest struct {
func (x *ModifyColumnFamiliesRequest) Reset() {
*x = ModifyColumnFamiliesRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ModifyColumnFamiliesRequest) String() string {
@@ -1151,7 +1121,7 @@ func (*ModifyColumnFamiliesRequest) ProtoMessage() {}
func (x *ModifyColumnFamiliesRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1202,11 +1172,9 @@ type GenerateConsistencyTokenRequest struct {
func (x *GenerateConsistencyTokenRequest) Reset() {
*x = GenerateConsistencyTokenRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GenerateConsistencyTokenRequest) String() string {
@@ -1217,7 +1185,7 @@ func (*GenerateConsistencyTokenRequest) ProtoMessage() {}
func (x *GenerateConsistencyTokenRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1252,11 +1220,9 @@ type GenerateConsistencyTokenResponse struct {
func (x *GenerateConsistencyTokenResponse) Reset() {
*x = GenerateConsistencyTokenResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GenerateConsistencyTokenResponse) String() string {
@@ -1267,7 +1233,7 @@ func (*GenerateConsistencyTokenResponse) ProtoMessage() {}
func (x *GenerateConsistencyTokenResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1314,11 +1280,9 @@ type CheckConsistencyRequest struct {
func (x *CheckConsistencyRequest) Reset() {
*x = CheckConsistencyRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CheckConsistencyRequest) String() string {
@@ -1329,7 +1293,7 @@ func (*CheckConsistencyRequest) ProtoMessage() {}
func (x *CheckConsistencyRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1411,11 +1375,9 @@ type StandardReadRemoteWrites struct {
func (x *StandardReadRemoteWrites) Reset() {
*x = StandardReadRemoteWrites{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *StandardReadRemoteWrites) String() string {
@@ -1426,7 +1388,7 @@ func (*StandardReadRemoteWrites) ProtoMessage() {}
func (x *StandardReadRemoteWrites) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1451,11 +1413,9 @@ type DataBoostReadLocalWrites struct {
func (x *DataBoostReadLocalWrites) Reset() {
*x = DataBoostReadLocalWrites{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DataBoostReadLocalWrites) String() string {
@@ -1466,7 +1426,7 @@ func (*DataBoostReadLocalWrites) ProtoMessage() {}
func (x *DataBoostReadLocalWrites) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1495,11 +1455,9 @@ type CheckConsistencyResponse struct {
func (x *CheckConsistencyResponse) Reset() {
*x = CheckConsistencyResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CheckConsistencyResponse) String() string {
@@ -1510,7 +1468,7 @@ func (*CheckConsistencyResponse) ProtoMessage() {}
func (x *CheckConsistencyResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1568,11 +1526,9 @@ type SnapshotTableRequest struct {
func (x *SnapshotTableRequest) Reset() {
*x = SnapshotTableRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SnapshotTableRequest) String() string {
@@ -1583,7 +1539,7 @@ func (*SnapshotTableRequest) ProtoMessage() {}
func (x *SnapshotTableRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[21]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1653,11 +1609,9 @@ type GetSnapshotRequest struct {
func (x *GetSnapshotRequest) Reset() {
*x = GetSnapshotRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GetSnapshotRequest) String() string {
@@ -1668,7 +1622,7 @@ func (*GetSnapshotRequest) ProtoMessage() {}
func (x *GetSnapshotRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[22]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1717,11 +1671,9 @@ type ListSnapshotsRequest struct {
func (x *ListSnapshotsRequest) Reset() {
*x = ListSnapshotsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[23]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListSnapshotsRequest) String() string {
@@ -1732,7 +1684,7 @@ func (*ListSnapshotsRequest) ProtoMessage() {}
func (x *ListSnapshotsRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[23]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1790,11 +1742,9 @@ type ListSnapshotsResponse struct {
func (x *ListSnapshotsResponse) Reset() {
*x = ListSnapshotsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[24]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListSnapshotsResponse) String() string {
@@ -1805,7 +1755,7 @@ func (*ListSnapshotsResponse) ProtoMessage() {}
func (x *ListSnapshotsResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[24]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1854,11 +1804,9 @@ type DeleteSnapshotRequest struct {
func (x *DeleteSnapshotRequest) Reset() {
*x = DeleteSnapshotRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[25]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DeleteSnapshotRequest) String() string {
@@ -1869,7 +1817,7 @@ func (*DeleteSnapshotRequest) ProtoMessage() {}
func (x *DeleteSnapshotRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[25]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1912,11 +1860,9 @@ type SnapshotTableMetadata struct {
func (x *SnapshotTableMetadata) Reset() {
*x = SnapshotTableMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[26]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SnapshotTableMetadata) String() string {
@@ -1927,7 +1873,7 @@ func (*SnapshotTableMetadata) ProtoMessage() {}
func (x *SnapshotTableMetadata) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[26]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1985,11 +1931,9 @@ type CreateTableFromSnapshotMetadata struct {
func (x *CreateTableFromSnapshotMetadata) Reset() {
*x = CreateTableFromSnapshotMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[27]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CreateTableFromSnapshotMetadata) String() string {
@@ -2000,7 +1944,7 @@ func (*CreateTableFromSnapshotMetadata) ProtoMessage() {}
func (x *CreateTableFromSnapshotMetadata) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[27]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2060,11 +2004,9 @@ type CreateBackupRequest struct {
func (x *CreateBackupRequest) Reset() {
*x = CreateBackupRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[28]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CreateBackupRequest) String() string {
@@ -2075,7 +2017,7 @@ func (*CreateBackupRequest) ProtoMessage() {}
func (x *CreateBackupRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[28]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2130,11 +2072,9 @@ type CreateBackupMetadata struct {
func (x *CreateBackupMetadata) Reset() {
*x = CreateBackupMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[29]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CreateBackupMetadata) String() string {
@@ -2145,7 +2085,7 @@ func (*CreateBackupMetadata) ProtoMessage() {}
func (x *CreateBackupMetadata) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[29]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2211,11 +2151,9 @@ type UpdateBackupRequest struct {
func (x *UpdateBackupRequest) Reset() {
*x = UpdateBackupRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[30]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UpdateBackupRequest) String() string {
@@ -2226,7 +2164,7 @@ func (*UpdateBackupRequest) ProtoMessage() {}
func (x *UpdateBackupRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[30]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2270,11 +2208,9 @@ type GetBackupRequest struct {
func (x *GetBackupRequest) Reset() {
*x = GetBackupRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[31]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[31]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GetBackupRequest) String() string {
@@ -2285,7 +2221,7 @@ func (*GetBackupRequest) ProtoMessage() {}
func (x *GetBackupRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[31]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2322,11 +2258,9 @@ type DeleteBackupRequest struct {
func (x *DeleteBackupRequest) Reset() {
*x = DeleteBackupRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[32]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DeleteBackupRequest) String() string {
@@ -2337,7 +2271,7 @@ func (*DeleteBackupRequest) ProtoMessage() {}
func (x *DeleteBackupRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[32]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2441,11 +2375,9 @@ type ListBackupsRequest struct {
func (x *ListBackupsRequest) Reset() {
*x = ListBackupsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[33]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[33]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListBackupsRequest) String() string {
@@ -2456,7 +2388,7 @@ func (*ListBackupsRequest) ProtoMessage() {}
func (x *ListBackupsRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[33]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2523,11 +2455,9 @@ type ListBackupsResponse struct {
func (x *ListBackupsResponse) Reset() {
*x = ListBackupsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[34]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[34]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListBackupsResponse) String() string {
@@ -2538,7 +2468,7 @@ func (*ListBackupsResponse) ProtoMessage() {}
func (x *ListBackupsResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[34]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2603,11 +2533,9 @@ type CopyBackupRequest struct {
func (x *CopyBackupRequest) Reset() {
*x = CopyBackupRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[35]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[35]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CopyBackupRequest) String() string {
@@ -2618,7 +2546,7 @@ func (*CopyBackupRequest) ProtoMessage() {}
func (x *CopyBackupRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[35]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2682,11 +2610,9 @@ type CopyBackupMetadata struct {
func (x *CopyBackupMetadata) Reset() {
*x = CopyBackupMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[36]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[36]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CopyBackupMetadata) String() string {
@@ -2697,7 +2623,7 @@ func (*CopyBackupMetadata) ProtoMessage() {}
func (x *CopyBackupMetadata) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[36]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2755,11 +2681,9 @@ type CreateAuthorizedViewRequest struct {
func (x *CreateAuthorizedViewRequest) Reset() {
*x = CreateAuthorizedViewRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[37]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[37]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CreateAuthorizedViewRequest) String() string {
@@ -2770,7 +2694,7 @@ func (*CreateAuthorizedViewRequest) ProtoMessage() {}
func (x *CreateAuthorizedViewRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[37]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2822,11 +2746,9 @@ type CreateAuthorizedViewMetadata struct {
func (x *CreateAuthorizedViewMetadata) Reset() {
*x = CreateAuthorizedViewMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[38]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[38]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CreateAuthorizedViewMetadata) String() string {
@@ -2837,7 +2759,7 @@ func (*CreateAuthorizedViewMetadata) ProtoMessage() {}
func (x *CreateAuthorizedViewMetadata) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[38]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2903,11 +2825,9 @@ type ListAuthorizedViewsRequest struct {
func (x *ListAuthorizedViewsRequest) Reset() {
*x = ListAuthorizedViewsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[39]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[39]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListAuthorizedViewsRequest) String() string {
@@ -2918,7 +2838,7 @@ func (*ListAuthorizedViewsRequest) ProtoMessage() {}
func (x *ListAuthorizedViewsRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[39]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2978,11 +2898,9 @@ type ListAuthorizedViewsResponse struct {
func (x *ListAuthorizedViewsResponse) Reset() {
*x = ListAuthorizedViewsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[40]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[40]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListAuthorizedViewsResponse) String() string {
@@ -2993,7 +2911,7 @@ func (*ListAuthorizedViewsResponse) ProtoMessage() {}
func (x *ListAuthorizedViewsResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[40]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3040,11 +2958,9 @@ type GetAuthorizedViewRequest struct {
func (x *GetAuthorizedViewRequest) Reset() {
*x = GetAuthorizedViewRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[41]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[41]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GetAuthorizedViewRequest) String() string {
@@ -3055,7 +2971,7 @@ func (*GetAuthorizedViewRequest) ProtoMessage() {}
func (x *GetAuthorizedViewRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[41]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3111,11 +3027,9 @@ type UpdateAuthorizedViewRequest struct {
func (x *UpdateAuthorizedViewRequest) Reset() {
*x = UpdateAuthorizedViewRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[42]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[42]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UpdateAuthorizedViewRequest) String() string {
@@ -3126,7 +3040,7 @@ func (*UpdateAuthorizedViewRequest) ProtoMessage() {}
func (x *UpdateAuthorizedViewRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[42]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3180,11 +3094,9 @@ type UpdateAuthorizedViewMetadata struct {
func (x *UpdateAuthorizedViewMetadata) Reset() {
*x = UpdateAuthorizedViewMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[43]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[43]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UpdateAuthorizedViewMetadata) String() string {
@@ -3195,7 +3107,7 @@ func (*UpdateAuthorizedViewMetadata) ProtoMessage() {}
func (x *UpdateAuthorizedViewMetadata) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[43]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3251,11 +3163,9 @@ type DeleteAuthorizedViewRequest struct {
func (x *DeleteAuthorizedViewRequest) Reset() {
*x = DeleteAuthorizedViewRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[44]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[44]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DeleteAuthorizedViewRequest) String() string {
@@ -3266,7 +3176,7 @@ func (*DeleteAuthorizedViewRequest) ProtoMessage() {}
func (x *DeleteAuthorizedViewRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[44]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3307,11 +3217,9 @@ type CreateTableRequest_Split struct {
func (x *CreateTableRequest_Split) Reset() {
*x = CreateTableRequest_Split{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[45]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[45]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CreateTableRequest_Split) String() string {
@@ -3322,7 +3230,7 @@ func (*CreateTableRequest_Split) ProtoMessage() {}
func (x *CreateTableRequest_Split) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[45]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3368,11 +3276,9 @@ type ModifyColumnFamiliesRequest_Modification struct {
func (x *ModifyColumnFamiliesRequest_Modification) Reset() {
*x = ModifyColumnFamiliesRequest_Modification{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[46]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[46]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ModifyColumnFamiliesRequest_Modification) String() string {
@@ -3383,7 +3289,7 @@ func (*ModifyColumnFamiliesRequest_Modification) ProtoMessage() {}
func (x *ModifyColumnFamiliesRequest_Modification) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[46]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4655,572 +4561,6 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() {
}
file_google_bigtable_admin_v2_common_proto_init()
file_google_bigtable_admin_v2_table_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*RestoreTableRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*RestoreTableMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*OptimizeRestoredTableMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*CreateTableRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[4].Exporter = func(v any, i int) any {
- switch v := v.(*CreateTableFromSnapshotRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[5].Exporter = func(v any, i int) any {
- switch v := v.(*DropRowRangeRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[6].Exporter = func(v any, i int) any {
- switch v := v.(*ListTablesRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[7].Exporter = func(v any, i int) any {
- switch v := v.(*ListTablesResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[8].Exporter = func(v any, i int) any {
- switch v := v.(*GetTableRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[9].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateTableRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[10].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateTableMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[11].Exporter = func(v any, i int) any {
- switch v := v.(*DeleteTableRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[12].Exporter = func(v any, i int) any {
- switch v := v.(*UndeleteTableRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[13].Exporter = func(v any, i int) any {
- switch v := v.(*UndeleteTableMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[14].Exporter = func(v any, i int) any {
- switch v := v.(*ModifyColumnFamiliesRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[15].Exporter = func(v any, i int) any {
- switch v := v.(*GenerateConsistencyTokenRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[16].Exporter = func(v any, i int) any {
- switch v := v.(*GenerateConsistencyTokenResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[17].Exporter = func(v any, i int) any {
- switch v := v.(*CheckConsistencyRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[18].Exporter = func(v any, i int) any {
- switch v := v.(*StandardReadRemoteWrites); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[19].Exporter = func(v any, i int) any {
- switch v := v.(*DataBoostReadLocalWrites); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[20].Exporter = func(v any, i int) any {
- switch v := v.(*CheckConsistencyResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[21].Exporter = func(v any, i int) any {
- switch v := v.(*SnapshotTableRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[22].Exporter = func(v any, i int) any {
- switch v := v.(*GetSnapshotRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[23].Exporter = func(v any, i int) any {
- switch v := v.(*ListSnapshotsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[24].Exporter = func(v any, i int) any {
- switch v := v.(*ListSnapshotsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[25].Exporter = func(v any, i int) any {
- switch v := v.(*DeleteSnapshotRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[26].Exporter = func(v any, i int) any {
- switch v := v.(*SnapshotTableMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[27].Exporter = func(v any, i int) any {
- switch v := v.(*CreateTableFromSnapshotMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[28].Exporter = func(v any, i int) any {
- switch v := v.(*CreateBackupRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[29].Exporter = func(v any, i int) any {
- switch v := v.(*CreateBackupMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[30].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateBackupRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[31].Exporter = func(v any, i int) any {
- switch v := v.(*GetBackupRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[32].Exporter = func(v any, i int) any {
- switch v := v.(*DeleteBackupRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[33].Exporter = func(v any, i int) any {
- switch v := v.(*ListBackupsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[34].Exporter = func(v any, i int) any {
- switch v := v.(*ListBackupsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[35].Exporter = func(v any, i int) any {
- switch v := v.(*CopyBackupRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[36].Exporter = func(v any, i int) any {
- switch v := v.(*CopyBackupMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[37].Exporter = func(v any, i int) any {
- switch v := v.(*CreateAuthorizedViewRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[38].Exporter = func(v any, i int) any {
- switch v := v.(*CreateAuthorizedViewMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[39].Exporter = func(v any, i int) any {
- switch v := v.(*ListAuthorizedViewsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[40].Exporter = func(v any, i int) any {
- switch v := v.(*ListAuthorizedViewsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[41].Exporter = func(v any, i int) any {
- switch v := v.(*GetAuthorizedViewRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[42].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateAuthorizedViewRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[43].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateAuthorizedViewMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[44].Exporter = func(v any, i int) any {
- switch v := v.(*DeleteAuthorizedViewRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[45].Exporter = func(v any, i int) any {
- switch v := v.(*CreateTableRequest_Split); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[46].Exporter = func(v any, i int) any {
- switch v := v.(*ModifyColumnFamiliesRequest_Modification); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[0].OneofWrappers = []any{
(*RestoreTableRequest_Backup)(nil),
}
diff --git a/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/common.pb.go b/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/common.pb.go
index caa6e944683ae..91422aa52bccd 100644
--- a/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/common.pb.go
+++ b/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/common.pb.go
@@ -14,7 +14,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.2
+// protoc-gen-go v1.35.2
// protoc v4.25.3
// source: google/bigtable/admin/v2/common.proto
@@ -108,11 +108,9 @@ type OperationProgress struct {
func (x *OperationProgress) Reset() {
*x = OperationProgress{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_common_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_common_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *OperationProgress) String() string {
@@ -123,7 +121,7 @@ func (*OperationProgress) ProtoMessage() {}
func (x *OperationProgress) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_common_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -233,20 +231,6 @@ func file_google_bigtable_admin_v2_common_proto_init() {
if File_google_bigtable_admin_v2_common_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_bigtable_admin_v2_common_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*OperationProgress); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/instance.pb.go b/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/instance.pb.go
index 0fc21d396ded7..b36695812cbff 100644
--- a/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/instance.pb.go
+++ b/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/instance.pb.go
@@ -14,7 +14,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.2
+// protoc-gen-go v1.35.2
// protoc v4.25.3
// source: google/bigtable/admin/v2/instance.proto
@@ -217,6 +217,62 @@ func (Cluster_State) EnumDescriptor() ([]byte, []int) {
return file_google_bigtable_admin_v2_instance_proto_rawDescGZIP(), []int{3, 0}
}
+// Possible node scaling factors of the clusters. Node scaling delivers better
+// latency and more throughput by removing node boundaries.
+type Cluster_NodeScalingFactor int32
+
+const (
+ // No node scaling specified. Defaults to NODE_SCALING_FACTOR_1X.
+ Cluster_NODE_SCALING_FACTOR_UNSPECIFIED Cluster_NodeScalingFactor = 0
+ // The cluster is running with a scaling factor of 1.
+ Cluster_NODE_SCALING_FACTOR_1X Cluster_NodeScalingFactor = 1
+ // The cluster is running with a scaling factor of 2.
+ // All node count values must be in increments of 2 with this scaling factor
+ // enabled, otherwise an INVALID_ARGUMENT error will be returned.
+ Cluster_NODE_SCALING_FACTOR_2X Cluster_NodeScalingFactor = 2
+)
+
+// Enum value maps for Cluster_NodeScalingFactor.
+var (
+ Cluster_NodeScalingFactor_name = map[int32]string{
+ 0: "NODE_SCALING_FACTOR_UNSPECIFIED",
+ 1: "NODE_SCALING_FACTOR_1X",
+ 2: "NODE_SCALING_FACTOR_2X",
+ }
+ Cluster_NodeScalingFactor_value = map[string]int32{
+ "NODE_SCALING_FACTOR_UNSPECIFIED": 0,
+ "NODE_SCALING_FACTOR_1X": 1,
+ "NODE_SCALING_FACTOR_2X": 2,
+ }
+)
+
+func (x Cluster_NodeScalingFactor) Enum() *Cluster_NodeScalingFactor {
+ p := new(Cluster_NodeScalingFactor)
+ *p = x
+ return p
+}
+
+func (x Cluster_NodeScalingFactor) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Cluster_NodeScalingFactor) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_bigtable_admin_v2_instance_proto_enumTypes[3].Descriptor()
+}
+
+func (Cluster_NodeScalingFactor) Type() protoreflect.EnumType {
+ return &file_google_bigtable_admin_v2_instance_proto_enumTypes[3]
+}
+
+func (x Cluster_NodeScalingFactor) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Cluster_NodeScalingFactor.Descriptor instead.
+func (Cluster_NodeScalingFactor) EnumDescriptor() ([]byte, []int) {
+ return file_google_bigtable_admin_v2_instance_proto_rawDescGZIP(), []int{3, 1}
+}
+
// Possible priorities for an app profile. Note that higher priority writes
// can sometimes queue behind lower priority writes to the same tablet, as
// writes must be strictly sequenced in the durability log.
@@ -257,11 +313,11 @@ func (x AppProfile_Priority) String() string {
}
func (AppProfile_Priority) Descriptor() protoreflect.EnumDescriptor {
- return file_google_bigtable_admin_v2_instance_proto_enumTypes[3].Descriptor()
+ return file_google_bigtable_admin_v2_instance_proto_enumTypes[4].Descriptor()
}
func (AppProfile_Priority) Type() protoreflect.EnumType {
- return &file_google_bigtable_admin_v2_instance_proto_enumTypes[3]
+ return &file_google_bigtable_admin_v2_instance_proto_enumTypes[4]
}
func (x AppProfile_Priority) Number() protoreflect.EnumNumber {
@@ -309,11 +365,11 @@ func (x AppProfile_DataBoostIsolationReadOnly_ComputeBillingOwner) String() stri
}
func (AppProfile_DataBoostIsolationReadOnly_ComputeBillingOwner) Descriptor() protoreflect.EnumDescriptor {
- return file_google_bigtable_admin_v2_instance_proto_enumTypes[4].Descriptor()
+ return file_google_bigtable_admin_v2_instance_proto_enumTypes[5].Descriptor()
}
func (AppProfile_DataBoostIsolationReadOnly_ComputeBillingOwner) Type() protoreflect.EnumType {
- return &file_google_bigtable_admin_v2_instance_proto_enumTypes[4]
+ return &file_google_bigtable_admin_v2_instance_proto_enumTypes[5]
}
func (x AppProfile_DataBoostIsolationReadOnly_ComputeBillingOwner) Number() protoreflect.EnumNumber {
@@ -368,11 +424,9 @@ type Instance struct {
func (x *Instance) Reset() {
*x = Instance{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Instance) String() string {
@@ -383,7 +437,7 @@ func (*Instance) ProtoMessage() {}
func (x *Instance) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -469,11 +523,9 @@ type AutoscalingTargets struct {
func (x *AutoscalingTargets) Reset() {
*x = AutoscalingTargets{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AutoscalingTargets) String() string {
@@ -484,7 +536,7 @@ func (*AutoscalingTargets) ProtoMessage() {}
func (x *AutoscalingTargets) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -527,11 +579,9 @@ type AutoscalingLimits struct {
func (x *AutoscalingLimits) Reset() {
*x = AutoscalingLimits{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AutoscalingLimits) String() string {
@@ -542,7 +592,7 @@ func (*AutoscalingLimits) ProtoMessage() {}
func (x *AutoscalingLimits) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -592,6 +642,8 @@ type Cluster struct {
// The number of nodes allocated to this cluster. More nodes enable higher
// throughput and more consistent performance.
ServeNodes int32 `protobuf:"varint,4,opt,name=serve_nodes,json=serveNodes,proto3" json:"serve_nodes,omitempty"`
+ // Immutable. The node scaling factor of this cluster.
+ NodeScalingFactor Cluster_NodeScalingFactor `protobuf:"varint,9,opt,name=node_scaling_factor,json=nodeScalingFactor,proto3,enum=google.bigtable.admin.v2.Cluster_NodeScalingFactor" json:"node_scaling_factor,omitempty"`
// Types that are assignable to Config:
//
// *Cluster_ClusterConfig_
@@ -605,11 +657,9 @@ type Cluster struct {
func (x *Cluster) Reset() {
*x = Cluster{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Cluster) String() string {
@@ -620,7 +670,7 @@ func (*Cluster) ProtoMessage() {}
func (x *Cluster) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -663,6 +713,13 @@ func (x *Cluster) GetServeNodes() int32 {
return 0
}
+func (x *Cluster) GetNodeScalingFactor() Cluster_NodeScalingFactor {
+ if x != nil {
+ return x.NodeScalingFactor
+ }
+ return Cluster_NODE_SCALING_FACTOR_UNSPECIFIED
+}
+
func (m *Cluster) GetConfig() isCluster_Config {
if m != nil {
return m.Config
@@ -743,11 +800,9 @@ type AppProfile struct {
func (x *AppProfile) Reset() {
*x = AppProfile{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AppProfile) String() string {
@@ -758,7 +813,7 @@ func (*AppProfile) ProtoMessage() {}
func (x *AppProfile) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -927,11 +982,9 @@ type HotTablet struct {
func (x *HotTablet) Reset() {
*x = HotTablet{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *HotTablet) String() string {
@@ -942,7 +995,7 @@ func (*HotTablet) ProtoMessage() {}
func (x *HotTablet) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1020,11 +1073,9 @@ type Cluster_ClusterAutoscalingConfig struct {
func (x *Cluster_ClusterAutoscalingConfig) Reset() {
*x = Cluster_ClusterAutoscalingConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Cluster_ClusterAutoscalingConfig) String() string {
@@ -1035,7 +1086,7 @@ func (*Cluster_ClusterAutoscalingConfig) ProtoMessage() {}
func (x *Cluster_ClusterAutoscalingConfig) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1076,11 +1127,9 @@ type Cluster_ClusterConfig struct {
func (x *Cluster_ClusterConfig) Reset() {
*x = Cluster_ClusterConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Cluster_ClusterConfig) String() string {
@@ -1091,7 +1140,7 @@ func (*Cluster_ClusterConfig) ProtoMessage() {}
func (x *Cluster_ClusterConfig) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1136,11 +1185,9 @@ type Cluster_EncryptionConfig struct {
func (x *Cluster_EncryptionConfig) Reset() {
*x = Cluster_EncryptionConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Cluster_EncryptionConfig) String() string {
@@ -1151,7 +1198,7 @@ func (*Cluster_EncryptionConfig) ProtoMessage() {}
func (x *Cluster_EncryptionConfig) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1203,11 +1250,9 @@ type AppProfile_MultiClusterRoutingUseAny struct {
func (x *AppProfile_MultiClusterRoutingUseAny) Reset() {
*x = AppProfile_MultiClusterRoutingUseAny{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AppProfile_MultiClusterRoutingUseAny) String() string {
@@ -1218,7 +1263,7 @@ func (*AppProfile_MultiClusterRoutingUseAny) ProtoMessage() {}
func (x *AppProfile_MultiClusterRoutingUseAny) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1285,11 +1330,9 @@ type AppProfile_SingleClusterRouting struct {
func (x *AppProfile_SingleClusterRouting) Reset() {
*x = AppProfile_SingleClusterRouting{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AppProfile_SingleClusterRouting) String() string {
@@ -1300,7 +1343,7 @@ func (*AppProfile_SingleClusterRouting) ProtoMessage() {}
func (x *AppProfile_SingleClusterRouting) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1342,11 +1385,9 @@ type AppProfile_StandardIsolation struct {
func (x *AppProfile_StandardIsolation) Reset() {
*x = AppProfile_StandardIsolation{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AppProfile_StandardIsolation) String() string {
@@ -1357,7 +1398,7 @@ func (*AppProfile_StandardIsolation) ProtoMessage() {}
func (x *AppProfile_StandardIsolation) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1402,11 +1443,9 @@ type AppProfile_DataBoostIsolationReadOnly struct {
func (x *AppProfile_DataBoostIsolationReadOnly) Reset() {
*x = AppProfile_DataBoostIsolationReadOnly{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AppProfile_DataBoostIsolationReadOnly) String() string {
@@ -1417,7 +1456,7 @@ func (*AppProfile_DataBoostIsolationReadOnly) ProtoMessage() {}
func (x *AppProfile_DataBoostIsolationReadOnly) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1456,11 +1495,9 @@ type AppProfile_MultiClusterRoutingUseAny_RowAffinity struct {
func (x *AppProfile_MultiClusterRoutingUseAny_RowAffinity) Reset() {
*x = AppProfile_MultiClusterRoutingUseAny_RowAffinity{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AppProfile_MultiClusterRoutingUseAny_RowAffinity) String() string {
@@ -1471,7 +1508,7 @@ func (*AppProfile_MultiClusterRoutingUseAny_RowAffinity) ProtoMessage() {}
func (x *AppProfile_MultiClusterRoutingUseAny_RowAffinity) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1559,7 +1596,7 @@ var file_google_bigtable_admin_v2_instance_proto_rawDesc = []byte{
0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x73,
0x65, 0x72, 0x76, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05,
0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x53, 0x65, 0x72, 0x76, 0x65, 0x4e,
- 0x6f, 0x64, 0x65, 0x73, 0x22, 0xf7, 0x08, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x6f, 0x64, 0x65, 0x73, 0x22, 0xd3, 0x0a, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
0x6e, 0x61, 0x6d, 0x65, 0x12, 0x45, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x23, 0x0a, 0x21,
@@ -1572,216 +1609,229 @@ var file_google_bigtable_admin_v2_instance_proto_rawDesc = []byte{
0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12,
0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x04,
0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73,
- 0x12, 0x58, 0x0a, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x12, 0x68, 0x0a, 0x13, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67,
+ 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x33, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x46, 0x61, 0x63, 0x74,
+ 0x6f, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x11, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x63, 0x61,
+ 0x6c, 0x69, 0x6e, 0x67, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x58, 0x0a, 0x0e, 0x63, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x07, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5c, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x12,
+ 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x54, 0x79,
+ 0x70, 0x65, 0x12, 0x64, 0x0a, 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x10, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xdf, 0x01, 0x0a, 0x18, 0x43, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5f, 0x0a, 0x12, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61,
+ 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74,
+ 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x42, 0x03,
+ 0xe0, 0x41, 0x02, 0x52, 0x11, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67,
+ 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x62, 0x0a, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63,
+ 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41,
+ 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74,
+ 0x73, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x12, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c,
+ 0x69, 0x6e, 0x67, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x1a, 0x89, 0x01, 0x0a, 0x0d, 0x43,
+ 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x78, 0x0a, 0x1a,
+ 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c,
+ 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73,
+ 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x6f, 0x73,
+ 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x18, 0x63, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x5c, 0x0a, 0x10, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x48, 0x0a, 0x0c, 0x6b, 0x6d,
+ 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43,
+ 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0a, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79,
+ 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x51, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x13, 0x0a,
+ 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
+ 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x01, 0x12, 0x0c, 0x0a,
+ 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x52,
+ 0x45, 0x53, 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x53,
+ 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x22, 0x70, 0x0a, 0x11, 0x4e, 0x6f, 0x64, 0x65, 0x53,
+ 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x23, 0x0a, 0x1f,
+ 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x43, 0x41, 0x4c, 0x49, 0x4e, 0x47, 0x5f, 0x46, 0x41, 0x43,
+ 0x54, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
+ 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x43, 0x41, 0x4c, 0x49, 0x4e,
+ 0x47, 0x5f, 0x46, 0x41, 0x43, 0x54, 0x4f, 0x52, 0x5f, 0x31, 0x58, 0x10, 0x01, 0x12, 0x1a, 0x0a,
+ 0x16, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x43, 0x41, 0x4c, 0x49, 0x4e, 0x47, 0x5f, 0x46, 0x41,
+ 0x43, 0x54, 0x4f, 0x52, 0x5f, 0x32, 0x58, 0x10, 0x02, 0x3a, 0x65, 0xea, 0x41, 0x62, 0x0a, 0x24,
+ 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x12, 0x3a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x63, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x7d,
+ 0x42, 0x08, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xb5, 0x0c, 0x0a, 0x0a, 0x41,
+ 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a,
+ 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61,
+ 0x67, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x82, 0x01, 0x0a, 0x1d, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x5f, 0x63, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x75, 0x73,
+ 0x65, 0x5f, 0x61, 0x6e, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c,
+ 0x65, 0x2e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f,
+ 0x75, 0x74, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x19, 0x6d,
+ 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69,
+ 0x6e, 0x67, 0x55, 0x73, 0x65, 0x41, 0x6e, 0x79, 0x12, 0x71, 0x0a, 0x16, 0x73, 0x69, 0x6e, 0x67,
+ 0x6c, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69,
+ 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73,
- 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x6c, 0x75,
- 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5c, 0x0a, 0x14, 0x64, 0x65,
- 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79,
- 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x53,
+ 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74,
+ 0x69, 0x6e, 0x67, 0x48, 0x00, 0x52, 0x14, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x4f, 0x0a, 0x08, 0x70,
+ 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66,
+ 0x69, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x42, 0x02, 0x18, 0x01,
+ 0x48, 0x01, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x67, 0x0a, 0x12,
+ 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x42,
- 0x03, 0xe0, 0x41, 0x05, 0x52, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x53, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x64, 0x0a, 0x11, 0x65, 0x6e, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43,
- 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x10, 0x65, 0x6e,
- 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xdf,
- 0x01, 0x0a, 0x18, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63,
- 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5f, 0x0a, 0x12, 0x61,
- 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74,
- 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
- 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x4c, 0x69,
- 0x6d, 0x69, 0x74, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x11, 0x61, 0x75, 0x74, 0x6f, 0x73,
- 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x62, 0x0a, 0x13,
- 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x72, 0x67,
- 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67,
- 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x12, 0x61, 0x75,
- 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73,
- 0x1a, 0x89, 0x01, 0x0a, 0x0d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x12, 0x78, 0x0a, 0x1a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x61, 0x75,
- 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76,
- 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65,
- 0x72, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x52, 0x18, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x6f, 0x73,
- 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x5c, 0x0a, 0x10,
- 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x12, 0x48, 0x0a, 0x0c, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f,
- 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
- 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0a,
- 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x51, 0x0a, 0x05, 0x53, 0x74,
- 0x61, 0x74, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x54,
- 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44,
- 0x59, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10,
- 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x53, 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12,
- 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x3a, 0x65, 0xea,
- 0x41, 0x62, 0x0a, 0x24, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73,
- 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
- 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73,
- 0x74, 0x65, 0x72, 0x7d, 0x42, 0x08, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xb5,
- 0x0c, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x82, 0x01, 0x0a, 0x1d, 0x6d, 0x75, 0x6c, 0x74,
- 0x69, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e,
- 0x67, 0x5f, 0x75, 0x73, 0x65, 0x5f, 0x61, 0x6e, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72,
- 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74,
- 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x41, 0x6e, 0x79, 0x48,
- 0x00, 0x52, 0x19, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52,
- 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x41, 0x6e, 0x79, 0x12, 0x71, 0x0a, 0x16,
- 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72,
- 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61,
- 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69,
- 0x6c, 0x65, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
- 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x48, 0x00, 0x52, 0x14, 0x73, 0x69, 0x6e, 0x67, 0x6c,
- 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x12,
- 0x4f, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28,
- 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61,
- 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70,
- 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79,
- 0x42, 0x02, 0x18, 0x01, 0x48, 0x01, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79,
- 0x12, 0x67, 0x0a, 0x12, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x73, 0x6f,
- 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67,
+ 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x53,
+ 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x48, 0x01, 0x52, 0x11, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x49, 0x73, 0x6f, 0x6c,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x85, 0x01, 0x0a, 0x1e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x62,
+ 0x6f, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72,
+ 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f,
+ 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6f, 0x6f, 0x73, 0x74, 0x49, 0x73,
+ 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x48,
+ 0x01, 0x52, 0x1a, 0x64, 0x61, 0x74, 0x61, 0x42, 0x6f, 0x6f, 0x73, 0x74, 0x49, 0x73, 0x6f, 0x6c,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x1a, 0xc8, 0x01,
+ 0x0a, 0x19, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f,
+ 0x75, 0x74, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x41, 0x6e, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x63,
+ 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09,
+ 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x6f, 0x0a, 0x0c,
+ 0x72, 0x6f, 0x77, 0x5f, 0x61, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70,
+ 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x41,
+ 0x6e, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x41, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79, 0x48, 0x00,
+ 0x52, 0x0b, 0x72, 0x6f, 0x77, 0x41, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79, 0x1a, 0x0d, 0x0a,
+ 0x0b, 0x52, 0x6f, 0x77, 0x41, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79, 0x42, 0x0a, 0x0a, 0x08,
+ 0x61, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79, 0x1a, 0x73, 0x0a, 0x14, 0x53, 0x69, 0x6e, 0x67,
+ 0x6c, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12,
+ 0x3c, 0x0a, 0x1a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x18, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x73, 0x1a, 0x5e, 0x0a,
+ 0x11, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69,
+ 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e,
+ 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72,
+ 0x69, 0x74, 0x79, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x1a, 0x92, 0x02,
+ 0x0a, 0x1a, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6f, 0x6f, 0x73, 0x74, 0x49, 0x73, 0x6f, 0x6c, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x8c, 0x01, 0x0a,
+ 0x15, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x5f, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67,
+ 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x53, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61,
0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69,
- 0x6c, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x49, 0x73, 0x6f, 0x6c, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x01, 0x52, 0x11, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64,
- 0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x85, 0x01, 0x0a, 0x1e, 0x64, 0x61,
- 0x74, 0x61, 0x5f, 0x62, 0x6f, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x0a, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70,
- 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6f, 0x6f,
- 0x73, 0x74, 0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x64, 0x4f,
- 0x6e, 0x6c, 0x79, 0x48, 0x01, 0x52, 0x1a, 0x64, 0x61, 0x74, 0x61, 0x42, 0x6f, 0x6f, 0x73, 0x74,
- 0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c,
- 0x79, 0x1a, 0xc8, 0x01, 0x0a, 0x19, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74,
- 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x41, 0x6e, 0x79, 0x12,
- 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73,
- 0x12, 0x6f, 0x0a, 0x0c, 0x72, 0x6f, 0x77, 0x5f, 0x61, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76,
- 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x4d, 0x75, 0x6c,
- 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67,
- 0x55, 0x73, 0x65, 0x41, 0x6e, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x41, 0x66, 0x66, 0x69, 0x6e, 0x69,
- 0x74, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x72, 0x6f, 0x77, 0x41, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74,
- 0x79, 0x1a, 0x0d, 0x0a, 0x0b, 0x52, 0x6f, 0x77, 0x41, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79,
- 0x42, 0x0a, 0x0a, 0x08, 0x61, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79, 0x1a, 0x73, 0x0a, 0x14,
- 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75,
- 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f,
- 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
- 0x72, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x1a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x72, 0x61,
- 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65,
- 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x54, 0x72,
- 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65,
- 0x73, 0x1a, 0x5e, 0x0a, 0x11, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x49, 0x73, 0x6f,
- 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69,
- 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x50,
- 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74,
- 0x79, 0x1a, 0x92, 0x02, 0x0a, 0x1a, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6f, 0x6f, 0x73, 0x74, 0x49,
- 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79,
- 0x12, 0x8c, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x5f, 0x62, 0x69, 0x6c,
- 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e,
- 0x32, 0x53, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62,
- 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50,
- 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6f, 0x6f, 0x73, 0x74,
- 0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c,
- 0x79, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67,
- 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x48, 0x00, 0x52, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65,
- 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x88, 0x01, 0x01, 0x22,
- 0x4b, 0x0a, 0x13, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e,
- 0x67, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x25, 0x0a, 0x21, 0x43, 0x4f, 0x4d, 0x50, 0x55, 0x54,
- 0x45, 0x5f, 0x42, 0x49, 0x4c, 0x4c, 0x49, 0x4e, 0x47, 0x5f, 0x4f, 0x57, 0x4e, 0x45, 0x52, 0x5f,
- 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a,
- 0x09, 0x48, 0x4f, 0x53, 0x54, 0x5f, 0x50, 0x41, 0x59, 0x53, 0x10, 0x01, 0x42, 0x18, 0x0a, 0x16,
- 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x5f, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67,
- 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x22, 0x5e, 0x0a, 0x08, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69,
- 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x55,
- 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c,
- 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x13,
- 0x0a, 0x0f, 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4d, 0x45, 0x44, 0x49, 0x55,
- 0x4d, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, 0x54, 0x59, 0x5f,
- 0x48, 0x49, 0x47, 0x48, 0x10, 0x03, 0x3a, 0x6f, 0xea, 0x41, 0x6c, 0x0a, 0x27, 0x62, 0x69, 0x67,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f,
- 0x66, 0x69, 0x6c, 0x65, 0x12, 0x41, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b,
- 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x61, 0x70,
- 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x70, 0x70, 0x5f, 0x70,
- 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x7d, 0x42, 0x10, 0x0a, 0x0e, 0x72, 0x6f, 0x75, 0x74, 0x69,
- 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x0b, 0x0a, 0x09, 0x69, 0x73, 0x6f,
- 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xd4, 0x03, 0x0a, 0x09, 0x48, 0x6f, 0x74, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xfa, 0x41,
- 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65,
- 0x12, 0x3e, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
- 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65,
- 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03,
- 0xe0, 0x41, 0x03, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09,
- 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x6e, 0x64,
- 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4b,
- 0x65, 0x79, 0x12, 0x38, 0x0a, 0x16, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x75,
- 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01,
- 0x28, 0x02, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x6e, 0x6f, 0x64, 0x65, 0x43, 0x70, 0x75,
- 0x55, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x3a, 0x7f, 0xea, 0x41,
- 0x7c, 0x0a, 0x26, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x48, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x52, 0x70, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75,
- 0x73, 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73,
- 0x2f, 0x7b, 0x68, 0x6f, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x7d, 0x42, 0xcb, 0x02,
- 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79,
- 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
- 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f,
- 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69,
- 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b,
- 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x0d, 0x49, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x63, 0x6c, 0x6f, 0x75,
- 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f,
- 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x61,
- 0x70, 0x69, 0x76, 0x32, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x70, 0x62, 0x3b, 0x61, 0x64, 0x6d,
- 0x69, 0x6e, 0x70, 0x62, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c,
- 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d,
- 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43,
- 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64,
- 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a,
- 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x33,
+ 0x6c, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6f, 0x6f, 0x73, 0x74, 0x49, 0x73, 0x6f, 0x6c,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x2e, 0x43, 0x6f,
+ 0x6d, 0x70, 0x75, 0x74, 0x65, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x4f, 0x77, 0x6e, 0x65,
+ 0x72, 0x48, 0x00, 0x52, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x42, 0x69, 0x6c, 0x6c,
+ 0x69, 0x6e, 0x67, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x88, 0x01, 0x01, 0x22, 0x4b, 0x0a, 0x13, 0x43,
+ 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x4f, 0x77, 0x6e,
+ 0x65, 0x72, 0x12, 0x25, 0x0a, 0x21, 0x43, 0x4f, 0x4d, 0x50, 0x55, 0x54, 0x45, 0x5f, 0x42, 0x49,
+ 0x4c, 0x4c, 0x49, 0x4e, 0x47, 0x5f, 0x4f, 0x57, 0x4e, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50,
+ 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x4f, 0x53,
+ 0x54, 0x5f, 0x50, 0x41, 0x59, 0x53, 0x10, 0x01, 0x42, 0x18, 0x0a, 0x16, 0x5f, 0x63, 0x6f, 0x6d,
+ 0x70, 0x75, 0x74, 0x65, 0x5f, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x6f, 0x77, 0x6e,
+ 0x65, 0x72, 0x22, 0x5e, 0x0a, 0x08, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18,
+ 0x0a, 0x14, 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45,
+ 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x50, 0x52, 0x49, 0x4f,
+ 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x52,
+ 0x49, 0x4f, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4d, 0x45, 0x44, 0x49, 0x55, 0x4d, 0x10, 0x02, 0x12,
+ 0x11, 0x0a, 0x0d, 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x48, 0x49, 0x47, 0x48,
+ 0x10, 0x03, 0x3a, 0x6f, 0xea, 0x41, 0x6c, 0x0a, 0x27, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65,
+ 0x12, 0x41, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a,
+ 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b,
+ 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f,
+ 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69,
+ 0x6c, 0x65, 0x7d, 0x42, 0x10, 0x0a, 0x0e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x70,
+ 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x0b, 0x0a, 0x09, 0x69, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x22, 0xd4, 0x03, 0x0a, 0x09, 0x48, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62,
+ 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c,
+ 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x0a,
+ 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41,
+ 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3a, 0x0a, 0x08,
+ 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52,
+ 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72,
+ 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x74, 0x61,
+ 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x6e, 0x64, 0x5f, 0x6b, 0x65, 0x79,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x38,
+ 0x0a, 0x16, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65,
+ 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x02, 0x42, 0x03,
+ 0xe0, 0x41, 0x03, 0x52, 0x13, 0x6e, 0x6f, 0x64, 0x65, 0x43, 0x70, 0x75, 0x55, 0x73, 0x61, 0x67,
+ 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x3a, 0x7f, 0xea, 0x41, 0x7c, 0x0a, 0x26, 0x62,
+ 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x48, 0x6f, 0x74, 0x54,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x52, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f,
+ 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
+ 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x63,
+ 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x7d, 0x2f, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x68, 0x6f,
+ 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x7d, 0x42, 0xcb, 0x02, 0xea, 0x41, 0x78, 0x0a,
+ 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b,
+ 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52,
+ 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f,
+ 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70,
+ 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x0d, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x62, 0x69, 0x67, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32,
+ 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x70, 0x62, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x70, 0x62,
+ 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
+ 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56,
+ 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64,
+ 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c,
+ 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f,
+ 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64,
+ 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -1796,59 +1846,61 @@ func file_google_bigtable_admin_v2_instance_proto_rawDescGZIP() []byte {
return file_google_bigtable_admin_v2_instance_proto_rawDescData
}
-var file_google_bigtable_admin_v2_instance_proto_enumTypes = make([]protoimpl.EnumInfo, 5)
+var file_google_bigtable_admin_v2_instance_proto_enumTypes = make([]protoimpl.EnumInfo, 6)
var file_google_bigtable_admin_v2_instance_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
var file_google_bigtable_admin_v2_instance_proto_goTypes = []any{
- (Instance_State)(0), // 0: google.bigtable.admin.v2.Instance.State
- (Instance_Type)(0), // 1: google.bigtable.admin.v2.Instance.Type
- (Cluster_State)(0), // 2: google.bigtable.admin.v2.Cluster.State
- (AppProfile_Priority)(0), // 3: google.bigtable.admin.v2.AppProfile.Priority
- (AppProfile_DataBoostIsolationReadOnly_ComputeBillingOwner)(0), // 4: google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner
- (*Instance)(nil), // 5: google.bigtable.admin.v2.Instance
- (*AutoscalingTargets)(nil), // 6: google.bigtable.admin.v2.AutoscalingTargets
- (*AutoscalingLimits)(nil), // 7: google.bigtable.admin.v2.AutoscalingLimits
- (*Cluster)(nil), // 8: google.bigtable.admin.v2.Cluster
- (*AppProfile)(nil), // 9: google.bigtable.admin.v2.AppProfile
- (*HotTablet)(nil), // 10: google.bigtable.admin.v2.HotTablet
- nil, // 11: google.bigtable.admin.v2.Instance.LabelsEntry
- (*Cluster_ClusterAutoscalingConfig)(nil), // 12: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig
- (*Cluster_ClusterConfig)(nil), // 13: google.bigtable.admin.v2.Cluster.ClusterConfig
- (*Cluster_EncryptionConfig)(nil), // 14: google.bigtable.admin.v2.Cluster.EncryptionConfig
- (*AppProfile_MultiClusterRoutingUseAny)(nil), // 15: google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny
- (*AppProfile_SingleClusterRouting)(nil), // 16: google.bigtable.admin.v2.AppProfile.SingleClusterRouting
- (*AppProfile_StandardIsolation)(nil), // 17: google.bigtable.admin.v2.AppProfile.StandardIsolation
- (*AppProfile_DataBoostIsolationReadOnly)(nil), // 18: google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
- (*AppProfile_MultiClusterRoutingUseAny_RowAffinity)(nil), // 19: google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny.RowAffinity
- (*timestamppb.Timestamp)(nil), // 20: google.protobuf.Timestamp
- (StorageType)(0), // 21: google.bigtable.admin.v2.StorageType
+ (Instance_State)(0), // 0: google.bigtable.admin.v2.Instance.State
+ (Instance_Type)(0), // 1: google.bigtable.admin.v2.Instance.Type
+ (Cluster_State)(0), // 2: google.bigtable.admin.v2.Cluster.State
+ (Cluster_NodeScalingFactor)(0), // 3: google.bigtable.admin.v2.Cluster.NodeScalingFactor
+ (AppProfile_Priority)(0), // 4: google.bigtable.admin.v2.AppProfile.Priority
+ (AppProfile_DataBoostIsolationReadOnly_ComputeBillingOwner)(0), // 5: google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner
+ (*Instance)(nil), // 6: google.bigtable.admin.v2.Instance
+ (*AutoscalingTargets)(nil), // 7: google.bigtable.admin.v2.AutoscalingTargets
+ (*AutoscalingLimits)(nil), // 8: google.bigtable.admin.v2.AutoscalingLimits
+ (*Cluster)(nil), // 9: google.bigtable.admin.v2.Cluster
+ (*AppProfile)(nil), // 10: google.bigtable.admin.v2.AppProfile
+ (*HotTablet)(nil), // 11: google.bigtable.admin.v2.HotTablet
+ nil, // 12: google.bigtable.admin.v2.Instance.LabelsEntry
+ (*Cluster_ClusterAutoscalingConfig)(nil), // 13: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig
+ (*Cluster_ClusterConfig)(nil), // 14: google.bigtable.admin.v2.Cluster.ClusterConfig
+ (*Cluster_EncryptionConfig)(nil), // 15: google.bigtable.admin.v2.Cluster.EncryptionConfig
+ (*AppProfile_MultiClusterRoutingUseAny)(nil), // 16: google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny
+ (*AppProfile_SingleClusterRouting)(nil), // 17: google.bigtable.admin.v2.AppProfile.SingleClusterRouting
+ (*AppProfile_StandardIsolation)(nil), // 18: google.bigtable.admin.v2.AppProfile.StandardIsolation
+ (*AppProfile_DataBoostIsolationReadOnly)(nil), // 19: google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
+ (*AppProfile_MultiClusterRoutingUseAny_RowAffinity)(nil), // 20: google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny.RowAffinity
+ (*timestamppb.Timestamp)(nil), // 21: google.protobuf.Timestamp
+ (StorageType)(0), // 22: google.bigtable.admin.v2.StorageType
}
var file_google_bigtable_admin_v2_instance_proto_depIdxs = []int32{
0, // 0: google.bigtable.admin.v2.Instance.state:type_name -> google.bigtable.admin.v2.Instance.State
1, // 1: google.bigtable.admin.v2.Instance.type:type_name -> google.bigtable.admin.v2.Instance.Type
- 11, // 2: google.bigtable.admin.v2.Instance.labels:type_name -> google.bigtable.admin.v2.Instance.LabelsEntry
- 20, // 3: google.bigtable.admin.v2.Instance.create_time:type_name -> google.protobuf.Timestamp
+ 12, // 2: google.bigtable.admin.v2.Instance.labels:type_name -> google.bigtable.admin.v2.Instance.LabelsEntry
+ 21, // 3: google.bigtable.admin.v2.Instance.create_time:type_name -> google.protobuf.Timestamp
2, // 4: google.bigtable.admin.v2.Cluster.state:type_name -> google.bigtable.admin.v2.Cluster.State
- 13, // 5: google.bigtable.admin.v2.Cluster.cluster_config:type_name -> google.bigtable.admin.v2.Cluster.ClusterConfig
- 21, // 6: google.bigtable.admin.v2.Cluster.default_storage_type:type_name -> google.bigtable.admin.v2.StorageType
- 14, // 7: google.bigtable.admin.v2.Cluster.encryption_config:type_name -> google.bigtable.admin.v2.Cluster.EncryptionConfig
- 15, // 8: google.bigtable.admin.v2.AppProfile.multi_cluster_routing_use_any:type_name -> google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny
- 16, // 9: google.bigtable.admin.v2.AppProfile.single_cluster_routing:type_name -> google.bigtable.admin.v2.AppProfile.SingleClusterRouting
- 3, // 10: google.bigtable.admin.v2.AppProfile.priority:type_name -> google.bigtable.admin.v2.AppProfile.Priority
- 17, // 11: google.bigtable.admin.v2.AppProfile.standard_isolation:type_name -> google.bigtable.admin.v2.AppProfile.StandardIsolation
- 18, // 12: google.bigtable.admin.v2.AppProfile.data_boost_isolation_read_only:type_name -> google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
- 20, // 13: google.bigtable.admin.v2.HotTablet.start_time:type_name -> google.protobuf.Timestamp
- 20, // 14: google.bigtable.admin.v2.HotTablet.end_time:type_name -> google.protobuf.Timestamp
- 7, // 15: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig.autoscaling_limits:type_name -> google.bigtable.admin.v2.AutoscalingLimits
- 6, // 16: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig.autoscaling_targets:type_name -> google.bigtable.admin.v2.AutoscalingTargets
- 12, // 17: google.bigtable.admin.v2.Cluster.ClusterConfig.cluster_autoscaling_config:type_name -> google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig
- 19, // 18: google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny.row_affinity:type_name -> google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny.RowAffinity
- 3, // 19: google.bigtable.admin.v2.AppProfile.StandardIsolation.priority:type_name -> google.bigtable.admin.v2.AppProfile.Priority
- 4, // 20: google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.compute_billing_owner:type_name -> google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner
- 21, // [21:21] is the sub-list for method output_type
- 21, // [21:21] is the sub-list for method input_type
- 21, // [21:21] is the sub-list for extension type_name
- 21, // [21:21] is the sub-list for extension extendee
- 0, // [0:21] is the sub-list for field type_name
+ 3, // 5: google.bigtable.admin.v2.Cluster.node_scaling_factor:type_name -> google.bigtable.admin.v2.Cluster.NodeScalingFactor
+ 14, // 6: google.bigtable.admin.v2.Cluster.cluster_config:type_name -> google.bigtable.admin.v2.Cluster.ClusterConfig
+ 22, // 7: google.bigtable.admin.v2.Cluster.default_storage_type:type_name -> google.bigtable.admin.v2.StorageType
+ 15, // 8: google.bigtable.admin.v2.Cluster.encryption_config:type_name -> google.bigtable.admin.v2.Cluster.EncryptionConfig
+ 16, // 9: google.bigtable.admin.v2.AppProfile.multi_cluster_routing_use_any:type_name -> google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny
+ 17, // 10: google.bigtable.admin.v2.AppProfile.single_cluster_routing:type_name -> google.bigtable.admin.v2.AppProfile.SingleClusterRouting
+ 4, // 11: google.bigtable.admin.v2.AppProfile.priority:type_name -> google.bigtable.admin.v2.AppProfile.Priority
+ 18, // 12: google.bigtable.admin.v2.AppProfile.standard_isolation:type_name -> google.bigtable.admin.v2.AppProfile.StandardIsolation
+ 19, // 13: google.bigtable.admin.v2.AppProfile.data_boost_isolation_read_only:type_name -> google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
+ 21, // 14: google.bigtable.admin.v2.HotTablet.start_time:type_name -> google.protobuf.Timestamp
+ 21, // 15: google.bigtable.admin.v2.HotTablet.end_time:type_name -> google.protobuf.Timestamp
+ 8, // 16: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig.autoscaling_limits:type_name -> google.bigtable.admin.v2.AutoscalingLimits
+ 7, // 17: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig.autoscaling_targets:type_name -> google.bigtable.admin.v2.AutoscalingTargets
+ 13, // 18: google.bigtable.admin.v2.Cluster.ClusterConfig.cluster_autoscaling_config:type_name -> google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig
+ 20, // 19: google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny.row_affinity:type_name -> google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny.RowAffinity
+ 4, // 20: google.bigtable.admin.v2.AppProfile.StandardIsolation.priority:type_name -> google.bigtable.admin.v2.AppProfile.Priority
+ 5, // 21: google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.compute_billing_owner:type_name -> google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner
+ 22, // [22:22] is the sub-list for method output_type
+ 22, // [22:22] is the sub-list for method input_type
+ 22, // [22:22] is the sub-list for extension type_name
+ 22, // [22:22] is the sub-list for extension extendee
+ 0, // [0:22] is the sub-list for field type_name
}
func init() { file_google_bigtable_admin_v2_instance_proto_init() }
@@ -1857,176 +1909,6 @@ func file_google_bigtable_admin_v2_instance_proto_init() {
return
}
file_google_bigtable_admin_v2_common_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_google_bigtable_admin_v2_instance_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Instance); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_instance_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*AutoscalingTargets); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_instance_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*AutoscalingLimits); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_instance_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*Cluster); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_instance_proto_msgTypes[4].Exporter = func(v any, i int) any {
- switch v := v.(*AppProfile); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_instance_proto_msgTypes[5].Exporter = func(v any, i int) any {
- switch v := v.(*HotTablet); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_instance_proto_msgTypes[7].Exporter = func(v any, i int) any {
- switch v := v.(*Cluster_ClusterAutoscalingConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_instance_proto_msgTypes[8].Exporter = func(v any, i int) any {
- switch v := v.(*Cluster_ClusterConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_instance_proto_msgTypes[9].Exporter = func(v any, i int) any {
- switch v := v.(*Cluster_EncryptionConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_instance_proto_msgTypes[10].Exporter = func(v any, i int) any {
- switch v := v.(*AppProfile_MultiClusterRoutingUseAny); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_instance_proto_msgTypes[11].Exporter = func(v any, i int) any {
- switch v := v.(*AppProfile_SingleClusterRouting); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_instance_proto_msgTypes[12].Exporter = func(v any, i int) any {
- switch v := v.(*AppProfile_StandardIsolation); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_instance_proto_msgTypes[13].Exporter = func(v any, i int) any {
- switch v := v.(*AppProfile_DataBoostIsolationReadOnly); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_instance_proto_msgTypes[14].Exporter = func(v any, i int) any {
- switch v := v.(*AppProfile_MultiClusterRoutingUseAny_RowAffinity); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
file_google_bigtable_admin_v2_instance_proto_msgTypes[0].OneofWrappers = []any{}
file_google_bigtable_admin_v2_instance_proto_msgTypes[3].OneofWrappers = []any{
(*Cluster_ClusterConfig_)(nil),
@@ -2047,7 +1929,7 @@ func file_google_bigtable_admin_v2_instance_proto_init() {
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_bigtable_admin_v2_instance_proto_rawDesc,
- NumEnums: 5,
+ NumEnums: 6,
NumMessages: 15,
NumExtensions: 0,
NumServices: 0,
diff --git a/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/table.pb.go b/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/table.pb.go
index 9cd3dbe892cee..374b8e930f342 100644
--- a/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/table.pb.go
+++ b/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/table.pb.go
@@ -14,7 +14,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.2
+// protoc-gen-go v1.35.2
// protoc v4.25.3
// source: google/bigtable/admin/v2/table.proto
@@ -582,11 +582,9 @@ type RestoreInfo struct {
func (x *RestoreInfo) Reset() {
*x = RestoreInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *RestoreInfo) String() string {
@@ -597,7 +595,7 @@ func (*RestoreInfo) ProtoMessage() {}
func (x *RestoreInfo) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -661,11 +659,9 @@ type ChangeStreamConfig struct {
func (x *ChangeStreamConfig) Reset() {
*x = ChangeStreamConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ChangeStreamConfig) String() string {
@@ -676,7 +672,7 @@ func (*ChangeStreamConfig) ProtoMessage() {}
func (x *ChangeStreamConfig) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -747,11 +743,9 @@ type Table struct {
func (x *Table) Reset() {
*x = Table{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Table) String() string {
@@ -762,7 +756,7 @@ func (*Table) ProtoMessage() {}
func (x *Table) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -882,11 +876,9 @@ type AuthorizedView struct {
func (x *AuthorizedView) Reset() {
*x = AuthorizedView{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AuthorizedView) String() string {
@@ -897,7 +889,7 @@ func (*AuthorizedView) ProtoMessage() {}
func (x *AuthorizedView) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -985,11 +977,9 @@ type ColumnFamily struct {
func (x *ColumnFamily) Reset() {
*x = ColumnFamily{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ColumnFamily) String() string {
@@ -1000,7 +990,7 @@ func (*ColumnFamily) ProtoMessage() {}
func (x *ColumnFamily) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1048,11 +1038,9 @@ type GcRule struct {
func (x *GcRule) Reset() {
*x = GcRule{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GcRule) String() string {
@@ -1063,7 +1051,7 @@ func (*GcRule) ProtoMessage() {}
func (x *GcRule) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1169,11 +1157,9 @@ type EncryptionInfo struct {
func (x *EncryptionInfo) Reset() {
*x = EncryptionInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EncryptionInfo) String() string {
@@ -1184,7 +1170,7 @@ func (*EncryptionInfo) ProtoMessage() {}
func (x *EncryptionInfo) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1257,11 +1243,9 @@ type Snapshot struct {
func (x *Snapshot) Reset() {
*x = Snapshot{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Snapshot) String() string {
@@ -1272,7 +1256,7 @@ func (*Snapshot) ProtoMessage() {}
func (x *Snapshot) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1402,11 +1386,9 @@ type Backup struct {
func (x *Backup) Reset() {
*x = Backup{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Backup) String() string {
@@ -1417,7 +1399,7 @@ func (*Backup) ProtoMessage() {}
func (x *Backup) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1534,11 +1516,9 @@ type BackupInfo struct {
func (x *BackupInfo) Reset() {
*x = BackupInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *BackupInfo) String() string {
@@ -1549,7 +1529,7 @@ func (*BackupInfo) ProtoMessage() {}
func (x *BackupInfo) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1617,11 +1597,9 @@ type Table_ClusterState struct {
func (x *Table_ClusterState) Reset() {
*x = Table_ClusterState{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Table_ClusterState) String() string {
@@ -1632,7 +1610,7 @@ func (*Table_ClusterState) ProtoMessage() {}
func (x *Table_ClusterState) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1677,11 +1655,9 @@ type Table_AutomatedBackupPolicy struct {
func (x *Table_AutomatedBackupPolicy) Reset() {
*x = Table_AutomatedBackupPolicy{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Table_AutomatedBackupPolicy) String() string {
@@ -1692,7 +1668,7 @@ func (*Table_AutomatedBackupPolicy) ProtoMessage() {}
func (x *Table_AutomatedBackupPolicy) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1739,11 +1715,9 @@ type AuthorizedView_FamilySubsets struct {
func (x *AuthorizedView_FamilySubsets) Reset() {
*x = AuthorizedView_FamilySubsets{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AuthorizedView_FamilySubsets) String() string {
@@ -1754,7 +1728,7 @@ func (*AuthorizedView_FamilySubsets) ProtoMessage() {}
func (x *AuthorizedView_FamilySubsets) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1799,11 +1773,9 @@ type AuthorizedView_SubsetView struct {
func (x *AuthorizedView_SubsetView) Reset() {
*x = AuthorizedView_SubsetView{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AuthorizedView_SubsetView) String() string {
@@ -1814,7 +1786,7 @@ func (*AuthorizedView_SubsetView) ProtoMessage() {}
func (x *AuthorizedView_SubsetView) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1855,11 +1827,9 @@ type GcRule_Intersection struct {
func (x *GcRule_Intersection) Reset() {
*x = GcRule_Intersection{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GcRule_Intersection) String() string {
@@ -1870,7 +1840,7 @@ func (*GcRule_Intersection) ProtoMessage() {}
func (x *GcRule_Intersection) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1904,11 +1874,9 @@ type GcRule_Union struct {
func (x *GcRule_Union) Reset() {
*x = GcRule_Union{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GcRule_Union) String() string {
@@ -1919,7 +1887,7 @@ func (*GcRule_Union) ProtoMessage() {}
func (x *GcRule_Union) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2435,200 +2403,6 @@ func file_google_bigtable_admin_v2_table_proto_init() {
return
}
file_google_bigtable_admin_v2_types_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_google_bigtable_admin_v2_table_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*RestoreInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_table_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*ChangeStreamConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_table_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*Table); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_table_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*AuthorizedView); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_table_proto_msgTypes[4].Exporter = func(v any, i int) any {
- switch v := v.(*ColumnFamily); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_table_proto_msgTypes[5].Exporter = func(v any, i int) any {
- switch v := v.(*GcRule); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_table_proto_msgTypes[6].Exporter = func(v any, i int) any {
- switch v := v.(*EncryptionInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_table_proto_msgTypes[7].Exporter = func(v any, i int) any {
- switch v := v.(*Snapshot); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_table_proto_msgTypes[8].Exporter = func(v any, i int) any {
- switch v := v.(*Backup); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_table_proto_msgTypes[9].Exporter = func(v any, i int) any {
- switch v := v.(*BackupInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_table_proto_msgTypes[10].Exporter = func(v any, i int) any {
- switch v := v.(*Table_ClusterState); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_table_proto_msgTypes[11].Exporter = func(v any, i int) any {
- switch v := v.(*Table_AutomatedBackupPolicy); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_table_proto_msgTypes[14].Exporter = func(v any, i int) any {
- switch v := v.(*AuthorizedView_FamilySubsets); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_table_proto_msgTypes[15].Exporter = func(v any, i int) any {
- switch v := v.(*AuthorizedView_SubsetView); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_table_proto_msgTypes[17].Exporter = func(v any, i int) any {
- switch v := v.(*GcRule_Intersection); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_table_proto_msgTypes[18].Exporter = func(v any, i int) any {
- switch v := v.(*GcRule_Union); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
file_google_bigtable_admin_v2_table_proto_msgTypes[0].OneofWrappers = []any{
(*RestoreInfo_BackupInfo)(nil),
}
diff --git a/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/types.pb.go b/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/types.pb.go
index 9997364f1d2c6..f23fbecfe1e1b 100644
--- a/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/types.pb.go
+++ b/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/types.pb.go
@@ -14,7 +14,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.2
+// protoc-gen-go v1.35.2
// protoc v4.25.3
// source: google/bigtable/admin/v2/types.proto
@@ -86,11 +86,9 @@ type Type struct {
func (x *Type) Reset() {
*x = Type{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type) String() string {
@@ -101,7 +99,7 @@ func (*Type) ProtoMessage() {}
func (x *Type) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -308,11 +306,9 @@ type Type_Bytes struct {
func (x *Type_Bytes) Reset() {
*x = Type_Bytes{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Bytes) String() string {
@@ -323,7 +319,7 @@ func (*Type_Bytes) ProtoMessage() {}
func (x *Type_Bytes) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -358,11 +354,9 @@ type Type_String struct {
func (x *Type_String) Reset() {
*x = Type_String{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_String) String() string {
@@ -373,7 +367,7 @@ func (*Type_String) ProtoMessage() {}
func (x *Type_String) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -408,11 +402,9 @@ type Type_Int64 struct {
func (x *Type_Int64) Reset() {
*x = Type_Int64{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Int64) String() string {
@@ -423,7 +415,7 @@ func (*Type_Int64) ProtoMessage() {}
func (x *Type_Int64) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -455,11 +447,9 @@ type Type_Bool struct {
func (x *Type_Bool) Reset() {
*x = Type_Bool{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Bool) String() string {
@@ -470,7 +460,7 @@ func (*Type_Bool) ProtoMessage() {}
func (x *Type_Bool) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -495,11 +485,9 @@ type Type_Float32 struct {
func (x *Type_Float32) Reset() {
*x = Type_Float32{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Float32) String() string {
@@ -510,7 +498,7 @@ func (*Type_Float32) ProtoMessage() {}
func (x *Type_Float32) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -535,11 +523,9 @@ type Type_Float64 struct {
func (x *Type_Float64) Reset() {
*x = Type_Float64{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Float64) String() string {
@@ -550,7 +536,7 @@ func (*Type_Float64) ProtoMessage() {}
func (x *Type_Float64) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -575,11 +561,9 @@ type Type_Timestamp struct {
func (x *Type_Timestamp) Reset() {
*x = Type_Timestamp{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Timestamp) String() string {
@@ -590,7 +574,7 @@ func (*Type_Timestamp) ProtoMessage() {}
func (x *Type_Timestamp) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -615,11 +599,9 @@ type Type_Date struct {
func (x *Type_Date) Reset() {
*x = Type_Date{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Date) String() string {
@@ -630,7 +612,7 @@ func (*Type_Date) ProtoMessage() {}
func (x *Type_Date) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -660,11 +642,9 @@ type Type_Struct struct {
func (x *Type_Struct) Reset() {
*x = Type_Struct{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Struct) String() string {
@@ -675,7 +655,7 @@ func (*Type_Struct) ProtoMessage() {}
func (x *Type_Struct) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -710,11 +690,9 @@ type Type_Array struct {
func (x *Type_Array) Reset() {
*x = Type_Array{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Array) String() string {
@@ -725,7 +703,7 @@ func (*Type_Array) ProtoMessage() {}
func (x *Type_Array) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -768,11 +746,9 @@ type Type_Map struct {
func (x *Type_Map) Reset() {
*x = Type_Map{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Map) String() string {
@@ -783,7 +759,7 @@ func (*Type_Map) ProtoMessage() {}
func (x *Type_Map) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -843,11 +819,9 @@ type Type_Aggregate struct {
func (x *Type_Aggregate) Reset() {
*x = Type_Aggregate{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Aggregate) String() string {
@@ -858,7 +832,7 @@ func (*Type_Aggregate) ProtoMessage() {}
func (x *Type_Aggregate) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -970,11 +944,9 @@ type Type_Bytes_Encoding struct {
func (x *Type_Bytes_Encoding) Reset() {
*x = Type_Bytes_Encoding{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Bytes_Encoding) String() string {
@@ -985,7 +957,7 @@ func (*Type_Bytes_Encoding) ProtoMessage() {}
func (x *Type_Bytes_Encoding) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1037,11 +1009,9 @@ type Type_Bytes_Encoding_Raw struct {
func (x *Type_Bytes_Encoding_Raw) Reset() {
*x = Type_Bytes_Encoding_Raw{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Bytes_Encoding_Raw) String() string {
@@ -1052,7 +1022,7 @@ func (*Type_Bytes_Encoding_Raw) ProtoMessage() {}
func (x *Type_Bytes_Encoding_Raw) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1084,11 +1054,9 @@ type Type_String_Encoding struct {
func (x *Type_String_Encoding) Reset() {
*x = Type_String_Encoding{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_String_Encoding) String() string {
@@ -1099,7 +1067,7 @@ func (*Type_String_Encoding) ProtoMessage() {}
func (x *Type_String_Encoding) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1167,11 +1135,9 @@ type Type_String_Encoding_Utf8Raw struct {
func (x *Type_String_Encoding_Utf8Raw) Reset() {
*x = Type_String_Encoding_Utf8Raw{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_String_Encoding_Utf8Raw) String() string {
@@ -1182,7 +1148,7 @@ func (*Type_String_Encoding_Utf8Raw) ProtoMessage() {}
func (x *Type_String_Encoding_Utf8Raw) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1212,11 +1178,9 @@ type Type_String_Encoding_Utf8Bytes struct {
func (x *Type_String_Encoding_Utf8Bytes) Reset() {
*x = Type_String_Encoding_Utf8Bytes{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_String_Encoding_Utf8Bytes) String() string {
@@ -1227,7 +1191,7 @@ func (*Type_String_Encoding_Utf8Bytes) ProtoMessage() {}
func (x *Type_String_Encoding_Utf8Bytes) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1258,11 +1222,9 @@ type Type_Int64_Encoding struct {
func (x *Type_Int64_Encoding) Reset() {
*x = Type_Int64_Encoding{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Int64_Encoding) String() string {
@@ -1273,7 +1235,7 @@ func (*Type_Int64_Encoding) ProtoMessage() {}
func (x *Type_Int64_Encoding) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1332,11 +1294,9 @@ type Type_Int64_Encoding_BigEndianBytes struct {
func (x *Type_Int64_Encoding_BigEndianBytes) Reset() {
*x = Type_Int64_Encoding_BigEndianBytes{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Int64_Encoding_BigEndianBytes) String() string {
@@ -1347,7 +1307,7 @@ func (*Type_Int64_Encoding_BigEndianBytes) ProtoMessage() {}
func (x *Type_Int64_Encoding_BigEndianBytes) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1384,11 +1344,9 @@ type Type_Struct_Field struct {
func (x *Type_Struct_Field) Reset() {
*x = Type_Struct_Field{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Struct_Field) String() string {
@@ -1399,7 +1357,7 @@ func (*Type_Struct_Field) ProtoMessage() {}
func (x *Type_Struct_Field) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1439,11 +1397,9 @@ type Type_Aggregate_Sum struct {
func (x *Type_Aggregate_Sum) Reset() {
*x = Type_Aggregate_Sum{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Aggregate_Sum) String() string {
@@ -1454,7 +1410,7 @@ func (*Type_Aggregate_Sum) ProtoMessage() {}
func (x *Type_Aggregate_Sum) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[21]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1480,11 +1436,9 @@ type Type_Aggregate_Max struct {
func (x *Type_Aggregate_Max) Reset() {
*x = Type_Aggregate_Max{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Aggregate_Max) String() string {
@@ -1495,7 +1449,7 @@ func (*Type_Aggregate_Max) ProtoMessage() {}
func (x *Type_Aggregate_Max) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[22]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1521,11 +1475,9 @@ type Type_Aggregate_Min struct {
func (x *Type_Aggregate_Min) Reset() {
*x = Type_Aggregate_Min{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[23]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Aggregate_Min) String() string {
@@ -1536,7 +1488,7 @@ func (*Type_Aggregate_Min) ProtoMessage() {}
func (x *Type_Aggregate_Min) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[23]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1566,11 +1518,9 @@ type Type_Aggregate_HyperLogLogPlusPlusUniqueCount struct {
func (x *Type_Aggregate_HyperLogLogPlusPlusUniqueCount) Reset() {
*x = Type_Aggregate_HyperLogLogPlusPlusUniqueCount{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[24]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Aggregate_HyperLogLogPlusPlusUniqueCount) String() string {
@@ -1581,7 +1531,7 @@ func (*Type_Aggregate_HyperLogLogPlusPlusUniqueCount) ProtoMessage() {}
func (x *Type_Aggregate_HyperLogLogPlusPlusUniqueCount) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_admin_v2_types_proto_msgTypes[24]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1872,308 +1822,6 @@ func file_google_bigtable_admin_v2_types_proto_init() {
if File_google_bigtable_admin_v2_types_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_bigtable_admin_v2_types_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Type); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_types_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Bytes); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_types_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*Type_String); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_types_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Int64); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_types_proto_msgTypes[4].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Bool); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_types_proto_msgTypes[5].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Float32); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_types_proto_msgTypes[6].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Float64); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_types_proto_msgTypes[7].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Timestamp); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_types_proto_msgTypes[8].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Date); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_types_proto_msgTypes[9].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Struct); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_types_proto_msgTypes[10].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Array); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_types_proto_msgTypes[11].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Map); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_types_proto_msgTypes[12].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Aggregate); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_types_proto_msgTypes[13].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Bytes_Encoding); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_types_proto_msgTypes[14].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Bytes_Encoding_Raw); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_types_proto_msgTypes[15].Exporter = func(v any, i int) any {
- switch v := v.(*Type_String_Encoding); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_types_proto_msgTypes[16].Exporter = func(v any, i int) any {
- switch v := v.(*Type_String_Encoding_Utf8Raw); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_types_proto_msgTypes[17].Exporter = func(v any, i int) any {
- switch v := v.(*Type_String_Encoding_Utf8Bytes); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_types_proto_msgTypes[18].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Int64_Encoding); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_types_proto_msgTypes[19].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Int64_Encoding_BigEndianBytes); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_types_proto_msgTypes[20].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Struct_Field); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_types_proto_msgTypes[21].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Aggregate_Sum); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_types_proto_msgTypes[22].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Aggregate_Max); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_types_proto_msgTypes[23].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Aggregate_Min); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_admin_v2_types_proto_msgTypes[24].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Aggregate_HyperLogLogPlusPlusUniqueCount); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
file_google_bigtable_admin_v2_types_proto_msgTypes[0].OneofWrappers = []any{
(*Type_BytesType)(nil),
(*Type_StringType)(nil),
diff --git a/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/bigtable.pb.go b/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/bigtable.pb.go
index e0880f480b6e6..670f482dd48d7 100644
--- a/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/bigtable.pb.go
+++ b/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/bigtable.pb.go
@@ -14,7 +14,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.2
+// protoc-gen-go v1.35.2
// protoc v4.25.3
// source: google/bigtable/v2/bigtable.proto
@@ -206,11 +206,9 @@ type ReadRowsRequest struct {
func (x *ReadRowsRequest) Reset() {
*x = ReadRowsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ReadRowsRequest) String() string {
@@ -221,7 +219,7 @@ func (*ReadRowsRequest) ProtoMessage() {}
func (x *ReadRowsRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -334,11 +332,9 @@ type ReadRowsResponse struct {
func (x *ReadRowsResponse) Reset() {
*x = ReadRowsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ReadRowsResponse) String() string {
@@ -349,7 +345,7 @@ func (*ReadRowsResponse) ProtoMessage() {}
func (x *ReadRowsResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -409,11 +405,9 @@ type SampleRowKeysRequest struct {
func (x *SampleRowKeysRequest) Reset() {
*x = SampleRowKeysRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SampleRowKeysRequest) String() string {
@@ -424,7 +418,7 @@ func (*SampleRowKeysRequest) ProtoMessage() {}
func (x *SampleRowKeysRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -483,11 +477,9 @@ type SampleRowKeysResponse struct {
func (x *SampleRowKeysResponse) Reset() {
*x = SampleRowKeysResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SampleRowKeysResponse) String() string {
@@ -498,7 +490,7 @@ func (*SampleRowKeysResponse) ProtoMessage() {}
func (x *SampleRowKeysResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -558,11 +550,9 @@ type MutateRowRequest struct {
func (x *MutateRowRequest) Reset() {
*x = MutateRowRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MutateRowRequest) String() string {
@@ -573,7 +563,7 @@ func (*MutateRowRequest) ProtoMessage() {}
func (x *MutateRowRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -632,11 +622,9 @@ type MutateRowResponse struct {
func (x *MutateRowResponse) Reset() {
*x = MutateRowResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MutateRowResponse) String() string {
@@ -647,7 +635,7 @@ func (*MutateRowResponse) ProtoMessage() {}
func (x *MutateRowResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -693,11 +681,9 @@ type MutateRowsRequest struct {
func (x *MutateRowsRequest) Reset() {
*x = MutateRowsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MutateRowsRequest) String() string {
@@ -708,7 +694,7 @@ func (*MutateRowsRequest) ProtoMessage() {}
func (x *MutateRowsRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -767,11 +753,9 @@ type MutateRowsResponse struct {
func (x *MutateRowsResponse) Reset() {
*x = MutateRowsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MutateRowsResponse) String() string {
@@ -782,7 +766,7 @@ func (*MutateRowsResponse) ProtoMessage() {}
func (x *MutateRowsResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -838,11 +822,9 @@ type RateLimitInfo struct {
func (x *RateLimitInfo) Reset() {
*x = RateLimitInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *RateLimitInfo) String() string {
@@ -853,7 +835,7 @@ func (*RateLimitInfo) ProtoMessage() {}
func (x *RateLimitInfo) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -927,11 +909,9 @@ type CheckAndMutateRowRequest struct {
func (x *CheckAndMutateRowRequest) Reset() {
*x = CheckAndMutateRowRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CheckAndMutateRowRequest) String() string {
@@ -942,7 +922,7 @@ func (*CheckAndMutateRowRequest) ProtoMessage() {}
func (x *CheckAndMutateRowRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1019,11 +999,9 @@ type CheckAndMutateRowResponse struct {
func (x *CheckAndMutateRowResponse) Reset() {
*x = CheckAndMutateRowResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CheckAndMutateRowResponse) String() string {
@@ -1034,7 +1012,7 @@ func (*CheckAndMutateRowResponse) ProtoMessage() {}
func (x *CheckAndMutateRowResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1073,11 +1051,9 @@ type PingAndWarmRequest struct {
func (x *PingAndWarmRequest) Reset() {
*x = PingAndWarmRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *PingAndWarmRequest) String() string {
@@ -1088,7 +1064,7 @@ func (*PingAndWarmRequest) ProtoMessage() {}
func (x *PingAndWarmRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1126,11 +1102,9 @@ type PingAndWarmResponse struct {
func (x *PingAndWarmResponse) Reset() {
*x = PingAndWarmResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *PingAndWarmResponse) String() string {
@@ -1141,7 +1115,7 @@ func (*PingAndWarmResponse) ProtoMessage() {}
func (x *PingAndWarmResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1188,11 +1162,9 @@ type ReadModifyWriteRowRequest struct {
func (x *ReadModifyWriteRowRequest) Reset() {
*x = ReadModifyWriteRowRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ReadModifyWriteRowRequest) String() string {
@@ -1203,7 +1175,7 @@ func (*ReadModifyWriteRowRequest) ProtoMessage() {}
func (x *ReadModifyWriteRowRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1265,11 +1237,9 @@ type ReadModifyWriteRowResponse struct {
func (x *ReadModifyWriteRowResponse) Reset() {
*x = ReadModifyWriteRowResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ReadModifyWriteRowResponse) String() string {
@@ -1280,7 +1250,7 @@ func (*ReadModifyWriteRowResponse) ProtoMessage() {}
func (x *ReadModifyWriteRowResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1322,11 +1292,9 @@ type GenerateInitialChangeStreamPartitionsRequest struct {
func (x *GenerateInitialChangeStreamPartitionsRequest) Reset() {
*x = GenerateInitialChangeStreamPartitionsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GenerateInitialChangeStreamPartitionsRequest) String() string {
@@ -1337,7 +1305,7 @@ func (*GenerateInitialChangeStreamPartitionsRequest) ProtoMessage() {}
func (x *GenerateInitialChangeStreamPartitionsRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1379,11 +1347,9 @@ type GenerateInitialChangeStreamPartitionsResponse struct {
func (x *GenerateInitialChangeStreamPartitionsResponse) Reset() {
*x = GenerateInitialChangeStreamPartitionsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GenerateInitialChangeStreamPartitionsResponse) String() string {
@@ -1394,7 +1360,7 @@ func (*GenerateInitialChangeStreamPartitionsResponse) ProtoMessage() {}
func (x *GenerateInitialChangeStreamPartitionsResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1452,11 +1418,9 @@ type ReadChangeStreamRequest struct {
func (x *ReadChangeStreamRequest) Reset() {
*x = ReadChangeStreamRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ReadChangeStreamRequest) String() string {
@@ -1467,7 +1431,7 @@ func (*ReadChangeStreamRequest) ProtoMessage() {}
func (x *ReadChangeStreamRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1587,11 +1551,9 @@ type ReadChangeStreamResponse struct {
func (x *ReadChangeStreamResponse) Reset() {
*x = ReadChangeStreamResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ReadChangeStreamResponse) String() string {
@@ -1602,7 +1564,7 @@ func (*ReadChangeStreamResponse) ProtoMessage() {}
func (x *ReadChangeStreamResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1725,11 +1687,9 @@ type ExecuteQueryRequest struct {
func (x *ExecuteQueryRequest) Reset() {
*x = ExecuteQueryRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ExecuteQueryRequest) String() string {
@@ -1740,7 +1700,7 @@ func (*ExecuteQueryRequest) ProtoMessage() {}
func (x *ExecuteQueryRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1838,11 +1798,9 @@ type ExecuteQueryResponse struct {
func (x *ExecuteQueryResponse) Reset() {
*x = ExecuteQueryResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ExecuteQueryResponse) String() string {
@@ -1853,7 +1811,7 @@ func (*ExecuteQueryResponse) ProtoMessage() {}
func (x *ExecuteQueryResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1970,11 +1928,9 @@ type ReadRowsResponse_CellChunk struct {
func (x *ReadRowsResponse_CellChunk) Reset() {
*x = ReadRowsResponse_CellChunk{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ReadRowsResponse_CellChunk) String() string {
@@ -1985,7 +1941,7 @@ func (*ReadRowsResponse_CellChunk) ProtoMessage() {}
func (x *ReadRowsResponse_CellChunk) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[21]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2106,11 +2062,9 @@ type MutateRowsRequest_Entry struct {
func (x *MutateRowsRequest_Entry) Reset() {
*x = MutateRowsRequest_Entry{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MutateRowsRequest_Entry) String() string {
@@ -2121,7 +2075,7 @@ func (*MutateRowsRequest_Entry) ProtoMessage() {}
func (x *MutateRowsRequest_Entry) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[22]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2168,11 +2122,9 @@ type MutateRowsResponse_Entry struct {
func (x *MutateRowsResponse_Entry) Reset() {
*x = MutateRowsResponse_Entry{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[23]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MutateRowsResponse_Entry) String() string {
@@ -2183,7 +2135,7 @@ func (*MutateRowsResponse_Entry) ProtoMessage() {}
func (x *MutateRowsResponse_Entry) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[23]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2229,11 +2181,9 @@ type ReadChangeStreamResponse_MutationChunk struct {
func (x *ReadChangeStreamResponse_MutationChunk) Reset() {
*x = ReadChangeStreamResponse_MutationChunk{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[24]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ReadChangeStreamResponse_MutationChunk) String() string {
@@ -2244,7 +2194,7 @@ func (*ReadChangeStreamResponse_MutationChunk) ProtoMessage() {}
func (x *ReadChangeStreamResponse_MutationChunk) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[24]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2324,11 +2274,9 @@ type ReadChangeStreamResponse_DataChange struct {
func (x *ReadChangeStreamResponse_DataChange) Reset() {
*x = ReadChangeStreamResponse_DataChange{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[25]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ReadChangeStreamResponse_DataChange) String() string {
@@ -2339,7 +2287,7 @@ func (*ReadChangeStreamResponse_DataChange) ProtoMessage() {}
func (x *ReadChangeStreamResponse_DataChange) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[25]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2438,11 +2386,9 @@ type ReadChangeStreamResponse_Heartbeat struct {
func (x *ReadChangeStreamResponse_Heartbeat) Reset() {
*x = ReadChangeStreamResponse_Heartbeat{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[26]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ReadChangeStreamResponse_Heartbeat) String() string {
@@ -2453,7 +2399,7 @@ func (*ReadChangeStreamResponse_Heartbeat) ProtoMessage() {}
func (x *ReadChangeStreamResponse_Heartbeat) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[26]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2524,11 +2470,9 @@ type ReadChangeStreamResponse_CloseStream struct {
func (x *ReadChangeStreamResponse_CloseStream) Reset() {
*x = ReadChangeStreamResponse_CloseStream{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[27]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ReadChangeStreamResponse_CloseStream) String() string {
@@ -2539,7 +2483,7 @@ func (*ReadChangeStreamResponse_CloseStream) ProtoMessage() {}
func (x *ReadChangeStreamResponse_CloseStream) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[27]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2594,11 +2538,9 @@ type ReadChangeStreamResponse_MutationChunk_ChunkInfo struct {
func (x *ReadChangeStreamResponse_MutationChunk_ChunkInfo) Reset() {
*x = ReadChangeStreamResponse_MutationChunk_ChunkInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[28]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ReadChangeStreamResponse_MutationChunk_ChunkInfo) String() string {
@@ -2609,7 +2551,7 @@ func (*ReadChangeStreamResponse_MutationChunk_ChunkInfo) ProtoMessage() {}
func (x *ReadChangeStreamResponse_MutationChunk_ChunkInfo) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[28]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3567,356 +3509,6 @@ func file_google_bigtable_v2_bigtable_proto_init() {
}
file_google_bigtable_v2_data_proto_init()
file_google_bigtable_v2_request_stats_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_google_bigtable_v2_bigtable_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*ReadRowsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*ReadRowsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*SampleRowKeysRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*SampleRowKeysResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[4].Exporter = func(v any, i int) any {
- switch v := v.(*MutateRowRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[5].Exporter = func(v any, i int) any {
- switch v := v.(*MutateRowResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[6].Exporter = func(v any, i int) any {
- switch v := v.(*MutateRowsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[7].Exporter = func(v any, i int) any {
- switch v := v.(*MutateRowsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[8].Exporter = func(v any, i int) any {
- switch v := v.(*RateLimitInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[9].Exporter = func(v any, i int) any {
- switch v := v.(*CheckAndMutateRowRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[10].Exporter = func(v any, i int) any {
- switch v := v.(*CheckAndMutateRowResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[11].Exporter = func(v any, i int) any {
- switch v := v.(*PingAndWarmRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[12].Exporter = func(v any, i int) any {
- switch v := v.(*PingAndWarmResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[13].Exporter = func(v any, i int) any {
- switch v := v.(*ReadModifyWriteRowRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[14].Exporter = func(v any, i int) any {
- switch v := v.(*ReadModifyWriteRowResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[15].Exporter = func(v any, i int) any {
- switch v := v.(*GenerateInitialChangeStreamPartitionsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[16].Exporter = func(v any, i int) any {
- switch v := v.(*GenerateInitialChangeStreamPartitionsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[17].Exporter = func(v any, i int) any {
- switch v := v.(*ReadChangeStreamRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[18].Exporter = func(v any, i int) any {
- switch v := v.(*ReadChangeStreamResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[19].Exporter = func(v any, i int) any {
- switch v := v.(*ExecuteQueryRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[20].Exporter = func(v any, i int) any {
- switch v := v.(*ExecuteQueryResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[21].Exporter = func(v any, i int) any {
- switch v := v.(*ReadRowsResponse_CellChunk); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[22].Exporter = func(v any, i int) any {
- switch v := v.(*MutateRowsRequest_Entry); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[23].Exporter = func(v any, i int) any {
- switch v := v.(*MutateRowsResponse_Entry); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[24].Exporter = func(v any, i int) any {
- switch v := v.(*ReadChangeStreamResponse_MutationChunk); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[25].Exporter = func(v any, i int) any {
- switch v := v.(*ReadChangeStreamResponse_DataChange); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[26].Exporter = func(v any, i int) any {
- switch v := v.(*ReadChangeStreamResponse_Heartbeat); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[27].Exporter = func(v any, i int) any {
- switch v := v.(*ReadChangeStreamResponse_CloseStream); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_bigtable_proto_msgTypes[28].Exporter = func(v any, i int) any {
- switch v := v.(*ReadChangeStreamResponse_MutationChunk_ChunkInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
file_google_bigtable_v2_bigtable_proto_msgTypes[7].OneofWrappers = []any{}
file_google_bigtable_v2_bigtable_proto_msgTypes[17].OneofWrappers = []any{
(*ReadChangeStreamRequest_StartTime)(nil),
diff --git a/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/data.pb.go b/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/data.pb.go
index 0021c1a61e22c..f547165ce5765 100644
--- a/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/data.pb.go
+++ b/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/data.pb.go
@@ -14,7 +14,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.2
+// protoc-gen-go v1.35.2
// protoc v4.25.3
// source: google/bigtable/v2/data.proto
@@ -56,11 +56,9 @@ type Row struct {
func (x *Row) Reset() {
*x = Row{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Row) String() string {
@@ -71,7 +69,7 @@ func (*Row) ProtoMessage() {}
func (x *Row) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -120,11 +118,9 @@ type Family struct {
func (x *Family) Reset() {
*x = Family{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Family) String() string {
@@ -135,7 +131,7 @@ func (*Family) ProtoMessage() {}
func (x *Family) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -183,11 +179,9 @@ type Column struct {
func (x *Column) Reset() {
*x = Column{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Column) String() string {
@@ -198,7 +192,7 @@ func (*Column) ProtoMessage() {}
func (x *Column) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -250,11 +244,9 @@ type Cell struct {
func (x *Cell) Reset() {
*x = Cell{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Cell) String() string {
@@ -265,7 +257,7 @@ func (*Cell) ProtoMessage() {}
func (x *Cell) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -348,11 +340,9 @@ type Value struct {
func (x *Value) Reset() {
*x = Value{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Value) String() string {
@@ -363,7 +353,7 @@ func (*Value) ProtoMessage() {}
func (x *Value) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -554,11 +544,9 @@ type ArrayValue struct {
func (x *ArrayValue) Reset() {
*x = ArrayValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ArrayValue) String() string {
@@ -569,7 +557,7 @@ func (*ArrayValue) ProtoMessage() {}
func (x *ArrayValue) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -617,11 +605,9 @@ type RowRange struct {
func (x *RowRange) Reset() {
*x = RowRange{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *RowRange) String() string {
@@ -632,7 +618,7 @@ func (*RowRange) ProtoMessage() {}
func (x *RowRange) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -739,11 +725,9 @@ type RowSet struct {
func (x *RowSet) Reset() {
*x = RowSet{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *RowSet) String() string {
@@ -754,7 +738,7 @@ func (*RowSet) ProtoMessage() {}
func (x *RowSet) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -814,11 +798,9 @@ type ColumnRange struct {
func (x *ColumnRange) Reset() {
*x = ColumnRange{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ColumnRange) String() string {
@@ -829,7 +811,7 @@ func (*ColumnRange) ProtoMessage() {}
func (x *ColumnRange) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -943,11 +925,9 @@ type TimestampRange struct {
func (x *TimestampRange) Reset() {
*x = TimestampRange{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *TimestampRange) String() string {
@@ -958,7 +938,7 @@ func (*TimestampRange) ProtoMessage() {}
func (x *TimestampRange) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1013,11 +993,9 @@ type ValueRange struct {
func (x *ValueRange) Reset() {
*x = ValueRange{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ValueRange) String() string {
@@ -1028,7 +1006,7 @@ func (*ValueRange) ProtoMessage() {}
func (x *ValueRange) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1188,11 +1166,9 @@ type RowFilter struct {
func (x *RowFilter) Reset() {
*x = RowFilter{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *RowFilter) String() string {
@@ -1203,7 +1179,7 @@ func (*RowFilter) ProtoMessage() {}
func (x *RowFilter) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1621,11 +1597,9 @@ type Mutation struct {
func (x *Mutation) Reset() {
*x = Mutation{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Mutation) String() string {
@@ -1636,7 +1610,7 @@ func (*Mutation) ProtoMessage() {}
func (x *Mutation) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1772,11 +1746,9 @@ type ReadModifyWriteRule struct {
func (x *ReadModifyWriteRule) Reset() {
*x = ReadModifyWriteRule{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ReadModifyWriteRule) String() string {
@@ -1787,7 +1759,7 @@ func (*ReadModifyWriteRule) ProtoMessage() {}
func (x *ReadModifyWriteRule) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1874,11 +1846,9 @@ type StreamPartition struct {
func (x *StreamPartition) Reset() {
*x = StreamPartition{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *StreamPartition) String() string {
@@ -1889,7 +1859,7 @@ func (*StreamPartition) ProtoMessage() {}
func (x *StreamPartition) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1925,11 +1895,9 @@ type StreamContinuationTokens struct {
func (x *StreamContinuationTokens) Reset() {
*x = StreamContinuationTokens{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *StreamContinuationTokens) String() string {
@@ -1940,7 +1908,7 @@ func (*StreamContinuationTokens) ProtoMessage() {}
func (x *StreamContinuationTokens) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1978,11 +1946,9 @@ type StreamContinuationToken struct {
func (x *StreamContinuationToken) Reset() {
*x = StreamContinuationToken{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *StreamContinuationToken) String() string {
@@ -1993,7 +1959,7 @@ func (*StreamContinuationToken) ProtoMessage() {}
func (x *StreamContinuationToken) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2032,11 +1998,9 @@ type ProtoFormat struct {
func (x *ProtoFormat) Reset() {
*x = ProtoFormat{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ProtoFormat) String() string {
@@ -2047,7 +2011,7 @@ func (*ProtoFormat) ProtoMessage() {}
func (x *ProtoFormat) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2076,11 +2040,9 @@ type ColumnMetadata struct {
func (x *ColumnMetadata) Reset() {
*x = ColumnMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ColumnMetadata) String() string {
@@ -2091,7 +2053,7 @@ func (*ColumnMetadata) ProtoMessage() {}
func (x *ColumnMetadata) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2132,11 +2094,9 @@ type ProtoSchema struct {
func (x *ProtoSchema) Reset() {
*x = ProtoSchema{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ProtoSchema) String() string {
@@ -2147,7 +2107,7 @@ func (*ProtoSchema) ProtoMessage() {}
func (x *ProtoSchema) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2186,11 +2146,9 @@ type ResultSetMetadata struct {
func (x *ResultSetMetadata) Reset() {
*x = ResultSetMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ResultSetMetadata) String() string {
@@ -2201,7 +2159,7 @@ func (*ResultSetMetadata) ProtoMessage() {}
func (x *ResultSetMetadata) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2259,11 +2217,9 @@ type ProtoRows struct {
func (x *ProtoRows) Reset() {
*x = ProtoRows{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ProtoRows) String() string {
@@ -2274,7 +2230,7 @@ func (*ProtoRows) ProtoMessage() {}
func (x *ProtoRows) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[21]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2309,11 +2265,9 @@ type ProtoRowsBatch struct {
func (x *ProtoRowsBatch) Reset() {
*x = ProtoRowsBatch{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ProtoRowsBatch) String() string {
@@ -2324,7 +2278,7 @@ func (*ProtoRowsBatch) ProtoMessage() {}
func (x *ProtoRowsBatch) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[22]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2396,11 +2350,9 @@ type PartialResultSet struct {
func (x *PartialResultSet) Reset() {
*x = PartialResultSet{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[23]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *PartialResultSet) String() string {
@@ -2411,7 +2363,7 @@ func (*PartialResultSet) ProtoMessage() {}
func (x *PartialResultSet) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[23]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2479,11 +2431,9 @@ type RowFilter_Chain struct {
func (x *RowFilter_Chain) Reset() {
*x = RowFilter_Chain{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[24]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *RowFilter_Chain) String() string {
@@ -2494,7 +2444,7 @@ func (*RowFilter_Chain) ProtoMessage() {}
func (x *RowFilter_Chain) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[24]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2553,11 +2503,9 @@ type RowFilter_Interleave struct {
func (x *RowFilter_Interleave) Reset() {
*x = RowFilter_Interleave{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[25]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *RowFilter_Interleave) String() string {
@@ -2568,7 +2516,7 @@ func (*RowFilter_Interleave) ProtoMessage() {}
func (x *RowFilter_Interleave) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[25]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2616,11 +2564,9 @@ type RowFilter_Condition struct {
func (x *RowFilter_Condition) Reset() {
*x = RowFilter_Condition{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[26]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *RowFilter_Condition) String() string {
@@ -2631,7 +2577,7 @@ func (*RowFilter_Condition) ProtoMessage() {}
func (x *RowFilter_Condition) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[26]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2691,11 +2637,9 @@ type Mutation_SetCell struct {
func (x *Mutation_SetCell) Reset() {
*x = Mutation_SetCell{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[27]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Mutation_SetCell) String() string {
@@ -2706,7 +2650,7 @@ func (*Mutation_SetCell) ProtoMessage() {}
func (x *Mutation_SetCell) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[27]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2772,11 +2716,9 @@ type Mutation_AddToCell struct {
func (x *Mutation_AddToCell) Reset() {
*x = Mutation_AddToCell{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[28]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Mutation_AddToCell) String() string {
@@ -2787,7 +2729,7 @@ func (*Mutation_AddToCell) ProtoMessage() {}
func (x *Mutation_AddToCell) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[28]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2855,11 +2797,9 @@ type Mutation_MergeToCell struct {
func (x *Mutation_MergeToCell) Reset() {
*x = Mutation_MergeToCell{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[29]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Mutation_MergeToCell) String() string {
@@ -2870,7 +2810,7 @@ func (*Mutation_MergeToCell) ProtoMessage() {}
func (x *Mutation_MergeToCell) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[29]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2932,11 +2872,9 @@ type Mutation_DeleteFromColumn struct {
func (x *Mutation_DeleteFromColumn) Reset() {
*x = Mutation_DeleteFromColumn{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[30]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Mutation_DeleteFromColumn) String() string {
@@ -2947,7 +2885,7 @@ func (*Mutation_DeleteFromColumn) ProtoMessage() {}
func (x *Mutation_DeleteFromColumn) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[30]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2996,11 +2934,9 @@ type Mutation_DeleteFromFamily struct {
func (x *Mutation_DeleteFromFamily) Reset() {
*x = Mutation_DeleteFromFamily{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[31]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[31]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Mutation_DeleteFromFamily) String() string {
@@ -3011,7 +2947,7 @@ func (*Mutation_DeleteFromFamily) ProtoMessage() {}
func (x *Mutation_DeleteFromFamily) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[31]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3042,11 +2978,9 @@ type Mutation_DeleteFromRow struct {
func (x *Mutation_DeleteFromRow) Reset() {
*x = Mutation_DeleteFromRow{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_data_proto_msgTypes[32]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_data_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Mutation_DeleteFromRow) String() string {
@@ -3057,7 +2991,7 @@ func (*Mutation_DeleteFromRow) ProtoMessage() {}
func (x *Mutation_DeleteFromRow) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_data_proto_msgTypes[32]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3570,404 +3504,6 @@ func file_google_bigtable_v2_data_proto_init() {
return
}
file_google_bigtable_v2_types_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_google_bigtable_v2_data_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Row); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*Family); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*Column); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*Cell); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[4].Exporter = func(v any, i int) any {
- switch v := v.(*Value); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[5].Exporter = func(v any, i int) any {
- switch v := v.(*ArrayValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[6].Exporter = func(v any, i int) any {
- switch v := v.(*RowRange); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[7].Exporter = func(v any, i int) any {
- switch v := v.(*RowSet); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[8].Exporter = func(v any, i int) any {
- switch v := v.(*ColumnRange); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[9].Exporter = func(v any, i int) any {
- switch v := v.(*TimestampRange); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[10].Exporter = func(v any, i int) any {
- switch v := v.(*ValueRange); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[11].Exporter = func(v any, i int) any {
- switch v := v.(*RowFilter); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[12].Exporter = func(v any, i int) any {
- switch v := v.(*Mutation); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[13].Exporter = func(v any, i int) any {
- switch v := v.(*ReadModifyWriteRule); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[14].Exporter = func(v any, i int) any {
- switch v := v.(*StreamPartition); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[15].Exporter = func(v any, i int) any {
- switch v := v.(*StreamContinuationTokens); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[16].Exporter = func(v any, i int) any {
- switch v := v.(*StreamContinuationToken); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[17].Exporter = func(v any, i int) any {
- switch v := v.(*ProtoFormat); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[18].Exporter = func(v any, i int) any {
- switch v := v.(*ColumnMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[19].Exporter = func(v any, i int) any {
- switch v := v.(*ProtoSchema); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[20].Exporter = func(v any, i int) any {
- switch v := v.(*ResultSetMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[21].Exporter = func(v any, i int) any {
- switch v := v.(*ProtoRows); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[22].Exporter = func(v any, i int) any {
- switch v := v.(*ProtoRowsBatch); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[23].Exporter = func(v any, i int) any {
- switch v := v.(*PartialResultSet); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[24].Exporter = func(v any, i int) any {
- switch v := v.(*RowFilter_Chain); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[25].Exporter = func(v any, i int) any {
- switch v := v.(*RowFilter_Interleave); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[26].Exporter = func(v any, i int) any {
- switch v := v.(*RowFilter_Condition); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[27].Exporter = func(v any, i int) any {
- switch v := v.(*Mutation_SetCell); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[28].Exporter = func(v any, i int) any {
- switch v := v.(*Mutation_AddToCell); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[29].Exporter = func(v any, i int) any {
- switch v := v.(*Mutation_MergeToCell); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[30].Exporter = func(v any, i int) any {
- switch v := v.(*Mutation_DeleteFromColumn); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[31].Exporter = func(v any, i int) any {
- switch v := v.(*Mutation_DeleteFromFamily); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_data_proto_msgTypes[32].Exporter = func(v any, i int) any {
- switch v := v.(*Mutation_DeleteFromRow); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
file_google_bigtable_v2_data_proto_msgTypes[4].OneofWrappers = []any{
(*Value_RawValue)(nil),
(*Value_RawTimestampMicros)(nil),
diff --git a/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/feature_flags.pb.go b/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/feature_flags.pb.go
index 3ecb550dd56a5..99c5647abd571 100644
--- a/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/feature_flags.pb.go
+++ b/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/feature_flags.pb.go
@@ -14,7 +14,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.2
+// protoc-gen-go v1.35.2
// protoc v4.25.3
// source: google/bigtable/v2/feature_flags.proto
@@ -70,15 +70,17 @@ type FeatureFlags struct {
RetryInfo bool `protobuf:"varint,7,opt,name=retry_info,json=retryInfo,proto3" json:"retry_info,omitempty"`
// Notify the server that the client has client side metrics enabled.
ClientSideMetricsEnabled bool `protobuf:"varint,8,opt,name=client_side_metrics_enabled,json=clientSideMetricsEnabled,proto3" json:"client_side_metrics_enabled,omitempty"`
+ // Notify the server that the client is using the Traffic Director endpoint.
+ TrafficDirectorEnabled bool `protobuf:"varint,9,opt,name=traffic_director_enabled,json=trafficDirectorEnabled,proto3" json:"traffic_director_enabled,omitempty"`
+ // Notify the server that the client explicitly opted in to Direct Access.
+ DirectAccessRequested bool `protobuf:"varint,10,opt,name=direct_access_requested,json=directAccessRequested,proto3" json:"direct_access_requested,omitempty"`
}
func (x *FeatureFlags) Reset() {
*x = FeatureFlags{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_feature_flags_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_feature_flags_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FeatureFlags) String() string {
@@ -89,7 +91,7 @@ func (*FeatureFlags) ProtoMessage() {}
func (x *FeatureFlags) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_feature_flags_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -153,13 +155,27 @@ func (x *FeatureFlags) GetClientSideMetricsEnabled() bool {
return false
}
+func (x *FeatureFlags) GetTrafficDirectorEnabled() bool {
+ if x != nil {
+ return x.TrafficDirectorEnabled
+ }
+ return false
+}
+
+func (x *FeatureFlags) GetDirectAccessRequested() bool {
+ if x != nil {
+ return x.DirectAccessRequested
+ }
+ return false
+}
+
var File_google_bigtable_v2_feature_flags_proto protoreflect.FileDescriptor
var file_google_bigtable_v2_feature_flags_proto_rawDesc = []byte{
0x0a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c,
0x65, 0x2f, 0x76, 0x32, 0x2f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x66, 0x6c, 0x61,
0x67, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x22, 0xe1, 0x02, 0x0a,
+ 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x22, 0xd3, 0x03, 0x0a,
0x0c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x23, 0x0a,
0x0d, 0x72, 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x5f, 0x73, 0x63, 0x61, 0x6e, 0x73, 0x18, 0x01,
0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x72, 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x53, 0x63, 0x61,
@@ -182,19 +198,26 @@ var file_google_bigtable_v2_feature_flags_proto_rawDesc = []byte{
0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64,
0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x69,
0x64, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64,
- 0x42, 0xbb, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x11, 0x46, 0x65, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
- 0x5a, 0x38, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61,
- 0x70, 0x69, 0x76, 0x32, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x70, 0x62, 0x3b,
- 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x70, 0x62, 0xaa, 0x02, 0x18, 0x47, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62,
- 0x6c, 0x65, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43,
- 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x56, 0x32,
- 0xea, 0x02, 0x1b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64,
- 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x12, 0x38, 0x0a, 0x18, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x64, 0x69, 0x72, 0x65,
+ 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x16, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63,
+ 0x74, 0x6f, 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x36, 0x0a, 0x17, 0x64, 0x69,
+ 0x72, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x69, 0x72,
+ 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x65, 0x64, 0x42, 0xbb, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x11, 0x46,
+ 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x50, 0x01, 0x5a, 0x38, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x70,
+ 0x62, 0x3b, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x70, 0x62, 0xaa, 0x02, 0x18, 0x47,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c,
+ 0x56, 0x32, 0xea, 0x02, 0x1b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f,
+ 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x32,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -226,20 +249,6 @@ func file_google_bigtable_v2_feature_flags_proto_init() {
if File_google_bigtable_v2_feature_flags_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_bigtable_v2_feature_flags_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*FeatureFlags); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/request_stats.pb.go b/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/request_stats.pb.go
index a3b26670fe4a2..4c67043cd201f 100644
--- a/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/request_stats.pb.go
+++ b/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/request_stats.pb.go
@@ -14,7 +14,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.2
+// protoc-gen-go v1.35.2
// protoc v4.25.3
// source: google/bigtable/v2/request_stats.proto
@@ -58,11 +58,9 @@ type ReadIterationStats struct {
func (x *ReadIterationStats) Reset() {
*x = ReadIterationStats{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ReadIterationStats) String() string {
@@ -73,7 +71,7 @@ func (*ReadIterationStats) ProtoMessage() {}
func (x *ReadIterationStats) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -146,11 +144,9 @@ type RequestLatencyStats struct {
func (x *RequestLatencyStats) Reset() {
*x = RequestLatencyStats{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *RequestLatencyStats) String() string {
@@ -161,7 +157,7 @@ func (*RequestLatencyStats) ProtoMessage() {}
func (x *RequestLatencyStats) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -201,11 +197,9 @@ type FullReadStatsView struct {
func (x *FullReadStatsView) Reset() {
*x = FullReadStatsView{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FullReadStatsView) String() string {
@@ -216,7 +210,7 @@ func (*FullReadStatsView) ProtoMessage() {}
func (x *FullReadStatsView) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -267,11 +261,9 @@ type RequestStats struct {
func (x *RequestStats) Reset() {
*x = RequestStats{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *RequestStats) String() string {
@@ -282,7 +274,7 @@ func (*RequestStats) ProtoMessage() {}
func (x *RequestStats) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -423,56 +415,6 @@ func file_google_bigtable_v2_request_stats_proto_init() {
if File_google_bigtable_v2_request_stats_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_bigtable_v2_request_stats_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*ReadIterationStats); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_request_stats_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*RequestLatencyStats); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_request_stats_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*FullReadStatsView); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_request_stats_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*RequestStats); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
file_google_bigtable_v2_request_stats_proto_msgTypes[3].OneofWrappers = []any{
(*RequestStats_FullReadStatsView)(nil),
}
diff --git a/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/response_params.pb.go b/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/response_params.pb.go
index 54caa744faed8..f7268397864fa 100644
--- a/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/response_params.pb.go
+++ b/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/response_params.pb.go
@@ -14,7 +14,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.2
+// protoc-gen-go v1.35.2
// protoc v4.25.3
// source: google/bigtable/v2/response_params.proto
@@ -53,11 +53,9 @@ type ResponseParams struct {
func (x *ResponseParams) Reset() {
*x = ResponseParams{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_response_params_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_response_params_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ResponseParams) String() string {
@@ -68,7 +66,7 @@ func (*ResponseParams) ProtoMessage() {}
func (x *ResponseParams) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_response_params_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -155,20 +153,6 @@ func file_google_bigtable_v2_response_params_proto_init() {
if File_google_bigtable_v2_response_params_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_bigtable_v2_response_params_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*ResponseParams); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
file_google_bigtable_v2_response_params_proto_msgTypes[0].OneofWrappers = []any{}
type x struct{}
out := protoimpl.TypeBuilder{
diff --git a/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/types.pb.go b/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/types.pb.go
index 1090862843a80..03d252d9b97ce 100644
--- a/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/types.pb.go
+++ b/vendor/cloud.google.com/go/bigtable/apiv2/bigtablepb/types.pb.go
@@ -14,7 +14,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.2
+// protoc-gen-go v1.35.2
// protoc v4.25.3
// source: google/bigtable/v2/types.proto
@@ -86,11 +86,9 @@ type Type struct {
func (x *Type) Reset() {
*x = Type{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type) String() string {
@@ -101,7 +99,7 @@ func (*Type) ProtoMessage() {}
func (x *Type) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -308,11 +306,9 @@ type Type_Bytes struct {
func (x *Type_Bytes) Reset() {
*x = Type_Bytes{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Bytes) String() string {
@@ -323,7 +319,7 @@ func (*Type_Bytes) ProtoMessage() {}
func (x *Type_Bytes) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -358,11 +354,9 @@ type Type_String struct {
func (x *Type_String) Reset() {
*x = Type_String{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_String) String() string {
@@ -373,7 +367,7 @@ func (*Type_String) ProtoMessage() {}
func (x *Type_String) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -408,11 +402,9 @@ type Type_Int64 struct {
func (x *Type_Int64) Reset() {
*x = Type_Int64{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Int64) String() string {
@@ -423,7 +415,7 @@ func (*Type_Int64) ProtoMessage() {}
func (x *Type_Int64) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -455,11 +447,9 @@ type Type_Bool struct {
func (x *Type_Bool) Reset() {
*x = Type_Bool{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Bool) String() string {
@@ -470,7 +460,7 @@ func (*Type_Bool) ProtoMessage() {}
func (x *Type_Bool) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -495,11 +485,9 @@ type Type_Float32 struct {
func (x *Type_Float32) Reset() {
*x = Type_Float32{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Float32) String() string {
@@ -510,7 +498,7 @@ func (*Type_Float32) ProtoMessage() {}
func (x *Type_Float32) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -535,11 +523,9 @@ type Type_Float64 struct {
func (x *Type_Float64) Reset() {
*x = Type_Float64{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Float64) String() string {
@@ -550,7 +536,7 @@ func (*Type_Float64) ProtoMessage() {}
func (x *Type_Float64) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -575,11 +561,9 @@ type Type_Timestamp struct {
func (x *Type_Timestamp) Reset() {
*x = Type_Timestamp{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Timestamp) String() string {
@@ -590,7 +574,7 @@ func (*Type_Timestamp) ProtoMessage() {}
func (x *Type_Timestamp) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -615,11 +599,9 @@ type Type_Date struct {
func (x *Type_Date) Reset() {
*x = Type_Date{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Date) String() string {
@@ -630,7 +612,7 @@ func (*Type_Date) ProtoMessage() {}
func (x *Type_Date) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -660,11 +642,9 @@ type Type_Struct struct {
func (x *Type_Struct) Reset() {
*x = Type_Struct{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Struct) String() string {
@@ -675,7 +655,7 @@ func (*Type_Struct) ProtoMessage() {}
func (x *Type_Struct) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -710,11 +690,9 @@ type Type_Array struct {
func (x *Type_Array) Reset() {
*x = Type_Array{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Array) String() string {
@@ -725,7 +703,7 @@ func (*Type_Array) ProtoMessage() {}
func (x *Type_Array) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -768,11 +746,9 @@ type Type_Map struct {
func (x *Type_Map) Reset() {
*x = Type_Map{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Map) String() string {
@@ -783,7 +759,7 @@ func (*Type_Map) ProtoMessage() {}
func (x *Type_Map) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -843,11 +819,9 @@ type Type_Aggregate struct {
func (x *Type_Aggregate) Reset() {
*x = Type_Aggregate{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Aggregate) String() string {
@@ -858,7 +832,7 @@ func (*Type_Aggregate) ProtoMessage() {}
func (x *Type_Aggregate) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -970,11 +944,9 @@ type Type_Bytes_Encoding struct {
func (x *Type_Bytes_Encoding) Reset() {
*x = Type_Bytes_Encoding{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Bytes_Encoding) String() string {
@@ -985,7 +957,7 @@ func (*Type_Bytes_Encoding) ProtoMessage() {}
func (x *Type_Bytes_Encoding) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1037,11 +1009,9 @@ type Type_Bytes_Encoding_Raw struct {
func (x *Type_Bytes_Encoding_Raw) Reset() {
*x = Type_Bytes_Encoding_Raw{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Bytes_Encoding_Raw) String() string {
@@ -1052,7 +1022,7 @@ func (*Type_Bytes_Encoding_Raw) ProtoMessage() {}
func (x *Type_Bytes_Encoding_Raw) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1084,11 +1054,9 @@ type Type_String_Encoding struct {
func (x *Type_String_Encoding) Reset() {
*x = Type_String_Encoding{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_String_Encoding) String() string {
@@ -1099,7 +1067,7 @@ func (*Type_String_Encoding) ProtoMessage() {}
func (x *Type_String_Encoding) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1167,11 +1135,9 @@ type Type_String_Encoding_Utf8Raw struct {
func (x *Type_String_Encoding_Utf8Raw) Reset() {
*x = Type_String_Encoding_Utf8Raw{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_String_Encoding_Utf8Raw) String() string {
@@ -1182,7 +1148,7 @@ func (*Type_String_Encoding_Utf8Raw) ProtoMessage() {}
func (x *Type_String_Encoding_Utf8Raw) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1212,11 +1178,9 @@ type Type_String_Encoding_Utf8Bytes struct {
func (x *Type_String_Encoding_Utf8Bytes) Reset() {
*x = Type_String_Encoding_Utf8Bytes{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_String_Encoding_Utf8Bytes) String() string {
@@ -1227,7 +1191,7 @@ func (*Type_String_Encoding_Utf8Bytes) ProtoMessage() {}
func (x *Type_String_Encoding_Utf8Bytes) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1258,11 +1222,9 @@ type Type_Int64_Encoding struct {
func (x *Type_Int64_Encoding) Reset() {
*x = Type_Int64_Encoding{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Int64_Encoding) String() string {
@@ -1273,7 +1235,7 @@ func (*Type_Int64_Encoding) ProtoMessage() {}
func (x *Type_Int64_Encoding) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1332,11 +1294,9 @@ type Type_Int64_Encoding_BigEndianBytes struct {
func (x *Type_Int64_Encoding_BigEndianBytes) Reset() {
*x = Type_Int64_Encoding_BigEndianBytes{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Int64_Encoding_BigEndianBytes) String() string {
@@ -1347,7 +1307,7 @@ func (*Type_Int64_Encoding_BigEndianBytes) ProtoMessage() {}
func (x *Type_Int64_Encoding_BigEndianBytes) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1384,11 +1344,9 @@ type Type_Struct_Field struct {
func (x *Type_Struct_Field) Reset() {
*x = Type_Struct_Field{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Struct_Field) String() string {
@@ -1399,7 +1357,7 @@ func (*Type_Struct_Field) ProtoMessage() {}
func (x *Type_Struct_Field) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1439,11 +1397,9 @@ type Type_Aggregate_Sum struct {
func (x *Type_Aggregate_Sum) Reset() {
*x = Type_Aggregate_Sum{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Aggregate_Sum) String() string {
@@ -1454,7 +1410,7 @@ func (*Type_Aggregate_Sum) ProtoMessage() {}
func (x *Type_Aggregate_Sum) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[21]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1480,11 +1436,9 @@ type Type_Aggregate_Max struct {
func (x *Type_Aggregate_Max) Reset() {
*x = Type_Aggregate_Max{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Aggregate_Max) String() string {
@@ -1495,7 +1449,7 @@ func (*Type_Aggregate_Max) ProtoMessage() {}
func (x *Type_Aggregate_Max) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[22]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1521,11 +1475,9 @@ type Type_Aggregate_Min struct {
func (x *Type_Aggregate_Min) Reset() {
*x = Type_Aggregate_Min{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[23]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Aggregate_Min) String() string {
@@ -1536,7 +1488,7 @@ func (*Type_Aggregate_Min) ProtoMessage() {}
func (x *Type_Aggregate_Min) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[23]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1566,11 +1518,9 @@ type Type_Aggregate_HyperLogLogPlusPlusUniqueCount struct {
func (x *Type_Aggregate_HyperLogLogPlusPlusUniqueCount) Reset() {
*x = Type_Aggregate_HyperLogLogPlusPlusUniqueCount{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_bigtable_v2_types_proto_msgTypes[24]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_bigtable_v2_types_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type_Aggregate_HyperLogLogPlusPlusUniqueCount) String() string {
@@ -1581,7 +1531,7 @@ func (*Type_Aggregate_HyperLogLogPlusPlusUniqueCount) ProtoMessage() {}
func (x *Type_Aggregate_HyperLogLogPlusPlusUniqueCount) ProtoReflect() protoreflect.Message {
mi := &file_google_bigtable_v2_types_proto_msgTypes[24]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
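
The generated-code churn above follows one mechanical pattern: `Reset` now stores the message info eagerly instead of guarding on `protoimpl.UnsafeEnabled`, and `ProtoReflect` only checks `x != nil`. Callers see no API difference; a minimal sketch of the unchanged public surface (using a well-known wrapper type as a stand-in for the Bigtable messages, which is the only assumption here):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	m := wrapperspb.String("hello")
	// proto.Reset dispatches to the generated Reset(), which after this
	// change always stores the message info eagerly.
	proto.Reset(m)
	fmt.Println(proto.Size(m)) // 0: the message is empty after reset
}
```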
@@ -1858,308 +1808,6 @@ func file_google_bigtable_v2_types_proto_init() {
if File_google_bigtable_v2_types_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_bigtable_v2_types_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Type); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_types_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Bytes); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_types_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*Type_String); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_types_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Int64); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_types_proto_msgTypes[4].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Bool); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_types_proto_msgTypes[5].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Float32); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_types_proto_msgTypes[6].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Float64); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_types_proto_msgTypes[7].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Timestamp); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_types_proto_msgTypes[8].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Date); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_types_proto_msgTypes[9].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Struct); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_types_proto_msgTypes[10].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Array); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_types_proto_msgTypes[11].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Map); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_types_proto_msgTypes[12].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Aggregate); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_types_proto_msgTypes[13].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Bytes_Encoding); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_types_proto_msgTypes[14].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Bytes_Encoding_Raw); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_types_proto_msgTypes[15].Exporter = func(v any, i int) any {
- switch v := v.(*Type_String_Encoding); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_types_proto_msgTypes[16].Exporter = func(v any, i int) any {
- switch v := v.(*Type_String_Encoding_Utf8Raw); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_types_proto_msgTypes[17].Exporter = func(v any, i int) any {
- switch v := v.(*Type_String_Encoding_Utf8Bytes); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_types_proto_msgTypes[18].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Int64_Encoding); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_types_proto_msgTypes[19].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Int64_Encoding_BigEndianBytes); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_types_proto_msgTypes[20].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Struct_Field); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_types_proto_msgTypes[21].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Aggregate_Sum); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_types_proto_msgTypes[22].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Aggregate_Max); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_types_proto_msgTypes[23].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Aggregate_Min); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_bigtable_v2_types_proto_msgTypes[24].Exporter = func(v any, i int) any {
- switch v := v.(*Type_Aggregate_HyperLogLogPlusPlusUniqueCount); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
file_google_bigtable_v2_types_proto_msgTypes[0].OneofWrappers = []any{
(*Type_BytesType)(nil),
(*Type_StringType)(nil),
diff --git a/vendor/cloud.google.com/go/bigtable/bigtable.go b/vendor/cloud.google.com/go/bigtable/bigtable.go
index b08785658714e..0748672a597e5 100644
--- a/vendor/cloud.google.com/go/bigtable/bigtable.go
+++ b/vendor/cloud.google.com/go/bigtable/bigtable.go
@@ -32,6 +32,7 @@ import (
btopt "cloud.google.com/go/bigtable/internal/option"
"cloud.google.com/go/internal/trace"
gax "github.com/googleapis/gax-go/v2"
+ "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
@@ -95,6 +96,18 @@ func NewClient(ctx context.Context, project, instance string, opts ...option.Cli
// NewClientWithConfig creates a new client with the given config.
func NewClientWithConfig(ctx context.Context, project, instance string, config ClientConfig, opts ...option.ClientOption) (*Client, error) {
+ metricsProvider := config.MetricsProvider
+ if emulatorAddr := os.Getenv("BIGTABLE_EMULATOR_HOST"); emulatorAddr != "" {
+ // Do not emit metrics when emulator is being used
+ metricsProvider = NoopMetricsProvider{}
+ }
+
+ // Create an OpenTelemetry metrics configuration
+ metricsTracerFactory, err := newBuiltinMetricsTracerFactory(ctx, project, instance, config.AppProfile, metricsProvider, opts...)
+ if err != nil {
+ return nil, err
+ }
+
o, err := btopt.DefaultClientOptions(prodAddr, mtlsProdAddr, Scope, clientUserAgent)
if err != nil {
return nil, err
@@ -112,19 +125,23 @@ func NewClientWithConfig(ctx context.Context, project, instance string, config C
// Allow non-default service account in DirectPath.
o = append(o, internaloption.AllowNonDefaultServiceAccount(true))
o = append(o, opts...)
- connPool, err := gtransport.DialPool(ctx, o...)
- if err != nil {
- return nil, fmt.Errorf("dialing: %w", err)
- }
- metricsProvider := config.MetricsProvider
- if emulatorAddr := os.Getenv("BIGTABLE_EMULATOR_HOST"); emulatorAddr != "" {
- // Do not emit metrics when emulator is being used
- metricsProvider = NoopMetricsProvider{}
- }
+ // TODO(b/372244283): Remove after b/358175516 has been fixed
+ asyncRefreshMetricAttrs := metricsTracerFactory.clientAttributes
+ asyncRefreshMetricAttrs = append(asyncRefreshMetricAttrs,
+ attribute.String(metricLabelKeyTag, "async_refresh_dry_run"),
+ // Table, cluster and zone are unknown at this point
+ // Use default values
+ attribute.String(monitoredResLabelKeyTable, defaultTable),
+ attribute.String(monitoredResLabelKeyCluster, defaultCluster),
+ attribute.String(monitoredResLabelKeyZone, defaultZone),
+ )
+ o = append(o, internaloption.EnableAsyncRefreshDryRun(func() {
+ metricsTracerFactory.debugTags.Add(context.Background(), 1,
+ metric.WithAttributes(asyncRefreshMetricAttrs...))
+ }))
- // Create a OpenTelemetry metrics configuration
- metricsTracerFactory, err := newBuiltinMetricsTracerFactory(ctx, project, instance, config.AppProfile, metricsProvider, opts...)
+ connPool, err := gtransport.DialPool(ctx, o...)
if err != nil {
return nil, err
}
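
The reordering matters: the metrics tracer factory must exist before `gtransport.DialPool` runs, so that the `EnableAsyncRefreshDryRun` callback registered as a dial option can increment `debugTags`. From the caller's side nothing changes; a minimal sketch of constructing a client with an explicit metrics provider (project and instance names here are placeholders):

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/bigtable"
)

func main() {
	ctx := context.Background()
	// Opt out of built-in client-side metrics explicitly; the same thing
	// happens automatically when BIGTABLE_EMULATOR_HOST is set.
	cfg := bigtable.ClientConfig{MetricsProvider: bigtable.NoopMetricsProvider{}}
	client, err := bigtable.NewClientWithConfig(ctx, "my-project", "my-instance", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
```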
@@ -1588,7 +1605,7 @@ func gaxInvokeWithRecorder(ctx context.Context, mt *builtinMetricsTracer, method
f func(ctx context.Context, headerMD, trailerMD *metadata.MD, _ gax.CallSettings) error, opts ...gax.CallOption) error {
attemptHeaderMD := metadata.New(nil)
attempTrailerMD := metadata.New(nil)
- mt.method = method
+ mt.setMethod(method)
var callWrapper func(context.Context, gax.CallSettings) error
if !mt.builtInEnabled {
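
`setMethod` (added further down in metrics.go) namespaces the recorded method under a `Bigtable.` prefix, so e.g. `ReadRows` is reported as `Bigtable.ReadRows`. A standalone sketch of that normalization, where the hypothetical free function stands in for the real method on `builtinMetricsTracer`:

```go
package main

import "fmt"

// fullMethodName mirrors what setMethod does to the metrics "method" label.
func fullMethodName(m string) string {
	return "Bigtable." + m
}

func main() {
	fmt.Println(fullMethodName("ReadRows")) // Bigtable.ReadRows
}
```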
diff --git a/vendor/cloud.google.com/go/bigtable/bttest/inmem.go b/vendor/cloud.google.com/go/bigtable/bttest/inmem.go
index ca0947a4f2f4b..bca700b920949 100644
--- a/vendor/cloud.google.com/go/bigtable/bttest/inmem.go
+++ b/vendor/cloud.google.com/go/bigtable/bttest/inmem.go
@@ -590,6 +590,15 @@ func (s *server) ReadRows(req *btpb.ReadRowsRequest, stream btpb.Bigtable_ReadRo
return nil
}
+func (s *server) GetPartitionsByTableName(name string) []*btpb.RowRange {
+ table, ok := s.tables[name]
+ if !ok {
+ return nil
+ }
+ return table.rowRanges()
+
+}
+
// streamRow filters the given row and sends it via the given stream.
// Returns true if at least one cell matched the filter and was streamed, false otherwise.
func streamRow(stream btpb.Bigtable_ReadRowsServer, r *row, f *btpb.RowFilter, s *btpb.ReadIterationStats, ff *btpb.FeatureFlags) error {
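
`GetPartitionsByTableName` exposes a table's hard-coded change-stream partitions to tests. Assuming the `btpb` alias points at `cloud.google.com/go/bigtable/apiv2/bigtablepb` as elsewhere in this package, a small sketch of inspecting the returned ranges:

```go
package main

import (
	"fmt"

	btpb "cloud.google.com/go/bigtable/apiv2/bigtablepb"
)

// dumpPartitions is a hypothetical test helper; the generated getters return
// nil for endpoints that use a different oneof variant than the closed-key one.
func dumpPartitions(ranges []*btpb.RowRange) {
	for _, rr := range ranges {
		fmt.Printf("partition [%q, %q]\n", rr.GetStartKeyClosed(), rr.GetEndKeyClosed())
	}
}

func main() {
	dumpPartitions([]*btpb.RowRange{{
		StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("a")},
		EndKey:   &btpb.RowRange_EndKeyClosed{EndKeyClosed: []byte("b")},
	}})
}
```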
@@ -1461,6 +1470,7 @@ type table struct {
counter uint64 // increment by 1 when a new family is created
families map[string]*columnFamily // keyed by plain family name
rows *btree.BTree // indexed by row key
+ partitions []*btpb.RowRange // partitions used in change stream
isProtected bool // whether this table has deletion protection
}
@@ -1475,10 +1485,56 @@ func newTable(ctr *btapb.CreateTableRequest) *table {
c++
}
}
+
+ // Hard-code the partitions for testing purposes.
+ rowRanges := []*btpb.RowRange{
+ {
+ StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("a")},
+ EndKey: &btpb.RowRange_EndKeyClosed{EndKeyClosed: []byte("b")},
+ },
+ {
+ StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("c")},
+ EndKey: &btpb.RowRange_EndKeyClosed{EndKeyClosed: []byte("d")},
+ },
+ {
+ StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("e")},
+ EndKey: &btpb.RowRange_EndKeyClosed{EndKeyClosed: []byte("f")},
+ },
+ {
+ StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("g")},
+ EndKey: &btpb.RowRange_EndKeyClosed{EndKeyClosed: []byte("h")},
+ },
+ {
+ StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("i")},
+ EndKey: &btpb.RowRange_EndKeyClosed{EndKeyClosed: []byte("j")},
+ },
+ {
+ StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("k")},
+ EndKey: &btpb.RowRange_EndKeyClosed{EndKeyClosed: []byte("l")},
+ },
+ {
+ StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("m")},
+ EndKey: &btpb.RowRange_EndKeyClosed{EndKeyClosed: []byte("n")},
+ },
+ {
+ StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("o")},
+ EndKey: &btpb.RowRange_EndKeyClosed{EndKeyClosed: []byte("p")},
+ },
+ {
+ StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("q")},
+ EndKey: &btpb.RowRange_EndKeyClosed{EndKeyClosed: []byte("r")},
+ },
+ {
+ StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("s")},
+ EndKey: &btpb.RowRange_EndKeyClosed{EndKeyClosed: []byte("z")},
+ },
+ }
+
return &table{
families: fams,
counter: c,
rows: btree.New(btreeDegree),
+ partitions: rowRanges,
isProtected: ctr.GetTable().GetDeletionProtection(),
}
}
@@ -1577,6 +1633,10 @@ func (t *table) gcReadOnly() (toDelete []btree.Item) {
return toDelete
}
+func (t *table) rowRanges() []*btpb.RowRange {
+ return t.partitions
+}
+
type byRowKey []*row
func (b byRowKey) Len() int { return len(b) }
diff --git a/vendor/cloud.google.com/go/bigtable/conformance_test.sh b/vendor/cloud.google.com/go/bigtable/conformance_test.sh
index bf6f520a6b0c0..d380758315fc5 100644
--- a/vendor/cloud.google.com/go/bigtable/conformance_test.sh
+++ b/vendor/cloud.google.com/go/bigtable/conformance_test.sh
@@ -50,10 +50,10 @@ trap cleanup EXIT
# Run the conformance tests
cd $conformanceTestsHome
-# Tests in https://github.com/googleapis/cloud-bigtable-clients-test/tree/main/tests can only be run on go1.22.5
-go install golang.org/dl/go1.22.5@latest
-go1.22.5 download
-go1.22.5 test -v -proxy_addr=:$testProxyPort | tee -a $sponge_log
+# Tests in https://github.com/googleapis/cloud-bigtable-clients-test/tree/main/tests can only be run on go1.22.7
+go install golang.org/dl/go1.22.7@latest
+go1.22.7 download
+go1.22.7 test -v -proxy_addr=:$testProxyPort | tee -a $sponge_log
RETURN_CODE=$?
echo "exiting with ${RETURN_CODE}"
diff --git a/vendor/cloud.google.com/go/bigtable/internal/version.go b/vendor/cloud.google.com/go/bigtable/internal/version.go
index ba70a43673b7c..a018e6275b148 100644
--- a/vendor/cloud.google.com/go/bigtable/internal/version.go
+++ b/vendor/cloud.google.com/go/bigtable/internal/version.go
@@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "1.33.0"
+const Version = "1.34.0"
diff --git a/vendor/cloud.google.com/go/bigtable/metrics.go b/vendor/cloud.google.com/go/bigtable/metrics.go
index c76ecfa1e0ddf..b7ce660ba62ce 100644
--- a/vendor/cloud.google.com/go/bigtable/metrics.go
+++ b/vendor/cloud.google.com/go/bigtable/metrics.go
@@ -50,6 +50,7 @@ const (
metricLabelKeyAppProfile = "app_profile"
metricLabelKeyMethod = "method"
metricLabelKeyStatus = "status"
+ metricLabelKeyTag = "tag"
metricLabelKeyStreamingOperation = "streaming"
metricLabelKeyClientName = "client_name"
metricLabelKeyClientUID = "client_uid"
@@ -59,6 +60,7 @@ const (
metricNameAttemptLatencies = "attempt_latencies"
metricNameServerLatencies = "server_latencies"
metricNameRetryCount = "retry_count"
+ metricNameDebugTags = "debug_tags"
// Metric units
metricUnitMS = "ms"
@@ -68,7 +70,7 @@ const (
// These are effectively constant, but for testing purposes they are mutable
var (
// duration between two metric exports
- defaultSamplePeriod = 5 * time.Minute
+ defaultSamplePeriod = time.Minute
metricsErrorPrefix = "bigtable-metrics: "
@@ -79,7 +81,7 @@ var (
800.0, 1000.0, 2000.0, 5000.0, 10000.0, 20000.0, 50000.0, 100000.0, 200000.0,
400000.0, 800000.0, 1600000.0, 3200000.0}
- // All the built-in metrics have same attributes except 'status' and 'streaming'
+ // All the built-in metrics have the same attributes except 'tag', 'status' and 'streaming'
// These attributes need to be added to only a few of the metrics
metricsDetails = map[string]metricInfo{
metricNameOperationLatencies: {
@@ -148,6 +150,7 @@ type builtinMetricsTracerFactory struct {
serverLatencies metric.Float64Histogram
attemptLatencies metric.Float64Histogram
retryCount metric.Int64Counter
+ debugTags metric.Int64Counter
}
func newBuiltinMetricsTracerFactory(ctx context.Context, project, instance, appProfile string, metricsProvider MetricsProvider, opts ...option.ClientOption) (*builtinMetricsTracerFactory, error) {
@@ -253,6 +256,16 @@ func (tf *builtinMetricsTracerFactory) createInstruments(meter metric.Meter) err
metric.WithDescription("The number of additional RPCs sent after the initial attempt."),
metric.WithUnit(metricUnitCount),
)
+ if err != nil {
+ return err
+ }
+
+ // Create debug_tags
+ tf.debugTags, err = meter.Int64Counter(
+ metricNameDebugTags,
+ metric.WithDescription("A counter of internal client events used for debugging."),
+ metric.WithUnit(metricUnitCount),
+ )
return err
}
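
`createInstruments` registers the new counter through the standard OpenTelemetry metric API. A self-contained sketch of the same registration against an SDK meter provider; the instrument name is taken from the constants above, and unit `"1"` is an assumption about `metricUnitCount`:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	meter := sdkmetric.NewMeterProvider().Meter("example")

	// Mirrors how createInstruments registers debug_tags.
	debugTags, err := meter.Int64Counter(
		"debug_tags",
		metric.WithDescription("A counter of internal client events used for debugging."),
		metric.WithUnit("1"),
	)
	if err != nil {
		log.Fatal(err)
	}
	// Record one event, tagged the same way the async-refresh dry run is.
	debugTags.Add(context.Background(), 1,
		metric.WithAttributes(attribute.String("tag", "async_refresh_dry_run")))
}
```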
@@ -271,6 +284,7 @@ type builtinMetricsTracer struct {
instrumentServerLatencies metric.Float64Histogram
instrumentAttemptLatencies metric.Float64Histogram
instrumentRetryCount metric.Int64Counter
+ instrumentDebugTags metric.Int64Counter
tableName string
method string
@@ -279,6 +293,10 @@ type builtinMetricsTracer struct {
currOp opTracer
}
+func (b *builtinMetricsTracer) setMethod(m string) {
+ b.method = "Bigtable." + m
+}
+
// opTracer is used to record metrics for the entire operation, including retries.
// Operation is a logical unit that represents a single method invocation on client.
// The method might require multiple attempts/rpcs and backoff logic to complete
@@ -363,6 +381,7 @@ func (tf *builtinMetricsTracerFactory) createBuiltinMetricsTracer(ctx context.Co
instrumentServerLatencies: tf.serverLatencies,
instrumentAttemptLatencies: tf.attemptLatencies,
instrumentRetryCount: tf.retryCount,
+ instrumentDebugTags: tf.debugTags,
tableName: tableName,
isStreaming: isStreaming,
diff --git a/vendor/cloud.google.com/go/bigtable/metrics_util.go b/vendor/cloud.google.com/go/bigtable/metrics_util.go
index 8783f6ff4b214..64a1fb50e4d24 100644
--- a/vendor/cloud.google.com/go/bigtable/metrics_util.go
+++ b/vendor/cloud.google.com/go/bigtable/metrics_util.go
@@ -28,8 +28,9 @@ import (
)
const (
- defaultCluster = "unspecified"
+ defaultCluster = "<unspecified>"
defaultZone = "global"
+ defaultTable = "<unspecified>"
)
// get GFE latency in ms from response metadata
diff --git a/vendor/cloud.google.com/go/debug.md b/vendor/cloud.google.com/go/debug.md
index 2010ed7a6f9c9..7979d14af3f44 100644
--- a/vendor/cloud.google.com/go/debug.md
+++ b/vendor/cloud.google.com/go/debug.md
@@ -187,9 +187,8 @@ for OpenCensus is now deprecated in the Google Cloud client libraries for Go.
See [OpenCensus](#opencensus) below for details.
The Google Cloud client libraries for Go now use the
-[OpenTelemetry](https://opentelemetry.io/docs/what-is-opentelemetry/) project by
-default. Temporary opt-in support for OpenCensus is still available. The
-transition from OpenCensus to OpenTelemetry is covered in the following
+[OpenTelemetry](https://opentelemetry.io/docs/what-is-opentelemetry/) project.
+The transition from OpenCensus to OpenTelemetry is covered in the following
sections.
### Tracing (experimental)
@@ -207,8 +206,7 @@ hand-written clients are in scope for the discussion in this section:
Currently, the spans created by these clients are for OpenTelemetry. OpenCensus
users are urged to transition to OpenTelemetry as soon as possible, as explained
-in the next section. OpenCensus users can still opt-in to the deprecated
-OpenCensus support via an environment variable, as described below.
+in the next section.
#### OpenCensus
@@ -229,23 +227,15 @@ On May 29, 2024, six months after the
[release](https://github.com/googleapis/google-cloud-go/releases/tag/v0.111.0)
of experimental, opt-in support for OpenTelemetry tracing, the default tracing
support in the clients above was changed from OpenCensus to OpenTelemetry, and
-the experimental OpenCensus support was marked as deprecated. To continue
-using the OpenCensus support, set the environment variable
-`GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING` to the case-insensitive
-value `opencensus` before loading the client library.
-
-```sh
-export GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING=opencensus
-```
+the experimental OpenCensus support was marked as deprecated.
On December 2nd, 2024, one year after the release of OpenTelemetry support, the
-experimental and deprecated support for OpenCensus tracing will be removed.
+experimental and deprecated support for OpenCensus tracing was removed.
Please note that all Google Cloud Go clients currently provide experimental
support for the propagation of both OpenCensus and OpenTelemetry trace context
to their receiving endpoints. The experimental support for OpenCensus trace
-context propagation will be removed at the same time as the experimental
-OpenCensus tracing support.
+context propagation will be removed soon.
Please refer to the following resources:
diff --git a/vendor/cloud.google.com/go/doc.go b/vendor/cloud.google.com/go/doc.go
index 8644f614c864c..4c75de36f4290 100644
--- a/vendor/cloud.google.com/go/doc.go
+++ b/vendor/cloud.google.com/go/doc.go
@@ -14,8 +14,8 @@
/*
Package cloud is the root of the packages used to access Google Cloud
-Services. See https://pkg.go.dev/cloud.google.com/go for a full list
-of sub-modules.
+Services. See https://pkg.go.dev/cloud.google.com/go#section-directories for a
+full list of sub-modules.
# Client Options
@@ -177,7 +177,7 @@ Here is a generic example:
// Then use ctx in a subsequent request.
response, err := client.GetSecret(ctx, request)
-## Google-reserved headers
+# Google-reserved headers
There are some header keys that Google reserves for internal use that must
not be overwritten. The following header keys are broadly considered reserved
@@ -190,7 +190,7 @@ Be sure to check the individual package documentation for other service-specific
reserved headers. For example, Storage supports a specific auditing header that
is mentioned in that [module's documentation][storagedocs].
-## Google Cloud system parameters
+# Google Cloud system parameters
Google Cloud services respect [system parameters][system parameters] that can be
used to augment request and/or response behavior. For the most part, they are
@@ -209,9 +209,9 @@ connections for later re-use. These are cached to the http.MaxIdleConns
and http.MaxIdleConnsPerHost settings in http.DefaultTransport by default.
For gRPC clients, connection pooling is configurable. Users of Cloud Client
-Libraries may specify option.WithGRPCConnectionPool(n) as a client option to
-NewClient calls. This configures the underlying gRPC connections to be pooled
-and accessed in a round robin fashion.
+Libraries may specify [google.golang.org/api/option.WithGRPCConnectionPool]
+as a client option to NewClient calls. This configures the underlying gRPC
+connections to be pooled and accessed in a round robin fashion.
# Using the Libraries in Container environments (Docker)
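
The rewritten paragraph now links the option symbol directly; usage is unchanged. A minimal sketch, with placeholder project and instance names:

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/bigtable"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	// Pool four gRPC connections, accessed in round-robin fashion.
	client, err := bigtable.NewClient(ctx, "my-project", "my-instance",
		option.WithGRPCConnectionPool(4))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
```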
@@ -247,19 +247,11 @@ errors can still be unwrapped using the APIError.
log.Println(ae.Reason())
log.Println(ae.Details().Help.GetLinks())
}
- }
-
-If the gRPC transport was used, the [google.golang.org/grpc/status.Status] can
-still be parsed using the [google.golang.org/grpc/status.FromError] function.
-
- if err != nil {
- if s, ok := status.FromError(err); ok {
- log.Println(s.Message())
- for _, d := range s.Proto().Details {
- log.Println(d)
- }
+ // If a gRPC transport was used you can extract the
+ // google.golang.org/grpc/status.Status from the error
+ s := ae.GRPCStatus()
+ log.Println(s.Code())
}
- }
# Client Stability
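
The doc change folds the gRPC status extraction into the `APIError` path via `GRPCStatus()` instead of a second `status.FromError` example. A runnable sketch of that pattern; the plain error passed in `main` is just a stand-in:

```go
package main

import (
	"errors"
	"log"

	"github.com/googleapis/gax-go/v2/apierror"
)

func describe(err error) {
	var ae *apierror.APIError
	if errors.As(err, &ae) {
		log.Println(ae.Reason())
		// For gRPC transports the underlying status is available directly.
		s := ae.GRPCStatus()
		log.Println(s.Code(), s.Message())
	}
}

func main() {
	describe(errors.New("not an APIError, so describe prints nothing"))
}
```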
diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
index 6b58b6a6f3b55..cea277a4d8459 100644
--- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
+++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
@@ -1879,6 +1879,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/oracledatabase/apiv1": {
+ "api_shortname": "oracledatabase",
+ "distribution_name": "cloud.google.com/go/oracledatabase/apiv1",
+ "description": "Oracle Database@Google Cloud API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/oracledatabase/latest/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/orchestration/airflow/service/apiv1": {
"api_shortname": "composer",
"distribution_name": "cloud.google.com/go/orchestration/airflow/service/apiv1",
@@ -1969,6 +1979,16 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/parallelstore/apiv1": {
+ "api_shortname": "parallelstore",
+ "distribution_name": "cloud.google.com/go/parallelstore/apiv1",
+ "description": "Parallelstore API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/parallelstore/latest/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/parallelstore/apiv1beta": {
"api_shortname": "parallelstore",
"distribution_name": "cloud.google.com/go/parallelstore/apiv1beta",
@@ -2392,7 +2412,7 @@
"cloud.google.com/go/securitycentermanagement/apiv1": {
"api_shortname": "securitycentermanagement",
"distribution_name": "cloud.google.com/go/securitycentermanagement/apiv1",
- "description": "Security Center Management API",
+ "description": "Security Command Center Management API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycentermanagement/latest/apiv1",
diff --git a/vendor/cloud.google.com/go/internal/trace/trace.go b/vendor/cloud.google.com/go/internal/trace/trace.go
index e8daf800a6a4b..fcff2a7e48c8d 100644
--- a/vendor/cloud.google.com/go/internal/trace/trace.go
+++ b/vendor/cloud.google.com/go/internal/trace/trace.go
@@ -18,143 +18,39 @@ import (
"context"
"errors"
"fmt"
- "os"
- "strings"
- "sync"
- "go.opencensus.io/trace"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
- ottrace "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace"
"google.golang.org/api/googleapi"
- "google.golang.org/genproto/googleapis/rpc/code"
"google.golang.org/grpc/status"
)
const (
- // Deprecated: The default experimental tracing support for OpenCensus is
- // now deprecated in the Google Cloud client libraries for Go.
- // TelemetryPlatformTracingOpenCensus is the value to which the environment
- // variable GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING should be
- // set to enable OpenCensus tracing.
- TelemetryPlatformTracingOpenCensus = "opencensus"
- // TelemetryPlatformTracingOpenTelemetry is the value to which the environment
- // variable GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING should be
- // set to enable OpenTelemetry tracing.
- TelemetryPlatformTracingOpenTelemetry = "opentelemetry"
- // TelemetryPlatformTracingVar is the name of the environment
- // variable that can be set to change the default tracing from OpenTelemetry
- // to OpenCensus.
- //
- // The default experimental tracing support for OpenCensus is now deprecated
- // in the Google Cloud client libraries for Go.
- TelemetryPlatformTracingVar = "GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING"
- // OpenTelemetryTracerName is the name given to the OpenTelemetry Tracer
- // when it is obtained from the OpenTelemetry TracerProvider.
OpenTelemetryTracerName = "cloud.google.com/go"
)
-var (
- // openCensusTracingEnabledMu guards access to openCensusTracingEnabled field
- openCensusTracingEnabledMu = sync.RWMutex{}
- // openCensusTracingEnabled is true if the environment variable
- // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the
- // case-insensitive value "opencensus".
- openCensusTracingEnabled bool = strings.EqualFold(strings.TrimSpace(
- os.Getenv(TelemetryPlatformTracingVar)), TelemetryPlatformTracingOpenCensus)
-)
-
-// SetOpenTelemetryTracingEnabledField programmatically sets the value provided
-// by GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING for the purpose of
-// unit testing. Do not invoke it directly. Intended for use only in unit tests.
-// Restore original value after each test.
-//
-// The default experimental tracing support for OpenCensus is now deprecated in
-// the Google Cloud client libraries for Go.
-func SetOpenTelemetryTracingEnabledField(enabled bool) {
- openCensusTracingEnabledMu.Lock()
- defer openCensusTracingEnabledMu.Unlock()
- openCensusTracingEnabled = !enabled
-}
-
-// Deprecated: The default experimental tracing support for OpenCensus is now
-// deprecated in the Google Cloud client libraries for Go.
-//
-// IsOpenCensusTracingEnabled returns true if the environment variable
-// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the
-// case-insensitive value "opencensus".
-func IsOpenCensusTracingEnabled() bool {
- openCensusTracingEnabledMu.RLock()
- defer openCensusTracingEnabledMu.RUnlock()
- return openCensusTracingEnabled
-}
-
-// IsOpenTelemetryTracingEnabled returns true if the environment variable
-// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is NOT set to the
-// case-insensitive value "opencensus".
-func IsOpenTelemetryTracingEnabled() bool {
- return !IsOpenCensusTracingEnabled()
-}
-
-// StartSpan adds a span to the trace with the given name. If IsOpenCensusTracingEnabled
-// returns true, the span will be an OpenCensus span. If IsOpenTelemetryTracingEnabled
-// returns true, the span will be an OpenTelemetry span. Set the environment variable
-// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive
-// value "opencensus" before loading the package to use OpenCensus tracing.
-// The default was OpenCensus until May 29, 2024, at which time the default was
-// changed to "opencensus". Explicitly setting the environment variable to
-// "opencensus" is required to continue using OpenCensus tracing.
+// StartSpan adds an OpenTelemetry span to the trace with the given name.
//
// The default experimental tracing support for OpenCensus is now deprecated in
// the Google Cloud client libraries for Go.
func StartSpan(ctx context.Context, name string) context.Context {
- if IsOpenTelemetryTracingEnabled() {
- ctx, _ = otel.GetTracerProvider().Tracer(OpenTelemetryTracerName).Start(ctx, name)
- } else {
- ctx, _ = trace.StartSpan(ctx, name)
- }
+ ctx, _ = otel.GetTracerProvider().Tracer(OpenTelemetryTracerName).Start(ctx, name)
return ctx
}
-// EndSpan ends a span with the given error. If IsOpenCensusTracingEnabled
-// returns true, the span will be an OpenCensus span. If IsOpenTelemetryTracingEnabled
-// returns true, the span will be an OpenTelemetry span. Set the environment variable
-// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive
-// value "opencensus" before loading the package to use OpenCensus tracing.
-// The default was OpenCensus until May 29, 2024, at which time the default was
-// changed to "opencensus". Explicitly setting the environment variable to
-// "opencensus" is required to continue using OpenCensus tracing.
+// EndSpan ends an OpenTelemetry span with the given error.
//
// The default experimental tracing support for OpenCensus is now deprecated in
// the Google Cloud client libraries for Go.
func EndSpan(ctx context.Context, err error) {
- if IsOpenTelemetryTracingEnabled() {
- span := ottrace.SpanFromContext(ctx)
- if err != nil {
- span.SetStatus(codes.Error, toOpenTelemetryStatusDescription(err))
- span.RecordError(err)
- }
- span.End()
- } else {
- span := trace.FromContext(ctx)
- if err != nil {
- span.SetStatus(toStatus(err))
- }
- span.End()
- }
-}
-
-// toStatus converts an error to an equivalent OpenCensus status.
-func toStatus(err error) trace.Status {
- var err2 *googleapi.Error
- if ok := errors.As(err, &err2); ok {
- return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message}
- } else if s, ok := status.FromError(err); ok {
- return trace.Status{Code: int32(s.Code()), Message: s.Message()}
- } else {
- return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()}
+ span := trace.SpanFromContext(ctx)
+ if err != nil {
+ span.SetStatus(codes.Error, toOpenTelemetryStatusDescription(err))
+ span.RecordError(err)
}
+ span.End()
}
// toOpenTelemetryStatus converts an error to an equivalent OpenTelemetry status description.
@@ -169,87 +65,13 @@ func toOpenTelemetryStatusDescription(err error) string {
}
}
-// TODO(deklerk): switch to using OpenCensus function when it becomes available.
-// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto
-func httpStatusCodeToOCCode(httpStatusCode int) int32 {
- switch httpStatusCode {
- case 200:
- return int32(code.Code_OK)
- case 499:
- return int32(code.Code_CANCELLED)
- case 500:
- return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS
- case 400:
- return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE
- case 504:
- return int32(code.Code_DEADLINE_EXCEEDED)
- case 404:
- return int32(code.Code_NOT_FOUND)
- case 409:
- return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED
- case 403:
- return int32(code.Code_PERMISSION_DENIED)
- case 401:
- return int32(code.Code_UNAUTHENTICATED)
- case 429:
- return int32(code.Code_RESOURCE_EXHAUSTED)
- case 501:
- return int32(code.Code_UNIMPLEMENTED)
- case 503:
- return int32(code.Code_UNAVAILABLE)
- default:
- return int32(code.Code_UNKNOWN)
- }
-}
-
-// TracePrintf retrieves the current OpenCensus or OpenTelemetry span from context, then:
-// * calls Span.Annotatef if OpenCensus is enabled; or
-// * calls Span.AddEvent if OpenTelemetry is enabled.
-//
-// If IsOpenCensusTracingEnabled returns true, the expected span must be an
-// OpenCensus span. If IsOpenTelemetryTracingEnabled returns true, the expected
-// span must be an OpenTelemetry span. Set the environment variable
-// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive
-// value "opencensus" before loading the package to use OpenCensus tracing.
-// The default was OpenCensus until May 29, 2024, at which time the default was
-// changed to "opencensus". Explicitly setting the environment variable to
-// "opencensus" is required to continue using OpenCensus tracing.
-//
-// The default experimental tracing support for OpenCensus is now deprecated in
-// the Google Cloud client libraries for Go.
+// TracePrintf retrieves the current OpenTelemetry span from context, then calls
+// Span.AddEvent. The expected span must be an OpenTelemetry span. The default
+// experimental tracing support for OpenCensus is now deprecated in the Google
+// Cloud client libraries for Go.
func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) {
- if IsOpenTelemetryTracingEnabled() {
- attrs := otAttrs(attrMap)
- ottrace.SpanFromContext(ctx).AddEvent(fmt.Sprintf(format, args...), ottrace.WithAttributes(attrs...))
- } else {
- attrs := ocAttrs(attrMap)
- // TODO: (odeke-em): perhaps just pass around spans due to the cost
- // incurred from using trace.FromContext(ctx) yet we could avoid
- // throwing away the work done by ctx, span := trace.StartSpan.
- trace.FromContext(ctx).Annotatef(attrs, format, args...)
- }
-}
-
-// ocAttrs converts a generic map to OpenCensus attributes.
-func ocAttrs(attrMap map[string]interface{}) []trace.Attribute {
- var attrs []trace.Attribute
- for k, v := range attrMap {
- var a trace.Attribute
- switch v := v.(type) {
- case string:
- a = trace.StringAttribute(k, v)
- case bool:
- a = trace.BoolAttribute(k, v)
- case int:
- a = trace.Int64Attribute(k, int64(v))
- case int64:
- a = trace.Int64Attribute(k, v)
- default:
- a = trace.StringAttribute(k, fmt.Sprintf("%#v", v))
- }
- attrs = append(attrs, a)
- }
- return attrs
+ attrs := otAttrs(attrMap)
+ trace.SpanFromContext(ctx).AddEvent(fmt.Sprintf(format, args...), trace.WithAttributes(attrs...))
}
// otAttrs converts a generic map to OpenTelemetry attributes.
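
With the OpenCensus branch gone, `StartSpan`/`EndSpan` reduce to plain OpenTelemetry calls. Since `internal/trace` is not importable by user code, here is an equivalent standalone sketch; the tracer name matches `OpenTelemetryTracerName`, and `err.Error()` simplifies what `toOpenTelemetryStatusDescription` does:

```go
package main

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

func startSpan(ctx context.Context, name string) context.Context {
	ctx, _ = otel.GetTracerProvider().Tracer("cloud.google.com/go").Start(ctx, name)
	return ctx
}

func endSpan(ctx context.Context, err error) {
	span := trace.SpanFromContext(ctx)
	if err != nil {
		span.SetStatus(codes.Error, err.Error())
		span.RecordError(err)
	}
	span.End()
}

func main() {
	ctx := startSpan(context.Background(), "example/Op")
	endSpan(ctx, errors.New("boom"))
}
```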
diff --git a/vendor/cloud.google.com/go/release-please-config-individual.json b/vendor/cloud.google.com/go/release-please-config-individual.json
index 3dacbc5e6940b..529f7db353a04 100644
--- a/vendor/cloud.google.com/go/release-please-config-individual.json
+++ b/vendor/cloud.google.com/go/release-please-config-individual.json
@@ -5,12 +5,6 @@
"separate-pull-requests": true,
"tag-separator": "/",
"packages": {
- "ai": {
- "component": "ai"
- },
- "aiplatform": {
- "component": "aiplatform"
- },
"auth": {
"component": "auth"
},
diff --git a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json
index 73021df5391d6..3d73202baf892 100644
--- a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json
+++ b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json
@@ -282,6 +282,9 @@
"optimization": {
"component": "optimization"
},
+ "oracledatabase": {
+ "component": "oracledatabase"
+ },
"orchestration": {
"component": "orchestration"
},
diff --git a/vendor/github.com/golang/groupcache/LICENSE b/vendor/github.com/golang/groupcache/LICENSE
deleted file mode 100644
index 37ec93a14fdcd..0000000000000
--- a/vendor/github.com/golang/groupcache/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and
-distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright
-owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities
-that control, are controlled by, or are under common control with that entity.
-For the purposes of this definition, "control" means (i) the power, direct or
-indirect, to cause the direction or management of such entity, whether by
-contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
-outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising
-permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including
-but not limited to software source code, documentation source, and configuration
-files.
-
-"Object" form shall mean any form resulting from mechanical transformation or
-translation of a Source form, including but not limited to compiled object code,
-generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made
-available under the License, as indicated by a copyright notice that is included
-in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that
-is based on (or derived from) the Work and for which the editorial revisions,
-annotations, elaborations, or other modifications represent, as a whole, an
-original work of authorship. For the purposes of this License, Derivative Works
-shall not include works that remain separable from, or merely link (or bind by
-name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version
-of the Work and any modifications or additions to that Work or Derivative Works
-thereof, that is intentionally submitted to Licensor for inclusion in the Work
-by the copyright owner or by an individual or Legal Entity authorized to submit
-on behalf of the copyright owner. For the purposes of this definition,
-"submitted" means any form of electronic, verbal, or written communication sent
-to the Licensor or its representatives, including but not limited to
-communication on electronic mailing lists, source code control systems, and
-issue tracking systems that are managed by, or on behalf of, the Licensor for
-the purpose of discussing and improving the Work, but excluding communication
-that is conspicuously marked or otherwise designated in writing by the copyright
-owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
-of whom a Contribution has been received by Licensor and subsequently
-incorporated within the Work.
-
-2. Grant of Copyright License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable copyright license to reproduce, prepare Derivative Works of,
-publicly display, publicly perform, sublicense, and distribute the Work and such
-Derivative Works in Source or Object form.
-
-3. Grant of Patent License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable (except as stated in this section) patent license to make, have
-made, use, offer to sell, sell, import, and otherwise transfer the Work, where
-such license applies only to those patent claims licensable by such Contributor
-that are necessarily infringed by their Contribution(s) alone or by combination
-of their Contribution(s) with the Work to which such Contribution(s) was
-submitted. If You institute patent litigation against any entity (including a
-cross-claim or counterclaim in a lawsuit) alleging that the Work or a
-Contribution incorporated within the Work constitutes direct or contributory
-patent infringement, then any patent licenses granted to You under this License
-for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution.
-
-You may reproduce and distribute copies of the Work or Derivative Works thereof
-in any medium, with or without modifications, and in Source or Object form,
-provided that You meet the following conditions:
-
-You must give any other recipients of the Work or Derivative Works a copy of
-this License; and
-You must cause any modified files to carry prominent notices stating that You
-changed the files; and
-You must retain, in the Source form of any Derivative Works that You distribute,
-all copyright, patent, trademark, and attribution notices from the Source form
-of the Work, excluding those notices that do not pertain to any part of the
-Derivative Works; and
-If the Work includes a "NOTICE" text file as part of its distribution, then any
-Derivative Works that You distribute must include a readable copy of the
-attribution notices contained within such NOTICE file, excluding those notices
-that do not pertain to any part of the Derivative Works, in at least one of the
-following places: within a NOTICE text file distributed as part of the
-Derivative Works; within the Source form or documentation, if provided along
-with the Derivative Works; or, within a display generated by the Derivative
-Works, if and wherever such third-party notices normally appear. The contents of
-the NOTICE file are for informational purposes only and do not modify the
-License. You may add Your own attribution notices within Derivative Works that
-You distribute, alongside or as an addendum to the NOTICE text from the Work,
-provided that such additional attribution notices cannot be construed as
-modifying the License.
-You may add Your own copyright statement to Your modifications and may provide
-additional or different license terms and conditions for use, reproduction, or
-distribution of Your modifications, or for any such Derivative Works as a whole,
-provided Your use, reproduction, and distribution of the Work otherwise complies
-with the conditions stated in this License.
-
-5. Submission of Contributions.
-
-Unless You explicitly state otherwise, any Contribution intentionally submitted
-for inclusion in the Work by You to the Licensor shall be under the terms and
-conditions of this License, without any additional terms or conditions.
-Notwithstanding the above, nothing herein shall supersede or modify the terms of
-any separate license agreement you may have executed with Licensor regarding
-such Contributions.
-
-6. Trademarks.
-
-This License does not grant permission to use the trade names, trademarks,
-service marks, or product names of the Licensor, except as required for
-reasonable and customary use in describing the origin of the Work and
-reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty.
-
-Unless required by applicable law or agreed to in writing, Licensor provides the
-Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
-including, without limitation, any warranties or conditions of TITLE,
-NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
-solely responsible for determining the appropriateness of using or
-redistributing the Work and assume any risks associated with Your exercise of
-permissions under this License.
-
-8. Limitation of Liability.
-
-In no event and under no legal theory, whether in tort (including negligence),
-contract, or otherwise, unless required by applicable law (such as deliberate
-and grossly negligent acts) or agreed to in writing, shall any Contributor be
-liable to You for damages, including any direct, indirect, special, incidental,
-or consequential damages of any character arising as a result of this License or
-out of the use or inability to use the Work (including but not limited to
-damages for loss of goodwill, work stoppage, computer failure or malfunction, or
-any and all other commercial damages or losses), even if such Contributor has
-been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability.
-
-While redistributing the Work or Derivative Works thereof, You may choose to
-offer, and charge a fee for, acceptance of support, warranty, indemnity, or
-other liability obligations and/or rights consistent with this License. However,
-in accepting such obligations, You may act only on Your own behalf and on Your
-sole responsibility, not on behalf of any other Contributor, and only if You
-agree to indemnify, defend, and hold each Contributor harmless for any liability
-incurred by, or claims asserted against, such Contributor by reason of your
-accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work
-
-To apply the Apache License to your work, attach the following boilerplate
-notice, with the fields enclosed by brackets "[]" replaced with your own
-identifying information. (Don't include the brackets!) The text should be
-enclosed in the appropriate comment syntax for the file format. We also
-recommend that a file or class name and description of purpose be included on
-the same "printed page" as the copyright notice for easier identification within
-third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go
deleted file mode 100644
index eac1c7664f995..0000000000000
--- a/vendor/github.com/golang/groupcache/lru/lru.go
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
-Copyright 2013 Google Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package lru implements an LRU cache.
-package lru
-
-import "container/list"
-
-// Cache is an LRU cache. It is not safe for concurrent access.
-type Cache struct {
- // MaxEntries is the maximum number of cache entries before
- // an item is evicted. Zero means no limit.
- MaxEntries int
-
- // OnEvicted optionally specifies a callback function to be
- // executed when an entry is purged from the cache.
- OnEvicted func(key Key, value interface{})
-
- ll *list.List
- cache map[interface{}]*list.Element
-}
-
-// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators
-type Key interface{}
-
-type entry struct {
- key Key
- value interface{}
-}
-
-// New creates a new Cache.
-// If maxEntries is zero, the cache has no limit and it's assumed
-// that eviction is done by the caller.
-func New(maxEntries int) *Cache {
- return &Cache{
- MaxEntries: maxEntries,
- ll: list.New(),
- cache: make(map[interface{}]*list.Element),
- }
-}
-
-// Add adds a value to the cache.
-func (c *Cache) Add(key Key, value interface{}) {
- if c.cache == nil {
- c.cache = make(map[interface{}]*list.Element)
- c.ll = list.New()
- }
- if ee, ok := c.cache[key]; ok {
- c.ll.MoveToFront(ee)
- ee.Value.(*entry).value = value
- return
- }
- ele := c.ll.PushFront(&entry{key, value})
- c.cache[key] = ele
- if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries {
- c.RemoveOldest()
- }
-}
-
-// Get looks up a key's value from the cache.
-func (c *Cache) Get(key Key) (value interface{}, ok bool) {
- if c.cache == nil {
- return
- }
- if ele, hit := c.cache[key]; hit {
- c.ll.MoveToFront(ele)
- return ele.Value.(*entry).value, true
- }
- return
-}
-
-// Remove removes the provided key from the cache.
-func (c *Cache) Remove(key Key) {
- if c.cache == nil {
- return
- }
- if ele, hit := c.cache[key]; hit {
- c.removeElement(ele)
- }
-}
-
-// RemoveOldest removes the oldest item from the cache.
-func (c *Cache) RemoveOldest() {
- if c.cache == nil {
- return
- }
- ele := c.ll.Back()
- if ele != nil {
- c.removeElement(ele)
- }
-}
-
-func (c *Cache) removeElement(e *list.Element) {
- c.ll.Remove(e)
- kv := e.Value.(*entry)
- delete(c.cache, kv.key)
- if c.OnEvicted != nil {
- c.OnEvicted(kv.key, kv.value)
- }
-}
-
-// Len returns the number of items in the cache.
-func (c *Cache) Len() int {
- if c.cache == nil {
- return 0
- }
- return c.ll.Len()
-}
-
-// Clear purges all stored items from the cache.
-func (c *Cache) Clear() {
- if c.OnEvicted != nil {
- for _, e := range c.cache {
- kv := e.Value.(*entry)
- c.OnEvicted(kv.key, kv.value)
- }
- }
- c.ll = nil
- c.cache = nil
-}
diff --git a/vendor/go.opencensus.io/.gitignore b/vendor/go.opencensus.io/.gitignore
deleted file mode 100644
index 74a6db472e56a..0000000000000
--- a/vendor/go.opencensus.io/.gitignore
+++ /dev/null
@@ -1,9 +0,0 @@
-/.idea/
-
-# go.opencensus.io/exporter/aws
-/exporter/aws/
-
-# Exclude vendor, use dep ensure after checkout:
-/vendor/github.com/
-/vendor/golang.org/
-/vendor/google.golang.org/
diff --git a/vendor/go.opencensus.io/CONTRIBUTING.md b/vendor/go.opencensus.io/CONTRIBUTING.md
deleted file mode 100644
index 1ba3962c8bfaf..0000000000000
--- a/vendor/go.opencensus.io/CONTRIBUTING.md
+++ /dev/null
@@ -1,63 +0,0 @@
-# How to contribute
-
-We'd love to accept your patches and contributions to this project. There are
-just a few small guidelines you need to follow.
-
-## Contributor License Agreement
-
-Contributions to this project must be accompanied by a Contributor License
-Agreement. You (or your employer) retain the copyright to your contribution,
-this simply gives us permission to use and redistribute your contributions as
-part of the project. Head over to <https://cla.developers.google.com/> to see
-your current agreements on file or to sign a new one.
-
-You generally only need to submit a CLA once, so if you've already submitted one
-(even if it was for a different project), you probably don't need to do it
-again.
-
-## Code reviews
-
-All submissions, including submissions by project members, require review. We
-use GitHub pull requests for this purpose. Consult [GitHub Help] for more
-information on using pull requests.
-
-[GitHub Help]: https://help.github.com/articles/about-pull-requests/
-
-## Instructions
-
-Fork the repo, checkout the upstream repo to your GOPATH by:
-
-```
-$ go get -d go.opencensus.io
-```
-
-Add your fork as an origin:
-
-```
-cd $(go env GOPATH)/src/go.opencensus.io
-git remote add fork [email protected]:YOUR_GITHUB_USERNAME/opencensus-go.git
-```
-
-Run tests:
-
-```
-$ make install-tools # Only first time.
-$ make
-```
-
-Checkout a new branch, make modifications and push the branch to your fork:
-
-```
-$ git checkout -b feature
-# edit files
-$ git commit
-$ git push fork feature
-```
-
-Open a pull request against the main opencensus-go repo.
-
-## General Notes
-This project uses Appveyor and Travis for CI.
-
-The dependencies are managed with `go mod`. If you work with the sources under your
-`$GOPATH` you need to set the environment variable `GO111MODULE=on`.
\ No newline at end of file
diff --git a/vendor/go.opencensus.io/Makefile b/vendor/go.opencensus.io/Makefile
deleted file mode 100644
index d896edc996813..0000000000000
--- a/vendor/go.opencensus.io/Makefile
+++ /dev/null
@@ -1,97 +0,0 @@
-# TODO: Fix this on windows.
-ALL_SRC := $(shell find . -name '*.go' \
- -not -path './vendor/*' \
- -not -path '*/gen-go/*' \
- -type f | sort)
-ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC))))
-
-GOTEST_OPT?=-v -race -timeout 30s
-GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=atomic
-GOTEST=go test
-GOIMPORTS=goimports
-GOLINT=golint
-GOVET=go vet
-EMBEDMD=embedmd
-# TODO decide if we need to change these names.
-TRACE_ID_LINT_EXCEPTION="type name will be used as trace.TraceID by other packages"
-TRACE_OPTION_LINT_EXCEPTION="type name will be used as trace.TraceOptions by other packages"
-README_FILES := $(shell find . -name '*README.md' | sort | tr '\n' ' ')
-
-.DEFAULT_GOAL := imports-lint-vet-embedmd-test
-
-.PHONY: imports-lint-vet-embedmd-test
-imports-lint-vet-embedmd-test: imports lint vet embedmd test
-
-# TODO enable test-with-coverage in travis
-.PHONY: travis-ci
-travis-ci: imports lint vet embedmd test test-386
-
-all-pkgs:
- @echo $(ALL_PKGS) | tr ' ' '\n' | sort
-
-all-srcs:
- @echo $(ALL_SRC) | tr ' ' '\n' | sort
-
-.PHONY: test
-test:
- $(GOTEST) $(GOTEST_OPT) $(ALL_PKGS)
-
-.PHONY: test-386
-test-386:
- GOARCH=386 $(GOTEST) -v -timeout 30s $(ALL_PKGS)
-
-.PHONY: test-with-coverage
-test-with-coverage:
- $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS)
-
-.PHONY: imports
-imports:
- @IMPORTSOUT=`$(GOIMPORTS) -l $(ALL_SRC) 2>&1`; \
- if [ "$$IMPORTSOUT" ]; then \
- echo "$(GOIMPORTS) FAILED => goimports the following files:\n"; \
- echo "$$IMPORTSOUT\n"; \
- exit 1; \
- else \
- echo "Imports finished successfully"; \
- fi
-
-.PHONY: lint
-lint:
- @LINTOUT=`$(GOLINT) $(ALL_PKGS) | grep -v $(TRACE_ID_LINT_EXCEPTION) | grep -v $(TRACE_OPTION_LINT_EXCEPTION) 2>&1`; \
- if [ "$$LINTOUT" ]; then \
- echo "$(GOLINT) FAILED => clean the following lint errors:\n"; \
- echo "$$LINTOUT\n"; \
- exit 1; \
- else \
- echo "Lint finished successfully"; \
- fi
-
-.PHONY: vet
-vet:
- # TODO: Understand why go vet downloads "github.com/google/go-cmp v0.2.0"
- @VETOUT=`$(GOVET) ./... | grep -v "go: downloading" 2>&1`; \
- if [ "$$VETOUT" ]; then \
- echo "$(GOVET) FAILED => go vet the following files:\n"; \
- echo "$$VETOUT\n"; \
- exit 1; \
- else \
- echo "Vet finished successfully"; \
- fi
-
-.PHONY: embedmd
-embedmd:
- @EMBEDMDOUT=`$(EMBEDMD) -d $(README_FILES) 2>&1`; \
- if [ "$$EMBEDMDOUT" ]; then \
- echo "$(EMBEDMD) FAILED => embedmd the following files:\n"; \
- echo "$$EMBEDMDOUT\n"; \
- exit 1; \
- else \
- echo "Embedmd finished successfully"; \
- fi
-
-.PHONY: install-tools
-install-tools:
- go install golang.org/x/lint/golint@latest
- go install golang.org/x/tools/cmd/cover@latest
- go install golang.org/x/tools/cmd/goimports@latest
- go install github.com/rakyll/embedmd@latest
diff --git a/vendor/go.opencensus.io/README.md b/vendor/go.opencensus.io/README.md
deleted file mode 100644
index 1d7e837116f0e..0000000000000
--- a/vendor/go.opencensus.io/README.md
+++ /dev/null
@@ -1,267 +0,0 @@
-# OpenCensus Libraries for Go
-
-[![Build Status][travis-image]][travis-url]
-[![Windows Build Status][appveyor-image]][appveyor-url]
-[![GoDoc][godoc-image]][godoc-url]
-[![Gitter chat][gitter-image]][gitter-url]
-
-OpenCensus Go is a Go implementation of OpenCensus, a toolkit for
-collecting application performance and behavior monitoring data.
-Currently it consists of three major components: tags, stats and tracing.
-
-#### OpenCensus and OpenTracing have merged to form OpenTelemetry, which serves as the next major version of OpenCensus and OpenTracing. OpenTelemetry will offer backwards compatibility with existing OpenCensus integrations, and we will continue to make security patches to existing OpenCensus libraries for two years. Read more about the merger [here](https://medium.com/opentracing/a-roadmap-to-convergence-b074e5815289).
-
-## Installation
-
-```
-$ go get -u go.opencensus.io
-```
-
-The API of this project is still evolving, see: [Deprecation Policy](#deprecation-policy).
-The use of vendoring or a dependency management tool is recommended.
-
-## Prerequisites
-
-OpenCensus Go libraries require Go 1.8 or later.
-
-## Getting Started
-
-The easiest way to get started using OpenCensus in your application is to use an existing
-integration with your RPC framework:
-
-* [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp)
-* [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc)
-* [database/sql](https://godoc.org/github.com/opencensus-integrations/ocsql)
-* [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus)
-* [Groupcache](https://godoc.org/github.com/orijtech/groupcache)
-* [Caddy webserver](https://godoc.org/github.com/orijtech/caddy)
-* [MongoDB](https://godoc.org/github.com/orijtech/mongo-go-driver)
-* [Redis gomodule/redigo](https://godoc.org/github.com/orijtech/redigo)
-* [Redis goredis/redis](https://godoc.org/github.com/orijtech/redis)
-* [Memcache](https://godoc.org/github.com/orijtech/gomemcache)
-
-If you're using a framework not listed here, you could either implement your own middleware for your
-framework or use [custom stats](#stats) and [spans](#spans) directly in your application.
-
-## Exporters
-
-OpenCensus can export instrumentation data to various backends.
-OpenCensus has exporter implementations for the following; users
-can implement their own exporters by implementing the exporter interfaces
-([stats](https://godoc.org/go.opencensus.io/stats/view#Exporter),
-[trace](https://godoc.org/go.opencensus.io/trace#Exporter)):
-
-* [Prometheus][exporter-prom] for stats
-* [OpenZipkin][exporter-zipkin] for traces
-* [Stackdriver][exporter-stackdriver] Monitoring for stats and Trace for traces
-* [Jaeger][exporter-jaeger] for traces
-* [AWS X-Ray][exporter-xray] for traces
-* [Datadog][exporter-datadog] for stats and traces
-* [Graphite][exporter-graphite] for stats
-* [Honeycomb][exporter-honeycomb] for traces
-* [New Relic][exporter-newrelic] for stats and traces
-
-## Overview
-
-
-
-In a microservices environment, a user request may go through
-multiple services until there is a response. OpenCensus allows
-you to instrument your services and collect diagnostics data all
-through your services end-to-end.
-
-## Tags
-
-Tags represent propagated key-value pairs. They are propagated using `context.Context`
-in the same process or can be encoded to be transmitted on the wire. Usually, this will
-be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler`
-for gRPC.
-
-Package `tag` allows adding or modifying tags in the current context.
-
-[embedmd]:# (internal/readme/tags.go new)
-```go
-ctx, err := tag.New(ctx,
- tag.Insert(osKey, "macOS-10.12.5"),
- tag.Upsert(userIDKey, "cde36753ed"),
-)
-if err != nil {
- log.Fatal(err)
-}
-```
-
-## Stats
-
-OpenCensus is a low-overhead framework even if instrumentation is always enabled.
-In order to be so, it is optimized to make recording of data points fast
-and separate from the data aggregation.
-
-OpenCensus stats collection happens in two stages:
-
-* Definition of measures and recording of data points
-* Definition of views and aggregation of the recorded data
-
-### Recording
-
-Measurements are data points associated with a measure.
-Recording implicitly tags the set of Measurements with the tags from the
-provided context:
-
-[embedmd]:# (internal/readme/stats.go record)
-```go
-stats.Record(ctx, videoSize.M(102478))
-```
-
-### Views
-
-Views are how Measures are aggregated. You can think of them as queries over the
-set of recorded data points (measurements).
-
-Views have two parts: the tags to group by and the aggregation type used.
-
-Currently three types of aggregations are supported:
-* CountAggregation is used to count the number of times a sample was recorded.
-* DistributionAggregation is used to provide a histogram of the values of the samples.
-* SumAggregation is used to sum up all sample values.
-
-[embedmd]:# (internal/readme/stats.go aggs)
-```go
-distAgg := view.Distribution(1<<32, 2<<32, 3<<32)
-countAgg := view.Count()
-sumAgg := view.Sum()
-```
-
-Here we create a view with the DistributionAggregation over our measure.
-
-[embedmd]:# (internal/readme/stats.go view)
-```go
-if err := view.Register(&view.View{
- Name: "example.com/video_size_distribution",
- Description: "distribution of processed video size over time",
- Measure: videoSize,
- Aggregation: view.Distribution(1<<32, 2<<32, 3<<32),
-}); err != nil {
- log.Fatalf("Failed to register view: %v", err)
-}
-```
-
-Register begins collecting data for the view. Registered views' data will be
-exported via the registered exporters.
-
-## Traces
-
-A distributed trace tracks the progression of a single user request as
-it is handled by the services and processes that make up an application.
-Each step is called a span in the trace. Spans include metadata about the step,
-including especially the time spent in the step, called the span’s latency.
-
-Below you see a trace and several spans underneath it.
-
-
-
-### Spans
-
-Span is the unit step in a trace. Each span has a name, latency, status and
-additional metadata.
-
-Below we are starting a span for a cache read and ending it
-when we are done:
-
-[embedmd]:# (internal/readme/trace.go startend)
-```go
-ctx, span := trace.StartSpan(ctx, "cache.Get")
-defer span.End()
-
-// Do work to get from cache.
-```
-
-### Propagation
-
-Spans can have parents or can be root spans if they don't have any parents.
-The current span is propagated in-process and across the network to allow associating
-new child spans with the parent.
-
-In the same process, `context.Context` is used to propagate spans.
-`trace.StartSpan` creates a new span as a root if the current context
-doesn't contain a span. Or, it creates a child of the span that is
-already in current context. The returned context can be used to keep
-propagating the newly created span in the current context.
-
-[embedmd]:# (internal/readme/trace.go startend)
-```go
-ctx, span := trace.StartSpan(ctx, "cache.Get")
-defer span.End()
-
-// Do work to get from cache.
-```
-
-Across the network, OpenCensus provides different propagation
-methods for different protocols.
-
-* gRPC integrations use the OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation).
-* HTTP integrations use Zipkin's [B3](https://github.com/openzipkin/b3-propagation)
- by default but can be configured to use a custom propagation method by setting another
- [propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat).
-
-## Execution Tracer
-
-With Go 1.11, OpenCensus Go will support integration with the Go execution tracer.
-See [Debugging Latency in Go](https://medium.com/observability/debugging-latency-in-go-1-11-9f97a7910d68)
-for an example of their mutual use.
-
-## Profiles
-
-OpenCensus tags can be applied as profiler labels
-for users who are on Go 1.9 and above.
-
-[embedmd]:# (internal/readme/tags.go profiler)
-```go
-ctx, err = tag.New(ctx,
- tag.Insert(osKey, "macOS-10.12.5"),
- tag.Insert(userIDKey, "fff0989878"),
-)
-if err != nil {
- log.Fatal(err)
-}
-tag.Do(ctx, func(ctx context.Context) {
- // Do work.
- // When profiling is on, samples will be
- // recorded with the key/values from the tag map.
-})
-```
-
-A screenshot of the CPU profile from the program above:
-
-
-
-## Deprecation Policy
-
-Before version 1.0.0, the following deprecation policy will be observed:
-
-No backwards-incompatible changes will be made except for the removal of symbols that have
-been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release
-removing the *Deprecated* functionality will be made no sooner than 28 days after the first
-release in which the functionality was marked *Deprecated*.
-
-[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master
-[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go
-[appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true
-[appveyor-url]: https://ci.appveyor.com/project/opencensusgoteam/opencensus-go/branch/master
-[godoc-image]: https://godoc.org/go.opencensus.io?status.svg
-[godoc-url]: https://godoc.org/go.opencensus.io
-[gitter-image]: https://badges.gitter.im/census-instrumentation/lobby.svg
-[gitter-url]: https://gitter.im/census-instrumentation/lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
-
-
-[new-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap
-[new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace
-
-[exporter-prom]: https://godoc.org/contrib.go.opencensus.io/exporter/prometheus
-[exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver
-[exporter-zipkin]: https://godoc.org/contrib.go.opencensus.io/exporter/zipkin
-[exporter-jaeger]: https://godoc.org/contrib.go.opencensus.io/exporter/jaeger
-[exporter-xray]: https://github.com/census-ecosystem/opencensus-go-exporter-aws
-[exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog
-[exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite
-[exporter-honeycomb]: https://github.com/honeycombio/opencensus-exporter
-[exporter-newrelic]: https://github.com/newrelic/newrelic-opencensus-exporter-go
diff --git a/vendor/go.opencensus.io/appveyor.yml b/vendor/go.opencensus.io/appveyor.yml
deleted file mode 100644
index d08f0edaff974..0000000000000
--- a/vendor/go.opencensus.io/appveyor.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-version: "{build}"
-
-platform: x64
-
-clone_folder: c:\gopath\src\go.opencensus.io
-
-environment:
- GOPATH: 'c:\gopath'
- GO111MODULE: 'on'
- CGO_ENABLED: '0' # See: https://github.com/appveyor/ci/issues/2613
-
-stack: go 1.11
-
-before_test:
- - go version
- - go env
-
-build: false
-deploy: false
-
-test_script:
- - cd %APPVEYOR_BUILD_FOLDER%
- - go build -v .\...
- - go test -v .\... # No -race because cgo is disabled
diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go
deleted file mode 100644
index 81dc7183ec39f..0000000000000
--- a/vendor/go.opencensus.io/internal/internal.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal // import "go.opencensus.io/internal"
-
-import (
- "fmt"
- "time"
-
- opencensus "go.opencensus.io"
-)
-
-// UserAgent is the user agent to be added to the outgoing
-// requests from the exporters.
-var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version())
-
-// MonotonicEndTime returns the end time at present
-// but offset from start, monotonically.
-//
-// The monotonic clock is used in the subtraction; hence the duration
-// since start, added back to start, gives end as a monotonic time.
-// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks
-func MonotonicEndTime(start time.Time) time.Time {
- return start.Add(time.Since(start))
-}
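`MonotonicEndTime` is subtler than it looks: `time.Since` reads Go's monotonic clock, so adding the elapsed duration back to `start` yields an end time whose offset from `start` cannot be skewed by wall-clock adjustments mid-span. A small sketch of the same trick:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now() // carries a monotonic clock reading
	time.Sleep(10 * time.Millisecond)
	// Equivalent to MonotonicEndTime(start): the span duration is
	// measured monotonically, immune to NTP steps or manual clock changes.
	end := start.Add(time.Since(start))
	fmt.Println(end.Sub(start))
}
```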
diff --git a/vendor/go.opencensus.io/internal/sanitize.go b/vendor/go.opencensus.io/internal/sanitize.go
deleted file mode 100644
index de8ccf236c4b2..0000000000000
--- a/vendor/go.opencensus.io/internal/sanitize.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
- "strings"
- "unicode"
-)
-
-const labelKeySizeLimit = 100
-
-// Sanitize returns a string that is truncated to 100 characters if it's too
-// long, and replaces non-alphanumeric characters with underscores.
-func Sanitize(s string) string {
- if len(s) == 0 {
- return s
- }
- if len(s) > labelKeySizeLimit {
- s = s[:labelKeySizeLimit]
- }
- s = strings.Map(sanitizeRune, s)
- if unicode.IsDigit(rune(s[0])) {
- s = "key_" + s
- }
- if s[0] == '_' {
- s = "key" + s
- }
- return s
-}
-
-// converts anything that is not a letter or digit to an underscore
-func sanitizeRune(r rune) rune {
- if unicode.IsLetter(r) || unicode.IsDigit(r) {
- return r
- }
- // Everything else turns into an underscore
- return '_'
-}
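The rules compose as follows; a standalone sketch with a local copy of the helper (the package itself is internal to the module, so it cannot be imported from outside):

```go
package main

import (
	"fmt"
	"strings"
	"unicode"
)

// sanitize is a local copy of the deleted helper, for demonstration only.
func sanitize(s string) string {
	if len(s) == 0 {
		return s
	}
	if len(s) > 100 {
		s = s[:100]
	}
	s = strings.Map(func(r rune) rune {
		if unicode.IsLetter(r) || unicode.IsDigit(r) {
			return r
		}
		return '_' // anything else becomes an underscore
	}, s)
	if unicode.IsDigit(rune(s[0])) {
		s = "key_" + s
	}
	if s[0] == '_' {
		s = "key" + s
	}
	return s
}

func main() {
	fmt.Println(sanitize("http.status-code")) // http_status_code
	fmt.Println(sanitize("2xx"))              // key_2xx
	fmt.Println(sanitize("_private"))         // key_private
}
```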
diff --git a/vendor/go.opencensus.io/internal/traceinternals.go b/vendor/go.opencensus.io/internal/traceinternals.go
deleted file mode 100644
index 073af7b473a63..0000000000000
--- a/vendor/go.opencensus.io/internal/traceinternals.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
- "time"
-)
-
-// Trace allows internal access to some trace functionality.
-// TODO(#412): remove this
-var Trace interface{}
-
-// LocalSpanStoreEnabled true if the local span store is enabled.
-var LocalSpanStoreEnabled bool
-
-// BucketConfiguration stores the number of samples to store for span buckets
-// for successful and failed spans for a particular span name.
-type BucketConfiguration struct {
- Name string
- MaxRequestsSucceeded int
- MaxRequestsErrors int
-}
-
-// PerMethodSummary is a summary of the spans stored for a single span name.
-type PerMethodSummary struct {
- Active int
- LatencyBuckets []LatencyBucketSummary
- ErrorBuckets []ErrorBucketSummary
-}
-
-// LatencyBucketSummary is a summary of a latency bucket.
-type LatencyBucketSummary struct {
- MinLatency, MaxLatency time.Duration
- Size int
-}
-
-// ErrorBucketSummary is a summary of an error bucket.
-type ErrorBucketSummary struct {
- ErrorCode int32
- Size int
-}
diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go
deleted file mode 100644
index 11e31f421c5d8..0000000000000
--- a/vendor/go.opencensus.io/opencensus.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package opencensus contains Go support for OpenCensus.
-package opencensus // import "go.opencensus.io"
-
-// Version is the current release version of OpenCensus in use.
-func Version() string {
- return "0.24.0"
-}
diff --git a/vendor/go.opencensus.io/trace/basetypes.go b/vendor/go.opencensus.io/trace/basetypes.go
deleted file mode 100644
index c8e26ed6355bc..0000000000000
--- a/vendor/go.opencensus.io/trace/basetypes.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "fmt"
- "time"
-)
-
-type (
- // TraceID is a 16-byte identifier for a set of spans.
- TraceID [16]byte
-
- // SpanID is an 8-byte identifier for a single span.
- SpanID [8]byte
-)
-
-func (t TraceID) String() string {
- return fmt.Sprintf("%02x", t[:])
-}
-
-func (s SpanID) String() string {
- return fmt.Sprintf("%02x", s[:])
-}
-
-// Annotation represents a text annotation with a set of attributes and a timestamp.
-type Annotation struct {
- Time time.Time
- Message string
- Attributes map[string]interface{}
-}
-
-// Attribute represents a key-value pair on a span, link or annotation.
-// Construct with one of: BoolAttribute, Int64Attribute, or StringAttribute.
-type Attribute struct {
- key string
- value interface{}
-}
-
-// Key returns the attribute's key
-func (a *Attribute) Key() string {
- return a.key
-}
-
-// Value returns the attribute's value
-func (a *Attribute) Value() interface{} {
- return a.value
-}
-
-// BoolAttribute returns a bool-valued attribute.
-func BoolAttribute(key string, value bool) Attribute {
- return Attribute{key: key, value: value}
-}
-
-// Int64Attribute returns an int64-valued attribute.
-func Int64Attribute(key string, value int64) Attribute {
- return Attribute{key: key, value: value}
-}
-
-// Float64Attribute returns a float64-valued attribute.
-func Float64Attribute(key string, value float64) Attribute {
- return Attribute{key: key, value: value}
-}
-
-// StringAttribute returns a string-valued attribute.
-func StringAttribute(key string, value string) Attribute {
- return Attribute{key: key, value: value}
-}
-
-// LinkType specifies the relationship between the span that had the link
-// added, and the linked span.
-type LinkType int32
-
-// LinkType values.
-const (
- LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown.
- LinkTypeChild // The linked span is a child of the current span.
- LinkTypeParent // The linked span is the parent of the current span.
-)
-
-// Link represents a reference from one span to another span.
-type Link struct {
- TraceID TraceID
- SpanID SpanID
- Type LinkType
- // Attributes is a set of attributes on the link.
- Attributes map[string]interface{}
-}
-
-// MessageEventType specifies the type of message event.
-type MessageEventType int32
-
-// MessageEventType values.
-const (
- MessageEventTypeUnspecified MessageEventType = iota // Unknown event type.
- MessageEventTypeSent // Indicates a sent RPC message.
- MessageEventTypeRecv // Indicates a received RPC message.
-)
-
-// MessageEvent represents an event describing a message sent or received on the network.
-type MessageEvent struct {
- Time time.Time
- EventType MessageEventType
- MessageID int64
- UncompressedByteSize int64
- CompressedByteSize int64
-}
-
-// Status is the status of a Span.
-type Status struct {
- // Code is a status code. Zero indicates success.
- //
- // If Code will be propagated to Google APIs, it ideally should be a value from
- // https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto .
- Code int32
- Message string
-}
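Attributes are only constructed through the typed helpers above, which keeps the value set to bool/int64/float64/string. A short sketch (the key names are made up):

```go
package main

import (
	"fmt"

	"go.opencensus.io/trace"
)

func main() {
	attrs := []trace.Attribute{
		trace.StringAttribute("user.id", "cde36753ed"),
		trace.Int64Attribute("response.bytes", 1024),
		trace.BoolAttribute("cache.hit", true),
		trace.Float64Attribute("score", 0.87),
	}
	for _, a := range attrs {
		fmt.Println(a.Key(), a.Value())
	}
}
```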
diff --git a/vendor/go.opencensus.io/trace/config.go b/vendor/go.opencensus.io/trace/config.go
deleted file mode 100644
index 775f8274faae2..0000000000000
--- a/vendor/go.opencensus.io/trace/config.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "sync"
-
- "go.opencensus.io/trace/internal"
-)
-
-// Config represents the global tracing configuration.
-type Config struct {
- // DefaultSampler is the default sampler used when creating new spans.
- DefaultSampler Sampler
-
- // IDGenerator is for internal use only.
- IDGenerator internal.IDGenerator
-
- // MaxAnnotationEventsPerSpan is max number of annotation events per span
- MaxAnnotationEventsPerSpan int
-
- // MaxMessageEventsPerSpan is max number of message events per span
- MaxMessageEventsPerSpan int
-
-	// MaxAttributesPerSpan is max number of attributes per span
- MaxAttributesPerSpan int
-
- // MaxLinksPerSpan is max number of links per span
- MaxLinksPerSpan int
-}
-
-var configWriteMu sync.Mutex
-
-const (
- // DefaultMaxAnnotationEventsPerSpan is default max number of annotation events per span
- DefaultMaxAnnotationEventsPerSpan = 32
-
- // DefaultMaxMessageEventsPerSpan is default max number of message events per span
- DefaultMaxMessageEventsPerSpan = 128
-
- // DefaultMaxAttributesPerSpan is default max number of attributes per span
- DefaultMaxAttributesPerSpan = 32
-
- // DefaultMaxLinksPerSpan is default max number of links per span
- DefaultMaxLinksPerSpan = 32
-)
-
-// ApplyConfig applies changes to the global tracing configuration.
-//
-// Fields not provided in the given config are going to be preserved.
-func ApplyConfig(cfg Config) {
- configWriteMu.Lock()
- defer configWriteMu.Unlock()
- c := *config.Load().(*Config)
- if cfg.DefaultSampler != nil {
- c.DefaultSampler = cfg.DefaultSampler
- }
- if cfg.IDGenerator != nil {
- c.IDGenerator = cfg.IDGenerator
- }
- if cfg.MaxAnnotationEventsPerSpan > 0 {
- c.MaxAnnotationEventsPerSpan = cfg.MaxAnnotationEventsPerSpan
- }
- if cfg.MaxMessageEventsPerSpan > 0 {
- c.MaxMessageEventsPerSpan = cfg.MaxMessageEventsPerSpan
- }
- if cfg.MaxAttributesPerSpan > 0 {
- c.MaxAttributesPerSpan = cfg.MaxAttributesPerSpan
- }
- if cfg.MaxLinksPerSpan > 0 {
- c.MaxLinksPerSpan = cfg.MaxLinksPerSpan
- }
- config.Store(&c)
-}
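`ApplyConfig` merges rather than replaces: zero-valued fields keep the current settings, so raising a single limit is a one-liner (illustrative value):

```go
package main

import "go.opencensus.io/trace"

func main() {
	// Only the attribute cap changes; the sampler and the other
	// per-span limits retain their current values.
	trace.ApplyConfig(trace.Config{MaxAttributesPerSpan: 64})
}
```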
diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go
deleted file mode 100644
index 7a1616a55c5e3..0000000000000
--- a/vendor/go.opencensus.io/trace/doc.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package trace contains support for OpenCensus distributed tracing.
-
-The following assumes a basic familiarity with OpenCensus concepts.
-See http://opencensus.io
-
-# Exporting Traces
-
-To export collected tracing data, register at least one exporter. You can use
-one of the provided exporters or write your own.
-
- trace.RegisterExporter(exporter)
-
-By default, traces will be sampled relatively rarely. To change the sampling
-frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler
-to sample a subset of traces, or use AlwaysSample to collect a trace on every run:
-
- trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
-
-Be careful about using trace.AlwaysSample in a production application with
-significant traffic: a new trace will be started and exported for every request.
-
-# Adding Spans to a Trace
-
-A trace consists of a tree of spans. In Go, the current span is carried in a
-context.Context.
-
-It is common to want to capture all the activity of a function call in a span. For
-this to work, the function must take a context.Context as a parameter. Add these two
-lines to the top of the function:
-
- ctx, span := trace.StartSpan(ctx, "example.com/Run")
- defer span.End()
-
-StartSpan will create a new top-level span if the context
-doesn't contain another span, otherwise it will create a child span.
-*/
-package trace // import "go.opencensus.io/trace"
diff --git a/vendor/go.opencensus.io/trace/evictedqueue.go b/vendor/go.opencensus.io/trace/evictedqueue.go
deleted file mode 100644
index ffc264f23d2dd..0000000000000
--- a/vendor/go.opencensus.io/trace/evictedqueue.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2019, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-type evictedQueue struct {
- queue []interface{}
- capacity int
- droppedCount int
-}
-
-func newEvictedQueue(capacity int) *evictedQueue {
- eq := &evictedQueue{
- capacity: capacity,
- queue: make([]interface{}, 0),
- }
-
- return eq
-}
-
-func (eq *evictedQueue) add(value interface{}) {
- if len(eq.queue) == eq.capacity {
- eq.queue = eq.queue[1:]
- eq.droppedCount++
- }
- eq.queue = append(eq.queue, value)
-}
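The queue above drops from the head once capacity is reached and counts what it dropped. The same behavior in a standalone sketch (the type here is an illustration, not the unexported original):

```go
package main

import "fmt"

type fifo struct {
	queue   []string
	cap     int
	dropped int
}

func (q *fifo) add(v string) {
	if len(q.queue) == q.cap {
		q.queue = q.queue[1:] // evict the oldest entry
		q.dropped++
	}
	q.queue = append(q.queue, v)
}

func main() {
	q := &fifo{cap: 2}
	q.add("a")
	q.add("b")
	q.add("c")
	fmt.Println(q.queue, q.dropped) // [b c] 1
}
```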
diff --git a/vendor/go.opencensus.io/trace/export.go b/vendor/go.opencensus.io/trace/export.go
deleted file mode 100644
index e0d9a4b99e96f..0000000000000
--- a/vendor/go.opencensus.io/trace/export.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "sync"
- "sync/atomic"
- "time"
-)
-
-// Exporter is a type for functions that receive sampled trace spans.
-//
-// The ExportSpan method should be safe for concurrent use and should return
-// quickly; if an Exporter takes a significant amount of time to process a
-// SpanData, that work should be done on another goroutine.
-//
-// The SpanData should not be modified, but a pointer to it can be kept.
-type Exporter interface {
- ExportSpan(s *SpanData)
-}
-
-type exportersMap map[Exporter]struct{}
-
-var (
- exporterMu sync.Mutex
- exporters atomic.Value
-)
-
-// RegisterExporter adds to the list of Exporters that will receive sampled
-// trace spans.
-//
-// Binaries can register exporters, libraries shouldn't register exporters.
-func RegisterExporter(e Exporter) {
- exporterMu.Lock()
- new := make(exportersMap)
- if old, ok := exporters.Load().(exportersMap); ok {
- for k, v := range old {
- new[k] = v
- }
- }
- new[e] = struct{}{}
- exporters.Store(new)
- exporterMu.Unlock()
-}
-
-// UnregisterExporter removes from the list of Exporters the Exporter that was
-// registered with the given name.
-func UnregisterExporter(e Exporter) {
- exporterMu.Lock()
- new := make(exportersMap)
- if old, ok := exporters.Load().(exportersMap); ok {
- for k, v := range old {
- new[k] = v
- }
- }
- delete(new, e)
- exporters.Store(new)
- exporterMu.Unlock()
-}
-
-// SpanData contains all the information collected by a Span.
-type SpanData struct {
- SpanContext
- ParentSpanID SpanID
- SpanKind int
- Name string
- StartTime time.Time
- // The wall clock time of EndTime will be adjusted to always be offset
- // from StartTime by the duration of the span.
- EndTime time.Time
- // The values of Attributes each have type string, bool, or int64.
- Attributes map[string]interface{}
- Annotations []Annotation
- MessageEvents []MessageEvent
- Status
- Links []Link
- HasRemoteParent bool
- DroppedAttributeCount int
- DroppedAnnotationCount int
- DroppedMessageEventCount int
- DroppedLinkCount int
-
-	// ChildSpanCount holds the number of child spans created for this span.
- ChildSpanCount int
-}
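A minimal Exporter against the interface above: `ExportSpan` must be safe for concurrent use and return quickly, so a real exporter would hand the `SpanData` to a worker goroutine; this sketch just prints (illustrative):

```go
package main

import (
	"fmt"

	"go.opencensus.io/trace"
)

type printExporter struct{}

// ExportSpan receives every sampled span; it must not block for long.
func (printExporter) ExportSpan(sd *trace.SpanData) {
	fmt.Println(sd.Name, sd.EndTime.Sub(sd.StartTime))
}

func main() {
	trace.RegisterExporter(printExporter{})
}
```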
diff --git a/vendor/go.opencensus.io/trace/internal/internal.go b/vendor/go.opencensus.io/trace/internal/internal.go
deleted file mode 100644
index 7e808d8f30e60..0000000000000
--- a/vendor/go.opencensus.io/trace/internal/internal.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package internal provides trace internals.
-package internal
-
-// IDGenerator allows custom generators for TraceId and SpanId.
-type IDGenerator interface {
- NewTraceID() [16]byte
- NewSpanID() [8]byte
-}
diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go
deleted file mode 100644
index 80095a5f6c03e..0000000000000
--- a/vendor/go.opencensus.io/trace/lrumap.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2019, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "github.com/golang/groupcache/lru"
-)
-
-// A simple lru.Cache wrapper that tracks the keys of the current contents and
-// the cumulative number of evicted items.
-type lruMap struct {
- cacheKeys map[lru.Key]bool
- cache *lru.Cache
- droppedCount int
-}
-
-func newLruMap(size int) *lruMap {
- lm := &lruMap{
- cacheKeys: make(map[lru.Key]bool),
- cache: lru.New(size),
- droppedCount: 0,
- }
- lm.cache.OnEvicted = func(key lru.Key, value interface{}) {
- delete(lm.cacheKeys, key)
- lm.droppedCount++
- }
- return lm
-}
-
-func (lm lruMap) len() int {
- return lm.cache.Len()
-}
-
-func (lm lruMap) keys() []interface{} {
- keys := make([]interface{}, 0, len(lm.cacheKeys))
- for k := range lm.cacheKeys {
- keys = append(keys, k)
- }
- return keys
-}
-
-func (lm *lruMap) add(key, value interface{}) {
- lm.cacheKeys[lru.Key(key)] = true
- lm.cache.Add(lru.Key(key), value)
-}
-
-func (lm *lruMap) get(key interface{}) (interface{}, bool) {
- return lm.cache.Get(key)
-}
diff --git a/vendor/go.opencensus.io/trace/sampling.go b/vendor/go.opencensus.io/trace/sampling.go
deleted file mode 100644
index 71c10f9e3b424..0000000000000
--- a/vendor/go.opencensus.io/trace/sampling.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "encoding/binary"
-)
-
-const defaultSamplingProbability = 1e-4
-
-// Sampler decides whether a trace should be sampled and exported.
-type Sampler func(SamplingParameters) SamplingDecision
-
-// SamplingParameters contains the values passed to a Sampler.
-type SamplingParameters struct {
- ParentContext SpanContext
- TraceID TraceID
- SpanID SpanID
- Name string
- HasRemoteParent bool
-}
-
-// SamplingDecision is the value returned by a Sampler.
-type SamplingDecision struct {
- Sample bool
-}
-
-// ProbabilitySampler returns a Sampler that samples a given fraction of traces.
-//
-// It also samples spans whose parents are sampled.
-func ProbabilitySampler(fraction float64) Sampler {
- if !(fraction >= 0) {
- fraction = 0
- } else if fraction >= 1 {
- return AlwaysSample()
- }
-
- traceIDUpperBound := uint64(fraction * (1 << 63))
- return Sampler(func(p SamplingParameters) SamplingDecision {
- if p.ParentContext.IsSampled() {
- return SamplingDecision{Sample: true}
- }
- x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1
- return SamplingDecision{Sample: x < traceIDUpperBound}
- })
-}
-
-// AlwaysSample returns a Sampler that samples every trace.
-// Be careful about using this sampler in a production application with
-// significant traffic: a new trace will be started and exported for every
-// request.
-func AlwaysSample() Sampler {
- return func(p SamplingParameters) SamplingDecision {
- return SamplingDecision{Sample: true}
- }
-}
-
-// NeverSample returns a Sampler that samples no traces.
-func NeverSample() Sampler {
- return func(p SamplingParameters) SamplingDecision {
- return SamplingDecision{Sample: false}
- }
-}
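The probability decision above is a pure function of the trace ID: the top 63 bits are compared against `fraction * 2^63`, so every process reaches the same verdict for the same trace. A standalone sketch of that arithmetic:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// sampled reproduces the ProbabilitySampler comparison, for illustration.
func sampled(traceID [16]byte, fraction float64) bool {
	upperBound := uint64(fraction * (1 << 63))
	x := binary.BigEndian.Uint64(traceID[0:8]) >> 1 // top 63 bits of the ID
	return x < upperBound
}

func main() {
	id := [16]byte{0, 0, 0, 0, 0, 0, 0x10, 0}
	// Deterministic: the same trace ID samples the same way everywhere.
	fmt.Println(sampled(id, 1e-4)) // true, since the ID's upper bits are tiny
}
```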
diff --git a/vendor/go.opencensus.io/trace/spanbucket.go b/vendor/go.opencensus.io/trace/spanbucket.go
deleted file mode 100644
index fbabad34c000d..0000000000000
--- a/vendor/go.opencensus.io/trace/spanbucket.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "time"
-)
-
-// samplePeriod is the minimum time between accepting spans in a single bucket.
-const samplePeriod = time.Second
-
-// defaultLatencies contains the default latency bucket bounds.
-// TODO: consider defaults, make configurable
-var defaultLatencies = [...]time.Duration{
- 10 * time.Microsecond,
- 100 * time.Microsecond,
- time.Millisecond,
- 10 * time.Millisecond,
- 100 * time.Millisecond,
- time.Second,
- 10 * time.Second,
- time.Minute,
-}
-
-// bucket is a container for a set of spans for a particular error code or latency range.
-type bucket struct {
- nextTime time.Time // next time we can accept a span
- buffer []*SpanData // circular buffer of spans
- nextIndex int // location next SpanData should be placed in buffer
- overflow bool // whether the circular buffer has wrapped around
-}
-
-func makeBucket(bufferSize int) bucket {
- return bucket{
- buffer: make([]*SpanData, bufferSize),
- }
-}
-
-// add adds a span to the bucket, if nextTime has been reached.
-func (b *bucket) add(s *SpanData) {
- if s.EndTime.Before(b.nextTime) {
- return
- }
- if len(b.buffer) == 0 {
- return
- }
- b.nextTime = s.EndTime.Add(samplePeriod)
- b.buffer[b.nextIndex] = s
- b.nextIndex++
- if b.nextIndex == len(b.buffer) {
- b.nextIndex = 0
- b.overflow = true
- }
-}
-
-// size returns the number of spans in the bucket.
-func (b *bucket) size() int {
- if b.overflow {
- return len(b.buffer)
- }
- return b.nextIndex
-}
-
-// span returns the ith span in the bucket.
-func (b *bucket) span(i int) *SpanData {
- if !b.overflow {
- return b.buffer[i]
- }
- if i < len(b.buffer)-b.nextIndex {
- return b.buffer[b.nextIndex+i]
- }
- return b.buffer[b.nextIndex+i-len(b.buffer)]
-}
-
-// resize changes the size of the bucket to n, keeping up to n existing spans.
-func (b *bucket) resize(n int) {
- cur := b.size()
- newBuffer := make([]*SpanData, n)
- if cur < n {
- for i := 0; i < cur; i++ {
- newBuffer[i] = b.span(i)
- }
- b.buffer = newBuffer
- b.nextIndex = cur
- b.overflow = false
- return
- }
- for i := 0; i < n; i++ {
- newBuffer[i] = b.span(i + cur - n)
- }
- b.buffer = newBuffer
- b.nextIndex = 0
- b.overflow = true
-}
-
-// latencyBucket returns the appropriate bucket number for a given latency.
-func latencyBucket(latency time.Duration) int {
- i := 0
- for i < len(defaultLatencies) && latency >= defaultLatencies[i] {
- i++
- }
- return i
-}
-
-// latencyBucketBounds returns the lower and upper bounds for a latency bucket
-// number.
-//
-// The lower bound is inclusive, the upper bound is exclusive (except for the
-// last bucket.)
-func latencyBucketBounds(index int) (lower time.Duration, upper time.Duration) {
- if index == 0 {
- return 0, defaultLatencies[index]
- }
- if index == len(defaultLatencies) {
- return defaultLatencies[index-1], 1<<63 - 1
- }
- return defaultLatencies[index-1], defaultLatencies[index]
-}
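A worked example of the bucketing above: with the default bounds, a 25ms span lands in bucket 4, i.e. the [10ms, 100ms) range, and anything of a minute or more falls into the unbounded ninth bucket. A standalone sketch (local copy of the bounds, for demonstration only):

```go
package main

import (
	"fmt"
	"time"
)

// bounds mirrors the deleted defaultLatencies table.
var bounds = []time.Duration{
	10 * time.Microsecond, 100 * time.Microsecond, time.Millisecond,
	10 * time.Millisecond, 100 * time.Millisecond, time.Second,
	10 * time.Second, time.Minute,
}

func bucketFor(latency time.Duration) int {
	i := 0
	for i < len(bounds) && latency >= bounds[i] {
		i++
	}
	return i
}

func main() {
	fmt.Println(bucketFor(25 * time.Millisecond)) // 4 -> [10ms, 100ms)
	fmt.Println(bucketFor(2 * time.Minute))       // 8 -> [1m, +inf)
}
```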
diff --git a/vendor/go.opencensus.io/trace/spanstore.go b/vendor/go.opencensus.io/trace/spanstore.go
deleted file mode 100644
index e601f76f2c82e..0000000000000
--- a/vendor/go.opencensus.io/trace/spanstore.go
+++ /dev/null
@@ -1,308 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "sync"
- "time"
-
- "go.opencensus.io/internal"
-)
-
-const (
- maxBucketSize = 100000
- defaultBucketSize = 10
-)
-
-var (
- ssmu sync.RWMutex // protects spanStores
- spanStores = make(map[string]*spanStore)
-)
-
-// This exists purely to avoid exposing internal methods used by z-Pages externally.
-type internalOnly struct{}
-
-func init() {
- //TODO(#412): remove
- internal.Trace = &internalOnly{}
-}
-
-// ReportActiveSpans returns the active spans for the given name.
-func (i internalOnly) ReportActiveSpans(name string) []*SpanData {
- s := spanStoreForName(name)
- if s == nil {
- return nil
- }
- var out []*SpanData
- s.mu.Lock()
- defer s.mu.Unlock()
- for activeSpan := range s.active {
- if s, ok := activeSpan.(*span); ok {
- out = append(out, s.makeSpanData())
- }
- }
- return out
-}
-
-// ReportSpansByError returns a sample of error spans.
-//
-// If code is nonzero, only spans with that status code are returned.
-func (i internalOnly) ReportSpansByError(name string, code int32) []*SpanData {
- s := spanStoreForName(name)
- if s == nil {
- return nil
- }
- var out []*SpanData
- s.mu.Lock()
- defer s.mu.Unlock()
- if code != 0 {
- if b, ok := s.errors[code]; ok {
- for _, sd := range b.buffer {
- if sd == nil {
- break
- }
- out = append(out, sd)
- }
- }
- } else {
- for _, b := range s.errors {
- for _, sd := range b.buffer {
- if sd == nil {
- break
- }
- out = append(out, sd)
- }
- }
- }
- return out
-}
-
-// ConfigureBucketSizes sets the number of spans to keep per latency and error
-// bucket for different span names.
-func (i internalOnly) ConfigureBucketSizes(bcs []internal.BucketConfiguration) {
- for _, bc := range bcs {
- latencyBucketSize := bc.MaxRequestsSucceeded
- if latencyBucketSize < 0 {
- latencyBucketSize = 0
- }
- if latencyBucketSize > maxBucketSize {
- latencyBucketSize = maxBucketSize
- }
- errorBucketSize := bc.MaxRequestsErrors
- if errorBucketSize < 0 {
- errorBucketSize = 0
- }
- if errorBucketSize > maxBucketSize {
- errorBucketSize = maxBucketSize
- }
- spanStoreSetSize(bc.Name, latencyBucketSize, errorBucketSize)
- }
-}
-
-// ReportSpansPerMethod returns a summary of what spans are being stored for each span name.
-func (i internalOnly) ReportSpansPerMethod() map[string]internal.PerMethodSummary {
- out := make(map[string]internal.PerMethodSummary)
- ssmu.RLock()
- defer ssmu.RUnlock()
- for name, s := range spanStores {
- s.mu.Lock()
- p := internal.PerMethodSummary{
- Active: len(s.active),
- }
- for code, b := range s.errors {
- p.ErrorBuckets = append(p.ErrorBuckets, internal.ErrorBucketSummary{
- ErrorCode: code,
- Size: b.size(),
- })
- }
- for i, b := range s.latency {
- min, max := latencyBucketBounds(i)
- p.LatencyBuckets = append(p.LatencyBuckets, internal.LatencyBucketSummary{
- MinLatency: min,
- MaxLatency: max,
- Size: b.size(),
- })
- }
- s.mu.Unlock()
- out[name] = p
- }
- return out
-}
-
-// ReportSpansByLatency returns a sample of successful spans.
-//
-// minLatency is the minimum latency of spans to be returned.
-// maxLatency, if nonzero, is the maximum latency of spans to be returned.
-func (i internalOnly) ReportSpansByLatency(name string, minLatency, maxLatency time.Duration) []*SpanData {
- s := spanStoreForName(name)
- if s == nil {
- return nil
- }
- var out []*SpanData
- s.mu.Lock()
- defer s.mu.Unlock()
- for i, b := range s.latency {
- min, max := latencyBucketBounds(i)
- if i+1 != len(s.latency) && max <= minLatency {
- continue
- }
- if maxLatency != 0 && maxLatency < min {
- continue
- }
- for _, sd := range b.buffer {
- if sd == nil {
- break
- }
- if minLatency != 0 || maxLatency != 0 {
- d := sd.EndTime.Sub(sd.StartTime)
- if d < minLatency {
- continue
- }
- if maxLatency != 0 && d > maxLatency {
- continue
- }
- }
- out = append(out, sd)
- }
- }
- return out
-}
-
-// spanStore keeps track of spans stored for a particular span name.
-//
-// It contains all active spans; a sample of spans for failed requests,
-// categorized by error code; and a sample of spans for successful requests,
-// bucketed by latency.
-type spanStore struct {
- mu sync.Mutex // protects everything below.
- active map[SpanInterface]struct{}
- errors map[int32]*bucket
- latency []bucket
- maxSpansPerErrorBucket int
-}
-
-// newSpanStore creates a span store.
-func newSpanStore(name string, latencyBucketSize int, errorBucketSize int) *spanStore {
- s := &spanStore{
- active: make(map[SpanInterface]struct{}),
- latency: make([]bucket, len(defaultLatencies)+1),
- maxSpansPerErrorBucket: errorBucketSize,
- }
- for i := range s.latency {
- s.latency[i] = makeBucket(latencyBucketSize)
- }
- return s
-}
-
-// spanStoreForName returns the spanStore for the given name.
-//
-// It returns nil if it doesn't exist.
-func spanStoreForName(name string) *spanStore {
- var s *spanStore
- ssmu.RLock()
- s, _ = spanStores[name]
- ssmu.RUnlock()
- return s
-}
-
-// spanStoreForNameCreateIfNew returns the spanStore for the given name.
-//
-// It creates it if it didn't exist.
-func spanStoreForNameCreateIfNew(name string) *spanStore {
- ssmu.RLock()
- s, ok := spanStores[name]
- ssmu.RUnlock()
- if ok {
- return s
- }
- ssmu.Lock()
- defer ssmu.Unlock()
- s, ok = spanStores[name]
- if ok {
- return s
- }
- s = newSpanStore(name, defaultBucketSize, defaultBucketSize)
- spanStores[name] = s
- return s
-}
-
-// spanStoreSetSize resizes the spanStore for the given name.
-//
-// It creates it if it didn't exist.
-func spanStoreSetSize(name string, latencyBucketSize int, errorBucketSize int) {
- ssmu.RLock()
- s, ok := spanStores[name]
- ssmu.RUnlock()
- if ok {
- s.resize(latencyBucketSize, errorBucketSize)
- return
- }
- ssmu.Lock()
- defer ssmu.Unlock()
- s, ok = spanStores[name]
- if ok {
- s.resize(latencyBucketSize, errorBucketSize)
- return
- }
- s = newSpanStore(name, latencyBucketSize, errorBucketSize)
- spanStores[name] = s
-}
-
-func (s *spanStore) resize(latencyBucketSize int, errorBucketSize int) {
- s.mu.Lock()
- for i := range s.latency {
- s.latency[i].resize(latencyBucketSize)
- }
- for _, b := range s.errors {
- b.resize(errorBucketSize)
- }
- s.maxSpansPerErrorBucket = errorBucketSize
- s.mu.Unlock()
-}
-
-// add adds a span to the active bucket of the spanStore.
-func (s *spanStore) add(span SpanInterface) {
- s.mu.Lock()
- s.active[span] = struct{}{}
- s.mu.Unlock()
-}
-
-// finished removes a span from the active set, and adds a corresponding
-// SpanData to a latency or error bucket.
-func (s *spanStore) finished(span SpanInterface, sd *SpanData) {
- latency := sd.EndTime.Sub(sd.StartTime)
- if latency < 0 {
- latency = 0
- }
- code := sd.Status.Code
-
- s.mu.Lock()
- delete(s.active, span)
- if code == 0 {
- s.latency[latencyBucket(latency)].add(sd)
- } else {
- if s.errors == nil {
- s.errors = make(map[int32]*bucket)
- }
- if b := s.errors[code]; b != nil {
- b.add(sd)
- } else {
- b := makeBucket(s.maxSpansPerErrorBucket)
- s.errors[code] = &b
- b.add(sd)
- }
- }
- s.mu.Unlock()
-}
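`spanStoreForNameCreateIfNew` above uses the classic double-checked pattern: an RLock fast path, then a full Lock with a re-check before insert, so two goroutines racing on the same name never create two stores. The idiom in isolation (a generic sketch, not the OpenCensus types):

```go
package main

import "sync"

var (
	mu     sync.RWMutex
	stores = map[string]*int{}
)

func storeFor(name string) *int {
	mu.RLock()
	s, ok := stores[name]
	mu.RUnlock()
	if ok {
		return s // fast path: no write lock taken
	}
	mu.Lock()
	defer mu.Unlock()
	if s, ok := stores[name]; ok {
		return s // re-check: another goroutine created it meanwhile
	}
	s = new(int)
	stores[name] = s
	return s
}

func main() { _ = storeFor("example") }
```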
diff --git a/vendor/go.opencensus.io/trace/status_codes.go b/vendor/go.opencensus.io/trace/status_codes.go
deleted file mode 100644
index ec60effd10882..0000000000000
--- a/vendor/go.opencensus.io/trace/status_codes.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-// Status codes for use with Span.SetStatus. These correspond to the status
-// codes used by gRPC defined here: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
-const (
- StatusCodeOK = 0
- StatusCodeCancelled = 1
- StatusCodeUnknown = 2
- StatusCodeInvalidArgument = 3
- StatusCodeDeadlineExceeded = 4
- StatusCodeNotFound = 5
- StatusCodeAlreadyExists = 6
- StatusCodePermissionDenied = 7
- StatusCodeResourceExhausted = 8
- StatusCodeFailedPrecondition = 9
- StatusCodeAborted = 10
- StatusCodeOutOfRange = 11
- StatusCodeUnimplemented = 12
- StatusCodeInternal = 13
- StatusCodeUnavailable = 14
- StatusCodeDataLoss = 15
- StatusCodeUnauthenticated = 16
-)
diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go
deleted file mode 100644
index 861df9d3913db..0000000000000
--- a/vendor/go.opencensus.io/trace/trace.go
+++ /dev/null
@@ -1,595 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "context"
- crand "crypto/rand"
- "encoding/binary"
- "fmt"
- "math/rand"
- "sync"
- "sync/atomic"
- "time"
-
- "go.opencensus.io/internal"
- "go.opencensus.io/trace/tracestate"
-)
-
-type tracer struct{}
-
-var _ Tracer = &tracer{}
-
-// Span represents a span of a trace. It has an associated SpanContext, and
-// stores data accumulated while the span is active.
-//
-// Ideally users should interact with Spans by calling the functions in this
-// package that take a Context parameter.
-type span struct {
- // data contains information recorded about the span.
- //
- // It will be non-nil if we are exporting the span or recording events for it.
- // Otherwise, data is nil, and the Span is simply a carrier for the
- // SpanContext, so that the trace ID is propagated.
- data *SpanData
- mu sync.Mutex // protects the contents of *data (but not the pointer value.)
- spanContext SpanContext
-
-	// lruAttributes are capped at the configured limit. When the capacity is reached, the oldest entry
- // is removed to create room for a new entry.
- lruAttributes *lruMap
-
- // annotations are stored in FIFO queue capped by configured limit.
- annotations *evictedQueue
-
- // messageEvents are stored in FIFO queue capped by configured limit.
- messageEvents *evictedQueue
-
- // links are stored in FIFO queue capped by configured limit.
- links *evictedQueue
-
- // spanStore is the spanStore this span belongs to, if any, otherwise it is nil.
- *spanStore
- endOnce sync.Once
-
- executionTracerTaskEnd func() // ends the execution tracer span
-}
-
-// IsRecordingEvents returns true if events are being recorded for this span.
-// Use this check to avoid computing expensive annotations when they will never
-// be used.
-func (s *span) IsRecordingEvents() bool {
- if s == nil {
- return false
- }
- return s.data != nil
-}
-
-// TraceOptions contains options associated with a trace span.
-type TraceOptions uint32
-
-// IsSampled returns true if the span will be exported.
-func (sc SpanContext) IsSampled() bool {
- return sc.TraceOptions.IsSampled()
-}
-
-// setIsSampled sets the TraceOptions bit that determines whether the span will be exported.
-func (sc *SpanContext) setIsSampled(sampled bool) {
- if sampled {
- sc.TraceOptions |= 1
- } else {
- sc.TraceOptions &= ^TraceOptions(1)
- }
-}
-
-// IsSampled returns true if the span will be exported.
-func (t TraceOptions) IsSampled() bool {
- return t&1 == 1
-}
-
-// SpanContext contains the state that must propagate across process boundaries.
-//
-// SpanContext is not an implementation of context.Context.
-// TODO: add reference to external Census docs for SpanContext.
-type SpanContext struct {
- TraceID TraceID
- SpanID SpanID
- TraceOptions TraceOptions
- Tracestate *tracestate.Tracestate
-}
-
-type contextKey struct{}
-
-// FromContext returns the Span stored in a context, or nil if there isn't one.
-func (t *tracer) FromContext(ctx context.Context) *Span {
- s, _ := ctx.Value(contextKey{}).(*Span)
- return s
-}
-
-// NewContext returns a new context with the given Span attached.
-func (t *tracer) NewContext(parent context.Context, s *Span) context.Context {
- return context.WithValue(parent, contextKey{}, s)
-}
-
-// All available span kinds. Span kind must be either one of these values.
-const (
- SpanKindUnspecified = iota
- SpanKindServer
- SpanKindClient
-)
-
-// StartOptions contains options concerning how a span is started.
-type StartOptions struct {
- // Sampler to consult for this Span. If provided, it is always consulted.
- //
- // If not provided, then the behavior differs based on whether
- // the parent of this Span is remote, local, or there is no parent.
- // In the case of a remote parent or no parent, the
- // default sampler (see Config) will be consulted. Otherwise,
- // when there is a non-remote parent, no new sampling decision will be made:
- // we will preserve the sampling of the parent.
- Sampler Sampler
-
- // SpanKind represents the kind of a span. If none is set,
- // SpanKindUnspecified is used.
- SpanKind int
-}
-
-// StartOption apply changes to StartOptions.
-type StartOption func(*StartOptions)
-
-// WithSpanKind makes new spans to be created with the given kind.
-func WithSpanKind(spanKind int) StartOption {
- return func(o *StartOptions) {
- o.SpanKind = spanKind
- }
-}
-
-// WithSampler makes new spans to be be created with a custom sampler.
-// Otherwise, the global sampler is used.
-func WithSampler(sampler Sampler) StartOption {
- return func(o *StartOptions) {
- o.Sampler = sampler
- }
-}
-
-// StartSpan starts a new child span of the current span in the context. If
-// there is no span in the context, creates a new trace and span.
-//
-// Returned context contains the newly created span. You can use it to
-// propagate the returned span in process.
-func (t *tracer) StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) {
- var opts StartOptions
- var parent SpanContext
- if p := t.FromContext(ctx); p != nil {
- if ps, ok := p.internal.(*span); ok {
- ps.addChild()
- }
- parent = p.SpanContext()
- }
- for _, op := range o {
- op(&opts)
- }
- span := startSpanInternal(name, parent != SpanContext{}, parent, false, opts)
-
- ctx, end := startExecutionTracerTask(ctx, name)
- span.executionTracerTaskEnd = end
- extSpan := NewSpan(span)
- return t.NewContext(ctx, extSpan), extSpan
-}
-
-// StartSpanWithRemoteParent starts a new child span of the span from the given parent.
-//
-// If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is
-// preferred for cases where the parent is propagated via an incoming request.
-//
-// Returned context contains the newly created span. You can use it to
-// propagate the returned span in process.
-func (t *tracer) StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) {
- var opts StartOptions
- for _, op := range o {
- op(&opts)
- }
- span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts)
- ctx, end := startExecutionTracerTask(ctx, name)
- span.executionTracerTaskEnd = end
- extSpan := NewSpan(span)
- return t.NewContext(ctx, extSpan), extSpan
-}
-
-func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *span {
- s := &span{}
- s.spanContext = parent
-
- cfg := config.Load().(*Config)
- if gen, ok := cfg.IDGenerator.(*defaultIDGenerator); ok {
- // lazy initialization
- gen.init()
- }
-
- if !hasParent {
- s.spanContext.TraceID = cfg.IDGenerator.NewTraceID()
- }
- s.spanContext.SpanID = cfg.IDGenerator.NewSpanID()
- sampler := cfg.DefaultSampler
-
- if !hasParent || remoteParent || o.Sampler != nil {
- // If this span is the child of a local span and no Sampler is set in the
- // options, keep the parent's TraceOptions.
- //
- // Otherwise, consult the Sampler in the options if it is non-nil, otherwise
- // the default sampler.
- if o.Sampler != nil {
- sampler = o.Sampler
- }
- s.spanContext.setIsSampled(sampler(SamplingParameters{
- ParentContext: parent,
- TraceID: s.spanContext.TraceID,
- SpanID: s.spanContext.SpanID,
- Name: name,
- HasRemoteParent: remoteParent}).Sample)
- }
-
- if !internal.LocalSpanStoreEnabled && !s.spanContext.IsSampled() {
- return s
- }
-
- s.data = &SpanData{
- SpanContext: s.spanContext,
- StartTime: time.Now(),
- SpanKind: o.SpanKind,
- Name: name,
- HasRemoteParent: remoteParent,
- }
- s.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan)
- s.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan)
- s.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan)
- s.links = newEvictedQueue(cfg.MaxLinksPerSpan)
-
- if hasParent {
- s.data.ParentSpanID = parent.SpanID
- }
- if internal.LocalSpanStoreEnabled {
- var ss *spanStore
- ss = spanStoreForNameCreateIfNew(name)
- if ss != nil {
- s.spanStore = ss
- ss.add(s)
- }
- }
-
- return s
-}
-
-// End ends the span.
-func (s *span) End() {
- if s == nil {
- return
- }
- if s.executionTracerTaskEnd != nil {
- s.executionTracerTaskEnd()
- }
- if !s.IsRecordingEvents() {
- return
- }
- s.endOnce.Do(func() {
- exp, _ := exporters.Load().(exportersMap)
- mustExport := s.spanContext.IsSampled() && len(exp) > 0
- if s.spanStore != nil || mustExport {
- sd := s.makeSpanData()
- sd.EndTime = internal.MonotonicEndTime(sd.StartTime)
- if s.spanStore != nil {
- s.spanStore.finished(s, sd)
- }
- if mustExport {
- for e := range exp {
- e.ExportSpan(sd)
- }
- }
- }
- })
-}
-
-// makeSpanData produces a SpanData representing the current state of the Span.
-// It requires that s.data is non-nil.
-func (s *span) makeSpanData() *SpanData {
- var sd SpanData
- s.mu.Lock()
- sd = *s.data
- if s.lruAttributes.len() > 0 {
- sd.Attributes = s.lruAttributesToAttributeMap()
- sd.DroppedAttributeCount = s.lruAttributes.droppedCount
- }
- if len(s.annotations.queue) > 0 {
- sd.Annotations = s.interfaceArrayToAnnotationArray()
- sd.DroppedAnnotationCount = s.annotations.droppedCount
- }
- if len(s.messageEvents.queue) > 0 {
- sd.MessageEvents = s.interfaceArrayToMessageEventArray()
- sd.DroppedMessageEventCount = s.messageEvents.droppedCount
- }
- if len(s.links.queue) > 0 {
- sd.Links = s.interfaceArrayToLinksArray()
- sd.DroppedLinkCount = s.links.droppedCount
- }
- s.mu.Unlock()
- return &sd
-}
-
-// SpanContext returns the SpanContext of the span.
-func (s *span) SpanContext() SpanContext {
- if s == nil {
- return SpanContext{}
- }
- return s.spanContext
-}
-
-// SetName sets the name of the span, if it is recording events.
-func (s *span) SetName(name string) {
- if !s.IsRecordingEvents() {
- return
- }
- s.mu.Lock()
- s.data.Name = name
- s.mu.Unlock()
-}
-
-// SetStatus sets the status of the span, if it is recording events.
-func (s *span) SetStatus(status Status) {
- if !s.IsRecordingEvents() {
- return
- }
- s.mu.Lock()
- s.data.Status = status
- s.mu.Unlock()
-}
-
-func (s *span) interfaceArrayToLinksArray() []Link {
- linksArr := make([]Link, 0, len(s.links.queue))
- for _, value := range s.links.queue {
- linksArr = append(linksArr, value.(Link))
- }
- return linksArr
-}
-
-func (s *span) interfaceArrayToMessageEventArray() []MessageEvent {
- messageEventArr := make([]MessageEvent, 0, len(s.messageEvents.queue))
- for _, value := range s.messageEvents.queue {
- messageEventArr = append(messageEventArr, value.(MessageEvent))
- }
- return messageEventArr
-}
-
-func (s *span) interfaceArrayToAnnotationArray() []Annotation {
- annotationArr := make([]Annotation, 0, len(s.annotations.queue))
- for _, value := range s.annotations.queue {
- annotationArr = append(annotationArr, value.(Annotation))
- }
- return annotationArr
-}
-
-func (s *span) lruAttributesToAttributeMap() map[string]interface{} {
- attributes := make(map[string]interface{}, s.lruAttributes.len())
- for _, key := range s.lruAttributes.keys() {
- value, ok := s.lruAttributes.get(key)
- if ok {
- keyStr := key.(string)
- attributes[keyStr] = value
- }
- }
- return attributes
-}
-
-func (s *span) copyToCappedAttributes(attributes []Attribute) {
- for _, a := range attributes {
- s.lruAttributes.add(a.key, a.value)
- }
-}
-
-func (s *span) addChild() {
- if !s.IsRecordingEvents() {
- return
- }
- s.mu.Lock()
- s.data.ChildSpanCount++
- s.mu.Unlock()
-}
-
-// AddAttributes sets attributes in the span.
-//
-// Existing attributes whose keys appear in the attributes parameter are overwritten.
-func (s *span) AddAttributes(attributes ...Attribute) {
- if !s.IsRecordingEvents() {
- return
- }
- s.mu.Lock()
- s.copyToCappedAttributes(attributes)
- s.mu.Unlock()
-}
-
-func (s *span) printStringInternal(attributes []Attribute, str string) {
- now := time.Now()
- var am map[string]interface{}
- if len(attributes) != 0 {
- am = make(map[string]interface{}, len(attributes))
- for _, attr := range attributes {
- am[attr.key] = attr.value
- }
- }
- s.mu.Lock()
- s.annotations.add(Annotation{
- Time: now,
- Message: str,
- Attributes: am,
- })
- s.mu.Unlock()
-}
-
-// Annotate adds an annotation with attributes.
-// Attributes can be nil.
-func (s *span) Annotate(attributes []Attribute, str string) {
- if !s.IsRecordingEvents() {
- return
- }
- s.printStringInternal(attributes, str)
-}
-
-// Annotatef adds an annotation with attributes.
-func (s *span) Annotatef(attributes []Attribute, format string, a ...interface{}) {
- if !s.IsRecordingEvents() {
- return
- }
- s.printStringInternal(attributes, fmt.Sprintf(format, a...))
-}
-
-// AddMessageSendEvent adds a message send event to the span.
-//
-// messageID is an identifier for the message, which is recommended to be
-// unique in this span and the same between the send event and the receive
-// event (this allows to identify a message between the sender and receiver).
-// For example, this could be a sequence id.
-func (s *span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
- if !s.IsRecordingEvents() {
- return
- }
- now := time.Now()
- s.mu.Lock()
- s.messageEvents.add(MessageEvent{
- Time: now,
- EventType: MessageEventTypeSent,
- MessageID: messageID,
- UncompressedByteSize: uncompressedByteSize,
- CompressedByteSize: compressedByteSize,
- })
- s.mu.Unlock()
-}
-
-// AddMessageReceiveEvent adds a message receive event to the span.
-//
-// messageID is an identifier for the message, which is recommended to be
-// unique in this span and the same between the send event and the receive
-// event (this allows to identify a message between the sender and receiver).
-// For example, this could be a sequence id.
-func (s *span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
- if !s.IsRecordingEvents() {
- return
- }
- now := time.Now()
- s.mu.Lock()
- s.messageEvents.add(MessageEvent{
- Time: now,
- EventType: MessageEventTypeRecv,
- MessageID: messageID,
- UncompressedByteSize: uncompressedByteSize,
- CompressedByteSize: compressedByteSize,
- })
- s.mu.Unlock()
-}
-
-// AddLink adds a link to the span.
-func (s *span) AddLink(l Link) {
- if !s.IsRecordingEvents() {
- return
- }
- s.mu.Lock()
- s.links.add(l)
- s.mu.Unlock()
-}
-
-func (s *span) String() string {
- if s == nil {
- return "<nil>"
- }
- if s.data == nil {
- return fmt.Sprintf("span %s", s.spanContext.SpanID)
- }
- s.mu.Lock()
- str := fmt.Sprintf("span %s %q", s.spanContext.SpanID, s.data.Name)
- s.mu.Unlock()
- return str
-}
-
-var config atomic.Value // access atomically
-
-func init() {
- config.Store(&Config{
- DefaultSampler: ProbabilitySampler(defaultSamplingProbability),
- IDGenerator: &defaultIDGenerator{},
- MaxAttributesPerSpan: DefaultMaxAttributesPerSpan,
- MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan,
- MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan,
- MaxLinksPerSpan: DefaultMaxLinksPerSpan,
- })
-}
-
-type defaultIDGenerator struct {
- sync.Mutex
-
- // Please keep these as the first fields
- // so that these 8 byte fields will be aligned on addresses
- // divisible by 8, on both 32-bit and 64-bit machines when
- // performing atomic increments and accesses.
- // See:
- // * https://github.com/census-instrumentation/opencensus-go/issues/587
- // * https://github.com/census-instrumentation/opencensus-go/issues/865
- // * https://golang.org/pkg/sync/atomic/#pkg-note-BUG
- nextSpanID uint64
- spanIDInc uint64
-
- traceIDAdd [2]uint64
- traceIDRand *rand.Rand
-
- initOnce sync.Once
-}
-
-// init initializes the generator on the first call to avoid consuming entropy
-// unnecessarily.
-func (gen *defaultIDGenerator) init() {
- gen.initOnce.Do(func() {
- // initialize traceID and spanID generators.
- var rngSeed int64
- for _, p := range []interface{}{
- &rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc,
- } {
- binary.Read(crand.Reader, binary.LittleEndian, p)
- }
- gen.traceIDRand = rand.New(rand.NewSource(rngSeed))
- gen.spanIDInc |= 1
- })
-}
-
-// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
-func (gen *defaultIDGenerator) NewSpanID() [8]byte {
- var id uint64
- for id == 0 {
- id = atomic.AddUint64(&gen.nextSpanID, gen.spanIDInc)
- }
- var sid [8]byte
- binary.LittleEndian.PutUint64(sid[:], id)
- return sid
-}
-
-// NewTraceID returns a non-zero trace ID from a randomly-chosen sequence.
-// mu should be held while this function is called.
-func (gen *defaultIDGenerator) NewTraceID() [16]byte {
- var tid [16]byte
- // Construct the trace ID from two outputs of traceIDRand, with a constant
- // added to each half for additional entropy.
- gen.Lock()
- binary.LittleEndian.PutUint64(tid[0:8], gen.traceIDRand.Uint64()+gen.traceIDAdd[0])
- binary.LittleEndian.PutUint64(tid[8:16], gen.traceIDRand.Uint64()+gen.traceIDAdd[1])
- gen.Unlock()
- return tid
-}
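For reference, a minimal sketch of how callers drove the span API that this deleted trace.go implemented. It assumes the pre-removal vendored go.opencensus.io/trace package is still importable; the span name and attribute values are illustrative only.

```go
package main

import (
	"context"
	"fmt"

	"go.opencensus.io/trace"
)

func main() {
	// Sample every span so the example always records events.
	trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})

	// StartSpan creates a new trace (there is no parent in the context)
	// and returns a context carrying the span for in-process propagation.
	ctx, span := trace.StartSpan(context.Background(), "example/op")
	defer span.End()

	// Attributes and annotations are only kept while the span records events.
	span.AddAttributes(trace.StringAttribute("component", "demo"))
	span.Annotate(nil, "work started")

	fmt.Println("sampled:", trace.FromContext(ctx).SpanContext().IsSampled())
}
```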
diff --git a/vendor/go.opencensus.io/trace/trace_api.go b/vendor/go.opencensus.io/trace/trace_api.go
deleted file mode 100644
index 9e2c3a999268c..0000000000000
--- a/vendor/go.opencensus.io/trace/trace_api.go
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright 2020, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "context"
-)
-
-// DefaultTracer is the tracer used when package-level exported functions are invoked.
-var DefaultTracer Tracer = &tracer{}
-
-// Tracer can start spans and access context functions.
-type Tracer interface {
-
- // StartSpan starts a new child span of the current span in the context. If
- // there is no span in the context, creates a new trace and span.
- //
- // Returned context contains the newly created span. You can use it to
- // propagate the returned span in process.
- StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span)
-
- // StartSpanWithRemoteParent starts a new child span of the span from the given parent.
- //
- // If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is
- // preferred for cases where the parent is propagated via an incoming request.
- //
- // Returned context contains the newly created span. You can use it to
- // propagate the returned span in process.
- StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span)
-
- // FromContext returns the Span stored in a context, or nil if there isn't one.
- FromContext(ctx context.Context) *Span
-
- // NewContext returns a new context with the given Span attached.
- NewContext(parent context.Context, s *Span) context.Context
-}
-
-// StartSpan starts a new child span of the current span in the context. If
-// there is no span in the context, creates a new trace and span.
-//
-// Returned context contains the newly created span. You can use it to
-// propagate the returned span in process.
-func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) {
- return DefaultTracer.StartSpan(ctx, name, o...)
-}
-
-// StartSpanWithRemoteParent starts a new child span of the span from the given parent.
-//
-// If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is
-// preferred for cases where the parent is propagated via an incoming request.
-//
-// Returned context contains the newly created span. You can use it to
-// propagate the returned span in process.
-func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) {
- return DefaultTracer.StartSpanWithRemoteParent(ctx, name, parent, o...)
-}
-
-// FromContext returns the Span stored in a context, or a Span that is not
-// recording events if there isn't one.
-func FromContext(ctx context.Context) *Span {
- return DefaultTracer.FromContext(ctx)
-}
-
-// NewContext returns a new context with the given Span attached.
-func NewContext(parent context.Context, s *Span) context.Context {
- return DefaultTracer.NewContext(parent, s)
-}
-
-// SpanInterface represents a span of a trace. It has an associated SpanContext, and
-// stores data accumulated while the span is active.
-//
-// Ideally users should interact with Spans by calling the functions in this
-// package that take a Context parameter.
-type SpanInterface interface {
-
- // IsRecordingEvents returns true if events are being recorded for this span.
- // Use this check to avoid computing expensive annotations when they will never
- // be used.
- IsRecordingEvents() bool
-
- // End ends the span.
- End()
-
- // SpanContext returns the SpanContext of the span.
- SpanContext() SpanContext
-
- // SetName sets the name of the span, if it is recording events.
- SetName(name string)
-
- // SetStatus sets the status of the span, if it is recording events.
- SetStatus(status Status)
-
- // AddAttributes sets attributes in the span.
- //
- // Existing attributes whose keys appear in the attributes parameter are overwritten.
- AddAttributes(attributes ...Attribute)
-
- // Annotate adds an annotation with attributes.
- // Attributes can be nil.
- Annotate(attributes []Attribute, str string)
-
- // Annotatef adds an annotation with attributes.
- Annotatef(attributes []Attribute, format string, a ...interface{})
-
- // AddMessageSendEvent adds a message send event to the span.
- //
- // messageID is an identifier for the message, which is recommended to be
- // unique in this span and the same between the send event and the receive
- // event (this allows to identify a message between the sender and receiver).
- // For example, this could be a sequence id.
- AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64)
-
- // AddMessageReceiveEvent adds a message receive event to the span.
- //
- // messageID is an identifier for the message, which is recommended to be
- // unique in this span and the same between the send event and the receive
- // event (this allows to identify a message between the sender and receiver).
- // For example, this could be a sequence id.
- AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64)
-
- // AddLink adds a link to the span.
- AddLink(l Link)
-
- // String prints a string representation of a span.
- String() string
-}
-
-// NewSpan is a convenience function for creating a *Span out of a *span
-func NewSpan(s SpanInterface) *Span {
- return &Span{internal: s}
-}
-
-// Span is a struct wrapper around the SpanInt interface, which allows correctly handling
-// nil spans, while also allowing the SpanInterface implementation to be swapped out.
-type Span struct {
- internal SpanInterface
-}
-
-// Internal returns the underlying implementation of the Span
-func (s *Span) Internal() SpanInterface {
- return s.internal
-}
-
-// IsRecordingEvents returns true if events are being recorded for this span.
-// Use this check to avoid computing expensive annotations when they will never
-// be used.
-func (s *Span) IsRecordingEvents() bool {
- if s == nil {
- return false
- }
- return s.internal.IsRecordingEvents()
-}
-
-// End ends the span.
-func (s *Span) End() {
- if s == nil {
- return
- }
- s.internal.End()
-}
-
-// SpanContext returns the SpanContext of the span.
-func (s *Span) SpanContext() SpanContext {
- if s == nil {
- return SpanContext{}
- }
- return s.internal.SpanContext()
-}
-
-// SetName sets the name of the span, if it is recording events.
-func (s *Span) SetName(name string) {
- if !s.IsRecordingEvents() {
- return
- }
- s.internal.SetName(name)
-}
-
-// SetStatus sets the status of the span, if it is recording events.
-func (s *Span) SetStatus(status Status) {
- if !s.IsRecordingEvents() {
- return
- }
- s.internal.SetStatus(status)
-}
-
-// AddAttributes sets attributes in the span.
-//
-// Existing attributes whose keys appear in the attributes parameter are overwritten.
-func (s *Span) AddAttributes(attributes ...Attribute) {
- if !s.IsRecordingEvents() {
- return
- }
- s.internal.AddAttributes(attributes...)
-}
-
-// Annotate adds an annotation with attributes.
-// Attributes can be nil.
-func (s *Span) Annotate(attributes []Attribute, str string) {
- if !s.IsRecordingEvents() {
- return
- }
- s.internal.Annotate(attributes, str)
-}
-
-// Annotatef adds an annotation with attributes.
-func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) {
- if !s.IsRecordingEvents() {
- return
- }
- s.internal.Annotatef(attributes, format, a...)
-}
-
-// AddMessageSendEvent adds a message send event to the span.
-//
-// messageID is an identifier for the message, which is recommended to be
-// unique in this span and the same between the send event and the receive
-// event (this allows to identify a message between the sender and receiver).
-// For example, this could be a sequence id.
-func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
- if !s.IsRecordingEvents() {
- return
- }
- s.internal.AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize)
-}
-
-// AddMessageReceiveEvent adds a message receive event to the span.
-//
-// messageID is an identifier for the message, which is recommended to be
-// unique in this span and the same between the send event and the receive
-// event (this allows to identify a message between the sender and receiver).
-// For example, this could be a sequence id.
-func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
- if !s.IsRecordingEvents() {
- return
- }
- s.internal.AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize)
-}
-
-// AddLink adds a link to the span.
-func (s *Span) AddLink(l Link) {
- if !s.IsRecordingEvents() {
- return
- }
- s.internal.AddLink(l)
-}
-
-// String prints a string representation of a span.
-func (s *Span) String() string {
- if s == nil {
- return "<nil>"
- }
- return s.internal.String()
-}
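The package-level helpers in this deleted trace_api.go delegate to DefaultTracer; a short sketch of the remote-parent entry point they exposed follows. The zero-value SpanContext here stands in for one that would normally be decoded from incoming request headers.

```go
package main

import (
	"context"

	"go.opencensus.io/trace"
)

// serve shows the server-side pattern: start a child of a remote parent,
// then let down-stack code recover the span from the context.
func serve(ctx context.Context, remote trace.SpanContext) {
	ctx, span := trace.StartSpanWithRemoteParent(ctx, "server/handle", remote)
	defer span.End()

	// FromContext returns the span stored by StartSpanWithRemoteParent,
	// or nil if none was attached.
	if s := trace.FromContext(ctx); s != nil {
		s.Annotate(nil, "request accepted")
	}
}

func main() {
	serve(context.Background(), trace.SpanContext{})
}
```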
diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go
deleted file mode 100644
index b8fc1e495a9c9..0000000000000
--- a/vendor/go.opencensus.io/trace/trace_go11.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.11
-// +build go1.11
-
-package trace
-
-import (
- "context"
- t "runtime/trace"
-)
-
-func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) {
- if !t.IsEnabled() {
- // Avoid additional overhead if
- // runtime/trace is not enabled.
- return ctx, func() {}
- }
- nctx, task := t.NewTask(ctx, name)
- return nctx, task.End
-}
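This go1.11 shim is a thin wrapper over the standard library; a minimal sketch of the runtime/trace calls it forwards to is below. Note the task is only recorded when runtime tracing is active (for example under `go test -trace` or after trace.Start), which is exactly why the shim short-circuits when tracing is disabled.

```go
package main

import (
	"context"
	"fmt"
	"runtime/trace"
)

func main() {
	ctx := context.Background()
	if !trace.IsEnabled() {
		// Mirrors the shim's fast path: avoid overhead when tracing is off.
		fmt.Println("runtime tracing disabled; task will be a no-op")
	}
	ctx, task := trace.NewTask(ctx, "example-task")
	defer task.End()
	trace.Log(ctx, "phase", "started")
}
```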
diff --git a/vendor/go.opencensus.io/trace/trace_nongo11.go b/vendor/go.opencensus.io/trace/trace_nongo11.go
deleted file mode 100644
index da488fc874014..0000000000000
--- a/vendor/go.opencensus.io/trace/trace_nongo11.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !go1.11
-// +build !go1.11
-
-package trace
-
-import (
- "context"
-)
-
-func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) {
- return ctx, func() {}
-}
diff --git a/vendor/go.opencensus.io/trace/tracestate/tracestate.go b/vendor/go.opencensus.io/trace/tracestate/tracestate.go
deleted file mode 100644
index 2d6c713eb3a19..0000000000000
--- a/vendor/go.opencensus.io/trace/tracestate/tracestate.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package tracestate implements support for the Tracestate header of the
-// W3C TraceContext propagation format.
-package tracestate
-
-import (
- "fmt"
- "regexp"
-)
-
-const (
- keyMaxSize = 256
- valueMaxSize = 256
- maxKeyValuePairs = 32
-)
-
-const (
- keyWithoutVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,255}`
- keyWithVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}`
- keyFormat = `(` + keyWithoutVendorFormat + `)|(` + keyWithVendorFormat + `)`
- valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]`
-)
-
-var keyValidationRegExp = regexp.MustCompile(`^(` + keyFormat + `)$`)
-var valueValidationRegExp = regexp.MustCompile(`^(` + valueFormat + `)$`)
-
-// Tracestate represents tracing-system specific context in a list of key-value pairs. Tracestate allows different
-// vendors propagate additional information and inter-operate with their legacy Id formats.
-type Tracestate struct {
- entries []Entry
-}
-
-// Entry represents one key-value pair in a list of key-value pair of Tracestate.
-type Entry struct {
- // Key is an opaque string up to 256 characters printable. It MUST begin with a lowercase letter,
- // and can only contain lowercase letters a-z, digits 0-9, underscores _, dashes -, asterisks *, and
- // forward slashes /.
- Key string
-
- // Value is an opaque string up to 256 characters printable ASCII RFC0020 characters (i.e., the
- // range 0x20 to 0x7E) except comma , and =.
- Value string
-}
-
-// Entries returns a slice of Entry.
-func (ts *Tracestate) Entries() []Entry {
- if ts == nil {
- return nil
- }
- return ts.entries
-}
-
-func (ts *Tracestate) remove(key string) *Entry {
- for index, entry := range ts.entries {
- if entry.Key == key {
- ts.entries = append(ts.entries[:index], ts.entries[index+1:]...)
- return &entry
- }
- }
- return nil
-}
-
-func (ts *Tracestate) add(entries []Entry) error {
- for _, entry := range entries {
- ts.remove(entry.Key)
- }
- if len(ts.entries)+len(entries) > maxKeyValuePairs {
- return fmt.Errorf("adding %d key-value pairs to current %d pairs exceeds the limit of %d",
- len(entries), len(ts.entries), maxKeyValuePairs)
- }
- ts.entries = append(entries, ts.entries...)
- return nil
-}
-
-func isValid(entry Entry) bool {
- return keyValidationRegExp.MatchString(entry.Key) &&
- valueValidationRegExp.MatchString(entry.Value)
-}
-
-func containsDuplicateKey(entries ...Entry) (string, bool) {
- keyMap := make(map[string]int)
- for _, entry := range entries {
- if _, ok := keyMap[entry.Key]; ok {
- return entry.Key, true
- }
- keyMap[entry.Key] = 1
- }
- return "", false
-}
-
-func areEntriesValid(entries ...Entry) (*Entry, bool) {
- for _, entry := range entries {
- if !isValid(entry) {
- return &entry, false
- }
- }
- return nil, true
-}
-
-// New creates a Tracestate object from a parent and/or entries (key-value pair).
-// Entries from the parent are copied if present. The entries passed to this function
-// are inserted in front of those copied from the parent. If an entry copied from the
-// parent contains the same key as one of the entry in entries then the entry copied
-// from the parent is removed. See add func.
-//
-// An error is returned with nil Tracestate if
-// 1. one or more entry in entries is invalid.
-// 2. two or more entries in the input entries have the same key.
-// 3. the number of entries combined from the parent and the input entries exceeds maxKeyValuePairs.
-// (duplicate entry is counted only once).
-func New(parent *Tracestate, entries ...Entry) (*Tracestate, error) {
- if parent == nil && len(entries) == 0 {
- return nil, nil
- }
- if entry, ok := areEntriesValid(entries...); !ok {
- return nil, fmt.Errorf("key-value pair {%s, %s} is invalid", entry.Key, entry.Value)
- }
-
- if key, duplicate := containsDuplicateKey(entries...); duplicate {
- return nil, fmt.Errorf("contains duplicate keys (%s)", key)
- }
-
- tracestate := Tracestate{}
-
- if parent != nil && len(parent.entries) > 0 {
- tracestate.entries = append([]Entry{}, parent.entries...)
- }
-
- err := tracestate.add(entries)
- if err != nil {
- return nil, err
- }
- return &tracestate, nil
-}
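A minimal sketch of this deleted tracestate package in use: New validates keys and values against the W3C grammar shown above and rejects duplicates, and entries passed in end up in front of any copied from the parent (nil here). The key and value are made up but grammar-valid.

```go
package main

import (
	"fmt"

	"go.opencensus.io/trace/tracestate"
)

func main() {
	// One vendor-specific entry, no parent Tracestate to inherit from.
	ts, err := tracestate.New(nil, tracestate.Entry{Key: "vendorname", Value: "opaque-value"})
	if err != nil {
		panic(err)
	}
	for _, e := range ts.Entries() {
		fmt.Printf("%s=%s\n", e.Key, e.Value)
	}
}
```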
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 8cb3bc18275d2..1c5ff23f65a89 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,7 +1,7 @@
# cel.dev/expr v0.16.1
## explicit; go 1.18
cel.dev/expr
-# cloud.google.com/go v0.116.0
+# cloud.google.com/go v0.117.0
## explicit; go 1.21
cloud.google.com/go
cloud.google.com/go/internal
@@ -30,7 +30,7 @@ cloud.google.com/go/auth/internal/transport/cert
# cloud.google.com/go/auth/oauth2adapt v0.2.6
## explicit; go 1.21
cloud.google.com/go/auth/oauth2adapt
-# cloud.google.com/go/bigtable v1.33.0
+# cloud.google.com/go/bigtable v1.34.0
## explicit; go 1.21
cloud.google.com/go/bigtable
cloud.google.com/go/bigtable/admin/apiv2/adminpb
@@ -880,9 +880,6 @@ github.com/golang-jwt/jwt/v4
# github.com/golang-jwt/jwt/v5 v5.2.1
## explicit; go 1.18
github.com/golang-jwt/jwt/v5
-# github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
-## explicit
-github.com/golang/groupcache/lru
# github.com/golang/protobuf v1.5.4
## explicit; go 1.17
github.com/golang/protobuf/proto
@@ -1770,8 +1767,6 @@ go.mongodb.org/mongo-driver/bson/primitive
go.mongodb.org/mongo-driver/x/bsonx/bsoncore
# go.opencensus.io v0.24.0
## explicit; go 1.13
-go.opencensus.io
-go.opencensus.io/internal
go.opencensus.io/internal/tagencoding
go.opencensus.io/metric/metricdata
go.opencensus.io/metric/metricproducer
@@ -1780,9 +1775,6 @@ go.opencensus.io/stats
go.opencensus.io/stats/internal
go.opencensus.io/stats/view
go.opencensus.io/tag
-go.opencensus.io/trace
-go.opencensus.io/trace/internal
-go.opencensus.io/trace/tracestate
# go.opentelemetry.io/auto/sdk v1.1.0
## explicit; go 1.22.0
go.opentelemetry.io/auto/sdk
|
fix
|
update module cloud.google.com/go/bigtable to v1.34.0 (#15581)
|
bf5a25dd5bf4be45ea47f7b286a195a0bffddc39
|
2022-08-27 01:06:01
|
Ed Welch
|
docs: update `split_queries_by_interval` documentation to have the correct explanation for how to disable. (#6715)
| false
|
diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md
index 9864d9289ee7c..6f51918b82e91 100644
--- a/docs/sources/configuration/_index.md
+++ b/docs/sources/configuration/_index.md
@@ -2467,7 +2467,7 @@ The `limits_config` block configures global and per-tenant limits in Loki.
# CLI flag: -frontend.min-sharding-lookback
[min_sharding_lookback: <duration> | default = 0s]
-# Split queries by an interval and execute in parallel, any value less than zero disables it.
+# Split queries by a time interval and execute in parallel. The value 0 disables splitting by time.
# This also determines how cache keys are chosen when result caching is enabled
# CLI flag: -querier.split-queries-by-interval
[split_queries_by_interval: <duration> | default = 30m]
|
docs
|
update `split_queries_by_interval` documentation to have the correct explanation for how to disable. (#6715)
|
f90f6489c4ed1ec5f1fee110d73bd44e88376956
|
2024-11-16 01:45:53
|
Robert Jacob
|
chore: remove submodule accidentally added to repo (#14883)
| false
|
diff --git a/_shared-workflows-dockerhub-login b/_shared-workflows-dockerhub-login
deleted file mode 160000
index e34b275767e9a..0000000000000
--- a/_shared-workflows-dockerhub-login
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit e34b275767e9a075ed07c25ef0173dacf5fd4ca6
|
chore
|
remove submodule accidentally added to repo (#14883)
|
d6f29fc789760318b048d005e14c91eba748b45e
|
2024-05-21 22:04:42
|
Vitor Gomes
|
docs: update otlp ingestion with correct endpoint and add endpoint to reference api docs (#12996)
| false
|
diff --git a/docs/sources/reference/loki-http-api.md b/docs/sources/reference/loki-http-api.md
index 1f85a4b48bbca..e72a03ba51b0a 100644
--- a/docs/sources/reference/loki-http-api.md
+++ b/docs/sources/reference/loki-http-api.md
@@ -24,6 +24,7 @@ Authorization needs to be done separately, for example, using an open-source loa
These endpoints are exposed by the `distributor`, `write`, and `all` components:
- [`POST /loki/api/v1/push`](#ingest-logs)
+- [`POST /otlp/v1/logs`](#ingest-logs-using-otlp)
A [list of clients]({{< relref "../send-data" >}}) can be found in the clients documentation.
@@ -260,6 +261,16 @@ curl -H "Content-Type: application/json" \
--data-raw '{"streams": [{ "stream": { "foo": "bar2" }, "values": [ [ "1570818238000000000", "fizzbuzz" ] ] }]}'
```
+## Ingest logs using OTLP
+
+```bash
+POST /otlp/v1/logs
+```
+
+`/otlp/v1/logs` lets the OpenTelemetry Collector send logs to Loki using the `otlphttp` protocol.
+
+For information on how to configure Loki, refer to the [OTel Collector topic](https://grafana.com/docs/loki/<LOKI_VERSION>/send-data/otel/).
+
## Query logs at a single point in time
```bash
diff --git a/docs/sources/send-data/otel/_index.md b/docs/sources/send-data/otel/_index.md
index 27d092a81c095..b7a67fcb14d06 100644
--- a/docs/sources/send-data/otel/_index.md
+++ b/docs/sources/send-data/otel/_index.md
@@ -30,7 +30,7 @@ You need to make the following changes to the [OpenTelemetry Collector config](h
```yaml
exporters:
otlphttp:
- endpoint: http://<loki-addr>:3100/otlp
+ endpoint: http://<loki-addr>:3100/otlp/v1/logs
```
And enable it in `service.pipelines`:
@@ -57,7 +57,7 @@ exporters:
otlphttp:
auth:
authenticator: basicauth/otlp
- endpoint: http://<loki-addr>:3100/otlp
+ endpoint: http://<loki-addr>:3100/otlp/v1/logs
service:
extensions: [basicauth/otlp]
|
docs
|
update otlp ingestion with correct endpoint and add endpoint to reference api docs (#12996)
|
f3ce6890cbf839bc20d9e53c3625a791d3648754
|
2025-02-19 17:18:42
|
Joao Marcal
|
chore(operator): fix .golangci.yaml config file (#16368)
| false
|
diff --git a/operator/.golangci.yaml b/operator/.golangci.yaml
index 250fca56e3ec3..924449463fdef 100644
--- a/operator/.golangci.yaml
+++ b/operator/.golangci.yaml
@@ -1,6 +1,6 @@
---
# golangci.com configuration
-# https://github.com/golangci/golangci/wiki/Configuration
+# https://golangci-lint.run/usage/configuration/
linters-settings:
copyloopvar:
check-alias: true
@@ -12,9 +12,8 @@ linters-settings:
- blank
- dot
govet:
- shadow: true
- maligned:
- suggest-new: true
+ enable:
+ - shadow
misspell:
locale: US
revive:
@@ -43,8 +42,3 @@ linters:
issues:
exclude-use-default: false
- exclude-rules:
- # - text: "could be of size"
- # path: api/v1beta1/lokistack_types.go
- # linters:
- # - maligned
diff --git a/operator/Makefile b/operator/Makefile
index 81416e7c6f774..ddc475e0c136f 100644
--- a/operator/Makefile
+++ b/operator/Makefile
@@ -177,6 +177,7 @@ scorecard: generate go-generate bundle-all ## Run scorecard tests for all bundle
.PHONY: lint
lint: $(GOLANGCI_LINT) | generate ## Run golangci-lint on source code.
+ $(GOLANGCI_LINT) config verify
$(GOLANGCI_LINT) run --timeout=5m ./...
.PHONY: lint-fix
|
chore
|
fix .golangci.yaml config file (#16368)
|
c95fc9d5f9d1fd1737d39d3a9fc43dbf483db757
|
2024-11-13 19:24:57
|
renovate[bot]
|
fix(deps): update module github.com/baidubce/bce-sdk-go to v0.9.200 (#14886)
| false
|
diff --git a/go.mod b/go.mod
index f7dc0a16d2aeb..f9e6fde942316 100644
--- a/go.mod
+++ b/go.mod
@@ -19,7 +19,7 @@ require (
github.com/alicebob/miniredis/v2 v2.33.0
github.com/aliyun/aliyun-oss-go-sdk v2.2.10+incompatible
github.com/aws/aws-sdk-go v1.55.5
- github.com/baidubce/bce-sdk-go v0.9.197
+ github.com/baidubce/bce-sdk-go v0.9.200
github.com/bmatcuk/doublestar v1.3.4
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500
github.com/cespare/xxhash v1.1.0
diff --git a/go.sum b/go.sum
index c4168082c3d0b..db005a6b6cc80 100644
--- a/go.sum
+++ b/go.sum
@@ -1002,8 +1002,8 @@ github.com/aws/smithy-go v1.11.1 h1:IQ+lPZVkSM3FRtyaDox41R8YS6iwPMYIreejOgPW49g=
github.com/aws/smithy-go v1.11.1/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM=
github.com/axiomhq/hyperloglog v0.2.0 h1:u1XT3yyY1rjzlWuP6NQIrV4bRYHOaqZaovqjcBEvZJo=
github.com/axiomhq/hyperloglog v0.2.0/go.mod h1:GcgMjz9gaDKZ3G0UMS6Fq/VkZ4l7uGgcJyxA7M+omIM=
-github.com/baidubce/bce-sdk-go v0.9.197 h1:TQqa4J+FTagrywhaTQ707ffE1eG3ix1s06eSZ/K+Wk0=
-github.com/baidubce/bce-sdk-go v0.9.197/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
+github.com/baidubce/bce-sdk-go v0.9.200 h1:zF3yuKp/wkKZhutCZYl5HtIZJPziWsPEu1kxHEyOaWI=
+github.com/baidubce/bce-sdk-go v0.9.200/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
diff --git a/nix/packages/loki.nix b/nix/packages/loki.nix
index cbcb68d0a304a..8d3ffa54e4685 100644
--- a/nix/packages/loki.nix
+++ b/nix/packages/loki.nix
@@ -5,7 +5,7 @@ let
pname = "lambda-promtail";
src = ./../../tools/lambda-promtail;
- vendorHash = "sha256-fG+AJfniGJy9fpD2Jluh7N2FDYt9LCjXCVoWktuvus0=";
+ vendorHash = "sha256-nyd70qrG0fCB9gWiNRBMK8v9T3aMQ+30KlvgTR1bJUk=";
doCheck = false;
diff --git a/vendor/github.com/baidubce/bce-sdk-go/bce/client.go b/vendor/github.com/baidubce/bce-sdk-go/bce/client.go
index d223a0987fc66..d8ea23dbe5fe6 100644
--- a/vendor/github.com/baidubce/bce-sdk-go/bce/client.go
+++ b/vendor/github.com/baidubce/bce-sdk-go/bce/client.go
@@ -16,24 +16,24 @@
// Package bce implements the infrastructure to access BCE services.
//
-// - BceClient:
+// - BceClient:
// It is the general client of BCE to access all services. It builds http request to access the
// services based on the given client configuration.
//
-// - BceClientConfiguration:
+// - BceClientConfiguration:
// The client configuration data structure which contains endpoint, region, credentials, retry
// policy, sign options and so on. It supports most of the default value and user can also
// access or change the default with its public fields' name.
//
-// - Error types:
+// - Error types:
// The error types when making request or receiving response to the BCE services contains two
// types: the BceClientError when making request to BCE services and the BceServiceError when
// recieving response from them.
//
-// - BceRequest:
+// - BceRequest:
// The request instance stands for an request to access the BCE services.
//
-// - BceResponse:
+// - BceResponse:
// The response instance stands for an response from the BCE services.
package bce
@@ -67,7 +67,7 @@ type BceClient struct {
// BuildHttpRequest - the helper method for the client to build http request
//
// PARAMS:
-// - request: the input request object to be built
+// - request: the input request object to be built
func (c *BceClient) buildHttpRequest(request *BceRequest) {
// Construct the http request instance for the special fields
request.BuildHttpRequest()
@@ -104,10 +104,11 @@ func (c *BceClient) buildHttpRequest(request *BceRequest) {
// response from the BCE services.
//
// PARAMS:
-// - req: the request object to be sent to the BCE service
-// - resp: the response object to receive the content from BCE service
+// - req: the request object to be sent to the BCE service
+// - resp: the response object to receive the content from BCE service
+//
// RETURNS:
-// - error: nil if ok otherwise the specific error
+// - error: nil if ok otherwise the specific error
func (c *BceClient) SendRequest(req *BceRequest, resp *BceResponse) error {
// Return client error if it is not nil
if req.ClientError() != nil {
@@ -158,7 +159,7 @@ func (c *BceClient) SendRequest(req *BceRequest, resp *BceResponse) error {
if resp.ElapsedTime().Milliseconds() > DEFAULT_WARN_LOG_TIMEOUT_IN_MILLS {
log.Warnf("request time more than 5 second, debugId: %s, requestId: %s, elapsed: %v",
- resp.DebugId(), resp.RequestId(), resp.ElapsedTime())
+ resp.DebugId(), resp.RequestId(), resp.ElapsedTime())
}
for k, v := range resp.Headers() {
log.Debugf("%s=%s", k, v)
@@ -187,11 +188,12 @@ func (c *BceClient) SendRequest(req *BceRequest, resp *BceResponse) error {
// response from the BCE services.
//
// PARAMS:
-// - req: the request object to be sent to the BCE service
-// - resp: the response object to receive the content from BCE service
-// - content: the content of body
+// - req: the request object to be sent to the BCE service
+// - resp: the response object to receive the content from BCE service
+// - content: the content of body
+//
// RETURNS:
-// - error: nil if ok otherwise the specific error
+// - error: nil if ok otherwise the specific error
func (c *BceClient) SendRequestFromBytes(req *BceRequest, resp *BceResponse, content []byte) error {
// Return client error if it is not nil
if req.ClientError() != nil {
@@ -263,12 +265,12 @@ func NewBceClientWithAkSk(ak, sk, endPoint string) (*BceClient, error) {
HeadersToSign: auth.DEFAULT_HEADERS_TO_SIGN,
ExpireSeconds: auth.DEFAULT_EXPIRE_SECONDS}
defaultConf := &BceClientConfiguration{
- Endpoint: endPoint,
- Region: DEFAULT_REGION,
- UserAgent: DEFAULT_USER_AGENT,
- Credentials: credentials,
- SignOption: defaultSignOptions,
- Retry: DEFAULT_RETRY_POLICY,
+ Endpoint: endPoint,
+ Region: DEFAULT_REGION,
+ UserAgent: DEFAULT_USER_AGENT,
+ Credentials: credentials,
+ SignOption: defaultSignOptions,
+ Retry: DEFAULT_RETRY_POLICY,
ConnectionTimeoutInMillis: DEFAULT_CONNECTION_TIMEOUT_IN_MILLIS,
RedirectDisabled: false}
v1Signer := &auth.BceV1Signer{}
diff --git a/vendor/github.com/baidubce/bce-sdk-go/bce/config.go b/vendor/github.com/baidubce/bce-sdk-go/bce/config.go
index b0c7617569cbb..57c2d025eb17f 100644
--- a/vendor/github.com/baidubce/bce-sdk-go/bce/config.go
+++ b/vendor/github.com/baidubce/bce-sdk-go/bce/config.go
@@ -26,7 +26,7 @@ import (
// Constants and default values for the package bce
const (
- SDK_VERSION = "0.9.197"
+ SDK_VERSION = "0.9.200"
URI_PREFIX = "/" // now support uri without prefix "v1" so just set root path
DEFAULT_DOMAIN = "baidubce.com"
DEFAULT_PROTOCOL = "http"
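A minimal sketch of constructing the client described by the reformatted package comment above, using the NewBceClientWithAkSk constructor shown in this diff. The credentials and endpoint are placeholders, and the access to the exported Config field follows the vendored source.

```go
package main

import (
	"fmt"

	"github.com/baidubce/bce-sdk-go/bce"
)

func main() {
	// Placeholders: real AK/SK credentials and a regional endpoint
	// would come from configuration.
	client, err := bce.NewBceClientWithAkSk("<access-key>", "<secret-key>", "bj.bcebos.com")
	if err != nil {
		panic(err)
	}
	// The constructor wires the default sign options, retry policy and
	// connection timeout described in the package comment.
	fmt.Printf("configured endpoint: %s\n", client.Config.Endpoint)
}
```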
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 138d2b05f430d..4ec13cfc55d76 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -490,7 +490,7 @@ github.com/aws/smithy-go/transport/http/internal/io
# github.com/axiomhq/hyperloglog v0.2.0
## explicit; go 1.21
github.com/axiomhq/hyperloglog
-# github.com/baidubce/bce-sdk-go v0.9.197
+# github.com/baidubce/bce-sdk-go v0.9.200
## explicit; go 1.11
github.com/baidubce/bce-sdk-go/auth
github.com/baidubce/bce-sdk-go/bce
|
fix
|
update module github.com/baidubce/bce-sdk-go to v0.9.200 (#14886)
|
c128aa819e4427c5bd7e112cef1a4952c8379ef8
|
2025-02-26 03:08:41
|
renovate[bot]
|
fix(deps): update module github.com/prometheus/client_golang to v1.21.0 (main) (#16446)
| false
|
diff --git a/go.mod b/go.mod
index c87319a8b65ec..18d636790564c 100644
--- a/go.mod
+++ b/go.mod
@@ -83,7 +83,7 @@ require (
// github.com/pierrec/lz4 v2.0.5+incompatible
github.com/pierrec/lz4/v4 v4.1.22
github.com/pkg/errors v0.9.1
- github.com/prometheus/client_golang v1.21.0-rc.0
+ github.com/prometheus/client_golang v1.21.0
github.com/prometheus/client_model v0.6.1
github.com/prometheus/common v0.62.0
github.com/prometheus/prometheus v0.302.0
diff --git a/go.sum b/go.sum
index cf30576dd1c5d..2a61bdadc30f2 100644
--- a/go.sum
+++ b/go.sum
@@ -1022,8 +1022,8 @@ github.com/prometheus/client_golang v1.6.1-0.20200604110148-03575cad4e55/go.mod
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.21.0-rc.0 h1:bR+RxBlwcr4q8hXkgSOA/J18j6n0/qH0Gb0DH+8c+RY=
-github.com/prometheus/client_golang v1.21.0-rc.0/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
+github.com/prometheus/client_golang v1.21.0 h1:DIsaGmiaBkSangBgMtWdNfxbMNdku5IK6iNhrEqWvdA=
+github.com/prometheus/client_golang v1.21.0/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 5830ed3e0b504..80326e6f4932f 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1476,7 +1476,7 @@ github.com/power-devops/perfstat
## explicit; go 1.22.0
github.com/prometheus/alertmanager/api/v2/models
github.com/prometheus/alertmanager/pkg/modtimevfs
-# github.com/prometheus/client_golang v1.21.0-rc.0
+# github.com/prometheus/client_golang v1.21.0
## explicit; go 1.21
github.com/prometheus/client_golang/api
github.com/prometheus/client_golang/api/prometheus/v1
|
fix
|
update module github.com/prometheus/client_golang to v1.21.0 (main) (#16446)
|
ed5ea97f679f4b418f4ddc9ec0ebc854b3fbc1bb
|
2024-11-07 19:27:27
|
George Robinson
|
chore: remove initialization of Ingester RF-1 (#14814)
| false
|
diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
index f9c92ea010c26..1418f01e21e4a 100644
--- a/pkg/loki/loki.go
+++ b/pkg/loki/loki.go
@@ -39,7 +39,6 @@ import (
"github.com/grafana/loki/v3/pkg/distributor"
"github.com/grafana/loki/v3/pkg/indexgateway"
"github.com/grafana/loki/v3/pkg/ingester"
- ingester_rf1 "github.com/grafana/loki/v3/pkg/ingester-rf1"
"github.com/grafana/loki/v3/pkg/ingester-rf1/metastore"
metastoreclient "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/client"
ingester_client "github.com/grafana/loki/v3/pkg/ingester/client"
@@ -355,7 +354,6 @@ type Loki struct {
TenantLimits validation.TenantLimits
distributor *distributor.Distributor
Ingester ingester.Interface
- IngesterRF1 ingester_rf1.Interface
PatternIngester *pattern.Ingester
PatternRingClient pattern.RingClient
Querier querier.Querier
@@ -639,15 +637,6 @@ func (t *Loki) readyHandler(sm *services.Manager, shutdownRequested *atomic.Bool
}
}
- // Ingester RF1 has a special check that makes sure that it was able to register into the ring,
- // and that all other ring entries are OK too.
- if t.IngesterRF1 != nil {
- if err := t.IngesterRF1.CheckReady(r.Context()); err != nil {
- http.Error(w, "RF-1 Ingester not ready: "+err.Error(), http.StatusServiceUnavailable)
- return
- }
- }
-
// Query Frontend has a special check that makes sure that a querier is attached before it signals
// itself as ready
if t.frontend != nil {
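The block removed above was one of several ordered checks in Loki's readiness handler. A minimal sketch of that pattern follows; the readyChecker interface and the handler wiring are hypothetical stand-ins, not Loki's actual types.

```go
package main

import (
	"context"
	"fmt"
	"net/http"
)

// readyChecker is a hypothetical stand-in for components such as the
// ingester that must verify ring membership before reporting ready.
type readyChecker interface {
	CheckReady(ctx context.Context) error
}

func readyHandler(checks map[string]readyChecker) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		for name, c := range checks {
			if c == nil {
				continue // component not enabled in this target
			}
			if err := c.CheckReady(r.Context()); err != nil {
				http.Error(w, fmt.Sprintf("%s not ready: %v", name, err), http.StatusServiceUnavailable)
				return
			}
		}
		w.WriteHeader(http.StatusOK)
	}
}

func main() {
	http.Handle("/ready", readyHandler(map[string]readyChecker{}))
}
```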
|
chore
|
remove initialization of Ingester RF-1 (#14814)
|
1550762f9a401295f70de346b539594af468851d
|
2022-07-07 15:46:15
|
Periklis Tsirakidis
|
operator: Bump loki.grafana.com/LokiStack from v1beta to v1 (#6474)
| false
|
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index 87dd55fbefd9b..a3c2c06c7f5f1 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -6,6 +6,7 @@
- [6531](https://github.com/grafana/loki/pull/6531) **periklis**: Use default interface_names for lokistack clusters (IPv6 Support)
- [6411](https://github.com/grafana/loki/pull/6478) **aminesnow**: Support TLS enabled lokistack-gateway for vanilla kubernetes deployments
- [6504](https://github.com/grafana/loki/pull/6504) **periklis**: Disable usage report on OpenShift
+- [6474](https://github.com/grafana/loki/pull/6474) **periklis**: Bump loki.grafana.com/LokiStack from v1beta to v1
- [6411](https://github.com/grafana/loki/pull/6411) **Red-GV**: Extend schema validation in LokiStack webhook
- [6334](https://github.com/grafana/loki/pull/6433) **periklis**: Move operator cli flags to component config
- [6224](https://github.com/grafana/loki/pull/6224) **periklis**: Add support for GRPC over TLS for Loki components
diff --git a/operator/Makefile b/operator/Makefile
index b539024b64f4d..1c86254693786 100644
--- a/operator/Makefile
+++ b/operator/Makefile
@@ -237,6 +237,15 @@ olm-deploy: olm-deploy-bundle olm-deploy-operator $(OPERATOR_SDK)
$(OPERATOR_SDK) run bundle -n $(LOKI_OPERATOR_NS) --install-mode AllNamespaces $(BUNDLE_IMG)
endif
+.PHONY: olm-upgrade
+ifeq ($(or $(findstring openshift-logging,$(IMG)),$(findstring openshift-logging,$(BUNDLE_IMG))),openshift-logging)
+olm-upgrade: ## Upgrade the operator bundle and the operator via OLM into a Kubernetes cluster selected via KUBECONFIG.
+ $(error Set variable REGISTRY_ORG to use a custom container registry org account for local development)
+else
+olm-upgrade: olm-deploy-bundle olm-deploy-operator $(OPERATOR_SDK)
+ $(OPERATOR_SDK) run bundle-upgrade -n $(LOKI_OPERATOR_NS) $(BUNDLE_IMG)
+endif
+
.PHONY: olm-undeploy
olm-undeploy: $(OPERATOR_SDK) ## Cleanup deployments of the operator bundle and the operator via OLM on an OpenShift cluster selected via KUBECONFIG.
$(OPERATOR_SDK) cleanup -n $(LOKI_OPERATOR_NS) loki-operator
diff --git a/operator/PROJECT b/operator/PROJECT
index 5f89760a68392..e4f9a6c6e3f38 100644
--- a/operator/PROJECT
+++ b/operator/PROJECT
@@ -18,6 +18,19 @@ resources:
kind: LokiStack
path: github.com/grafana/loki/operator/apis/loki/v1beta1
version: v1beta1
+- api:
+ crdVersion: v1
+ namespaced: true
+ controller: true
+ domain: grafana.com
+ group: loki
+ kind: LokiStack
+ path: github.com/grafana/loki/operator/apis/loki/v1
+ version: v1
+ webhooks:
+ conversion: true
+ validation: true
+ webhookVersion: v1
- api:
crdVersion: v1
namespaced: true
diff --git a/operator/apis/loki/v1/groupversion_info.go b/operator/apis/loki/v1/groupversion_info.go
new file mode 100644
index 0000000000000..62165d36a707a
--- /dev/null
+++ b/operator/apis/loki/v1/groupversion_info.go
@@ -0,0 +1,20 @@
+// Package v1 contains API Schema definitions for the loki v1 API group
+//+kubebuilder:object:generate=true
+//+groupName=loki.grafana.com
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+ // GroupVersion is group version used to register these objects
+ GroupVersion = schema.GroupVersion{Group: "loki.grafana.com", Version: "v1"}
+
+ // SchemeBuilder is used to add go types to the GroupVersionKind scheme
+ SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+
+ // AddToScheme adds the types in this group-version to the given scheme.
+ AddToScheme = SchemeBuilder.AddToScheme
+)
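A minimal sketch of consuming the AddToScheme helper this new file exports. The import path is taken from the PROJECT entry in this diff; registering into a controller-runtime manager is omitted, so this only shows decoding support being wired up.

```go
package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"

	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)

func main() {
	// Register core Kubernetes types plus the new loki.grafana.com/v1
	// group so clients can encode and decode LokiStack objects.
	scheme := runtime.NewScheme()
	if err := clientgoscheme.AddToScheme(scheme); err != nil {
		panic(err)
	}
	if err := lokiv1.AddToScheme(scheme); err != nil {
		panic(err)
	}
}
```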
diff --git a/operator/apis/loki/v1/lokistack_types.go b/operator/apis/loki/v1/lokistack_types.go
new file mode 100644
index 0000000000000..1a6a85c401a5e
--- /dev/null
+++ b/operator/apis/loki/v1/lokistack_types.go
@@ -0,0 +1,838 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
+// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
+
+// ManagementStateType defines the type for CR management states.
+//
+// +kubebuilder:validation:Enum=Managed;Unmanaged
+type ManagementStateType string
+
+const (
+ // ManagementStateManaged when the LokiStack custom resource should be
+ // reconciled by the operator.
+ ManagementStateManaged ManagementStateType = "Managed"
+
+ // ManagementStateUnmanaged when the LokiStack custom resource should not be
+ // reconciled by the operator.
+ ManagementStateUnmanaged ManagementStateType = "Unmanaged"
+)
+
+// LokiStackSizeType declares the type for loki cluster scale outs.
+//
+// +kubebuilder:validation:Enum="1x.extra-small";"1x.small";"1x.medium"
+type LokiStackSizeType string
+
+const (
+ // SizeOneXExtraSmall defines the size of a single Loki deployment
+ // with extra small resources/limits requirements and without HA support.
+ // This size is ultimately dedicated for development and demo purposes.
+ // DO NOT USE THIS IN PRODUCTION!
+ //
+ // FIXME: Add clear description of ingestion/query performance expectations.
+ SizeOneXExtraSmall LokiStackSizeType = "1x.extra-small"
+
+ // SizeOneXSmall defines the size of a single Loki deployment
+ // with small resources/limits requirements and HA support for all
+ // Loki components. This size is dedicated for setup **without** the
+ // requirement for single replication factor and auto-compaction.
+ //
+ // FIXME: Add clear description of ingestion/query performance expectations.
+ SizeOneXSmall LokiStackSizeType = "1x.small"
+
+ // SizeOneXMedium defines the size of a single Loki deployment
+ // with small resources/limits requirements and HA support for all
+ // Loki components. This size is dedicated for setup **with** the
+ // requirement for single replication factor and auto-compaction.
+ //
+ // FIXME: Add clear description of ingestion/query performance expectations.
+ SizeOneXMedium LokiStackSizeType = "1x.medium"
+)
+
+// SubjectKind is a kind of LokiStack Gateway RBAC subject.
+//
+// +kubebuilder:validation:Enum=user;group
+type SubjectKind string
+
+const (
+ // User represents a subject that is a user.
+ User SubjectKind = "user"
+ // Group represents a subject that is a group.
+ Group SubjectKind = "group"
+)
+
+// Subject represents a subject that has been bound to a role.
+type Subject struct {
+ Name string `json:"name"`
+ Kind SubjectKind `json:"kind"`
+}
+
+// RoleBindingsSpec binds a set of roles to a set of subjects.
+type RoleBindingsSpec struct {
+ Name string `json:"name"`
+ Subjects []Subject `json:"subjects"`
+ Roles []string `json:"roles"`
+}
+
+// PermissionType is a LokiStack Gateway RBAC permission.
+//
+// +kubebuilder:validation:Enum=read;write
+type PermissionType string
+
+const (
+ // Write gives access to write data to a tenant.
+ Write PermissionType = "write"
+ // Read gives access to read data from a tenant.
+ Read PermissionType = "read"
+)
+
+// RoleSpec describes a set of permissions to interact with a tenant.
+type RoleSpec struct {
+ Name string `json:"name"`
+ Resources []string `json:"resources"`
+ Tenants []string `json:"tenants"`
+ Permissions []PermissionType `json:"permissions"`
+}
+
+// OPASpec defines the opa configuration spec for lokiStack Gateway component.
+type OPASpec struct {
+ // URL defines the third-party endpoint for authorization.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="OpenPolicyAgent URL"
+ URL string `json:"url"`
+}
+
+// AuthorizationSpec defines the opa, role bindings and roles
+// configuration per tenant for lokiStack Gateway component.
+type AuthorizationSpec struct {
+ // OPA defines the spec for the third-party endpoint for tenant's authorization.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="OPA Configuration"
+ OPA *OPASpec `json:"opa"`
+ // Roles defines a set of permissions to interact with a tenant.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Static Roles"
+ Roles []RoleSpec `json:"roles"`
+ // RoleBindings defines configuration to bind a set of roles to a set of subjects.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Static Role Bindings"
+ RoleBindings []RoleBindingsSpec `json:"roleBindings"`
+}
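+
+// A minimal sketch of how RoleSpec and RoleBindingsSpec compose in static
+// mode; the tenant, role, and subject names below are illustrative
+// assumptions, not values from this change:
+//
+//	authz := AuthorizationSpec{
+//		Roles: []RoleSpec{{
+//			Name:        "read-write",
+//			Resources:   []string{"logs"},
+//			Tenants:     []string{"application"},
+//			Permissions: []PermissionType{Read, Write},
+//		}},
+//		RoleBindings: []RoleBindingsSpec{{
+//			Name:     "developers",
+//			Subjects: []Subject{{Name: "dev-group", Kind: Group}},
+//			Roles:    []string{"read-write"},
+//		}},
+//	}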
+
+// TenantSecretSpec is a secret reference containing name only
+// for a secret living in the same namespace as the LokiStack custom resource.
+type TenantSecretSpec struct {
+ // Name of a secret in the namespace configured for tenant secrets.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:io.kubernetes:Secret",displayName="Tenant Secret Name"
+ Name string `json:"name"`
+}
+
+// OIDCSpec defines the oidc configuration spec for lokiStack Gateway component.
+type OIDCSpec struct {
+ // Secret defines the spec for the clientID, clientSecret and issuerCAPath for tenant's authentication.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Tenant Secret"
+ Secret *TenantSecretSpec `json:"secret"`
+ // IssuerURL defines the URL for issuer.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Issuer URL"
+ IssuerURL string `json:"issuerURL"`
+ // RedirectURL defines the URL for redirect.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Redirect URL"
+ RedirectURL string `json:"redirectURL,omitempty"`
+ // Group claim field from ID Token
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ GroupClaim string `json:"groupClaim,omitempty"`
+ // User claim field from ID Token
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ UsernameClaim string `json:"usernameClaim,omitempty"`
+}
+
+// AuthenticationSpec defines the oidc configuration per tenant for lokiStack Gateway component.
+type AuthenticationSpec struct {
+ // TenantName defines the name of the tenant.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Tenant Name"
+ TenantName string `json:"tenantName"`
+ // TenantID defines the id of the tenant.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Tenant ID"
+ TenantID string `json:"tenantId"`
+ // OIDC defines the spec for the OIDC tenant's authentication.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="OIDC Configuration"
+ OIDC *OIDCSpec `json:"oidc"`
+}
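+
+// A sketch of a per-tenant authentication entry; the tenant name, ID, secret
+// name, and issuer URL are placeholders, not values from this change:
+//
+//	authn := AuthenticationSpec{
+//		TenantName: "application",
+//		TenantID:   "application-tenant-id",
+//		OIDC: &OIDCSpec{
+//			Secret:    &TenantSecretSpec{Name: "application-oidc"},
+//			IssuerURL: "https://issuer.example.com/realms/apps",
+//		},
+//	}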
+
+// ModeType is the authentication/authorization mode in which LokiStack Gateway will be configured.
+//
+// +kubebuilder:validation:Enum=static;dynamic;openshift-logging
+type ModeType string
+
+const (
+ // Static mode asserts the Authorization Spec's Roles and RoleBindings
+ // using an in-process OpenPolicyAgent Rego authorizer.
+ Static ModeType = "static"
+ // Dynamic mode delegates the authorization to a third-party OPA-compatible endpoint.
+ Dynamic ModeType = "dynamic"
+ // OpenshiftLogging mode provides fully automatic OpenShift in-cluster authentication and authorization support.
+ OpenshiftLogging ModeType = "openshift-logging"
+)
+
+// TenantsSpec defines the mode, authentication and authorization
+// configuration of the lokiStack gateway component.
+type TenantsSpec struct {
+ // Mode defines the mode in which lokistack-gateway component will be configured.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:default:=openshift-logging
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:static","urn:alm:descriptor:com.tectonic.ui:select:dynamic","urn:alm:descriptor:com.tectonic.ui:select:openshift-logging"},displayName="Mode"
+ Mode ModeType `json:"mode"`
+ // Authentication defines the lokistack-gateway component authentication configuration spec per tenant.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Authentication"
+ Authentication []AuthenticationSpec `json:"authentication,omitempty"`
+ // Authorization defines the lokistack-gateway component authorization configuration spec per tenant.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Authorization"
+ Authorization *AuthorizationSpec `json:"authorization,omitempty"`
+}
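+
+// Tying the pieces together, a static-mode TenantsSpec would wire the
+// authentication entry and authorization sketches above (names remain
+// illustrative):
+//
+//	tenants := TenantsSpec{
+//		Mode:           Static,
+//		Authentication: []AuthenticationSpec{authn},
+//		Authorization:  &authz,
+//	}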
+
+// LokiComponentSpec defines the requirements to configure scheduling
+// of each loki component individually.
+type LokiComponentSpec struct {
+ // Replicas defines the number of replica pods of the component.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:hidden"
+ Replicas int32 `json:"replicas,omitempty"`
+
+ // NodeSelector defines the labels required by a node to schedule
+ // the component onto it.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+ // Tolerations defines the tolerations required by a node to schedule
+ // the component onto it.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+}
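+
+// A sketch of pinning a component onto dedicated nodes via NodeSelector and
+// Tolerations; the label key and taint below are assumptions for
+// illustration:
+//
+//	ingester := LokiComponentSpec{
+//		Replicas:     3,
+//		NodeSelector: map[string]string{"node-role.kubernetes.io/infra": ""},
+//		Tolerations: []corev1.Toleration{{
+//			Key:      "node-role.kubernetes.io/infra",
+//			Operator: corev1.TolerationOpExists,
+//			Effect:   corev1.TaintEffectNoSchedule,
+//		}},
+//	}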
+
+// LokiTemplateSpec defines the template of all requirements to configure
+// scheduling of all Loki components to be deployed.
+type LokiTemplateSpec struct {
+
+ // Compactor defines the compaction component spec.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Compactor pods"
+ Compactor *LokiComponentSpec `json:"compactor,omitempty"`
+
+ // Distributor defines the distributor component spec.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Distributor pods"
+ Distributor *LokiComponentSpec `json:"distributor,omitempty"`
+
+ // Ingester defines the ingester component spec.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Ingester pods"
+ Ingester *LokiComponentSpec `json:"ingester,omitempty"`
+
+ // Querier defines the querier component spec.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Querier pods"
+ Querier *LokiComponentSpec `json:"querier,omitempty"`
+
+ // QueryFrontend defines the query frontend component spec.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Query Frontend pods"
+ QueryFrontend *LokiComponentSpec `json:"queryFrontend,omitempty"`
+
+ // Gateway defines the lokistack gateway component spec.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Gateway pods"
+ Gateway *LokiComponentSpec `json:"gateway,omitempty"`
+
+ // IndexGateway defines the index gateway component spec.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Index Gateway pods"
+ IndexGateway *LokiComponentSpec `json:"indexGateway,omitempty"`
+
+ // Ruler defines the ruler component spec.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Ruler pods"
+ Ruler *LokiComponentSpec `json:"ruler,omitempty"`
+}
+
+// ObjectStorageTLSSpec is the TLS configuration for reaching the object storage endpoint.
+type ObjectStorageTLSSpec struct {
+ // CA is the name of a ConfigMap containing a CA certificate.
+ // It needs to be in the same namespace as the LokiStack custom resource.
+ //
+ // +optional
+	// +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:io.kubernetes:ConfigMap",displayName="CA ConfigMap Name"
+ CA string `json:"caName,omitempty"`
+}
+
+// ObjectStorageSecretType defines the type of storage which can be used with the Loki cluster.
+//
+// +kubebuilder:validation:Enum=azure;gcs;s3;swift
+type ObjectStorageSecretType string
+
+const (
+ // ObjectStorageSecretAzure when using Azure for Loki storage
+ ObjectStorageSecretAzure ObjectStorageSecretType = "azure"
+
+ // ObjectStorageSecretGCS when using GCS for Loki storage
+ ObjectStorageSecretGCS ObjectStorageSecretType = "gcs"
+
+ // ObjectStorageSecretS3 when using S3 for Loki storage
+ ObjectStorageSecretS3 ObjectStorageSecretType = "s3"
+
+ // ObjectStorageSecretSwift when using Swift for Loki storage
+ ObjectStorageSecretSwift ObjectStorageSecretType = "swift"
+)
+
+// ObjectStorageSecretSpec is a secret reference containing name only, no namespace.
+type ObjectStorageSecretSpec struct {
+ // Type of object storage that should be used
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:azure","urn:alm:descriptor:com.tectonic.ui:select:gcs","urn:alm:descriptor:com.tectonic.ui:select:s3","urn:alm:descriptor:com.tectonic.ui:select:swift"},displayName="Object Storage Secret Type"
+ Type ObjectStorageSecretType `json:"type"`
+
+ // Name of a secret in the namespace configured for object storage secrets.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:io.kubernetes:Secret",displayName="Object Storage Secret Name"
+ Name string `json:"name"`
+}
+
+// ObjectStorageSchemaVersion defines the storage schema version which will be
+// used with the Loki cluster.
+//
+// +kubebuilder:validation:Enum=v11;v12
+type ObjectStorageSchemaVersion string
+
+const (
+ // ObjectStorageSchemaV11 when using v11 for the storage schema
+ ObjectStorageSchemaV11 ObjectStorageSchemaVersion = "v11"
+
+ // ObjectStorageSchemaV12 when using v12 for the storage schema
+ ObjectStorageSchemaV12 ObjectStorageSchemaVersion = "v12"
+)
+
+// ObjectStorageSchema defines the requirements needed to configure a new
+// storage schema.
+type ObjectStorageSchema struct {
+
+ // Version for writing and reading logs.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:v11","urn:alm:descriptor:com.tectonic.ui:select:v12"},displayName="Version"
+ Version ObjectStorageSchemaVersion `json:"version"`
+
+ // EffectiveDate is the date in UTC that the schema will be applied on.
+	// To ensure readability of logs, this date should be before the current
+ // date in UTC.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ EffectiveDate StorageSchemaEffectiveDate `json:"effectiveDate"`
+}
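+
+// A sketch of a schema upgrade: v11 stays effective from the initial date and
+// v12 takes over on a later date (both dates are illustrative):
+//
+//	schemas := []ObjectStorageSchema{
+//		{Version: ObjectStorageSchemaV11, EffectiveDate: "2020-10-11"},
+//		{Version: ObjectStorageSchemaV12, EffectiveDate: "2022-06-01"},
+//	}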
+
+// ObjectStorageSpec defines the requirements to access the object
+// storage bucket to persist logs by the ingester component.
+type ObjectStorageSpec struct {
+
+ // Schemas for reading and writing logs.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:MinItems:=1
+ // +kubebuilder:default:={{version:v11,effectiveDate:"2020-10-11"}}
+ Schemas []ObjectStorageSchema `json:"schemas"`
+
+ // Secret for object storage authentication.
+ // Name of a secret in the same namespace as the LokiStack custom resource.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ Secret ObjectStorageSecretSpec `json:"secret"`
+
+ // TLS configuration for reaching the object storage endpoint.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="TLS Config"
+ TLS *ObjectStorageTLSSpec `json:"tls,omitempty"`
+}
+
+// QueryLimitSpec defines the limits applied at the query path.
+type QueryLimitSpec struct {
+
+	// MaxEntriesLimitPerQuery defines the maximum number of log entries
+ // that will be returned for a query.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Entries Limit per Query"
+ MaxEntriesLimitPerQuery int32 `json:"maxEntriesLimitPerQuery,omitempty"`
+
+ // MaxChunksPerQuery defines the maximum number of chunks
+ // that can be fetched by a single query.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Chunk per Query"
+ MaxChunksPerQuery int32 `json:"maxChunksPerQuery,omitempty"`
+
+	// MaxQuerySeries defines the maximum number of unique series
+	// that can be returned by a metric query.
+	//
+	// +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Query Series"
+ MaxQuerySeries int32 `json:"maxQuerySeries,omitempty"`
+}
+
+// IngestionLimitSpec defines the limits applied at the ingestion path.
+type IngestionLimitSpec struct {
+
+ // IngestionRate defines the sample size per second. Units MB.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Ingestion Rate (in MB)"
+ IngestionRate int32 `json:"ingestionRate,omitempty"`
+
+ // IngestionBurstSize defines the local rate-limited sample size per
+	// distributor replica. It should be set at least to the maximum log
+	// size expected in a single push request.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Ingestion Burst Size (in MB)"
+ IngestionBurstSize int32 `json:"ingestionBurstSize,omitempty"`
+
+ // MaxLabelNameLength defines the maximum number of characters allowed
+ // for label keys in log streams.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Label Name Length"
+ MaxLabelNameLength int32 `json:"maxLabelNameLength,omitempty"`
+
+ // MaxLabelValueLength defines the maximum number of characters allowed
+ // for label values in log streams.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Label Value Length"
+ MaxLabelValueLength int32 `json:"maxLabelValueLength,omitempty"`
+
+ // MaxLabelNamesPerSeries defines the maximum number of label names per series
+ // in each log stream.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Labels Names per Series"
+ MaxLabelNamesPerSeries int32 `json:"maxLabelNamesPerSeries,omitempty"`
+
+ // MaxGlobalStreamsPerTenant defines the maximum number of active streams
+ // per tenant, across the cluster.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Global Streams per Tenant"
+ MaxGlobalStreamsPerTenant int32 `json:"maxGlobalStreamsPerTenant,omitempty"`
+
+ // MaxLineSize defines the maximum line size on ingestion path. Units in Bytes.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Line Size"
+ MaxLineSize int32 `json:"maxLineSize,omitempty"`
+}
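+
+// A sketch of ingestion limit values; the numbers are purely illustrative,
+// not recommendations from this change:
+//
+//	ingestion := IngestionLimitSpec{
+//		IngestionRate:             4,      // MB/s
+//		IngestionBurstSize:        6,      // MB
+//		MaxGlobalStreamsPerTenant: 10000,
+//		MaxLineSize:               256000, // bytes
+//	}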
+
+// LimitsTemplateSpec defines the limits applied at ingestion or query path.
+type LimitsTemplateSpec struct {
+ // IngestionLimits defines the limits applied on ingested log streams.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ IngestionLimits *IngestionLimitSpec `json:"ingestion,omitempty"`
+
+ // QueryLimits defines the limit applied on querying log streams.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ QueryLimits *QueryLimitSpec `json:"queries,omitempty"`
+}
+
+// LimitsSpec defines the spec for limits applied at ingestion or query
+// path across the cluster or per tenant.
+type LimitsSpec struct {
+
+ // Global defines the limits applied globally across the cluster.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Global Limits"
+ Global *LimitsTemplateSpec `json:"global,omitempty"`
+
+ // Tenants defines the limits applied per tenant.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Limits per Tenant"
+ Tenants map[string]LimitsTemplateSpec `json:"tenants,omitempty"`
+}
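+
+// Per-tenant entries layer on top of the global limits; a sketch reusing the
+// ingestion values above, with an assumed tenant name "application":
+//
+//	limits := LimitsSpec{
+//		Global: &LimitsTemplateSpec{IngestionLimits: &ingestion},
+//		Tenants: map[string]LimitsTemplateSpec{
+//			"application": {QueryLimits: &QueryLimitSpec{MaxQuerySeries: 1000}},
+//		},
+//	}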
+
+// RulesSpec defines the spec for the ruler component.
+type RulesSpec struct {
+ // Enabled defines a flag to enable/disable the ruler component
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch",displayName="Enable"
+ Enabled bool `json:"enabled"`
+
+ // A selector to select which LokiRules to mount for loading alerting/recording
+ // rules from.
+ //
+ // +optional
+	// +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Selector"
+ Selector *metav1.LabelSelector `json:"selector,omitempty"`
+
+ // Namespaces to be selected for PrometheusRules discovery. If unspecified, only
+	// the namespace of the LokiStack object is used.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Namespace Selector"
+ NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"`
+}
+
+// LokiStackSpec defines the desired state of LokiStack
+type LokiStackSpec struct {
+
+ // ManagementState defines if the CR should be managed by the operator or not.
+ // Default is managed.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:default:=Managed
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:Managed","urn:alm:descriptor:com.tectonic.ui:select:Unmanaged"},displayName="Management State"
+ ManagementState ManagementStateType `json:"managementState,omitempty"`
+
+	// Size defines one of the supported Loki deployment scale out sizes.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:1x.extra-small","urn:alm:descriptor:com.tectonic.ui:select:1x.small","urn:alm:descriptor:com.tectonic.ui:select:1x.medium"},displayName="LokiStack Size"
+ Size LokiStackSizeType `json:"size"`
+
+ // Storage defines the spec for the object storage endpoint to store logs.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Object Storage"
+ Storage ObjectStorageSpec `json:"storage"`
+
+ // Storage class name defines the storage class for ingester/querier PVCs.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:io.kubernetes:StorageClass",displayName="Storage Class Name"
+ StorageClassName string `json:"storageClassName"`
+
+ // ReplicationFactor defines the policy for log stream replication.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Minimum:=1
+ // +kubebuilder:default:=1
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Replication Factor"
+ ReplicationFactor int32 `json:"replicationFactor"`
+
+ // Rules defines the spec for the ruler component
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Rules"
+ Rules *RulesSpec `json:"rules,omitempty"`
+
+ // Limits defines the limits to be applied to log stream processing.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Rate Limiting"
+ Limits *LimitsSpec `json:"limits,omitempty"`
+
+ // Template defines the resource/limits/tolerations/nodeselectors per component
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Node Placement"
+ Template *LokiTemplateSpec `json:"template,omitempty"`
+
+ // Tenants defines the per-tenant authentication and authorization spec for the lokistack-gateway component.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Tenants Configuration"
+ Tenants *TenantsSpec `json:"tenants,omitempty"`
+}
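+
+// Putting it together, a minimal spec only needs a size, an object storage
+// secret, and a storage class; the secret and class names are assumptions:
+//
+//	spec := LokiStackSpec{
+//		Size: SizeOneXSmall,
+//		Storage: ObjectStorageSpec{
+//			Secret: ObjectStorageSecretSpec{Type: ObjectStorageSecretS3, Name: "loki-s3"},
+//		},
+//		StorageClassName: "gp2",
+//	}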
+
+// LokiStackConditionType defines the type for the condition types of a Loki deployment.
+type LokiStackConditionType string
+
+const (
+ // ConditionReady defines the condition that all components in the Loki deployment are ready.
+ ConditionReady LokiStackConditionType = "Ready"
+
+	// ConditionPending defines the condition that some or all components are in a pending state.
+ ConditionPending LokiStackConditionType = "Pending"
+
+ // ConditionFailed defines the condition that components in the Loki deployment failed to roll out.
+ ConditionFailed LokiStackConditionType = "Failed"
+
+ // ConditionDegraded defines the condition that some or all components in the Loki deployment
+ // are degraded or the cluster cannot connect to object storage.
+ ConditionDegraded LokiStackConditionType = "Degraded"
+)
+
+// LokiStackConditionReason defines the type for valid reasons of a Loki deployment conditions.
+type LokiStackConditionReason string
+
+const (
+ // ReasonFailedComponents when all/some LokiStack components fail to roll out.
+ ReasonFailedComponents LokiStackConditionReason = "FailedComponents"
+ // ReasonPendingComponents when all/some LokiStack components pending dependencies
+ ReasonPendingComponents LokiStackConditionReason = "PendingComponents"
+ // ReasonReadyComponents when all LokiStack components are ready to serve traffic.
+ ReasonReadyComponents LokiStackConditionReason = "ReadyComponents"
+ // ReasonMissingObjectStorageSecret when the required secret to store logs to object
+ // storage is missing.
+ ReasonMissingObjectStorageSecret LokiStackConditionReason = "MissingObjectStorageSecret"
+ // ReasonInvalidObjectStorageSecret when the format of the secret is invalid.
+ ReasonInvalidObjectStorageSecret LokiStackConditionReason = "InvalidObjectStorageSecret"
+	// ReasonInvalidObjectStorageSchema when the spec contains one or more invalid schemas.
+ ReasonInvalidObjectStorageSchema LokiStackConditionReason = "InvalidObjectStorageSchema"
+ // ReasonMissingObjectStorageCAConfigMap when the required configmap to verify object storage
+ // certificates is missing.
+ ReasonMissingObjectStorageCAConfigMap LokiStackConditionReason = "MissingObjectStorageCAConfigMap"
+ // ReasonInvalidObjectStorageCAConfigMap when the format of the CA configmap is invalid.
+ ReasonInvalidObjectStorageCAConfigMap LokiStackConditionReason = "InvalidObjectStorageCAConfigMap"
+	// ReasonMissingRulerSecret when the required secret to authorize remote write connections
+ // for the ruler is missing.
+ ReasonMissingRulerSecret LokiStackConditionReason = "MissingRulerSecret"
+ // ReasonInvalidRulerSecret when the format of the ruler remote write authorization secret is invalid.
+ ReasonInvalidRulerSecret LokiStackConditionReason = "InvalidRulerSecret"
+	// ReasonInvalidReplicationConfiguration when the configured replication factor is not valid
+	// for the selected cluster size.
+ ReasonInvalidReplicationConfiguration LokiStackConditionReason = "InvalidReplicationConfiguration"
+ // ReasonMissingGatewayTenantSecret when the required tenant secret
+ // for authentication is missing.
+ ReasonMissingGatewayTenantSecret LokiStackConditionReason = "MissingGatewayTenantSecret"
+ // ReasonInvalidGatewayTenantSecret when the format of the secret is invalid.
+ ReasonInvalidGatewayTenantSecret LokiStackConditionReason = "InvalidGatewayTenantSecret"
+ // ReasonInvalidTenantsConfiguration when the tenant configuration provided is invalid.
+ ReasonInvalidTenantsConfiguration LokiStackConditionReason = "InvalidTenantsConfiguration"
+ // ReasonMissingGatewayOpenShiftBaseDomain when the reconciler cannot lookup the OpenShift DNS base domain.
+ ReasonMissingGatewayOpenShiftBaseDomain LokiStackConditionReason = "MissingGatewayOpenShiftBaseDomain"
+)
+
+// PodStatusMap defines the type for mapping pod status to pod name.
+type PodStatusMap map[corev1.PodPhase][]string
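+
+// For example, a component with one running and one pending pod would be
+// reported as follows (the pod names are placeholders):
+//
+//	psm := PodStatusMap{
+//		corev1.PodRunning: {"lokistack-ingester-0"},
+//		corev1.PodPending: {"lokistack-ingester-1"},
+//	}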
+
+// LokiStackComponentStatus defines the map of per pod status per LokiStack component.
+// Each component is represented by a separate map of pod phase to a list of pod names.
+type LokiStackComponentStatus struct {
+ // Compactor is a map to the pod status of the compactor pod.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Compactor",order=5
+ Compactor PodStatusMap `json:"compactor,omitempty"`
+
+ // Distributor is a map to the per pod status of the distributor deployment
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Distributor",order=1
+ Distributor PodStatusMap `json:"distributor,omitempty"`
+
+ // IndexGateway is a map to the per pod status of the index gateway statefulset
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="IndexGateway",order=6
+ IndexGateway PodStatusMap `json:"indexGateway,omitempty"`
+
+ // Ingester is a map to the per pod status of the ingester statefulset
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Ingester",order=2
+ Ingester PodStatusMap `json:"ingester,omitempty"`
+
+ // Querier is a map to the per pod status of the querier deployment
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Querier",order=3
+ Querier PodStatusMap `json:"querier,omitempty"`
+
+ // QueryFrontend is a map to the per pod status of the query frontend deployment
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Query Frontend",order=4
+ QueryFrontend PodStatusMap `json:"queryFrontend,omitempty"`
+
+ // Gateway is a map to the per pod status of the lokistack gateway deployment.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Gateway",order=5
+ Gateway PodStatusMap `json:"gateway,omitempty"`
+
+ // Ruler is a map to the per pod status of the lokistack ruler statefulset.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Ruler",order=6
+ Ruler PodStatusMap `json:"ruler,omitempty"`
+}
+
+// LokiStackStorageStatus defines the observed state of
+// the Loki storage configuration.
+type LokiStackStorageStatus struct {
+
+ // Schemas is a list of schemas which have been applied
+ // to the LokiStack.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ Schemas []ObjectStorageSchema `json:"schemas,omitempty"`
+}
+
+// LokiStackStatus defines the observed state of LokiStack
+type LokiStackStatus struct {
+	// Components provides a summary of all Loki pod statuses grouped
+ // per component.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ Components LokiStackComponentStatus `json:"components,omitempty"`
+
+	// Storage provides a summary of all changes that have occurred
+ // to the storage configuration.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ Storage LokiStackStorageStatus `json:"storage,omitempty"`
+
+ // Conditions of the Loki deployment health.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:io.kubernetes.conditions"
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+// +kubebuilder:resource:categories=logging
+
+// LokiStack is the Schema for the lokistacks API
+//
+// +operator-sdk:csv:customresourcedefinitions:displayName="LokiStack",resources={{Deployment,v1},{StatefulSet,v1},{ConfigMap,v1},{Ingress,v1},{Service,v1},{ServiceAccount,v1},{PersistentVolumeClaims,v1},{Route,v1},{ServiceMonitor,v1}}
+type LokiStack struct {
+ Spec LokiStackSpec `json:"spec,omitempty"`
+ Status LokiStackStatus `json:"status,omitempty"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ metav1.TypeMeta `json:",inline"`
+}
+
+// +kubebuilder:object:root=true
+
+// LokiStackList contains a list of LokiStack
+type LokiStackList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []LokiStack `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&LokiStack{}, &LokiStackList{})
+}
+
+// Hub declares the v1.LokiStack as the hub CRD version.
+func (*LokiStack) Hub() {}
diff --git a/operator/apis/loki/v1beta1/lokistack_webhook.go b/operator/apis/loki/v1/lokistack_webhook.go
similarity index 82%
rename from operator/apis/loki/v1beta1/lokistack_webhook.go
rename to operator/apis/loki/v1/lokistack_webhook.go
index dab158fb7a76f..2fe66c38cdb4b 100644
--- a/operator/apis/loki/v1beta1/lokistack_webhook.go
+++ b/operator/apis/loki/v1/lokistack_webhook.go
@@ -1,4 +1,4 @@
-package v1beta1
+package v1
import (
"reflect"
@@ -14,35 +14,38 @@ import (
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
+// objectStorageSchemaMap defines the type for mapping a schema version with a date
+type objectStorageSchemaMap map[StorageSchemaEffectiveDate]ObjectStorageSchemaVersion
+
// SetupWebhookWithManager registers the Lokistack to the controller-runtime manager
// or returns an error.
-func (s *LokiStack) SetupWebhookWithManager(mgr ctrl.Manager) error {
+func (r *LokiStack) SetupWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
- For(s).
+ For(r).
Complete()
}
-//+kubebuilder:webhook:path=/validate-loki-grafana-com-v1beta1-lokistack,mutating=false,failurePolicy=fail,sideEffects=None,groups=loki.grafana.com,resources=lokistacks,verbs=create;update,versions=v1beta1,name=vlokistack.kb.io,admissionReviewVersions=v1
+//+kubebuilder:webhook:path=/validate-loki-grafana-com-v1-lokistack,mutating=false,failurePolicy=fail,sideEffects=None,groups=loki.grafana.com,resources=lokistacks,verbs=create;update,versions=v1,name=vlokistack.loki.grafana.com,admissionReviewVersions=v1
var _ webhook.Validator = &LokiStack{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
-func (s *LokiStack) ValidateCreate() error {
- return s.validate(nil)
+func (r *LokiStack) ValidateCreate() error {
+ return r.validate(nil)
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
-func (s *LokiStack) ValidateUpdate(old runtime.Object) error {
+func (r *LokiStack) ValidateUpdate(old runtime.Object) error {
oldStack, ok := old.(*LokiStack)
if !ok {
t := reflect.TypeOf(old).String()
return apierrors.NewInternalError(kverrors.New("runtime object is incorrect type", "type", t))
}
- return s.validate(oldStack)
+ return r.validate(oldStack)
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
-func (s *LokiStack) ValidateDelete() error {
+func (r *LokiStack) ValidateDelete() error {
// Do nothing
return nil
}
@@ -131,7 +134,7 @@ func (s *ObjectStorageSpec) ValidateSchemas(utcTime time.Time, status LokiStackS
return allErrs
}
-func (s *LokiStack) validate(old *LokiStack) error {
+func (r *LokiStack) validate(old *LokiStack) error {
var allErrs field.ErrorList
storageStatus := LokiStackStorageStatus{}
@@ -139,7 +142,7 @@ func (s *LokiStack) validate(old *LokiStack) error {
storageStatus = old.Status.Storage
}
- errors := s.Spec.Storage.ValidateSchemas(time.Now().UTC(), storageStatus)
+ errors := r.Spec.Storage.ValidateSchemas(time.Now().UTC(), storageStatus)
if len(errors) != 0 {
allErrs = append(allErrs, errors...)
}
@@ -150,7 +153,7 @@ func (s *LokiStack) validate(old *LokiStack) error {
return apierrors.NewInvalid(
schema.GroupKind{Group: "loki.grafana.com", Kind: "LokiStack"},
- s.Name,
+ r.Name,
allErrs,
)
}
diff --git a/operator/apis/loki/v1beta1/lokistack_webhook_test.go b/operator/apis/loki/v1/lokistack_webhook_test.go
similarity index 55%
rename from operator/apis/loki/v1beta1/lokistack_webhook_test.go
rename to operator/apis/loki/v1/lokistack_webhook_test.go
index 9436a084aba82..d918810907582 100644
--- a/operator/apis/loki/v1beta1/lokistack_webhook_test.go
+++ b/operator/apis/loki/v1/lokistack_webhook_test.go
@@ -1,9 +1,9 @@
-package v1beta1_test
+package v1_test
import (
"testing"
- "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ v1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/stretchr/testify/require"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -14,21 +14,21 @@ import (
var ltt = []struct {
desc string
- spec v1beta1.LokiStack
+ spec v1.LokiStack
err *apierrors.StatusError
}{
{
desc: "valid spec - no status",
- spec: v1beta1.LokiStack{
- Spec: v1beta1.LokiStackSpec{
- Storage: v1beta1.ObjectStorageSpec{
- Schemas: []v1beta1.ObjectStorageSchema{
+ spec: v1.LokiStack{
+ Spec: v1.LokiStackSpec{
+ Storage: v1.ObjectStorageSpec{
+ Schemas: []v1.ObjectStorageSchema{
{
- Version: v1beta1.ObjectStorageSchemaV11,
+ Version: v1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
{
- Version: v1beta1.ObjectStorageSchemaV12,
+ Version: v1.ObjectStorageSchemaV12,
EffectiveDate: "2020-10-13",
},
},
@@ -38,30 +38,30 @@ var ltt = []struct {
},
{
desc: "valid spec - with status",
- spec: v1beta1.LokiStack{
- Spec: v1beta1.LokiStackSpec{
- Storage: v1beta1.ObjectStorageSpec{
- Schemas: []v1beta1.ObjectStorageSchema{
+ spec: v1.LokiStack{
+ Spec: v1.LokiStackSpec{
+ Storage: v1.ObjectStorageSpec{
+ Schemas: []v1.ObjectStorageSchema{
{
- Version: v1beta1.ObjectStorageSchemaV11,
+ Version: v1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
{
- Version: v1beta1.ObjectStorageSchemaV12,
+ Version: v1.ObjectStorageSchemaV12,
EffectiveDate: "2020-10-13",
},
},
},
},
- Status: v1beta1.LokiStackStatus{
- Storage: v1beta1.LokiStackStorageStatus{
- Schemas: []v1beta1.ObjectStorageSchema{
+ Status: v1.LokiStackStatus{
+ Storage: v1.LokiStackStorageStatus{
+ Schemas: []v1.ObjectStorageSchema{
{
- Version: v1beta1.ObjectStorageSchemaV11,
+ Version: v1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
{
- Version: v1beta1.ObjectStorageSchemaV12,
+ Version: v1.ObjectStorageSchemaV12,
EffectiveDate: "2020-10-13",
},
},
@@ -71,16 +71,16 @@ var ltt = []struct {
},
{
desc: "not unique schema effective dates",
- spec: v1beta1.LokiStack{
- Spec: v1beta1.LokiStackSpec{
- Storage: v1beta1.ObjectStorageSpec{
- Schemas: []v1beta1.ObjectStorageSchema{
+ spec: v1.LokiStack{
+ Spec: v1.LokiStackSpec{
+ Storage: v1.ObjectStorageSpec{
+ Schemas: []v1.ObjectStorageSchema{
{
- Version: v1beta1.ObjectStorageSchemaV11,
+ Version: v1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
{
- Version: v1beta1.ObjectStorageSchemaV12,
+ Version: v1.ObjectStorageSchemaV12,
EffectiveDate: "2020-10-11",
},
},
@@ -94,19 +94,19 @@ var ltt = []struct {
field.Invalid(
field.NewPath("Spec").Child("Storage").Child("Schemas").Index(1).Child("EffectiveDate"),
"2020-10-11",
- v1beta1.ErrEffectiveDatesNotUnique.Error(),
+ v1.ErrEffectiveDatesNotUnique.Error(),
),
},
),
},
{
desc: "schema effective dates bad format",
- spec: v1beta1.LokiStack{
- Spec: v1beta1.LokiStackSpec{
- Storage: v1beta1.ObjectStorageSpec{
- Schemas: []v1beta1.ObjectStorageSchema{
+ spec: v1.LokiStack{
+ Spec: v1.LokiStackSpec{
+ Storage: v1.ObjectStorageSpec{
+ Schemas: []v1.ObjectStorageSchema{
{
- Version: v1beta1.ObjectStorageSchemaV11,
+ Version: v1.ObjectStorageSchemaV11,
EffectiveDate: "2020/10/11",
},
},
@@ -120,19 +120,19 @@ var ltt = []struct {
field.Invalid(
field.NewPath("Spec").Child("Storage").Child("Schemas").Index(0).Child("EffectiveDate"),
"2020/10/11",
- v1beta1.ErrParseEffectiveDates.Error(),
+ v1.ErrParseEffectiveDates.Error(),
),
},
),
},
{
desc: "missing valid starting date",
- spec: v1beta1.LokiStack{
- Spec: v1beta1.LokiStackSpec{
- Storage: v1beta1.ObjectStorageSpec{
- Schemas: []v1beta1.ObjectStorageSchema{
+ spec: v1.LokiStack{
+ Spec: v1.LokiStackSpec{
+ Storage: v1.ObjectStorageSpec{
+ Schemas: []v1.ObjectStorageSchema{
{
- Version: v1beta1.ObjectStorageSchemaV11,
+ Version: v1.ObjectStorageSchemaV11,
EffectiveDate: "9000-10-10",
},
},
@@ -145,39 +145,39 @@ var ltt = []struct {
field.ErrorList{
field.Invalid(
field.NewPath("Spec").Child("Storage").Child("Schemas"),
- []v1beta1.ObjectStorageSchema{
+ []v1.ObjectStorageSchema{
{
- Version: v1beta1.ObjectStorageSchemaV11,
+ Version: v1.ObjectStorageSchemaV11,
EffectiveDate: "9000-10-10",
},
},
- v1beta1.ErrMissingValidStartDate.Error(),
+ v1.ErrMissingValidStartDate.Error(),
),
},
),
},
{
desc: "retroactively adding schema",
- spec: v1beta1.LokiStack{
- Spec: v1beta1.LokiStackSpec{
- Storage: v1beta1.ObjectStorageSpec{
- Schemas: []v1beta1.ObjectStorageSchema{
+ spec: v1.LokiStack{
+ Spec: v1.LokiStackSpec{
+ Storage: v1.ObjectStorageSpec{
+ Schemas: []v1.ObjectStorageSchema{
{
- Version: v1beta1.ObjectStorageSchemaV11,
+ Version: v1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
{
- Version: v1beta1.ObjectStorageSchemaV12,
+ Version: v1.ObjectStorageSchemaV12,
EffectiveDate: "2020-10-14",
},
},
},
},
- Status: v1beta1.LokiStackStatus{
- Storage: v1beta1.LokiStackStorageStatus{
- Schemas: []v1beta1.ObjectStorageSchema{
+ Status: v1.LokiStackStatus{
+ Storage: v1.LokiStackStorageStatus{
+ Schemas: []v1.ObjectStorageSchema{
{
- Version: v1beta1.ObjectStorageSchemaV11,
+ Version: v1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
},
@@ -190,37 +190,37 @@ var ltt = []struct {
field.ErrorList{
field.Invalid(
field.NewPath("Spec").Child("Storage").Child("Schemas"),
- v1beta1.ObjectStorageSchema{
- Version: v1beta1.ObjectStorageSchemaV12,
+ v1.ObjectStorageSchema{
+ Version: v1.ObjectStorageSchemaV12,
EffectiveDate: "2020-10-14",
},
- v1beta1.ErrSchemaRetroactivelyAdded.Error(),
+ v1.ErrSchemaRetroactivelyAdded.Error(),
),
},
),
},
{
desc: "retroactively removing schema",
- spec: v1beta1.LokiStack{
- Spec: v1beta1.LokiStackSpec{
- Storage: v1beta1.ObjectStorageSpec{
- Schemas: []v1beta1.ObjectStorageSchema{
+ spec: v1.LokiStack{
+ Spec: v1.LokiStackSpec{
+ Storage: v1.ObjectStorageSpec{
+ Schemas: []v1.ObjectStorageSchema{
{
- Version: v1beta1.ObjectStorageSchemaV11,
+ Version: v1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
},
},
},
- Status: v1beta1.LokiStackStatus{
- Storage: v1beta1.LokiStackStorageStatus{
- Schemas: []v1beta1.ObjectStorageSchema{
+ Status: v1.LokiStackStatus{
+ Storage: v1.LokiStackStorageStatus{
+ Schemas: []v1.ObjectStorageSchema{
{
- Version: v1beta1.ObjectStorageSchemaV11,
+ Version: v1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
{
- Version: v1beta1.ObjectStorageSchemaV12,
+ Version: v1.ObjectStorageSchemaV12,
EffectiveDate: "2020-10-14",
},
},
@@ -233,35 +233,35 @@ var ltt = []struct {
field.ErrorList{
field.Invalid(
field.NewPath("Spec").Child("Storage").Child("Schemas"),
- []v1beta1.ObjectStorageSchema{
+ []v1.ObjectStorageSchema{
{
- Version: v1beta1.ObjectStorageSchemaV11,
+ Version: v1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
},
- v1beta1.ErrSchemaRetroactivelyRemoved.Error(),
+ v1.ErrSchemaRetroactivelyRemoved.Error(),
),
},
),
},
{
desc: "retroactively changing schema",
- spec: v1beta1.LokiStack{
- Spec: v1beta1.LokiStackSpec{
- Storage: v1beta1.ObjectStorageSpec{
- Schemas: []v1beta1.ObjectStorageSchema{
+ spec: v1.LokiStack{
+ Spec: v1.LokiStackSpec{
+ Storage: v1.ObjectStorageSpec{
+ Schemas: []v1.ObjectStorageSchema{
{
- Version: v1beta1.ObjectStorageSchemaV12,
+ Version: v1.ObjectStorageSchemaV12,
EffectiveDate: "2020-10-11",
},
},
},
},
- Status: v1beta1.LokiStackStatus{
- Storage: v1beta1.LokiStackStorageStatus{
- Schemas: []v1beta1.ObjectStorageSchema{
+ Status: v1.LokiStackStatus{
+ Storage: v1.LokiStackStorageStatus{
+ Schemas: []v1.ObjectStorageSchema{
{
- Version: v1beta1.ObjectStorageSchemaV11,
+ Version: v1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
},
@@ -274,11 +274,11 @@ var ltt = []struct {
field.ErrorList{
field.Invalid(
field.NewPath("Spec").Child("Storage").Child("Schemas"),
- v1beta1.ObjectStorageSchema{
- Version: v1beta1.ObjectStorageSchemaV12,
+ v1.ObjectStorageSchema{
+ Version: v1.ObjectStorageSchemaV12,
EffectiveDate: "2020-10-11",
},
- v1beta1.ErrSchemaRetroactivelyChanged.Error(),
+ v1.ErrSchemaRetroactivelyChanged.Error(),
),
},
),
@@ -290,7 +290,7 @@ func TestLokiStackValidationWebhook_ValidateCreate(t *testing.T) {
tc := tc
t.Run(tc.desc, func(t *testing.T) {
t.Parallel()
- l := v1beta1.LokiStack{
+ l := v1.LokiStack{
ObjectMeta: metav1.ObjectMeta{
Name: "testing-stack",
},
@@ -312,14 +312,14 @@ func TestLokiStackValidationWebhook_ValidateUpdate(t *testing.T) {
tc := tc
t.Run(tc.desc, func(t *testing.T) {
t.Parallel()
- l := v1beta1.LokiStack{
+ l := v1.LokiStack{
ObjectMeta: metav1.ObjectMeta{
Name: "testing-stack",
},
Spec: tc.spec.Spec,
}
- err := l.ValidateUpdate(&v1beta1.LokiStack{})
+ err := l.ValidateUpdate(&v1.LokiStack{})
if err != nil {
require.Equal(t, tc.err, err)
} else {
diff --git a/operator/apis/loki/v1/v1.go b/operator/apis/loki/v1/v1.go
new file mode 100644
index 0000000000000..7f9cd57c70465
--- /dev/null
+++ b/operator/apis/loki/v1/v1.go
@@ -0,0 +1,39 @@
+package v1
+
+import (
+ "errors"
+ "time"
+)
+
+// StorageSchemaEffectiveDate defines the type for the Storage Schema Effective Date
+//
+// +kubebuilder:validation:Pattern:="^([0-9]{4,})([-]([0-9]{2})){2}$"
+type StorageSchemaEffectiveDate string
+
+// UTCTime returns the date as a time object in the UTC time zone
+func (d StorageSchemaEffectiveDate) UTCTime() (time.Time, error) {
+ return time.Parse(StorageSchemaEffectiveDateFormat, string(d))
+}
+
+const (
+	// StorageSchemaEffectiveDateFormat is the datetime format needed to parse the effective date.
+ StorageSchemaEffectiveDateFormat = "2006-01-02"
+ // StorageSchemaUpdateBuffer is the amount of time used as a buffer to prevent
+ // storage schemas from being added too close to midnight in UTC.
+ StorageSchemaUpdateBuffer = time.Hour * 2
+)
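+
+// A sketch of how a consumer might use these helpers: parse the effective
+// date and only accept schemas that become effective after now plus the
+// update buffer (variable names are illustrative):
+//
+//	effective, err := StorageSchemaEffectiveDate("2022-06-01").UTCTime()
+//	if err != nil {
+//		return err // malformed effective date
+//	}
+//	valid := effective.After(time.Now().UTC().Add(StorageSchemaUpdateBuffer))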
+
+var (
+ // ErrEffectiveDatesNotUnique when effective dates are not unique.
+ ErrEffectiveDatesNotUnique = errors.New("Effective dates are not unique")
+ // ErrParseEffectiveDates when effective dates cannot be parsed.
+ ErrParseEffectiveDates = errors.New("Failed to parse effective date")
+ // ErrMissingValidStartDate when a schema list is created without a valid effective date
+ ErrMissingValidStartDate = errors.New("Schema does not contain a valid starting effective date")
+ // ErrSchemaRetroactivelyAdded when a schema has been retroactively added
+ ErrSchemaRetroactivelyAdded = errors.New("Cannot retroactively add schema")
+ // ErrSchemaRetroactivelyRemoved when a schema or schemas has been retroactively removed
+ ErrSchemaRetroactivelyRemoved = errors.New("Cannot retroactively remove schema(s)")
+ // ErrSchemaRetroactivelyChanged when a schema has been retroactively changed
+ ErrSchemaRetroactivelyChanged = errors.New("Cannot retroactively change schema")
+)
diff --git a/operator/apis/loki/v1/zz_generated.deepcopy.go b/operator/apis/loki/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000000..0d2e49fb16328
--- /dev/null
+++ b/operator/apis/loki/v1/zz_generated.deepcopy.go
@@ -0,0 +1,778 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) {
+ *out = *in
+ if in.OIDC != nil {
+ in, out := &in.OIDC, &out.OIDC
+ *out = new(OIDCSpec)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationSpec.
+func (in *AuthenticationSpec) DeepCopy() *AuthenticationSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AuthenticationSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthorizationSpec) DeepCopyInto(out *AuthorizationSpec) {
+ *out = *in
+ if in.OPA != nil {
+ in, out := &in.OPA, &out.OPA
+ *out = new(OPASpec)
+ **out = **in
+ }
+ if in.Roles != nil {
+ in, out := &in.Roles, &out.Roles
+ *out = make([]RoleSpec, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.RoleBindings != nil {
+ in, out := &in.RoleBindings, &out.RoleBindings
+ *out = make([]RoleBindingsSpec, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationSpec.
+func (in *AuthorizationSpec) DeepCopy() *AuthorizationSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AuthorizationSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngestionLimitSpec) DeepCopyInto(out *IngestionLimitSpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestionLimitSpec.
+func (in *IngestionLimitSpec) DeepCopy() *IngestionLimitSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(IngestionLimitSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LimitsSpec) DeepCopyInto(out *LimitsSpec) {
+ *out = *in
+ if in.Global != nil {
+ in, out := &in.Global, &out.Global
+ *out = new(LimitsTemplateSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Tenants != nil {
+ in, out := &in.Tenants, &out.Tenants
+ *out = make(map[string]LimitsTemplateSpec, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitsSpec.
+func (in *LimitsSpec) DeepCopy() *LimitsSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(LimitsSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LimitsTemplateSpec) DeepCopyInto(out *LimitsTemplateSpec) {
+ *out = *in
+ if in.IngestionLimits != nil {
+ in, out := &in.IngestionLimits, &out.IngestionLimits
+ *out = new(IngestionLimitSpec)
+ **out = **in
+ }
+ if in.QueryLimits != nil {
+ in, out := &in.QueryLimits, &out.QueryLimits
+ *out = new(QueryLimitSpec)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitsTemplateSpec.
+func (in *LimitsTemplateSpec) DeepCopy() *LimitsTemplateSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(LimitsTemplateSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LokiComponentSpec) DeepCopyInto(out *LokiComponentSpec) {
+ *out = *in
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = make([]corev1.Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiComponentSpec.
+func (in *LokiComponentSpec) DeepCopy() *LokiComponentSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(LokiComponentSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LokiStack) DeepCopyInto(out *LokiStack) {
+ *out = *in
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.TypeMeta = in.TypeMeta
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiStack.
+func (in *LokiStack) DeepCopy() *LokiStack {
+ if in == nil {
+ return nil
+ }
+ out := new(LokiStack)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *LokiStack) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LokiStackComponentStatus) DeepCopyInto(out *LokiStackComponentStatus) {
+ *out = *in
+ if in.Compactor != nil {
+ in, out := &in.Compactor, &out.Compactor
+ *out = make(PodStatusMap, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ if in.Distributor != nil {
+ in, out := &in.Distributor, &out.Distributor
+ *out = make(PodStatusMap, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ if in.IndexGateway != nil {
+ in, out := &in.IndexGateway, &out.IndexGateway
+ *out = make(PodStatusMap, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ if in.Ingester != nil {
+ in, out := &in.Ingester, &out.Ingester
+ *out = make(PodStatusMap, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ if in.Querier != nil {
+ in, out := &in.Querier, &out.Querier
+ *out = make(PodStatusMap, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ if in.QueryFrontend != nil {
+ in, out := &in.QueryFrontend, &out.QueryFrontend
+ *out = make(PodStatusMap, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ if in.Gateway != nil {
+ in, out := &in.Gateway, &out.Gateway
+ *out = make(PodStatusMap, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ if in.Ruler != nil {
+ in, out := &in.Ruler, &out.Ruler
+ *out = make(PodStatusMap, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiStackComponentStatus.
+func (in *LokiStackComponentStatus) DeepCopy() *LokiStackComponentStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(LokiStackComponentStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LokiStackList) DeepCopyInto(out *LokiStackList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]LokiStack, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiStackList.
+func (in *LokiStackList) DeepCopy() *LokiStackList {
+ if in == nil {
+ return nil
+ }
+ out := new(LokiStackList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *LokiStackList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LokiStackSpec) DeepCopyInto(out *LokiStackSpec) {
+ *out = *in
+ in.Storage.DeepCopyInto(&out.Storage)
+ if in.Rules != nil {
+ in, out := &in.Rules, &out.Rules
+ *out = new(RulesSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Limits != nil {
+ in, out := &in.Limits, &out.Limits
+ *out = new(LimitsSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Template != nil {
+ in, out := &in.Template, &out.Template
+ *out = new(LokiTemplateSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Tenants != nil {
+ in, out := &in.Tenants, &out.Tenants
+ *out = new(TenantsSpec)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiStackSpec.
+func (in *LokiStackSpec) DeepCopy() *LokiStackSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(LokiStackSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LokiStackStatus) DeepCopyInto(out *LokiStackStatus) {
+ *out = *in
+ in.Components.DeepCopyInto(&out.Components)
+ in.Storage.DeepCopyInto(&out.Storage)
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiStackStatus.
+func (in *LokiStackStatus) DeepCopy() *LokiStackStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(LokiStackStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LokiStackStorageStatus) DeepCopyInto(out *LokiStackStorageStatus) {
+ *out = *in
+ if in.Schemas != nil {
+ in, out := &in.Schemas, &out.Schemas
+ *out = make([]ObjectStorageSchema, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiStackStorageStatus.
+func (in *LokiStackStorageStatus) DeepCopy() *LokiStackStorageStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(LokiStackStorageStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LokiTemplateSpec) DeepCopyInto(out *LokiTemplateSpec) {
+ *out = *in
+ if in.Compactor != nil {
+ in, out := &in.Compactor, &out.Compactor
+ *out = new(LokiComponentSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Distributor != nil {
+ in, out := &in.Distributor, &out.Distributor
+ *out = new(LokiComponentSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Ingester != nil {
+ in, out := &in.Ingester, &out.Ingester
+ *out = new(LokiComponentSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Querier != nil {
+ in, out := &in.Querier, &out.Querier
+ *out = new(LokiComponentSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.QueryFrontend != nil {
+ in, out := &in.QueryFrontend, &out.QueryFrontend
+ *out = new(LokiComponentSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Gateway != nil {
+ in, out := &in.Gateway, &out.Gateway
+ *out = new(LokiComponentSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.IndexGateway != nil {
+ in, out := &in.IndexGateway, &out.IndexGateway
+ *out = new(LokiComponentSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Ruler != nil {
+ in, out := &in.Ruler, &out.Ruler
+ *out = new(LokiComponentSpec)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiTemplateSpec.
+func (in *LokiTemplateSpec) DeepCopy() *LokiTemplateSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(LokiTemplateSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OIDCSpec) DeepCopyInto(out *OIDCSpec) {
+ *out = *in
+ if in.Secret != nil {
+ in, out := &in.Secret, &out.Secret
+ *out = new(TenantSecretSpec)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCSpec.
+func (in *OIDCSpec) DeepCopy() *OIDCSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OIDCSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OPASpec) DeepCopyInto(out *OPASpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OPASpec.
+func (in *OPASpec) DeepCopy() *OPASpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OPASpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectStorageSchema) DeepCopyInto(out *ObjectStorageSchema) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageSchema.
+func (in *ObjectStorageSchema) DeepCopy() *ObjectStorageSchema {
+ if in == nil {
+ return nil
+ }
+ out := new(ObjectStorageSchema)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectStorageSecretSpec) DeepCopyInto(out *ObjectStorageSecretSpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageSecretSpec.
+func (in *ObjectStorageSecretSpec) DeepCopy() *ObjectStorageSecretSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ObjectStorageSecretSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectStorageSpec) DeepCopyInto(out *ObjectStorageSpec) {
+ *out = *in
+ if in.Schemas != nil {
+ in, out := &in.Schemas, &out.Schemas
+ *out = make([]ObjectStorageSchema, len(*in))
+ copy(*out, *in)
+ }
+ out.Secret = in.Secret
+ if in.TLS != nil {
+ in, out := &in.TLS, &out.TLS
+ *out = new(ObjectStorageTLSSpec)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageSpec.
+func (in *ObjectStorageSpec) DeepCopy() *ObjectStorageSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ObjectStorageSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectStorageTLSSpec) DeepCopyInto(out *ObjectStorageTLSSpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageTLSSpec.
+func (in *ObjectStorageTLSSpec) DeepCopy() *ObjectStorageTLSSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ObjectStorageTLSSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in PodStatusMap) DeepCopyInto(out *PodStatusMap) {
+ {
+ in := &in
+ *out = make(PodStatusMap, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatusMap.
+func (in PodStatusMap) DeepCopy() PodStatusMap {
+ if in == nil {
+ return nil
+ }
+ out := new(PodStatusMap)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueryLimitSpec) DeepCopyInto(out *QueryLimitSpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryLimitSpec.
+func (in *QueryLimitSpec) DeepCopy() *QueryLimitSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(QueryLimitSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoleBindingsSpec) DeepCopyInto(out *RoleBindingsSpec) {
+ *out = *in
+ if in.Subjects != nil {
+ in, out := &in.Subjects, &out.Subjects
+ *out = make([]Subject, len(*in))
+ copy(*out, *in)
+ }
+ if in.Roles != nil {
+ in, out := &in.Roles, &out.Roles
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingsSpec.
+func (in *RoleBindingsSpec) DeepCopy() *RoleBindingsSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(RoleBindingsSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoleSpec) DeepCopyInto(out *RoleSpec) {
+ *out = *in
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Tenants != nil {
+ in, out := &in.Tenants, &out.Tenants
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Permissions != nil {
+ in, out := &in.Permissions, &out.Permissions
+ *out = make([]PermissionType, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleSpec.
+func (in *RoleSpec) DeepCopy() *RoleSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(RoleSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RulesSpec) DeepCopyInto(out *RulesSpec) {
+ *out = *in
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.NamespaceSelector != nil {
+ in, out := &in.NamespaceSelector, &out.NamespaceSelector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesSpec.
+func (in *RulesSpec) DeepCopy() *RulesSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(RulesSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Subject) DeepCopyInto(out *Subject) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject.
+func (in *Subject) DeepCopy() *Subject {
+ if in == nil {
+ return nil
+ }
+ out := new(Subject)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TenantSecretSpec) DeepCopyInto(out *TenantSecretSpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantSecretSpec.
+func (in *TenantSecretSpec) DeepCopy() *TenantSecretSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(TenantSecretSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TenantsSpec) DeepCopyInto(out *TenantsSpec) {
+ *out = *in
+ if in.Authentication != nil {
+ in, out := &in.Authentication, &out.Authentication
+ *out = make([]AuthenticationSpec, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Authorization != nil {
+ in, out := &in.Authorization, &out.Authorization
+ *out = new(AuthorizationSpec)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantsSpec.
+func (in *TenantsSpec) DeepCopy() *TenantsSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(TenantsSpec)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/operator/apis/loki/v1beta1/alertingrule_types.go b/operator/apis/loki/v1beta1/alertingrule_types.go
index 6881c1d06aef7..5fd57af1ccd3c 100644
--- a/operator/apis/loki/v1beta1/alertingrule_types.go
+++ b/operator/apis/loki/v1beta1/alertingrule_types.go
@@ -110,7 +110,7 @@ type AlertingRuleStatus struct {
// AlertingRule is the Schema for the alertingrules API
//
-// +operator-sdk:csv:customresourcedefinitions:displayName="AlertingRule",resources={{LokiStack,v1beta1}}
+// +operator-sdk:csv:customresourcedefinitions:displayName="AlertingRule",resources={{LokiStack,v1}}
type AlertingRule struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
diff --git a/operator/apis/loki/v1beta1/lokistack_types.go b/operator/apis/loki/v1beta1/lokistack_types.go
index d55eda2ae160f..73294503579cf 100644
--- a/operator/apis/loki/v1beta1/lokistack_types.go
+++ b/operator/apis/loki/v1beta1/lokistack_types.go
@@ -1,8 +1,10 @@
package v1beta1
import (
+ v1 "github.com/grafana/loki/operator/apis/loki/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "sigs.k8s.io/controller-runtime/pkg/conversion"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
@@ -369,9 +371,6 @@ type ObjectStorageSecretSpec struct {
Name string `json:"name"`
}
-// objectStorageSchemaMap defines the type for mapping a schema version with a date
-type objectStorageSchemaMap map[StorageSchemaEffectiveDate]ObjectStorageSchemaVersion
-
// ObjectStorageSchemaVersion defines the storage schema version which will be
// used with the Loki cluster.
//
@@ -811,6 +810,7 @@ type LokiStackStatus struct {
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
+// +kubebuilder:unservedversion
// +kubebuilder:resource:categories=logging
// LokiStack is the Schema for the lokistacks API
@@ -835,3 +835,534 @@ type LokiStackList struct {
func init() {
SchemeBuilder.Register(&LokiStack{}, &LokiStackList{})
}
+
+// ConvertTo converts this LokiStack (v1beta1) to the Hub version (v1).
+func (src *LokiStack) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*v1.LokiStack)
+
+ dst.ObjectMeta = src.ObjectMeta
+ dst.Status.Conditions = src.Status.Conditions
+ dst.Status.Components = v1.LokiStackComponentStatus{
+ Compactor: v1.PodStatusMap(src.Status.Components.Compactor),
+ Distributor: v1.PodStatusMap(src.Status.Components.Distributor),
+ Ingester: v1.PodStatusMap(src.Status.Components.Ingester),
+ Querier: v1.PodStatusMap(src.Status.Components.Querier),
+ QueryFrontend: v1.PodStatusMap(src.Status.Components.QueryFrontend),
+ IndexGateway: v1.PodStatusMap(src.Status.Components.IndexGateway),
+ Ruler: v1.PodStatusMap(src.Status.Components.Ruler),
+ Gateway: v1.PodStatusMap(src.Status.Components.Gateway),
+ }
+
+ var statusSchemas []v1.ObjectStorageSchema
+ for _, s := range src.Status.Storage.Schemas {
+ statusSchemas = append(statusSchemas, v1.ObjectStorageSchema{
+ Version: v1.ObjectStorageSchemaVersion(s.Version),
+ EffectiveDate: v1.StorageSchemaEffectiveDate(s.EffectiveDate),
+ })
+ }
+ dst.Status.Storage = v1.LokiStackStorageStatus{Schemas: statusSchemas}
+
+ if src.Spec.ManagementState != "" {
+ dst.Spec.ManagementState = v1.ManagementStateType(src.Spec.ManagementState)
+ }
+
+ if src.Spec.Size != "" {
+ dst.Spec.Size = v1.LokiStackSizeType(src.Spec.Size)
+ }
+
+ var storageTLS *v1.ObjectStorageTLSSpec
+ if src.Spec.Storage.TLS != nil {
+ storageTLS = &v1.ObjectStorageTLSSpec{
+ CA: src.Spec.Storage.TLS.CA,
+ }
+ }
+
+ var schemas []v1.ObjectStorageSchema
+ for _, s := range src.Spec.Storage.Schemas {
+ schemas = append(schemas, v1.ObjectStorageSchema{
+ EffectiveDate: v1.StorageSchemaEffectiveDate(s.EffectiveDate),
+ Version: v1.ObjectStorageSchemaVersion(s.Version),
+ })
+ }
+
+ dst.Spec.Storage = v1.ObjectStorageSpec{
+ Schemas: schemas,
+ Secret: v1.ObjectStorageSecretSpec{
+ Type: v1.ObjectStorageSecretType(src.Spec.Storage.Secret.Type),
+ Name: src.Spec.Storage.Secret.Name,
+ },
+ TLS: storageTLS,
+ }
+
+ if src.Spec.StorageClassName != "" {
+ dst.Spec.StorageClassName = src.Spec.StorageClassName
+ }
+
+ if src.Spec.ReplicationFactor != 0 {
+ dst.Spec.ReplicationFactor = src.Spec.ReplicationFactor
+ }
+
+ if src.Spec.Rules != nil {
+ dst.Spec.Rules = &v1.RulesSpec{
+ Enabled: src.Spec.Rules.Enabled,
+ Selector: src.Spec.Rules.Selector,
+ NamespaceSelector: src.Spec.Rules.NamespaceSelector,
+ }
+ }
+
+ if src.Spec.Limits != nil {
+ dst.Spec.Limits = &v1.LimitsSpec{}
+
+ if src.Spec.Limits.Global != nil {
+ dst.Spec.Limits.Global = &v1.LimitsTemplateSpec{}
+
+ if src.Spec.Limits.Global.IngestionLimits != nil {
+ dst.Spec.Limits.Global.IngestionLimits = &v1.IngestionLimitSpec{
+ IngestionRate: src.Spec.Limits.Global.IngestionLimits.IngestionRate,
+ IngestionBurstSize: src.Spec.Limits.Global.IngestionLimits.IngestionBurstSize,
+ MaxLabelNameLength: src.Spec.Limits.Global.IngestionLimits.MaxLabelNameLength,
+ MaxLabelValueLength: src.Spec.Limits.Global.IngestionLimits.MaxLabelValueLength,
+ MaxLabelNamesPerSeries: src.Spec.Limits.Global.IngestionLimits.MaxLabelNamesPerSeries,
+ MaxGlobalStreamsPerTenant: src.Spec.Limits.Global.IngestionLimits.MaxGlobalStreamsPerTenant,
+ MaxLineSize: src.Spec.Limits.Global.IngestionLimits.MaxLineSize,
+ }
+ }
+
+ if src.Spec.Limits.Global.QueryLimits != nil {
+ dst.Spec.Limits.Global.QueryLimits = &v1.QueryLimitSpec{
+ MaxEntriesLimitPerQuery: src.Spec.Limits.Global.QueryLimits.MaxEntriesLimitPerQuery,
+ MaxChunksPerQuery: src.Spec.Limits.Global.QueryLimits.MaxChunksPerQuery,
+ MaxQuerySeries: src.Spec.Limits.Global.QueryLimits.MaxQuerySeries,
+ }
+ }
+ }
+
+ if len(src.Spec.Limits.Tenants) > 0 {
+ dst.Spec.Limits.Tenants = make(map[string]v1.LimitsTemplateSpec)
+ }
+
+ for tenant, srcSpec := range src.Spec.Limits.Tenants {
+ dstSpec := v1.LimitsTemplateSpec{}
+
+ if srcSpec.IngestionLimits != nil {
+ dstSpec.IngestionLimits = &v1.IngestionLimitSpec{
+ IngestionRate: srcSpec.IngestionLimits.IngestionRate,
+ IngestionBurstSize: srcSpec.IngestionLimits.IngestionBurstSize,
+ MaxLabelNameLength: srcSpec.IngestionLimits.MaxLabelNameLength,
+ MaxLabelValueLength: srcSpec.IngestionLimits.MaxLabelValueLength,
+ MaxLabelNamesPerSeries: srcSpec.IngestionLimits.MaxLabelNamesPerSeries,
+ MaxGlobalStreamsPerTenant: srcSpec.IngestionLimits.MaxGlobalStreamsPerTenant,
+ MaxLineSize: srcSpec.IngestionLimits.MaxLineSize,
+ }
+ }
+
+ if srcSpec.QueryLimits != nil {
+ dstSpec.QueryLimits = &v1.QueryLimitSpec{
+ MaxEntriesLimitPerQuery: srcSpec.QueryLimits.MaxEntriesLimitPerQuery,
+ MaxChunksPerQuery: srcSpec.QueryLimits.MaxChunksPerQuery,
+ MaxQuerySeries: srcSpec.QueryLimits.MaxQuerySeries,
+ }
+ }
+
+ dst.Spec.Limits.Tenants[tenant] = dstSpec
+ }
+ }
+
+ if src.Spec.Template != nil {
+ dst.Spec.Template = &v1.LokiTemplateSpec{}
+ if src.Spec.Template.Compactor != nil {
+ dst.Spec.Template.Compactor = &v1.LokiComponentSpec{
+ Replicas: src.Spec.Template.Compactor.Replicas,
+ NodeSelector: src.Spec.Template.Compactor.NodeSelector,
+ Tolerations: src.Spec.Template.Compactor.Tolerations,
+ }
+ }
+ if src.Spec.Template.Distributor != nil {
+ dst.Spec.Template.Distributor = &v1.LokiComponentSpec{
+ Replicas: src.Spec.Template.Distributor.Replicas,
+ NodeSelector: src.Spec.Template.Distributor.NodeSelector,
+ Tolerations: src.Spec.Template.Distributor.Tolerations,
+ }
+ }
+ if src.Spec.Template.Ingester != nil {
+ dst.Spec.Template.Ingester = &v1.LokiComponentSpec{
+ Replicas: src.Spec.Template.Ingester.Replicas,
+ NodeSelector: src.Spec.Template.Ingester.NodeSelector,
+ Tolerations: src.Spec.Template.Ingester.Tolerations,
+ }
+ }
+ if src.Spec.Template.Querier != nil {
+ dst.Spec.Template.Querier = &v1.LokiComponentSpec{
+ Replicas: src.Spec.Template.Querier.Replicas,
+ NodeSelector: src.Spec.Template.Querier.NodeSelector,
+ Tolerations: src.Spec.Template.Querier.Tolerations,
+ }
+ }
+ if src.Spec.Template.QueryFrontend != nil {
+ dst.Spec.Template.QueryFrontend = &v1.LokiComponentSpec{
+ Replicas: src.Spec.Template.QueryFrontend.Replicas,
+ NodeSelector: src.Spec.Template.QueryFrontend.NodeSelector,
+ Tolerations: src.Spec.Template.QueryFrontend.Tolerations,
+ }
+ }
+ if src.Spec.Template.Gateway != nil {
+ dst.Spec.Template.Gateway = &v1.LokiComponentSpec{
+ Replicas: src.Spec.Template.Gateway.Replicas,
+ NodeSelector: src.Spec.Template.Gateway.NodeSelector,
+ Tolerations: src.Spec.Template.Gateway.Tolerations,
+ }
+ }
+ if src.Spec.Template.IndexGateway != nil {
+ dst.Spec.Template.IndexGateway = &v1.LokiComponentSpec{
+ Replicas: src.Spec.Template.IndexGateway.Replicas,
+ NodeSelector: src.Spec.Template.IndexGateway.NodeSelector,
+ Tolerations: src.Spec.Template.IndexGateway.Tolerations,
+ }
+ }
+ if src.Spec.Template.Ruler != nil {
+ dst.Spec.Template.Ruler = &v1.LokiComponentSpec{
+ Replicas: src.Spec.Template.Ruler.Replicas,
+ NodeSelector: src.Spec.Template.Ruler.NodeSelector,
+ Tolerations: src.Spec.Template.Ruler.Tolerations,
+ }
+ }
+ }
+
+ if src.Spec.Tenants != nil {
+ dst.Spec.Tenants = &v1.TenantsSpec{
+ Mode: v1.ModeType(src.Spec.Tenants.Mode),
+ }
+
+ for _, srcAuth := range src.Spec.Tenants.Authentication {
+ dstAuth := v1.AuthenticationSpec{
+ TenantName: srcAuth.TenantName,
+ TenantID: srcAuth.TenantID,
+ }
+
+ if srcAuth.OIDC != nil {
+ dstAuth.OIDC = &v1.OIDCSpec{
+ Secret: &v1.TenantSecretSpec{
+ Name: srcAuth.OIDC.Secret.Name,
+ },
+ IssuerURL: srcAuth.OIDC.IssuerURL,
+ RedirectURL: srcAuth.OIDC.RedirectURL,
+ GroupClaim: srcAuth.OIDC.GroupClaim,
+ UsernameClaim: srcAuth.OIDC.UsernameClaim,
+ }
+ }
+
+ dst.Spec.Tenants.Authentication = append(dst.Spec.Tenants.Authentication, dstAuth)
+ }
+
+ if src.Spec.Tenants.Authorization != nil {
+ dstAuthz := &v1.AuthorizationSpec{}
+
+ if src.Spec.Tenants.Authorization.OPA != nil {
+ dstAuthz.OPA = &v1.OPASpec{
+ URL: src.Spec.Tenants.Authorization.OPA.URL,
+ }
+ }
+
+ for _, srcRole := range src.Spec.Tenants.Authorization.Roles {
+ dstRole := v1.RoleSpec{
+ Name: srcRole.Name,
+ Resources: srcRole.Resources,
+ Tenants: srcRole.Tenants,
+ Permissions: []v1.PermissionType{},
+ }
+
+ for _, perm := range srcRole.Permissions {
+ dstRole.Permissions = append(dstRole.Permissions, v1.PermissionType(perm))
+ }
+
+ dstAuthz.Roles = append(dstAuthz.Roles, dstRole)
+ }
+
+ for _, srcBinding := range src.Spec.Tenants.Authorization.RoleBindings {
+ dstBinding := v1.RoleBindingsSpec{
+ Name: srcBinding.Name,
+ Roles: srcBinding.Roles,
+ }
+
+ for _, srcSubject := range srcBinding.Subjects {
+ dstBinding.Subjects = append(dstBinding.Subjects, v1.Subject{
+ Name: srcSubject.Name,
+ Kind: v1.SubjectKind(srcSubject.Kind),
+ })
+ }
+
+ dstAuthz.RoleBindings = append(dstAuthz.RoleBindings, dstBinding)
+ }
+
+ dst.Spec.Tenants.Authorization = dstAuthz
+ }
+ }
+
+ return nil
+}
+
+// ConvertFrom converts from the Hub version (v1) to this version (v1beta1).
+// nolint:golint
+func (dst *LokiStack) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*v1.LokiStack)
+
+ dst.ObjectMeta = src.ObjectMeta
+ dst.Status.Conditions = src.Status.Conditions
+ dst.Status.Components = LokiStackComponentStatus{
+ Compactor: PodStatusMap(src.Status.Components.Compactor),
+ Distributor: PodStatusMap(src.Status.Components.Distributor),
+ Ingester: PodStatusMap(src.Status.Components.Ingester),
+ Querier: PodStatusMap(src.Status.Components.Querier),
+ QueryFrontend: PodStatusMap(src.Status.Components.QueryFrontend),
+ IndexGateway: PodStatusMap(src.Status.Components.IndexGateway),
+ Ruler: PodStatusMap(src.Status.Components.Ruler),
+ Gateway: PodStatusMap(src.Status.Components.Gateway),
+ }
+
+ var statusSchemas []ObjectStorageSchema
+ for _, s := range src.Status.Storage.Schemas {
+ statusSchemas = append(statusSchemas, ObjectStorageSchema{
+ Version: ObjectStorageSchemaVersion(s.Version),
+ EffectiveDate: StorageSchemaEffectiveDate(s.EffectiveDate),
+ })
+ }
+ dst.Status.Storage = LokiStackStorageStatus{Schemas: statusSchemas}
+
+ if src.Spec.ManagementState != "" {
+ dst.Spec.ManagementState = ManagementStateType(src.Spec.ManagementState)
+ }
+
+ if src.Spec.Size != "" {
+ dst.Spec.Size = LokiStackSizeType(src.Spec.Size)
+ }
+
+ var storageTLS *ObjectStorageTLSSpec
+ if src.Spec.Storage.TLS != nil {
+ storageTLS = &ObjectStorageTLSSpec{
+ CA: src.Spec.Storage.TLS.CA,
+ }
+ }
+
+ var schemas []ObjectStorageSchema
+ for _, s := range src.Spec.Storage.Schemas {
+ schemas = append(schemas, ObjectStorageSchema{
+ EffectiveDate: StorageSchemaEffectiveDate(s.EffectiveDate),
+ Version: ObjectStorageSchemaVersion(s.Version),
+ })
+ }
+
+ dst.Spec.Storage = ObjectStorageSpec{
+ Schemas: schemas,
+ Secret: ObjectStorageSecretSpec{
+ Type: ObjectStorageSecretType(src.Spec.Storage.Secret.Type),
+ Name: src.Spec.Storage.Secret.Name,
+ },
+ TLS: storageTLS,
+ }
+
+ if src.Spec.StorageClassName != "" {
+ dst.Spec.StorageClassName = src.Spec.StorageClassName
+ }
+
+ if src.Spec.ReplicationFactor != 0 {
+ dst.Spec.ReplicationFactor = src.Spec.ReplicationFactor
+ }
+
+ if src.Spec.Rules != nil {
+ dst.Spec.Rules = &RulesSpec{
+ Enabled: src.Spec.Rules.Enabled,
+ Selector: src.Spec.Rules.Selector,
+ NamespaceSelector: src.Spec.Rules.NamespaceSelector,
+ }
+ }
+
+ if src.Spec.Limits != nil {
+ dst.Spec.Limits = &LimitsSpec{}
+
+ if src.Spec.Limits.Global != nil {
+ dst.Spec.Limits.Global = &LimitsTemplateSpec{}
+
+ if src.Spec.Limits.Global.IngestionLimits != nil {
+ dst.Spec.Limits.Global.IngestionLimits = &IngestionLimitSpec{
+ IngestionRate: src.Spec.Limits.Global.IngestionLimits.IngestionRate,
+ IngestionBurstSize: src.Spec.Limits.Global.IngestionLimits.IngestionBurstSize,
+ MaxLabelNameLength: src.Spec.Limits.Global.IngestionLimits.MaxLabelNameLength,
+ MaxLabelValueLength: src.Spec.Limits.Global.IngestionLimits.MaxLabelValueLength,
+ MaxLabelNamesPerSeries: src.Spec.Limits.Global.IngestionLimits.MaxLabelNamesPerSeries,
+ MaxGlobalStreamsPerTenant: src.Spec.Limits.Global.IngestionLimits.MaxGlobalStreamsPerTenant,
+ MaxLineSize: src.Spec.Limits.Global.IngestionLimits.MaxLineSize,
+ }
+ }
+
+ if src.Spec.Limits.Global.QueryLimits != nil {
+ dst.Spec.Limits.Global.QueryLimits = &QueryLimitSpec{
+ MaxEntriesLimitPerQuery: src.Spec.Limits.Global.QueryLimits.MaxEntriesLimitPerQuery,
+ MaxChunksPerQuery: src.Spec.Limits.Global.QueryLimits.MaxChunksPerQuery,
+ MaxQuerySeries: src.Spec.Limits.Global.QueryLimits.MaxQuerySeries,
+ }
+ }
+ }
+
+ if len(src.Spec.Limits.Tenants) > 0 {
+ dst.Spec.Limits.Tenants = make(map[string]LimitsTemplateSpec)
+ }
+
+ for tenant, srcSpec := range src.Spec.Limits.Tenants {
+ dstSpec := LimitsTemplateSpec{}
+
+ if srcSpec.IngestionLimits != nil {
+ dstSpec.IngestionLimits = &IngestionLimitSpec{
+ IngestionRate: srcSpec.IngestionLimits.IngestionRate,
+ IngestionBurstSize: srcSpec.IngestionLimits.IngestionBurstSize,
+ MaxLabelNameLength: srcSpec.IngestionLimits.MaxLabelNameLength,
+ MaxLabelValueLength: srcSpec.IngestionLimits.MaxLabelValueLength,
+ MaxLabelNamesPerSeries: srcSpec.IngestionLimits.MaxLabelNamesPerSeries,
+ MaxGlobalStreamsPerTenant: srcSpec.IngestionLimits.MaxGlobalStreamsPerTenant,
+ MaxLineSize: srcSpec.IngestionLimits.MaxLineSize,
+ }
+ }
+
+ if srcSpec.QueryLimits != nil {
+ dstSpec.QueryLimits = &QueryLimitSpec{
+ MaxEntriesLimitPerQuery: srcSpec.QueryLimits.MaxEntriesLimitPerQuery,
+ MaxChunksPerQuery: srcSpec.QueryLimits.MaxChunksPerQuery,
+ MaxQuerySeries: srcSpec.QueryLimits.MaxQuerySeries,
+ }
+ }
+
+ dst.Spec.Limits.Tenants[tenant] = dstSpec
+ }
+ }
+
+ if src.Spec.Template != nil {
+ dst.Spec.Template = &LokiTemplateSpec{}
+ if src.Spec.Template.Compactor != nil {
+ dst.Spec.Template.Compactor = &LokiComponentSpec{
+ Replicas: src.Spec.Template.Compactor.Replicas,
+ NodeSelector: src.Spec.Template.Compactor.NodeSelector,
+ Tolerations: src.Spec.Template.Compactor.Tolerations,
+ }
+ }
+ if src.Spec.Template.Distributor != nil {
+ dst.Spec.Template.Distributor = &LokiComponentSpec{
+ Replicas: src.Spec.Template.Distributor.Replicas,
+ NodeSelector: src.Spec.Template.Distributor.NodeSelector,
+ Tolerations: src.Spec.Template.Distributor.Tolerations,
+ }
+ }
+ if src.Spec.Template.Ingester != nil {
+ dst.Spec.Template.Ingester = &LokiComponentSpec{
+ Replicas: src.Spec.Template.Ingester.Replicas,
+ NodeSelector: src.Spec.Template.Ingester.NodeSelector,
+ Tolerations: src.Spec.Template.Ingester.Tolerations,
+ }
+ }
+ if src.Spec.Template.Querier != nil {
+ dst.Spec.Template.Querier = &LokiComponentSpec{
+ Replicas: src.Spec.Template.Querier.Replicas,
+ NodeSelector: src.Spec.Template.Querier.NodeSelector,
+ Tolerations: src.Spec.Template.Querier.Tolerations,
+ }
+ }
+ if src.Spec.Template.QueryFrontend != nil {
+ dst.Spec.Template.QueryFrontend = &LokiComponentSpec{
+ Replicas: src.Spec.Template.QueryFrontend.Replicas,
+ NodeSelector: src.Spec.Template.QueryFrontend.NodeSelector,
+ Tolerations: src.Spec.Template.QueryFrontend.Tolerations,
+ }
+ }
+ if src.Spec.Template.Gateway != nil {
+ dst.Spec.Template.Gateway = &LokiComponentSpec{
+ Replicas: src.Spec.Template.Gateway.Replicas,
+ NodeSelector: src.Spec.Template.Gateway.NodeSelector,
+ Tolerations: src.Spec.Template.Gateway.Tolerations,
+ }
+ }
+ if src.Spec.Template.IndexGateway != nil {
+ dst.Spec.Template.IndexGateway = &LokiComponentSpec{
+ Replicas: src.Spec.Template.IndexGateway.Replicas,
+ NodeSelector: src.Spec.Template.IndexGateway.NodeSelector,
+ Tolerations: src.Spec.Template.IndexGateway.Tolerations,
+ }
+ }
+ if src.Spec.Template.Ruler != nil {
+ dst.Spec.Template.Ruler = &LokiComponentSpec{
+ Replicas: src.Spec.Template.Ruler.Replicas,
+ NodeSelector: src.Spec.Template.Ruler.NodeSelector,
+ Tolerations: src.Spec.Template.Ruler.Tolerations,
+ }
+ }
+ }
+
+ if src.Spec.Tenants != nil {
+ dst.Spec.Tenants = &TenantsSpec{
+ Mode: ModeType(src.Spec.Tenants.Mode),
+ }
+
+ for _, srcAuth := range src.Spec.Tenants.Authentication {
+ dstAuth := AuthenticationSpec{
+ TenantName: srcAuth.TenantName,
+ TenantID: srcAuth.TenantID,
+ }
+
+ if srcAuth.OIDC != nil {
+ dstAuth.OIDC = &OIDCSpec{
+ Secret: &TenantSecretSpec{
+ Name: srcAuth.OIDC.Secret.Name,
+ },
+ IssuerURL: srcAuth.OIDC.IssuerURL,
+ RedirectURL: srcAuth.OIDC.RedirectURL,
+ GroupClaim: srcAuth.OIDC.GroupClaim,
+ UsernameClaim: srcAuth.OIDC.UsernameClaim,
+ }
+ }
+
+ dst.Spec.Tenants.Authentication = append(dst.Spec.Tenants.Authentication, dstAuth)
+ }
+
+ if src.Spec.Tenants.Authorization != nil {
+ dstAuthz := &AuthorizationSpec{}
+
+ if src.Spec.Tenants.Authorization.OPA != nil {
+ dstAuthz.OPA = &OPASpec{
+ URL: src.Spec.Tenants.Authorization.OPA.URL,
+ }
+ }
+
+ for _, srcRole := range src.Spec.Tenants.Authorization.Roles {
+ dstRole := RoleSpec{
+ Name: srcRole.Name,
+ Resources: srcRole.Resources,
+ Tenants: srcRole.Tenants,
+ Permissions: []PermissionType{},
+ }
+
+ for _, perm := range srcRole.Permissions {
+ dstRole.Permissions = append(dstRole.Permissions, PermissionType(perm))
+ }
+
+ dstAuthz.Roles = append(dstAuthz.Roles, dstRole)
+ }
+
+ for _, srcBinding := range src.Spec.Tenants.Authorization.RoleBindings {
+ dstBinding := RoleBindingsSpec{
+ Name: srcBinding.Name,
+ Roles: srcBinding.Roles,
+ }
+
+ for _, srcSubject := range srcBinding.Subjects {
+ dstBinding.Subjects = append(dstBinding.Subjects, Subject{
+ Name: srcSubject.Name,
+ Kind: SubjectKind(srcSubject.Kind),
+ })
+ }
+
+ dstAuthz.RoleBindings = append(dstAuthz.RoleBindings, dstBinding)
+ }
+
+ dst.Spec.Tenants.Authorization = dstAuthz
+ }
+ }
+
+ return nil
+}
diff --git a/operator/apis/loki/v1beta1/lokistack_types_test.go b/operator/apis/loki/v1beta1/lokistack_types_test.go
new file mode 100644
index 0000000000000..04a8b62948007
--- /dev/null
+++ b/operator/apis/loki/v1beta1/lokistack_types_test.go
@@ -0,0 +1,1256 @@
+package v1beta1_test
+
+import (
+ "testing"
+
+ v1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ "github.com/stretchr/testify/require"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestConvertToV1(t *testing.T) {
+ tt := []struct {
+ desc string
+ src v1beta1.LokiStack
+ want v1.LokiStack
+ }{
+ {
+ desc: "empty src(v1beta1) and dst(v1) lokistack",
+ src: v1beta1.LokiStack{},
+ want: v1.LokiStack{},
+ },
+ {
+ desc: "full conversion of src(v1beta1) to dst(v1) lokistack",
+ src: v1beta1.LokiStack{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "lokistack-dev",
+ Namespace: "mercury",
+ Labels: map[string]string{
+ "app": "loki",
+ "part-of": "lokistack",
+ },
+ Annotations: map[string]string{
+ "discoveredAt": "2022-06-28",
+ },
+ },
+ Spec: v1beta1.LokiStackSpec{
+ ManagementState: v1beta1.ManagementStateManaged,
+ Size: v1beta1.SizeOneXMedium,
+ Storage: v1beta1.ObjectStorageSpec{
+ Schemas: []v1beta1.ObjectStorageSchema{
+ {
+ EffectiveDate: v1beta1.StorageSchemaEffectiveDate("2020-11-20"),
+ Version: v1beta1.ObjectStorageSchemaV11,
+ },
+ {
+ EffectiveDate: v1beta1.StorageSchemaEffectiveDate("2021-11-20"),
+ Version: v1beta1.ObjectStorageSchemaV12,
+ },
+ },
+ Secret: v1beta1.ObjectStorageSecretSpec{
+ Type: v1beta1.ObjectStorageSecretS3,
+ Name: "test",
+ },
+ TLS: &v1beta1.ObjectStorageTLSSpec{
+ CA: "test-ca",
+ },
+ },
+ StorageClassName: "standard",
+ ReplicationFactor: 2,
+ Rules: &v1beta1.RulesSpec{
+ Enabled: true,
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "key": "Value",
+ },
+ },
+ NamespaceSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "key": "Value",
+ },
+ },
+ },
+ Limits: &v1beta1.LimitsSpec{
+ Global: &v1beta1.LimitsTemplateSpec{
+ IngestionLimits: &v1beta1.IngestionLimitSpec{
+ IngestionRate: 100,
+ IngestionBurstSize: 200,
+ MaxLabelNameLength: 1000,
+ MaxLabelValueLength: 1000,
+ MaxLabelNamesPerSeries: 1000,
+ MaxGlobalStreamsPerTenant: 10000,
+ MaxLineSize: 512,
+ },
+ QueryLimits: &v1beta1.QueryLimitSpec{
+ MaxEntriesLimitPerQuery: 1000,
+ MaxChunksPerQuery: 1000,
+ MaxQuerySeries: 10000,
+ },
+ },
+ Tenants: map[string]v1beta1.LimitsTemplateSpec{
+ "tenant-a": {
+ IngestionLimits: &v1beta1.IngestionLimitSpec{
+ IngestionRate: 100,
+ IngestionBurstSize: 200,
+ MaxLabelNameLength: 1000,
+ MaxLabelValueLength: 1000,
+ MaxLabelNamesPerSeries: 1000,
+ MaxGlobalStreamsPerTenant: 10000,
+ MaxLineSize: 512,
+ },
+ QueryLimits: &v1beta1.QueryLimitSpec{
+ MaxEntriesLimitPerQuery: 1000,
+ MaxChunksPerQuery: 1000,
+ MaxQuerySeries: 10000,
+ },
+ },
+ "tenant-b": {
+ IngestionLimits: &v1beta1.IngestionLimitSpec{
+ IngestionRate: 100,
+ IngestionBurstSize: 200,
+ MaxLabelNameLength: 1000,
+ MaxLabelValueLength: 1000,
+ MaxLabelNamesPerSeries: 1000,
+ MaxGlobalStreamsPerTenant: 10000,
+ MaxLineSize: 512,
+ },
+ QueryLimits: &v1beta1.QueryLimitSpec{
+ MaxEntriesLimitPerQuery: 1000,
+ MaxChunksPerQuery: 1000,
+ MaxQuerySeries: 10000,
+ },
+ },
+ },
+ },
+ Template: &v1beta1.LokiTemplateSpec{
+ Compactor: &v1beta1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ Distributor: &v1beta1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ Ingester: &v1beta1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ Querier: &v1beta1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ QueryFrontend: &v1beta1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ Gateway: &v1beta1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ IndexGateway: &v1beta1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ Ruler: &v1beta1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ },
+ Tenants: &v1beta1.TenantsSpec{
+ Mode: v1beta1.Dynamic,
+ Authentication: []v1beta1.AuthenticationSpec{
+ {
+ TenantName: "tenant-a",
+ TenantID: "tenant-a",
+ OIDC: &v1beta1.OIDCSpec{
+ Secret: &v1beta1.TenantSecretSpec{
+ Name: "tenant-a-secret",
+ },
+ IssuerURL: "http://go-to-issuer",
+ RedirectURL: "http://bring-me-back",
+ GroupClaim: "workgroups",
+ UsernameClaim: "email",
+ },
+ },
+ {
+ TenantName: "tenant-b",
+ TenantID: "tenant-b",
+ OIDC: &v1beta1.OIDCSpec{
+ Secret: &v1beta1.TenantSecretSpec{
+ Name: "tenant-a-secret",
+ },
+ IssuerURL: "http://go-to-issuer",
+ RedirectURL: "http://bring-me-back",
+ GroupClaim: "workgroups",
+ UsernameClaim: "email",
+ },
+ },
+ },
+ Authorization: &v1beta1.AuthorizationSpec{
+ OPA: &v1beta1.OPASpec{
+ URL: "http://authorize-me/opa",
+ },
+ Roles: []v1beta1.RoleSpec{
+ {
+ Name: "ro-role",
+ Resources: []string{"logs"},
+ Tenants: []string{"tenant-a", "tenant-b"},
+ Permissions: []v1beta1.PermissionType{v1beta1.Read},
+ },
+ {
+ Name: "rw-role",
+ Resources: []string{"logs"},
+ Tenants: []string{"tenant-a", "tenant-b"},
+ Permissions: []v1beta1.PermissionType{v1beta1.Read, v1beta1.Write},
+ },
+ },
+ RoleBindings: []v1beta1.RoleBindingsSpec{
+ {
+ Name: "bind-me",
+ Roles: []string{"ro-role"},
+ Subjects: []v1beta1.Subject{
+ {
+ Name: "a-user",
+ Kind: v1beta1.User,
+ },
+ {
+ Name: "a-group",
+ Kind: v1beta1.Group,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ Status: v1beta1.LokiStackStatus{
+ Conditions: []metav1.Condition{
+ {
+ Type: string(v1beta1.ConditionReady),
+ Status: metav1.ConditionTrue,
+ },
+ {
+ Type: string(v1beta1.ConditionPending),
+ Status: metav1.ConditionFalse,
+ },
+ },
+ Components: v1beta1.LokiStackComponentStatus{
+ Compactor: v1beta1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ Distributor: v1beta1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ Ingester: v1beta1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ Querier: v1beta1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ QueryFrontend: v1beta1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ IndexGateway: v1beta1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ Ruler: v1beta1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ Gateway: v1beta1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ },
+ Storage: v1beta1.LokiStackStorageStatus{
+ Schemas: []v1beta1.ObjectStorageSchema{
+ {
+ Version: v1beta1.ObjectStorageSchemaV11,
+ EffectiveDate: "2020-06-01",
+ },
+ },
+ },
+ },
+ },
+ want: v1.LokiStack{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "lokistack-dev",
+ Namespace: "mercury",
+ Labels: map[string]string{
+ "app": "loki",
+ "part-of": "lokistack",
+ },
+ Annotations: map[string]string{
+ "discoveredAt": "2022-06-28",
+ },
+ },
+ Spec: v1.LokiStackSpec{
+ ManagementState: v1.ManagementStateManaged,
+ Size: v1.SizeOneXMedium,
+ Storage: v1.ObjectStorageSpec{
+ Schemas: []v1.ObjectStorageSchema{
+ {
+ EffectiveDate: v1.StorageSchemaEffectiveDate("2020-11-20"),
+ Version: v1.ObjectStorageSchemaV11,
+ },
+ {
+ EffectiveDate: v1.StorageSchemaEffectiveDate("2021-11-20"),
+ Version: v1.ObjectStorageSchemaV12,
+ },
+ },
+ Secret: v1.ObjectStorageSecretSpec{
+ Type: v1.ObjectStorageSecretS3,
+ Name: "test",
+ },
+ TLS: &v1.ObjectStorageTLSSpec{
+ CA: "test-ca",
+ },
+ },
+ StorageClassName: "standard",
+ ReplicationFactor: 2,
+ Rules: &v1.RulesSpec{
+ Enabled: true,
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "key": "Value",
+ },
+ },
+ NamespaceSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "key": "Value",
+ },
+ },
+ },
+ Limits: &v1.LimitsSpec{
+ Global: &v1.LimitsTemplateSpec{
+ IngestionLimits: &v1.IngestionLimitSpec{
+ IngestionRate: 100,
+ IngestionBurstSize: 200,
+ MaxLabelNameLength: 1000,
+ MaxLabelValueLength: 1000,
+ MaxLabelNamesPerSeries: 1000,
+ MaxGlobalStreamsPerTenant: 10000,
+ MaxLineSize: 512,
+ },
+ QueryLimits: &v1.QueryLimitSpec{
+ MaxEntriesLimitPerQuery: 1000,
+ MaxChunksPerQuery: 1000,
+ MaxQuerySeries: 10000,
+ },
+ },
+ Tenants: map[string]v1.LimitsTemplateSpec{
+ "tenant-a": {
+ IngestionLimits: &v1.IngestionLimitSpec{
+ IngestionRate: 100,
+ IngestionBurstSize: 200,
+ MaxLabelNameLength: 1000,
+ MaxLabelValueLength: 1000,
+ MaxLabelNamesPerSeries: 1000,
+ MaxGlobalStreamsPerTenant: 10000,
+ MaxLineSize: 512,
+ },
+ QueryLimits: &v1.QueryLimitSpec{
+ MaxEntriesLimitPerQuery: 1000,
+ MaxChunksPerQuery: 1000,
+ MaxQuerySeries: 10000,
+ },
+ },
+ "tenant-b": {
+ IngestionLimits: &v1.IngestionLimitSpec{
+ IngestionRate: 100,
+ IngestionBurstSize: 200,
+ MaxLabelNameLength: 1000,
+ MaxLabelValueLength: 1000,
+ MaxLabelNamesPerSeries: 1000,
+ MaxGlobalStreamsPerTenant: 10000,
+ MaxLineSize: 512,
+ },
+ QueryLimits: &v1.QueryLimitSpec{
+ MaxEntriesLimitPerQuery: 1000,
+ MaxChunksPerQuery: 1000,
+ MaxQuerySeries: 10000,
+ },
+ },
+ },
+ },
+ Template: &v1.LokiTemplateSpec{
+ Compactor: &v1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ Distributor: &v1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ Ingester: &v1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ Querier: &v1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ QueryFrontend: &v1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ Gateway: &v1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ IndexGateway: &v1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ Ruler: &v1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ },
+ Tenants: &v1.TenantsSpec{
+ Mode: v1.Dynamic,
+ Authentication: []v1.AuthenticationSpec{
+ {
+ TenantName: "tenant-a",
+ TenantID: "tenant-a",
+ OIDC: &v1.OIDCSpec{
+ Secret: &v1.TenantSecretSpec{
+ Name: "tenant-a-secret",
+ },
+ IssuerURL: "http://go-to-issuer",
+ RedirectURL: "http://bring-me-back",
+ GroupClaim: "workgroups",
+ UsernameClaim: "email",
+ },
+ },
+ {
+ TenantName: "tenant-b",
+ TenantID: "tenant-b",
+ OIDC: &v1.OIDCSpec{
+ Secret: &v1.TenantSecretSpec{
+ Name: "tenant-a-secret",
+ },
+ IssuerURL: "http://go-to-issuer",
+ RedirectURL: "http://bring-me-back",
+ GroupClaim: "workgroups",
+ UsernameClaim: "email",
+ },
+ },
+ },
+ Authorization: &v1.AuthorizationSpec{
+ OPA: &v1.OPASpec{
+ URL: "http://authorize-me/opa",
+ },
+ Roles: []v1.RoleSpec{
+ {
+ Name: "ro-role",
+ Resources: []string{"logs"},
+ Tenants: []string{"tenant-a", "tenant-b"},
+ Permissions: []v1.PermissionType{v1.Read},
+ },
+ {
+ Name: "rw-role",
+ Resources: []string{"logs"},
+ Tenants: []string{"tenant-a", "tenant-b"},
+ Permissions: []v1.PermissionType{v1.Read, v1.Write},
+ },
+ },
+ RoleBindings: []v1.RoleBindingsSpec{
+ {
+ Name: "bind-me",
+ Roles: []string{"ro-role"},
+ Subjects: []v1.Subject{
+ {
+ Name: "a-user",
+ Kind: v1.User,
+ },
+ {
+ Name: "a-group",
+ Kind: v1.Group,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ Status: v1.LokiStackStatus{
+ Conditions: []metav1.Condition{
+ {
+ Type: string(v1.ConditionReady),
+ Status: metav1.ConditionTrue,
+ },
+ {
+ Type: string(v1.ConditionPending),
+ Status: metav1.ConditionFalse,
+ },
+ },
+ Components: v1.LokiStackComponentStatus{
+ Compactor: v1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ Distributor: v1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ Ingester: v1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ Querier: v1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ QueryFrontend: v1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ IndexGateway: v1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ Ruler: v1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ Gateway: v1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ },
+ Storage: v1.LokiStackStorageStatus{
+ Schemas: []v1.ObjectStorageSchema{
+ {
+ Version: v1.ObjectStorageSchemaV11,
+ EffectiveDate: "2020-06-01",
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+
+ for _, tc := range tt {
+ tc := tc
+ t.Run(tc.desc, func(t *testing.T) {
+ t.Parallel()
+ dst := v1.LokiStack{}
+ err := tc.src.ConvertTo(&dst)
+ require.NoError(t, err)
+			require.Equal(t, tc.want, dst)
+ })
+ }
+}
+
+func TestConvertFromV1(t *testing.T) {
+ tt := []struct {
+ desc string
+ src v1.LokiStack
+ want v1beta1.LokiStack
+ }{
+ {
+ desc: "empty src(v1) and dst(v1beta1) lokistack",
+ src: v1.LokiStack{},
+ want: v1beta1.LokiStack{},
+ },
+ {
+ desc: "full conversion of src(v1) to dst(v1beta1) lokistack",
+ src: v1.LokiStack{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "lokistack-dev",
+ Namespace: "mercury",
+ Labels: map[string]string{
+ "app": "loki",
+ "part-of": "lokistack",
+ },
+ Annotations: map[string]string{
+ "discoveredAt": "2022-06-28",
+ },
+ },
+ Spec: v1.LokiStackSpec{
+ ManagementState: v1.ManagementStateManaged,
+ Size: v1.SizeOneXMedium,
+ Storage: v1.ObjectStorageSpec{
+ Schemas: []v1.ObjectStorageSchema{
+ {
+ EffectiveDate: v1.StorageSchemaEffectiveDate("2020-11-20"),
+ Version: v1.ObjectStorageSchemaV11,
+ },
+ {
+ EffectiveDate: v1.StorageSchemaEffectiveDate("2021-11-20"),
+ Version: v1.ObjectStorageSchemaV12,
+ },
+ },
+ Secret: v1.ObjectStorageSecretSpec{
+ Type: v1.ObjectStorageSecretS3,
+ Name: "test",
+ },
+ TLS: &v1.ObjectStorageTLSSpec{
+ CA: "test-ca",
+ },
+ },
+ StorageClassName: "standard",
+ ReplicationFactor: 2,
+ Rules: &v1.RulesSpec{
+ Enabled: true,
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "key": "Value",
+ },
+ },
+ NamespaceSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "key": "Value",
+ },
+ },
+ },
+ Limits: &v1.LimitsSpec{
+ Global: &v1.LimitsTemplateSpec{
+ IngestionLimits: &v1.IngestionLimitSpec{
+ IngestionRate: 100,
+ IngestionBurstSize: 200,
+ MaxLabelNameLength: 1000,
+ MaxLabelValueLength: 1000,
+ MaxLabelNamesPerSeries: 1000,
+ MaxGlobalStreamsPerTenant: 10000,
+ MaxLineSize: 512,
+ },
+ QueryLimits: &v1.QueryLimitSpec{
+ MaxEntriesLimitPerQuery: 1000,
+ MaxChunksPerQuery: 1000,
+ MaxQuerySeries: 10000,
+ },
+ },
+ Tenants: map[string]v1.LimitsTemplateSpec{
+ "tenant-a": {
+ IngestionLimits: &v1.IngestionLimitSpec{
+ IngestionRate: 100,
+ IngestionBurstSize: 200,
+ MaxLabelNameLength: 1000,
+ MaxLabelValueLength: 1000,
+ MaxLabelNamesPerSeries: 1000,
+ MaxGlobalStreamsPerTenant: 10000,
+ MaxLineSize: 512,
+ },
+ QueryLimits: &v1.QueryLimitSpec{
+ MaxEntriesLimitPerQuery: 1000,
+ MaxChunksPerQuery: 1000,
+ MaxQuerySeries: 10000,
+ },
+ },
+ "tenant-b": {
+ IngestionLimits: &v1.IngestionLimitSpec{
+ IngestionRate: 100,
+ IngestionBurstSize: 200,
+ MaxLabelNameLength: 1000,
+ MaxLabelValueLength: 1000,
+ MaxLabelNamesPerSeries: 1000,
+ MaxGlobalStreamsPerTenant: 10000,
+ MaxLineSize: 512,
+ },
+ QueryLimits: &v1.QueryLimitSpec{
+ MaxEntriesLimitPerQuery: 1000,
+ MaxChunksPerQuery: 1000,
+ MaxQuerySeries: 10000,
+ },
+ },
+ },
+ },
+ Template: &v1.LokiTemplateSpec{
+ Compactor: &v1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ Distributor: &v1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ Ingester: &v1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ Querier: &v1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ QueryFrontend: &v1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ Gateway: &v1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ IndexGateway: &v1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ Ruler: &v1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ },
+ Tenants: &v1.TenantsSpec{
+ Mode: v1.Dynamic,
+ Authentication: []v1.AuthenticationSpec{
+ {
+ TenantName: "tenant-a",
+ TenantID: "tenant-a",
+ OIDC: &v1.OIDCSpec{
+ Secret: &v1.TenantSecretSpec{
+ Name: "tenant-a-secret",
+ },
+ IssuerURL: "http://go-to-issuer",
+ RedirectURL: "http://bring-me-back",
+ GroupClaim: "workgroups",
+ UsernameClaim: "email",
+ },
+ },
+ {
+ TenantName: "tenant-b",
+ TenantID: "tenant-b",
+ OIDC: &v1.OIDCSpec{
+ Secret: &v1.TenantSecretSpec{
+ Name: "tenant-a-secret",
+ },
+ IssuerURL: "http://go-to-issuer",
+ RedirectURL: "http://bring-me-back",
+ GroupClaim: "workgroups",
+ UsernameClaim: "email",
+ },
+ },
+ },
+ Authorization: &v1.AuthorizationSpec{
+ OPA: &v1.OPASpec{
+ URL: "http://authorize-me/opa",
+ },
+ Roles: []v1.RoleSpec{
+ {
+ Name: "ro-role",
+ Resources: []string{"logs"},
+ Tenants: []string{"tenant-a", "tenant-b"},
+ Permissions: []v1.PermissionType{v1.Read},
+ },
+ {
+ Name: "rw-role",
+ Resources: []string{"logs"},
+ Tenants: []string{"tenant-a", "tenant-b"},
+ Permissions: []v1.PermissionType{v1.Read, v1.Write},
+ },
+ },
+ RoleBindings: []v1.RoleBindingsSpec{
+ {
+ Name: "bind-me",
+ Roles: []string{"ro-role"},
+ Subjects: []v1.Subject{
+ {
+ Name: "a-user",
+ Kind: v1.User,
+ },
+ {
+ Name: "a-group",
+ Kind: v1.Group,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ Status: v1.LokiStackStatus{
+ Conditions: []metav1.Condition{
+ {
+ Type: string(v1.ConditionReady),
+ Status: metav1.ConditionTrue,
+ },
+ {
+ Type: string(v1.ConditionPending),
+ Status: metav1.ConditionFalse,
+ },
+ },
+ Components: v1.LokiStackComponentStatus{
+ Compactor: v1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ Distributor: v1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ Ingester: v1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ Querier: v1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ QueryFrontend: v1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ IndexGateway: v1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ Ruler: v1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ Gateway: v1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ },
+ Storage: v1.LokiStackStorageStatus{
+ Schemas: []v1.ObjectStorageSchema{
+ {
+ Version: v1.ObjectStorageSchemaV11,
+ EffectiveDate: "2020-06-01",
+ },
+ },
+ },
+ },
+ },
+ want: v1beta1.LokiStack{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "lokistack-dev",
+ Namespace: "mercury",
+ Labels: map[string]string{
+ "app": "loki",
+ "part-of": "lokistack",
+ },
+ Annotations: map[string]string{
+ "discoveredAt": "2022-06-28",
+ },
+ },
+ Spec: v1beta1.LokiStackSpec{
+ ManagementState: v1beta1.ManagementStateManaged,
+ Size: v1beta1.SizeOneXMedium,
+ Storage: v1beta1.ObjectStorageSpec{
+ Schemas: []v1beta1.ObjectStorageSchema{
+ {
+ EffectiveDate: v1beta1.StorageSchemaEffectiveDate("2020-11-20"),
+ Version: v1beta1.ObjectStorageSchemaV11,
+ },
+ {
+ EffectiveDate: v1beta1.StorageSchemaEffectiveDate("2021-11-20"),
+ Version: v1beta1.ObjectStorageSchemaV12,
+ },
+ },
+ Secret: v1beta1.ObjectStorageSecretSpec{
+ Type: v1beta1.ObjectStorageSecretS3,
+ Name: "test",
+ },
+ TLS: &v1beta1.ObjectStorageTLSSpec{
+ CA: "test-ca",
+ },
+ },
+ StorageClassName: "standard",
+ ReplicationFactor: 2,
+ Rules: &v1beta1.RulesSpec{
+ Enabled: true,
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "key": "Value",
+ },
+ },
+ NamespaceSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "key": "Value",
+ },
+ },
+ },
+ Limits: &v1beta1.LimitsSpec{
+ Global: &v1beta1.LimitsTemplateSpec{
+ IngestionLimits: &v1beta1.IngestionLimitSpec{
+ IngestionRate: 100,
+ IngestionBurstSize: 200,
+ MaxLabelNameLength: 1000,
+ MaxLabelValueLength: 1000,
+ MaxLabelNamesPerSeries: 1000,
+ MaxGlobalStreamsPerTenant: 10000,
+ MaxLineSize: 512,
+ },
+ QueryLimits: &v1beta1.QueryLimitSpec{
+ MaxEntriesLimitPerQuery: 1000,
+ MaxChunksPerQuery: 1000,
+ MaxQuerySeries: 10000,
+ },
+ },
+ Tenants: map[string]v1beta1.LimitsTemplateSpec{
+ "tenant-a": {
+ IngestionLimits: &v1beta1.IngestionLimitSpec{
+ IngestionRate: 100,
+ IngestionBurstSize: 200,
+ MaxLabelNameLength: 1000,
+ MaxLabelValueLength: 1000,
+ MaxLabelNamesPerSeries: 1000,
+ MaxGlobalStreamsPerTenant: 10000,
+ MaxLineSize: 512,
+ },
+ QueryLimits: &v1beta1.QueryLimitSpec{
+ MaxEntriesLimitPerQuery: 1000,
+ MaxChunksPerQuery: 1000,
+ MaxQuerySeries: 10000,
+ },
+ },
+ "tenant-b": {
+ IngestionLimits: &v1beta1.IngestionLimitSpec{
+ IngestionRate: 100,
+ IngestionBurstSize: 200,
+ MaxLabelNameLength: 1000,
+ MaxLabelValueLength: 1000,
+ MaxLabelNamesPerSeries: 1000,
+ MaxGlobalStreamsPerTenant: 10000,
+ MaxLineSize: 512,
+ },
+ QueryLimits: &v1beta1.QueryLimitSpec{
+ MaxEntriesLimitPerQuery: 1000,
+ MaxChunksPerQuery: 1000,
+ MaxQuerySeries: 10000,
+ },
+ },
+ },
+ },
+ Template: &v1beta1.LokiTemplateSpec{
+ Compactor: &v1beta1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ Distributor: &v1beta1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ Ingester: &v1beta1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ Querier: &v1beta1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ QueryFrontend: &v1beta1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ Gateway: &v1beta1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ IndexGateway: &v1beta1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ Ruler: &v1beta1.LokiComponentSpec{
+ Replicas: 1,
+ NodeSelector: map[string]string{"node": "a"},
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "tolerate",
+ Value: "this",
+ },
+ },
+ },
+ },
+ Tenants: &v1beta1.TenantsSpec{
+ Mode: v1beta1.Dynamic,
+ Authentication: []v1beta1.AuthenticationSpec{
+ {
+ TenantName: "tenant-a",
+ TenantID: "tenant-a",
+ OIDC: &v1beta1.OIDCSpec{
+ Secret: &v1beta1.TenantSecretSpec{
+ Name: "tenant-a-secret",
+ },
+ IssuerURL: "http://go-to-issuer",
+ RedirectURL: "http://bring-me-back",
+ GroupClaim: "workgroups",
+ UsernameClaim: "email",
+ },
+ },
+ {
+ TenantName: "tenant-b",
+ TenantID: "tenant-b",
+ OIDC: &v1beta1.OIDCSpec{
+ Secret: &v1beta1.TenantSecretSpec{
+ Name: "tenant-a-secret",
+ },
+ IssuerURL: "http://go-to-issuer",
+ RedirectURL: "http://bring-me-back",
+ GroupClaim: "workgroups",
+ UsernameClaim: "email",
+ },
+ },
+ },
+ Authorization: &v1beta1.AuthorizationSpec{
+ OPA: &v1beta1.OPASpec{
+ URL: "http://authorize-me/opa",
+ },
+ Roles: []v1beta1.RoleSpec{
+ {
+ Name: "ro-role",
+ Resources: []string{"logs"},
+ Tenants: []string{"tenant-a", "tenant-b"},
+ Permissions: []v1beta1.PermissionType{v1beta1.Read},
+ },
+ {
+ Name: "rw-role",
+ Resources: []string{"logs"},
+ Tenants: []string{"tenant-a", "tenant-b"},
+ Permissions: []v1beta1.PermissionType{v1beta1.Read, v1beta1.Write},
+ },
+ },
+ RoleBindings: []v1beta1.RoleBindingsSpec{
+ {
+ Name: "bind-me",
+ Roles: []string{"ro-role"},
+ Subjects: []v1beta1.Subject{
+ {
+ Name: "a-user",
+ Kind: v1beta1.User,
+ },
+ {
+ Name: "a-group",
+ Kind: v1beta1.Group,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ Status: v1beta1.LokiStackStatus{
+ Conditions: []metav1.Condition{
+ {
+ Type: string(v1beta1.ConditionReady),
+ Status: metav1.ConditionTrue,
+ },
+ {
+ Type: string(v1beta1.ConditionPending),
+ Status: metav1.ConditionFalse,
+ },
+ },
+ Components: v1beta1.LokiStackComponentStatus{
+ Compactor: v1beta1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ Distributor: v1beta1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ Ingester: v1beta1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ Querier: v1beta1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ QueryFrontend: v1beta1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ IndexGateway: v1beta1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ Ruler: v1beta1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ Gateway: v1beta1.PodStatusMap{
+ "ready": []string{"pod-1"},
+ },
+ },
+ Storage: v1beta1.LokiStackStorageStatus{
+ Schemas: []v1beta1.ObjectStorageSchema{
+ {
+ Version: v1beta1.ObjectStorageSchemaV11,
+ EffectiveDate: "2020-06-01",
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+
+ for _, tc := range tt {
+ tc := tc
+ t.Run(tc.desc, func(t *testing.T) {
+ t.Parallel()
+
+ dst := v1beta1.LokiStack{}
+ err := dst.ConvertFrom(&tc.src)
+ require.NoError(t, err)
+			require.Equal(t, tc.want, dst)
+ })
+ }
+}
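
The table-driven test above exercises `ConvertFrom` on the v1beta1 spoke type. For orientation, here is a minimal sketch of the conversion pair such a test targets, built on controller-runtime's `conversion.Hub`/`conversion.Convertible` interfaces; the single field copy shown is an illustrative placeholder, not the operator's full spec/status mapping.

```go
package v1beta1

import (
	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
	"sigs.k8s.io/controller-runtime/pkg/conversion"
)

// ConvertTo converts this v1beta1 LokiStack (spoke) to the v1 hub version.
func (src *LokiStack) ConvertTo(dstRaw conversion.Hub) error {
	dst := dstRaw.(*lokiv1.LokiStack)
	dst.ObjectMeta = src.ObjectMeta
	// Illustrative: the real implementation copies every spec/status field.
	dst.Spec.Size = lokiv1.LokiStackSizeType(src.Spec.Size)
	return nil
}

// ConvertFrom converts the v1 hub version into this v1beta1 LokiStack.
func (dst *LokiStack) ConvertFrom(srcRaw conversion.Hub) error {
	src := srcRaw.(*lokiv1.LokiStack)
	dst.ObjectMeta = src.ObjectMeta
	// Illustrative: the real implementation copies every spec/status field.
	dst.Spec.Size = LokiStackSizeType(src.Spec.Size)
	return nil
}
```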
diff --git a/operator/apis/loki/v1beta1/recordingrule_types.go b/operator/apis/loki/v1beta1/recordingrule_types.go
index 4cd08e96a8ec5..49380e6ad8450 100644
--- a/operator/apis/loki/v1beta1/recordingrule_types.go
+++ b/operator/apis/loki/v1beta1/recordingrule_types.go
@@ -88,7 +88,7 @@ type RecordingRuleStatus struct {
// RecordingRule is the Schema for the recordingrules API
//
-// +operator-sdk:csv:customresourcedefinitions:displayName="RecordingRule",resources={{LokiStack,v1beta1}}
+// +operator-sdk:csv:customresourcedefinitions:displayName="RecordingRule",resources={{LokiStack,v1}}
type RecordingRule struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
diff --git a/operator/apis/loki/v1beta1/rulerconfig_types.go b/operator/apis/loki/v1beta1/rulerconfig_types.go
index 1de1eb69475d6..3035ee84f477b 100644
--- a/operator/apis/loki/v1beta1/rulerconfig_types.go
+++ b/operator/apis/loki/v1beta1/rulerconfig_types.go
@@ -399,7 +399,7 @@ type RulerConfigStatus struct {
// RulerConfig is the Schema for the rulerconfigs API
//
-// +operator-sdk:csv:customresourcedefinitions:displayName="RulerConfig",resources={{LokiStack,v1beta1}}
+// +operator-sdk:csv:customresourcedefinitions:displayName="RulerConfig",resources={{LokiStack,v1}}
type RulerConfig struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
diff --git a/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml
index a21aa324d5d28..fc2fec8b281df 100644
--- a/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml
@@ -4,6 +4,22 @@ metadata:
annotations:
alm-examples: |-
[
+ {
+ "apiVersion": "loki.grafana.com/v1",
+ "kind": "LokiStack",
+ "metadata": {
+ "name": "lokistack-sample"
+ },
+ "spec": {
+ "size": "1x.small",
+ "storage": {
+ "secret": {
+ "name": "test"
+ }
+ },
+ "storageClassName": "standard"
+ }
+ },
{
"apiVersion": "loki.grafana.com/v1beta1",
"kind": "AlertingRule",
@@ -44,22 +60,6 @@ metadata:
"tenantID": "test-tenant"
}
},
- {
- "apiVersion": "loki.grafana.com/v1beta1",
- "kind": "LokiStack",
- "metadata": {
- "name": "lokistack-sample"
- },
- "spec": {
- "size": "1x.small",
- "storage": {
- "secret": {
- "name": "test"
- }
- },
- "storageClassName": "standard"
- }
- },
{
"apiVersion": "loki.grafana.com/v1beta1",
"kind": "RecordingRule",
@@ -183,7 +183,7 @@ spec:
resources:
- kind: LokiStack
name: ""
- version: v1beta1
+ version: v1
specDescriptors:
- description: List of groups for alerting rules.
displayName: Groups
@@ -652,7 +652,7 @@ spec:
path: conditions
x-descriptors:
- urn:alm:descriptor:io.kubernetes.conditions
- version: v1beta1
+ version: v1
- description: RecordingRule is the Schema for the recordingrules API
displayName: RecordingRule
kind: RecordingRule
@@ -660,7 +660,7 @@ spec:
resources:
- kind: LokiStack
name: ""
- version: v1beta1
+ version: v1
specDescriptors:
- description: List of groups for recording rules.
displayName: Groups
@@ -708,7 +708,7 @@ spec:
resources:
- kind: LokiStack
name: ""
- version: v1beta1
+ version: v1
specDescriptors:
- description: Defines alert manager configuration to notify on firing alerts.
displayName: Alert Manager Configuration
@@ -1285,7 +1285,7 @@ spec:
serviceAccountName: default
strategy: deployment
installModes:
- - supported: true
+ - supported: false
type: OwnNamespace
- supported: false
type: SingleNamespace
@@ -1315,6 +1315,18 @@ spec:
name: opa
version: 0.0.1
webhookdefinitions:
+ - admissionReviewVersions:
+ - v1
+ - v1beta1
+ containerPort: 443
+ conversionCRDs:
+ - lokistacks.loki.grafana.com
+ deploymentName: loki-operator-controller-manager
+ generateName: clokistacks.kb.io
+ sideEffects: None
+ targetPort: 9443
+ type: ConversionWebhook
+ webhookPath: /convert
- admissionReviewVersions:
- v1
containerPort: 443
@@ -1340,12 +1352,12 @@ spec:
containerPort: 443
deploymentName: loki-operator-controller-manager
failurePolicy: Fail
- generateName: vlokistack.kb.io
+ generateName: vlokistack.loki.grafana.com
rules:
- apiGroups:
- loki.grafana.com
apiVersions:
- - v1beta1
+ - v1
operations:
- CREATE
- UPDATE
@@ -1354,7 +1366,7 @@ spec:
sideEffects: None
targetPort: 9443
type: ValidatingAdmissionWebhook
- webhookPath: /validate-loki-grafana-com-v1beta1-lokistack
+ webhookPath: /validate-loki-grafana-com-v1-lokistack
- admissionReviewVersions:
- v1
containerPort: 443
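
The new `ConversionWebhook` entry above routes CRD conversion for `lokistacks.loki.grafana.com` through the operator's webhook server on `/convert`. A minimal sketch of the manager-side wiring, assuming standard kubebuilder scaffolding (the helper name is illustrative):

```go
package main

import (
	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
	ctrl "sigs.k8s.io/controller-runtime"
)

// registerLokiStackWebhooks is a hypothetical helper: once the v1 type
// implements conversion.Hub and the v1beta1 type implements
// conversion.Convertible, controller-runtime serves the conversion
// handler on the manager's webhook port (9443 above) at /convert.
func registerLokiStackWebhooks(mgr ctrl.Manager) error {
	return ctrl.NewWebhookManagedBy(mgr).
		For(&lokiv1.LokiStack{}).
		Complete()
}
```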
diff --git a/operator/bundle/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/manifests/loki.grafana.com_lokistacks.yaml
index bcf7450e0b3e4..efaa0b0da1b24 100644
--- a/operator/bundle/manifests/loki.grafana.com_lokistacks.yaml
+++ b/operator/bundle/manifests/loki.grafana.com_lokistacks.yaml
@@ -12,6 +12,18 @@ metadata:
app.kubernetes.io/version: 0.0.1
name: lokistacks.loki.grafana.com
spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ service:
+ name: loki-operator-webhook-service
+ namespace: openshift-operators-redhat
+ path: /convert
+ port: 443
+ conversionReviewVersions:
+ - v1
+ - v1beta1
group: loki.grafana.com
names:
categories:
@@ -22,7 +34,7 @@ spec:
singular: lokistack
scope: Namespaced
versions:
- - name: v1beta1
+ - name: v1
schema:
openAPIV3Schema:
description: LokiStack is the Schema for the lokistacks API
@@ -1183,6 +1195,1167 @@ spec:
storage: true
subresources:
status: {}
+ - name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: LokiStack is the Schema for the lokistacks API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: LokiStackSpec defines the desired state of LokiStack
+ properties:
+ limits:
+ description: Limits defines the limits to be applied to log stream
+ processing.
+ properties:
+ global:
+ description: Global defines the limits applied globally across
+ the cluster.
+ properties:
+ ingestion:
+ description: IngestionLimits defines the limits applied on
+ ingested log streams.
+ properties:
+ ingestionBurstSize:
+ description: IngestionBurstSize defines the local rate-limited
+ sample size per distributor replica. It should be set
+                          at least to the maximum logs size expected
+ in a single push request.
+ format: int32
+ type: integer
+ ingestionRate:
+ description: IngestionRate defines the sample size per
+ second. Units MB.
+ format: int32
+ type: integer
+ maxGlobalStreamsPerTenant:
+ description: MaxGlobalStreamsPerTenant defines the maximum
+ number of active streams per tenant, across the cluster.
+ format: int32
+ type: integer
+ maxLabelNameLength:
+ description: MaxLabelNameLength defines the maximum number
+ of characters allowed for label keys in log streams.
+ format: int32
+ type: integer
+ maxLabelNamesPerSeries:
+ description: MaxLabelNamesPerSeries defines the maximum
+ number of label names per series in each log stream.
+ format: int32
+ type: integer
+ maxLabelValueLength:
+ description: MaxLabelValueLength defines the maximum number
+ of characters allowed for label values in log streams.
+ format: int32
+ type: integer
+ maxLineSize:
+ description: MaxLineSize defines the maximum line size
+ on ingestion path. Units in Bytes.
+ format: int32
+ type: integer
+ type: object
+ queries:
+ description: QueryLimits defines the limit applied on querying
+ log streams.
+ properties:
+ maxChunksPerQuery:
+ description: MaxChunksPerQuery defines the maximum number
+ of chunks that can be fetched by a single query.
+ format: int32
+ type: integer
+ maxEntriesLimitPerQuery:
+                          description: MaxEntriesLimitPerQuery defines the maximum
+ number of log entries that will be returned for a query.
+ format: int32
+ type: integer
+ maxQuerySeries:
+                          description: MaxQuerySeries defines the maximum number of
+ unique series that is returned by a metric query.
+ format: int32
+ type: integer
+ type: object
+ type: object
+ tenants:
+ additionalProperties:
+ description: LimitsTemplateSpec defines the limits applied
+ at ingestion or query path.
+ properties:
+ ingestion:
+ description: IngestionLimits defines the limits applied
+ on ingested log streams.
+ properties:
+ ingestionBurstSize:
+ description: IngestionBurstSize defines the local rate-limited
+ sample size per distributor replica. It should be
+                            set at least to the maximum logs size expected
+ in a single push request.
+ format: int32
+ type: integer
+ ingestionRate:
+ description: IngestionRate defines the sample size per
+ second. Units MB.
+ format: int32
+ type: integer
+ maxGlobalStreamsPerTenant:
+ description: MaxGlobalStreamsPerTenant defines the maximum
+ number of active streams per tenant, across the cluster.
+ format: int32
+ type: integer
+ maxLabelNameLength:
+ description: MaxLabelNameLength defines the maximum
+ number of characters allowed for label keys in log
+ streams.
+ format: int32
+ type: integer
+ maxLabelNamesPerSeries:
+ description: MaxLabelNamesPerSeries defines the maximum
+ number of label names per series in each log stream.
+ format: int32
+ type: integer
+ maxLabelValueLength:
+ description: MaxLabelValueLength defines the maximum
+ number of characters allowed for label values in log
+ streams.
+ format: int32
+ type: integer
+ maxLineSize:
+ description: MaxLineSize defines the maximum line size
+ on ingestion path. Units in Bytes.
+ format: int32
+ type: integer
+ type: object
+ queries:
+ description: QueryLimits defines the limit applied on querying
+ log streams.
+ properties:
+ maxChunksPerQuery:
+ description: MaxChunksPerQuery defines the maximum number
+ of chunks that can be fetched by a single query.
+ format: int32
+ type: integer
+ maxEntriesLimitPerQuery:
+                            description: MaxEntriesLimitPerQuery defines the maximum
+ number of log entries that will be returned for a
+ query.
+ format: int32
+ type: integer
+ maxQuerySeries:
+                            description: MaxQuerySeries defines the maximum number
+ of unique series that is returned by a metric query.
+ format: int32
+ type: integer
+ type: object
+ type: object
+ description: Tenants defines the limits applied per tenant.
+ type: object
+ type: object
+ managementState:
+ default: Managed
+ description: ManagementState defines if the CR should be managed by
+ the operator or not. Default is managed.
+ enum:
+ - Managed
+ - Unmanaged
+ type: string
+ replicationFactor:
+ default: 1
+ description: ReplicationFactor defines the policy for log stream replication.
+ format: int32
+ minimum: 1
+ type: integer
+ rules:
+ description: Rules defines the spec for the ruler component
+ properties:
+ enabled:
+ description: Enabled defines a flag to enable/disable the ruler
+ component
+ type: boolean
+ namespaceSelector:
+ description: Namespaces to be selected for PrometheusRules discovery.
+                  If unspecified, only the namespace that the LokiStack object
+ is in is used.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that relates
+ the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn,
+ Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If
+ the operator is In or NotIn, the values array must
+ be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced
+ during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A
+ single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field is "key",
+ the operator is "In", and the values array contains only
+ "value". The requirements are ANDed.
+ type: object
+ type: object
+ selector:
+ description: A selector to select which LokiRules to mount for
+ loading alerting/recording rules from.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that relates
+ the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn,
+ Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If
+ the operator is In or NotIn, the values array must
+ be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced
+ during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A
+ single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field is "key",
+ the operator is "In", and the values array contains only
+ "value". The requirements are ANDed.
+ type: object
+ type: object
+ required:
+ - enabled
+ type: object
+ size:
+                description: Size defines one of the supported Loki deployment scale
+ out sizes.
+ enum:
+ - 1x.extra-small
+ - 1x.small
+ - 1x.medium
+ type: string
+ storage:
+ description: Storage defines the spec for the object storage endpoint
+ to store logs.
+ properties:
+ schemas:
+ default:
+ - effectiveDate: "2020-10-11"
+ version: v11
+ description: Schemas for reading and writing logs.
+ items:
+ description: ObjectStorageSchema defines the requirements needed
+ to configure a new storage schema.
+ properties:
+ effectiveDate:
+ description: EffectiveDate is the date in UTC that the schema
+                        will be applied on. To ensure readability of logs, this
+ date should be before the current date in UTC.
+ pattern: ^([0-9]{4,})([-]([0-9]{2})){2}$
+ type: string
+ version:
+ description: Version for writing and reading logs.
+ enum:
+ - v11
+ - v12
+ type: string
+ required:
+ - effectiveDate
+ - version
+ type: object
+ minItems: 1
+ type: array
+ secret:
+ description: Secret for object storage authentication. Name of
+ a secret in the same namespace as the LokiStack custom resource.
+ properties:
+ name:
+ description: Name of a secret in the namespace configured
+ for object storage secrets.
+ type: string
+ type:
+ description: Type of object storage that should be used
+ enum:
+ - azure
+ - gcs
+ - s3
+ - swift
+ type: string
+ required:
+ - name
+ - type
+ type: object
+ tls:
+ description: TLS configuration for reaching the object storage
+ endpoint.
+ properties:
+ caName:
+ description: CA is the name of a ConfigMap containing a CA
+ certificate. It needs to be in the same namespace as the
+ LokiStack custom resource.
+ type: string
+ type: object
+ required:
+ - secret
+ type: object
+ storageClassName:
+ description: Storage class name defines the storage class for ingester/querier
+ PVCs.
+ type: string
+ template:
+ description: Template defines the resource/limits/tolerations/nodeselectors
+ per component
+ properties:
+ compactor:
+ description: Compactor defines the compaction component spec.
+ properties:
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector defines the labels required by a
+ node to schedule the component onto it.
+ type: object
+ replicas:
+ description: Replicas defines the number of replica pods of
+ the component.
+ format: int32
+ type: integer
+ tolerations:
+ description: Tolerations defines the tolerations required
+ by a node to schedule the component onto it.
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule and
+ NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration
+ applies to. Empty means match all taint keys. If the
+ key is empty, operator must be Exists; this combination
+ means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship
+ to the value. Valid operators are Exists and Equal.
+ Defaults to Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate all taints of
+ a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period
+ of time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the
+ taint forever (do not evict). Zero and negative values
+ will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration
+ matches to. If the operator is Exists, the value should
+ be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ distributor:
+ description: Distributor defines the distributor component spec.
+ properties:
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector defines the labels required by a
+ node to schedule the component onto it.
+ type: object
+ replicas:
+ description: Replicas defines the number of replica pods of
+ the component.
+ format: int32
+ type: integer
+ tolerations:
+ description: Tolerations defines the tolerations required
+ by a node to schedule the component onto it.
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule and
+ NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration
+ applies to. Empty means match all taint keys. If the
+ key is empty, operator must be Exists; this combination
+ means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship
+ to the value. Valid operators are Exists and Equal.
+ Defaults to Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate all taints of
+ a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period
+ of time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the
+ taint forever (do not evict). Zero and negative values
+ will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration
+ matches to. If the operator is Exists, the value should
+ be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ gateway:
+ description: Gateway defines the lokistack gateway component spec.
+ properties:
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector defines the labels required by a
+ node to schedule the component onto it.
+ type: object
+ replicas:
+ description: Replicas defines the number of replica pods of
+ the component.
+ format: int32
+ type: integer
+ tolerations:
+ description: Tolerations defines the tolerations required
+ by a node to schedule the component onto it.
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule and
+ NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration
+ applies to. Empty means match all taint keys. If the
+ key is empty, operator must be Exists; this combination
+ means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship
+ to the value. Valid operators are Exists and Equal.
+ Defaults to Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate all taints of
+ a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period
+ of time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the
+ taint forever (do not evict). Zero and negative values
+ will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration
+ matches to. If the operator is Exists, the value should
+ be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ indexGateway:
+ description: IndexGateway defines the index gateway component
+ spec.
+ properties:
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector defines the labels required by a
+ node to schedule the component onto it.
+ type: object
+ replicas:
+ description: Replicas defines the number of replica pods of
+ the component.
+ format: int32
+ type: integer
+ tolerations:
+ description: Tolerations defines the tolerations required
+ by a node to schedule the component onto it.
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule and
+ NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration
+ applies to. Empty means match all taint keys. If the
+ key is empty, operator must be Exists; this combination
+ means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship
+ to the value. Valid operators are Exists and Equal.
+ Defaults to Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate all taints of
+ a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period
+ of time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the
+ taint forever (do not evict). Zero and negative values
+ will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration
+ matches to. If the operator is Exists, the value should
+ be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ ingester:
+ description: Ingester defines the ingester component spec.
+ properties:
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector defines the labels required by a
+ node to schedule the component onto it.
+ type: object
+ replicas:
+ description: Replicas defines the number of replica pods of
+ the component.
+ format: int32
+ type: integer
+ tolerations:
+ description: Tolerations defines the tolerations required
+ by a node to schedule the component onto it.
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule and
+ NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration
+ applies to. Empty means match all taint keys. If the
+ key is empty, operator must be Exists; this combination
+ means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship
+ to the value. Valid operators are Exists and Equal.
+ Defaults to Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate all taints of
+ a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period
+ of time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the
+ taint forever (do not evict). Zero and negative values
+ will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration
+ matches to. If the operator is Exists, the value should
+ be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ querier:
+ description: Querier defines the querier component spec.
+ properties:
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector defines the labels required by a
+ node to schedule the component onto it.
+ type: object
+ replicas:
+ description: Replicas defines the number of replica pods of
+ the component.
+ format: int32
+ type: integer
+ tolerations:
+ description: Tolerations defines the tolerations required
+ by a node to schedule the component onto it.
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule and
+ NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration
+ applies to. Empty means match all taint keys. If the
+ key is empty, operator must be Exists; this combination
+ means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship
+ to the value. Valid operators are Exists and Equal.
+ Defaults to Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate all taints of
+ a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period
+ of time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the
+ taint forever (do not evict). Zero and negative values
+ will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration
+ matches to. If the operator is Exists, the value should
+ be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ queryFrontend:
+ description: QueryFrontend defines the query frontend component
+ spec.
+ properties:
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector defines the labels required by a
+ node to schedule the component onto it.
+ type: object
+ replicas:
+ description: Replicas defines the number of replica pods of
+ the component.
+ format: int32
+ type: integer
+ tolerations:
+ description: Tolerations defines the tolerations required
+ by a node to schedule the component onto it.
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule and
+ NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration
+ applies to. Empty means match all taint keys. If the
+ key is empty, operator must be Exists; this combination
+ means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship
+ to the value. Valid operators are Exists and Equal.
+ Defaults to Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate all taints of
+ a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period
+ of time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the
+ taint forever (do not evict). Zero and negative values
+ will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration
+ matches to. If the operator is Exists, the value should
+ be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ ruler:
+ description: Ruler defines the ruler component spec.
+ properties:
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector defines the labels required by a
+ node to schedule the component onto it.
+ type: object
+ replicas:
+ description: Replicas defines the number of replica pods of
+ the component.
+ format: int32
+ type: integer
+ tolerations:
+ description: Tolerations defines the tolerations required
+ by a node to schedule the component onto it.
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule and
+ NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration
+ applies to. Empty means match all taint keys. If the
+ key is empty, operator must be Exists; this combination
+ means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship
+ to the value. Valid operators are Exists and Equal.
+ Defaults to Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate all taints of
+ a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period
+ of time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the
+ taint forever (do not evict). Zero and negative values
+ will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration
+ matches to. If the operator is Exists, the value should
+ be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ type: object
+ tenants:
+ description: Tenants defines the per-tenant authentication and authorization
+ spec for the lokistack-gateway component.
+ properties:
+ authentication:
+ description: Authentication defines the lokistack-gateway component
+ authentication configuration spec per tenant.
+ items:
+                  description: AuthenticationSpec defines the OIDC configuration
+                    per tenant for the LokiStack Gateway component.
+ properties:
+ oidc:
+ description: OIDC defines the spec for the OIDC tenant's
+ authentication.
+ properties:
+ groupClaim:
+ description: Group claim field from ID Token
+ type: string
+ issuerURL:
+ description: IssuerURL defines the URL for issuer.
+ type: string
+ redirectURL:
+ description: RedirectURL defines the URL for redirect.
+ type: string
+ secret:
+ description: Secret defines the spec for the clientID,
+ clientSecret and issuerCAPath for tenant's authentication.
+ properties:
+ name:
+ description: Name of a secret in the namespace configured
+ for tenant secrets.
+ type: string
+ required:
+ - name
+ type: object
+ usernameClaim:
+ description: User claim field from ID Token
+ type: string
+ required:
+ - issuerURL
+ - secret
+ type: object
+ tenantId:
+ description: TenantID defines the id of the tenant.
+ type: string
+ tenantName:
+ description: TenantName defines the name of the tenant.
+ type: string
+ required:
+ - oidc
+ - tenantId
+ - tenantName
+ type: object
+ type: array
+ authorization:
+ description: Authorization defines the lokistack-gateway component
+ authorization configuration spec per tenant.
+ properties:
+ opa:
+ description: OPA defines the spec for the third-party endpoint
+ for tenant's authorization.
+ properties:
+ url:
+ description: URL defines the third-party endpoint for
+ authorization.
+ type: string
+ required:
+ - url
+ type: object
+ roleBindings:
+ description: RoleBindings defines configuration to bind a
+ set of roles to a set of subjects.
+ items:
+ description: RoleBindingsSpec binds a set of roles to a
+ set of subjects.
+ properties:
+ name:
+ type: string
+ roles:
+ items:
+ type: string
+ type: array
+ subjects:
+ items:
+ description: Subject represents a subject that has
+ been bound to a role.
+ properties:
+ kind:
+ description: SubjectKind is a kind of LokiStack
+ Gateway RBAC subject.
+ enum:
+ - user
+ - group
+ type: string
+ name:
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ type: array
+ required:
+ - name
+ - roles
+ - subjects
+ type: object
+ type: array
+ roles:
+ description: Roles defines a set of permissions to interact
+ with a tenant.
+ items:
+ description: RoleSpec describes a set of permissions to
+ interact with a tenant.
+ properties:
+ name:
+ type: string
+ permissions:
+ items:
+ description: PermissionType is a LokiStack Gateway
+ RBAC permission.
+ enum:
+ - read
+ - write
+ type: string
+ type: array
+ resources:
+ items:
+ type: string
+ type: array
+ tenants:
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - permissions
+ - resources
+ - tenants
+ type: object
+ type: array
+ type: object
+ mode:
+ default: openshift-logging
+                  description: Mode defines the mode in which the lokistack-gateway
+ component will be configured.
+ enum:
+ - static
+ - dynamic
+ - openshift-logging
+ type: string
+ required:
+ - mode
+ type: object
+ required:
+ - size
+ - storage
+ - storageClassName
+ type: object
+ status:
+ description: LokiStackStatus defines the observed state of LokiStack
+ properties:
+ components:
+ description: Components provides summary of all Loki pod status grouped
+ per component.
+ properties:
+ compactor:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: Compactor is a map to the pod status of the compactor
+ pod.
+ type: object
+ distributor:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: Distributor is a map to the per pod status of the
+ distributor deployment
+ type: object
+ gateway:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: Gateway is a map to the per pod status of the lokistack
+ gateway deployment.
+ type: object
+ indexGateway:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: IndexGateway is a map to the per pod status of the
+ index gateway statefulset
+ type: object
+ ingester:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: Ingester is a map to the per pod status of the ingester
+ statefulset
+ type: object
+ querier:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: Querier is a map to the per pod status of the querier
+ deployment
+ type: object
+ queryFrontend:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: QueryFrontend is a map to the per pod status of the
+ query frontend deployment
+ type: object
+ ruler:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: Ruler is a map to the per pod status of the lokistack
+ ruler statefulset.
+ type: object
+ type: object
+ conditions:
+ description: Conditions of the Loki deployment health.
+ items:
+ description: "Condition contains details for one aspect of the current
+ state of this API Resource. --- This struct is intended for direct
+ use as an array at the field path .status.conditions. For example,
+ type FooStatus struct{ // Represents the observations of a foo's
+ current state. // Known .status.conditions.type are: \"Available\",
+ \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge
+ // +listType=map // +listMapKey=type Conditions []metav1.Condition
+ `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\"
+ protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the last time the condition
+ transitioned from one status to another. This should be when
+ the underlying condition changed. If that is not known, then
+ using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: message is a human readable message indicating
+ details about the transition. This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: observedGeneration represents the .metadata.generation
+ that the condition was set based upon. For instance, if .metadata.generation
+ is currently 12, but the .status.conditions[x].observedGeneration
+ is 9, the condition is out of date with respect to the current
+ state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: reason contains a programmatic identifier indicating
+ the reason for the condition's last transition. Producers
+ of specific condition types may define expected values and
+ meanings for this field, and whether the values are considered
+ a guaranteed API. The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ --- Many .condition.type values are consistent across resources
+ like Available, but because arbitrary conditions can be useful
+ (see .node.status.conditions), the ability to deconflict is
+ important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ storage:
+ description: Storage provides summary of all changes that have occurred
+ to the storage configuration.
+ properties:
+ schemas:
+ description: Schemas is a list of schemas which have been applied
+ to the LokiStack.
+ items:
+ description: ObjectStorageSchema defines the requirements needed
+ to configure a new storage schema.
+ properties:
+ effectiveDate:
+ description: EffectiveDate is the date in UTC that the schema
+                      will be applied on. To ensure readability of logs, this
+ date should be before the current date in UTC.
+ pattern: ^([0-9]{4,})([-]([0-9]{2})){2}$
+ type: string
+ version:
+ description: Version for writing and reading logs.
+ enum:
+ - v11
+ - v12
+ type: string
+ required:
+ - effectiveDate
+ - version
+ type: object
+ type: array
+ type: object
+ type: object
+ type: object
+ served: false
+ storage: false
+ subresources:
+ status: {}
status:
acceptedNames:
kind: ""
diff --git a/operator/cmd/loki-broker/main.go b/operator/cmd/loki-broker/main.go
index 25586298bf50c..b44b31859251d 100644
--- a/operator/cmd/loki-broker/main.go
+++ b/operator/cmd/loki-broker/main.go
@@ -11,7 +11,7 @@ import (
"github.com/ViaQ/logerr/v2/log"
"github.com/go-logr/logr"
configv1 "github.com/grafana/loki/operator/apis/config/v1"
- "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests"
"github.com/grafana/loki/operator/internal/manifests/storage"
"sigs.k8s.io/yaml"
@@ -120,7 +120,7 @@ func main() {
os.Exit(1)
}
- ls := &v1beta1.LokiStack{}
+ ls := &lokiv1.LokiStack{}
if err = yaml.Unmarshal(b, ls); err != nil {
logger.Error(err, "failed to unmarshal LokiStack CR", "path", cfg.crFilepath)
os.Exit(1)
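
With the import switched to the v1 package, loki-broker decodes the CR file straight into the v1 Go type. A self-contained sketch of that pattern (helper name and error wrapping are illustrative):

```go
package main

import (
	"fmt"
	"os"

	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
	"sigs.k8s.io/yaml"
)

// loadLokiStack reads a LokiStack CR manifest from disk and decodes it.
func loadLokiStack(path string) (*lokiv1.LokiStack, error) {
	b, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("read CR file: %w", err)
	}
	ls := &lokiv1.LokiStack{}
	// sigs.k8s.io/yaml converts YAML to JSON first, so decoding follows
	// the type's json struct tags.
	if err := yaml.Unmarshal(b, ls); err != nil {
		return nil, fmt.Errorf("unmarshal LokiStack CR: %w", err)
	}
	return ls, nil
}
```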
diff --git a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml
index d8226096c1c96..b9be4970f1282 100644
--- a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml
+++ b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml
@@ -17,7 +17,7 @@ spec:
singular: lokistack
scope: Namespaced
versions:
- - name: v1beta1
+ - name: v1
schema:
openAPIV3Schema:
description: LokiStack is the Schema for the lokistacks API
@@ -1178,3 +1178,1164 @@ spec:
storage: true
subresources:
status: {}
+ - name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: LokiStack is the Schema for the lokistacks API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: LokiStackSpec defines the desired state of LokiStack
+ properties:
+ limits:
+ description: Limits defines the limits to be applied to log stream
+ processing.
+ properties:
+ global:
+ description: Global defines the limits applied globally across
+ the cluster.
+ properties:
+ ingestion:
+ description: IngestionLimits defines the limits applied on
+ ingested log streams.
+ properties:
+ ingestionBurstSize:
+ description: IngestionBurstSize defines the local rate-limited
+ sample size per distributor replica. It should be set
+                            at least to the maximum logs size expected
+ in a single push request.
+ format: int32
+ type: integer
+ ingestionRate:
+ description: IngestionRate defines the sample size per
+ second. Units MB.
+ format: int32
+ type: integer
+ maxGlobalStreamsPerTenant:
+ description: MaxGlobalStreamsPerTenant defines the maximum
+ number of active streams per tenant, across the cluster.
+ format: int32
+ type: integer
+ maxLabelNameLength:
+ description: MaxLabelNameLength defines the maximum number
+ of characters allowed for label keys in log streams.
+ format: int32
+ type: integer
+ maxLabelNamesPerSeries:
+ description: MaxLabelNamesPerSeries defines the maximum
+ number of label names per series in each log stream.
+ format: int32
+ type: integer
+ maxLabelValueLength:
+ description: MaxLabelValueLength defines the maximum number
+ of characters allowed for label values in log streams.
+ format: int32
+ type: integer
+ maxLineSize:
+ description: MaxLineSize defines the maximum line size
+ on ingestion path. Units in Bytes.
+ format: int32
+ type: integer
+ type: object
+ queries:
+ description: QueryLimits defines the limit applied on querying
+ log streams.
+ properties:
+ maxChunksPerQuery:
+ description: MaxChunksPerQuery defines the maximum number
+ of chunks that can be fetched by a single query.
+ format: int32
+ type: integer
+ maxEntriesLimitPerQuery:
+                        description: MaxEntriesLimitPerQuery defines the maximum
+ number of log entries that will be returned for a query.
+ format: int32
+ type: integer
+ maxQuerySeries:
+                        description: MaxQuerySeries defines the maximum number of
+ unique series that is returned by a metric query.
+ format: int32
+ type: integer
+ type: object
+ type: object
+ tenants:
+ additionalProperties:
+ description: LimitsTemplateSpec defines the limits applied
+ at ingestion or query path.
+ properties:
+ ingestion:
+ description: IngestionLimits defines the limits applied
+ on ingested log streams.
+ properties:
+ ingestionBurstSize:
+ description: IngestionBurstSize defines the local rate-limited
+ sample size per distributor replica. It should be
+                              set at least to the maximum logs size expected
+ in a single push request.
+ format: int32
+ type: integer
+ ingestionRate:
+ description: IngestionRate defines the sample size per
+ second. Units MB.
+ format: int32
+ type: integer
+ maxGlobalStreamsPerTenant:
+ description: MaxGlobalStreamsPerTenant defines the maximum
+ number of active streams per tenant, across the cluster.
+ format: int32
+ type: integer
+ maxLabelNameLength:
+ description: MaxLabelNameLength defines the maximum
+ number of characters allowed for label keys in log
+ streams.
+ format: int32
+ type: integer
+ maxLabelNamesPerSeries:
+ description: MaxLabelNamesPerSeries defines the maximum
+ number of label names per series in each log stream.
+ format: int32
+ type: integer
+ maxLabelValueLength:
+ description: MaxLabelValueLength defines the maximum
+ number of characters allowed for label values in log
+ streams.
+ format: int32
+ type: integer
+ maxLineSize:
+ description: MaxLineSize defines the maximum line size
+ on ingestion path. Units in Bytes.
+ format: int32
+ type: integer
+ type: object
+ queries:
+ description: QueryLimits defines the limit applied on querying
+ log streams.
+ properties:
+ maxChunksPerQuery:
+ description: MaxChunksPerQuery defines the maximum number
+ of chunks that can be fetched by a single query.
+ format: int32
+ type: integer
+ maxEntriesLimitPerQuery:
+                            description: MaxEntriesLimitPerQuery defines the maximum
+ number of log entries that will be returned for a
+ query.
+ format: int32
+ type: integer
+ maxQuerySeries:
+                            description: MaxQuerySeries defines the maximum number
+ of unique series that is returned by a metric query.
+ format: int32
+ type: integer
+ type: object
+ type: object
+ description: Tenants defines the limits applied per tenant.
+ type: object
+ type: object
+ managementState:
+ default: Managed
+ description: ManagementState defines if the CR should be managed by
+ the operator or not. Default is managed.
+ enum:
+ - Managed
+ - Unmanaged
+ type: string
+ replicationFactor:
+ default: 1
+ description: ReplicationFactor defines the policy for log stream replication.
+ format: int32
+ minimum: 1
+ type: integer
+ rules:
+ description: Rules defines the spec for the ruler component
+ properties:
+ enabled:
+ description: Enabled defines a flag to enable/disable the ruler
+ component
+ type: boolean
+ namespaceSelector:
+ description: Namespaces to be selected for PrometheusRules discovery.
+                  If unspecified, only the namespace that the LokiStack object
+ is in is used.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that relates
+ the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn,
+ Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If
+ the operator is In or NotIn, the values array must
+ be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced
+ during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A
+ single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field is "key",
+ the operator is "In", and the values array contains only
+ "value". The requirements are ANDed.
+ type: object
+ type: object
+ selector:
+ description: A selector to select which LokiRules to mount for
+ loading alerting/recording rules from.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that relates
+ the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn,
+ Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If
+ the operator is In or NotIn, the values array must
+ be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced
+ during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A
+ single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field is "key",
+ the operator is "In", and the values array contains only
+ "value". The requirements are ANDed.
+ type: object
+ type: object
+ required:
+ - enabled
+ type: object
+ size:
+              description: Size defines one of the supported Loki deployment scale
+ out sizes.
+ enum:
+ - 1x.extra-small
+ - 1x.small
+ - 1x.medium
+ type: string
+ storage:
+ description: Storage defines the spec for the object storage endpoint
+ to store logs.
+ properties:
+ schemas:
+ default:
+ - effectiveDate: "2020-10-11"
+ version: v11
+ description: Schemas for reading and writing logs.
+ items:
+ description: ObjectStorageSchema defines the requirements needed
+ to configure a new storage schema.
+ properties:
+ effectiveDate:
+ description: EffectiveDate is the date in UTC that the schema
+                          will be applied on. To ensure readability of logs, this
+ date should be before the current date in UTC.
+ pattern: ^([0-9]{4,})([-]([0-9]{2})){2}$
+ type: string
+ version:
+ description: Version for writing and reading logs.
+ enum:
+ - v11
+ - v12
+ type: string
+ required:
+ - effectiveDate
+ - version
+ type: object
+ minItems: 1
+ type: array
+ secret:
+ description: Secret for object storage authentication. Name of
+ a secret in the same namespace as the LokiStack custom resource.
+ properties:
+ name:
+ description: Name of a secret in the namespace configured
+ for object storage secrets.
+ type: string
+ type:
+ description: Type of object storage that should be used
+ enum:
+ - azure
+ - gcs
+ - s3
+ - swift
+ type: string
+ required:
+ - name
+ - type
+ type: object
+ tls:
+ description: TLS configuration for reaching the object storage
+ endpoint.
+ properties:
+ caName:
+ description: CA is the name of a ConfigMap containing a CA
+ certificate. It needs to be in the same namespace as the
+ LokiStack custom resource.
+ type: string
+ type: object
+ required:
+ - secret
+ type: object
+ storageClassName:
+ description: Storage class name defines the storage class for ingester/querier
+ PVCs.
+ type: string
+ template:
+ description: Template defines the resource/limits/tolerations/nodeselectors
+ per component
+ properties:
+ compactor:
+ description: Compactor defines the compaction component spec.
+ properties:
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector defines the labels required by a
+ node to schedule the component onto it.
+ type: object
+ replicas:
+ description: Replicas defines the number of replica pods of
+ the component.
+ format: int32
+ type: integer
+ tolerations:
+ description: Tolerations defines the tolerations required
+ by a node to schedule the component onto it.
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule and
+ NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration
+ applies to. Empty means match all taint keys. If the
+ key is empty, operator must be Exists; this combination
+ means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship
+ to the value. Valid operators are Exists and Equal.
+ Defaults to Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate all taints of
+ a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period
+ of time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the
+ taint forever (do not evict). Zero and negative values
+ will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration
+ matches to. If the operator is Exists, the value should
+ be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ distributor:
+ description: Distributor defines the distributor component spec.
+ properties:
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector defines the labels required by a
+ node to schedule the component onto it.
+ type: object
+ replicas:
+ description: Replicas defines the number of replica pods of
+ the component.
+ format: int32
+ type: integer
+ tolerations:
+ description: Tolerations defines the tolerations required
+ by a node to schedule the component onto it.
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule and
+ NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration
+ applies to. Empty means match all taint keys. If the
+ key is empty, operator must be Exists; this combination
+ means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship
+ to the value. Valid operators are Exists and Equal.
+ Defaults to Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate all taints of
+ a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period
+ of time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the
+ taint forever (do not evict). Zero and negative values
+ will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration
+ matches to. If the operator is Exists, the value should
+ be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ gateway:
+ description: Gateway defines the lokistack gateway component spec.
+ properties:
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector defines the labels required by a
+ node to schedule the component onto it.
+ type: object
+ replicas:
+ description: Replicas defines the number of replica pods of
+ the component.
+ format: int32
+ type: integer
+ tolerations:
+ description: Tolerations defines the tolerations required
+ by a node to schedule the component onto it.
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule and
+ NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration
+ applies to. Empty means match all taint keys. If the
+ key is empty, operator must be Exists; this combination
+ means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship
+ to the value. Valid operators are Exists and Equal.
+ Defaults to Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate all taints of
+ a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period
+ of time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the
+ taint forever (do not evict). Zero and negative values
+ will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration
+ matches to. If the operator is Exists, the value should
+ be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ indexGateway:
+ description: IndexGateway defines the index gateway component
+ spec.
+ properties:
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector defines the labels required by a
+ node to schedule the component onto it.
+ type: object
+ replicas:
+ description: Replicas defines the number of replica pods of
+ the component.
+ format: int32
+ type: integer
+ tolerations:
+ description: Tolerations defines the tolerations required
+ by a node to schedule the component onto it.
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule and
+ NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration
+ applies to. Empty means match all taint keys. If the
+ key is empty, operator must be Exists; this combination
+ means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship
+ to the value. Valid operators are Exists and Equal.
+ Defaults to Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate all taints of
+ a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period
+ of time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the
+ taint forever (do not evict). Zero and negative values
+ will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration
+ matches to. If the operator is Exists, the value should
+ be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ ingester:
+ description: Ingester defines the ingester component spec.
+ properties:
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector defines the labels required by a
+ node to schedule the component onto it.
+ type: object
+ replicas:
+ description: Replicas defines the number of replica pods of
+ the component.
+ format: int32
+ type: integer
+ tolerations:
+ description: Tolerations defines the tolerations required
+ by a node to schedule the component onto it.
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule and
+ NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration
+ applies to. Empty means match all taint keys. If the
+ key is empty, operator must be Exists; this combination
+ means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship
+ to the value. Valid operators are Exists and Equal.
+ Defaults to Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate all taints of
+ a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period
+ of time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the
+ taint forever (do not evict). Zero and negative values
+ will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration
+ matches to. If the operator is Exists, the value should
+ be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ querier:
+ description: Querier defines the querier component spec.
+ properties:
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector defines the labels required by a
+ node to schedule the component onto it.
+ type: object
+ replicas:
+ description: Replicas defines the number of replica pods of
+ the component.
+ format: int32
+ type: integer
+ tolerations:
+ description: Tolerations defines the tolerations required
+ by a node to schedule the component onto it.
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule and
+ NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration
+ applies to. Empty means match all taint keys. If the
+ key is empty, operator must be Exists; this combination
+ means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship
+ to the value. Valid operators are Exists and Equal.
+ Defaults to Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate all taints of
+ a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period
+ of time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the
+ taint forever (do not evict). Zero and negative values
+ will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration
+ matches to. If the operator is Exists, the value should
+ be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ queryFrontend:
+ description: QueryFrontend defines the query frontend component
+ spec.
+ properties:
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector defines the labels required by a
+ node to schedule the component onto it.
+ type: object
+ replicas:
+ description: Replicas defines the number of replica pods of
+ the component.
+ format: int32
+ type: integer
+ tolerations:
+ description: Tolerations defines the tolerations required
+ by a node to schedule the component onto it.
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule and
+ NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration
+ applies to. Empty means match all taint keys. If the
+ key is empty, operator must be Exists; this combination
+ means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship
+ to the value. Valid operators are Exists and Equal.
+ Defaults to Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate all taints of
+ a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period
+ of time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the
+ taint forever (do not evict). Zero and negative values
+ will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration
+ matches to. If the operator is Exists, the value should
+ be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ ruler:
+ description: Ruler defines the ruler component spec.
+ properties:
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector defines the labels required by a
+ node to schedule the component onto it.
+ type: object
+ replicas:
+ description: Replicas defines the number of replica pods of
+ the component.
+ format: int32
+ type: integer
+ tolerations:
+ description: Tolerations defines the tolerations required
+ by a node to schedule the component onto it.
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule and
+ NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration
+ applies to. Empty means match all taint keys. If the
+ key is empty, operator must be Exists; this combination
+ means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship
+ to the value. Valid operators are Exists and Equal.
+ Defaults to Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate all taints of
+ a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period
+ of time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the
+ taint forever (do not evict). Zero and negative values
+ will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration
+ matches to. If the operator is Exists, the value should
+ be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ type: object
+ tenants:
+ description: Tenants defines the per-tenant authentication and authorization
+ spec for the lokistack-gateway component.
+ properties:
+ authentication:
+ description: Authentication defines the lokistack-gateway component
+ authentication configuration spec per tenant.
+ items:
+ description: AuthenticationSpec defines the oidc configuration
+ per tenant for lokiStack Gateway component.
+ properties:
+ oidc:
+ description: OIDC defines the spec for the OIDC tenant's
+ authentication.
+ properties:
+ groupClaim:
+ description: Group claim field from ID Token
+ type: string
+ issuerURL:
+ description: IssuerURL defines the URL for issuer.
+ type: string
+ redirectURL:
+ description: RedirectURL defines the URL for redirect.
+ type: string
+ secret:
+ description: Secret defines the spec for the clientID,
+ clientSecret and issuerCAPath for tenant's authentication.
+ properties:
+ name:
+ description: Name of a secret in the namespace configured
+ for tenant secrets.
+ type: string
+ required:
+ - name
+ type: object
+ usernameClaim:
+ description: User claim field from ID Token
+ type: string
+ required:
+ - issuerURL
+ - secret
+ type: object
+ tenantId:
+ description: TenantID defines the id of the tenant.
+ type: string
+ tenantName:
+ description: TenantName defines the name of the tenant.
+ type: string
+ required:
+ - oidc
+ - tenantId
+ - tenantName
+ type: object
+ type: array
+ authorization:
+ description: Authorization defines the lokistack-gateway component
+ authorization configuration spec per tenant.
+ properties:
+ opa:
+ description: OPA defines the spec for the third-party endpoint
+ for tenant's authorization.
+ properties:
+ url:
+ description: URL defines the third-party endpoint for
+ authorization.
+ type: string
+ required:
+ - url
+ type: object
+ roleBindings:
+ description: RoleBindings defines configuration to bind a
+ set of roles to a set of subjects.
+ items:
+ description: RoleBindingsSpec binds a set of roles to a
+ set of subjects.
+ properties:
+ name:
+ type: string
+ roles:
+ items:
+ type: string
+ type: array
+ subjects:
+ items:
+ description: Subject represents a subject that has
+ been bound to a role.
+ properties:
+ kind:
+ description: SubjectKind is a kind of LokiStack
+ Gateway RBAC subject.
+ enum:
+ - user
+ - group
+ type: string
+ name:
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ type: array
+ required:
+ - name
+ - roles
+ - subjects
+ type: object
+ type: array
+ roles:
+ description: Roles defines a set of permissions to interact
+ with a tenant.
+ items:
+ description: RoleSpec describes a set of permissions to
+ interact with a tenant.
+ properties:
+ name:
+ type: string
+ permissions:
+ items:
+ description: PermissionType is a LokiStack Gateway
+ RBAC permission.
+ enum:
+ - read
+ - write
+ type: string
+ type: array
+ resources:
+ items:
+ type: string
+ type: array
+ tenants:
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - permissions
+ - resources
+ - tenants
+ type: object
+ type: array
+ type: object
+ mode:
+ default: openshift-logging
+ description: Mode defines the mode in which lokistack-gateway
+ component will be configured.
+ enum:
+ - static
+ - dynamic
+ - openshift-logging
+ type: string
+ required:
+ - mode
+ type: object
+ required:
+ - size
+ - storage
+ - storageClassName
+ type: object
+ status:
+ description: LokiStackStatus defines the observed state of LokiStack
+ properties:
+ components:
+ description: Components provides summary of all Loki pod status grouped
+ per component.
+ properties:
+ compactor:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: Compactor is a map to the pod status of the compactor
+ pod.
+ type: object
+ distributor:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: Distributor is a map to the per pod status of the
+ distributor deployment
+ type: object
+ gateway:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: Gateway is a map to the per pod status of the lokistack
+ gateway deployment.
+ type: object
+ indexGateway:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: IndexGateway is a map to the per pod status of the
+ index gateway statefulset
+ type: object
+ ingester:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: Ingester is a map to the per pod status of the ingester
+ statefulset
+ type: object
+ querier:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: Querier is a map to the per pod status of the querier
+ deployment
+ type: object
+ queryFrontend:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: QueryFrontend is a map to the per pod status of the
+ query frontend deployment
+ type: object
+ ruler:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: Ruler is a map to the per pod status of the lokistack
+ ruler statefulset.
+ type: object
+ type: object
+ conditions:
+ description: Conditions of the Loki deployment health.
+ items:
+ description: "Condition contains details for one aspect of the current
+ state of this API Resource. --- This struct is intended for direct
+ use as an array at the field path .status.conditions. For example,
+ type FooStatus struct{ // Represents the observations of a foo's
+ current state. // Known .status.conditions.type are: \"Available\",
+ \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge
+ // +listType=map // +listMapKey=type Conditions []metav1.Condition
+ `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\"
+ protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the last time the condition
+ transitioned from one status to another. This should be when
+ the underlying condition changed. If that is not known, then
+ using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: message is a human readable message indicating
+ details about the transition. This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: observedGeneration represents the .metadata.generation
+ that the condition was set based upon. For instance, if .metadata.generation
+ is currently 12, but the .status.conditions[x].observedGeneration
+ is 9, the condition is out of date with respect to the current
+ state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: reason contains a programmatic identifier indicating
+ the reason for the condition's last transition. Producers
+ of specific condition types may define expected values and
+ meanings for this field, and whether the values are considered
+ a guaranteed API. The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ --- Many .condition.type values are consistent across resources
+ like Available, but because arbitrary conditions can be useful
+ (see .node.status.conditions), the ability to deconflict is
+ important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ storage:
+ description: Storage provides summary of all changes that have occurred
+ to the storage configuration.
+ properties:
+ schemas:
+ description: Schemas is a list of schemas which have been applied
+ to the LokiStack.
+ items:
+ description: ObjectStorageSchema defines the requirements needed
+ to configure a new storage schema.
+ properties:
+ effectiveDate:
+ description: EffectiveDate is the date in UTC that the schema
+                        will be applied on. To ensure readability of logs, this
+ date should be before the current date in UTC.
+ pattern: ^([0-9]{4,})([-]([0-9]{2})){2}$
+ type: string
+ version:
+ description: Version for writing and reading logs.
+ enum:
+ - v11
+ - v12
+ type: string
+ required:
+ - effectiveDate
+ - version
+ type: object
+ type: array
+ type: object
+ type: object
+ type: object
+ served: false
+ storage: false
+ subresources:
+ status: {}
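The template section captured in the schema above is the knob for per-component scheduling. A minimal sketch of a LokiStack resource exercising it against the served v1 API; the storage class, node label, and taint key are illustrative assumptions, not values taken from this change:

    apiVersion: loki.grafana.com/v1
    kind: LokiStack
    metadata:
      name: lokistack-dev
    spec:
      size: 1x.extra-small
      storageClassName: gp2                      # assumption: any storage class in the cluster
      storage:
        schemas:
        - version: v12
          effectiveDate: "2022-06-01"
        secret:
          name: test
          type: s3
      template:
        ingester:
          replicas: 2
          nodeSelector:
            node-role.kubernetes.io/infra: ""    # assumed node label
          tolerations:
          - key: node-role.kubernetes.io/infra   # assumed taint key
            operator: Exists
            effect: NoSchedule

Per the toleration schema above, omitting tolerationSeconds means the taint is tolerated forever, and an Exists operator with an empty value matches every value of the key.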
diff --git a/operator/config/crd/kustomization.yaml b/operator/config/crd/kustomization.yaml
index e33c65ac2c1c8..73a29cdb61eb9 100644
--- a/operator/config/crd/kustomization.yaml
+++ b/operator/config/crd/kustomization.yaml
@@ -11,7 +11,7 @@ resources:
patchesStrategicMerge:
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
# patches here are for enabling the conversion webhook for each CRD
-#- patches/webhook_in_lokistacks.yaml
+- patches/webhook_in_lokistacks.yaml
#- patches/webhook_in_alertingrules.yaml
#- patches/webhook_in_recordingrules.yaml
#- patches/webhook_in_rulerconfigs.yaml
diff --git a/operator/config/crd/kustomizeconfig.yaml b/operator/config/crd/kustomizeconfig.yaml
index bcebe0475b23b..ec5c150a9df25 100644
--- a/operator/config/crd/kustomizeconfig.yaml
+++ b/operator/config/crd/kustomizeconfig.yaml
@@ -4,15 +4,15 @@ nameReference:
version: v1
fieldSpecs:
- kind: CustomResourceDefinition
- version: v1beta1
+ version: v1
group: apiextensions.k8s.io
- path: spec/conversion/webhookClientConfig/service/name
+ path: spec/conversion/webhook/clientConfig/service/name
namespace:
- kind: CustomResourceDefinition
- version: v1beta1
+ version: v1
group: apiextensions.k8s.io
- path: spec/conversion/webhookClientConfig/service/namespace
+ path: spec/conversion/webhook/clientConfig/service/namespace
create: false
varReference:
diff --git a/operator/config/crd/patches/webhook_in_lokistacks.yaml b/operator/config/crd/patches/webhook_in_lokistacks.yaml
new file mode 100644
index 0000000000000..ab3e7b4cd9ce9
--- /dev/null
+++ b/operator/config/crd/patches/webhook_in_lokistacks.yaml
@@ -0,0 +1,18 @@
+# The following patch enables a conversion webhook for the CRD
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: lokistacks.loki.grafana.com
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ service:
+ namespace: system
+ name: webhook-service
+ path: /convert
+ port: 443
+ conversionReviewVersions:
+ - v1
+ - v1beta1
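The /convert path registered in this patch is the endpoint controller-runtime serves for CRD version conversion. In the usual kubebuilder layout the storage version is marked as the conversion hub and older versions implement the Convertible interface; a sketch under that assumption, since the operator's actual conversion code is not part of this diff:

    // In apis/loki/v1: v1 is assumed to be the storage ("hub") version.
    func (*LokiStack) Hub() {}

    // In apis/loki/v1beta1: convert to and from the hub version.
    // conversion.Hub comes from sigs.k8s.io/controller-runtime/pkg/conversion.
    func (src *LokiStack) ConvertTo(dstRaw conversion.Hub) error {
        dst := dstRaw.(*lokiv1.LokiStack)
        dst.ObjectMeta = src.ObjectMeta
        // ... field-by-field copy of Spec and Status, plus a matching ConvertFrom ...
        return nil
    }

With both in place, the webhook server answers ConversionReview requests for the v1 and v1beta1 versions listed under conversionReviewVersions.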
diff --git a/operator/config/manifests/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/bases/loki-operator.clusterserviceversion.yaml
index 21aadf912e48c..fa21dc948fe6a 100644
--- a/operator/config/manifests/bases/loki-operator.clusterserviceversion.yaml
+++ b/operator/config/manifests/bases/loki-operator.clusterserviceversion.yaml
@@ -38,7 +38,7 @@ spec:
resources:
- kind: LokiStack
name: ""
- version: v1beta1
+ version: v1
specDescriptors:
- description: List of groups for alerting rules.
displayName: Groups
@@ -458,6 +458,425 @@ spec:
- urn:alm:descriptor:com.tectonic.ui:select:dynamic
- urn:alm:descriptor:com.tectonic.ui:select:openshift-logging
statusDescriptors:
+ - description: Distributor is a map to the per pod status of the distributor
+ deployment
+ displayName: Distributor
+ path: components.distributor
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:podStatuses
+ - description: Ingester is a map to the per pod status of the ingester statefulset
+ displayName: Ingester
+ path: components.ingester
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:podStatuses
+ - description: Querier is a map to the per pod status of the querier deployment
+ displayName: Querier
+ path: components.querier
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:podStatuses
+ - description: QueryFrontend is a map to the per pod status of the query frontend
+ deployment
+ displayName: Query Frontend
+ path: components.queryFrontend
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:podStatuses
+ - description: Compactor is a map to the pod status of the compactor pod.
+ displayName: Compactor
+ path: components.compactor
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:podStatuses
+ - description: Gateway is a map to the per pod status of the lokistack gateway
+ deployment.
+ displayName: Gateway
+ path: components.gateway
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:podStatuses
+ - description: IndexGateway is a map to the per pod status of the index gateway
+ statefulset
+ displayName: IndexGateway
+ path: components.indexGateway
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:podStatuses
+ - description: Ruler is a map to the per pod status of the lokistack ruler statefulset.
+ displayName: Ruler
+ path: components.ruler
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:podStatuses
+ - description: Conditions of the Loki deployment health.
+ displayName: Conditions
+ path: conditions
+ x-descriptors:
+ - urn:alm:descriptor:io.kubernetes.conditions
+ version: v1
+ - description: LokiStack is the Schema for the lokistacks API
+ displayName: LokiStack
+ kind: LokiStack
+ name: lokistacks.loki.grafana.com
+ resources:
+ - kind: ConfigMap
+ name: ""
+ version: v1
+ - kind: Deployment
+ name: ""
+ version: v1
+ - kind: Ingress
+ name: ""
+ version: v1
+ - kind: PersistentVolumeClaims
+ name: ""
+ version: v1
+ - kind: Route
+ name: ""
+ version: v1
+ - kind: Service
+ name: ""
+ version: v1
+ - kind: ServiceAccount
+ name: ""
+ version: v1
+ - kind: ServiceMonitor
+ name: ""
+ version: v1
+ - kind: StatefulSet
+ name: ""
+ version: v1
+ specDescriptors:
+ - description: Limits defines the limits to be applied to log stream processing.
+ displayName: Rate Limiting
+ path: limits
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:advanced
+ - description: Global defines the limits applied globally across the cluster.
+ displayName: Global Limits
+ path: limits.global
+ - description: IngestionBurstSize defines the local rate-limited sample size
+      per distributor replica. It should be set at least to the maximum
+ logs size expected in a single push request.
+ displayName: Ingestion Burst Size (in MB)
+ path: limits.global.ingestion.ingestionBurstSize
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
+ - description: IngestionRate defines the sample size per second. Units MB.
+ displayName: Ingestion Rate (in MB)
+ path: limits.global.ingestion.ingestionRate
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
+ - description: MaxGlobalStreamsPerTenant defines the maximum number of active
+ streams per tenant, across the cluster.
+ displayName: Max Global Streams per Tenant
+ path: limits.global.ingestion.maxGlobalStreamsPerTenant
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
+ - description: MaxLabelNameLength defines the maximum number of characters allowed
+ for label keys in log streams.
+ displayName: Max Label Name Length
+ path: limits.global.ingestion.maxLabelNameLength
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
+ - description: MaxLabelNamesPerSeries defines the maximum number of label names
+ per series in each log stream.
+ displayName: Max Labels Names per Series
+ path: limits.global.ingestion.maxLabelNamesPerSeries
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
+ - description: MaxLabelValueLength defines the maximum number of characters
+ allowed for label values in log streams.
+ displayName: Max Label Value Length
+ path: limits.global.ingestion.maxLabelValueLength
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
+  - description: MaxLineSize defines the maximum line size on the ingestion path.
+ Units in Bytes.
+ displayName: Max Line Size
+ path: limits.global.ingestion.maxLineSize
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
+ - description: MaxChunksPerQuery defines the maximum number of chunks that can
+ be fetched by a single query.
+ displayName: Max Chunk per Query
+ path: limits.global.queries.maxChunksPerQuery
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
+  - description: MaxEntriesLimitPerQuery defines the maximum number of log entries
+ that will be returned for a query.
+ displayName: Max Entries Limit per Query
+ path: limits.global.queries.maxEntriesLimitPerQuery
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
+  - description: MaxQuerySeries defines the maximum number of unique series
+      returned by a metric query.
+ displayName: Max Query Series
+ path: limits.global.queries.maxQuerySeries
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
+ - description: Tenants defines the limits applied per tenant.
+ displayName: Limits per Tenant
+ path: limits.tenants
+ - description: IngestionBurstSize defines the local rate-limited sample size
+      per distributor replica. It should be set at least to the maximum
+ logs size expected in a single push request.
+ displayName: Ingestion Burst Size (in MB)
+ path: limits.tenants.ingestion.ingestionBurstSize
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
+ - description: IngestionRate defines the sample size per second. Units MB.
+ displayName: Ingestion Rate (in MB)
+ path: limits.tenants.ingestion.ingestionRate
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
+ - description: MaxGlobalStreamsPerTenant defines the maximum number of active
+ streams per tenant, across the cluster.
+ displayName: Max Global Streams per Tenant
+ path: limits.tenants.ingestion.maxGlobalStreamsPerTenant
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
+ - description: MaxLabelNameLength defines the maximum number of characters allowed
+ for label keys in log streams.
+ displayName: Max Label Name Length
+ path: limits.tenants.ingestion.maxLabelNameLength
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
+ - description: MaxLabelNamesPerSeries defines the maximum number of label names
+ per series in each log stream.
+ displayName: Max Labels Names per Series
+ path: limits.tenants.ingestion.maxLabelNamesPerSeries
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
+ - description: MaxLabelValueLength defines the maximum number of characters
+ allowed for label values in log streams.
+ displayName: Max Label Value Length
+ path: limits.tenants.ingestion.maxLabelValueLength
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
+  - description: MaxLineSize defines the maximum line size on the ingestion path.
+ Units in Bytes.
+ displayName: Max Line Size
+ path: limits.tenants.ingestion.maxLineSize
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
+ - description: MaxChunksPerQuery defines the maximum number of chunks that can
+ be fetched by a single query.
+ displayName: Max Chunk per Query
+ path: limits.tenants.queries.maxChunksPerQuery
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
+  - description: MaxEntriesLimitPerQuery defines the maximum number of log entries
+ that will be returned for a query.
+ displayName: Max Entries Limit per Query
+ path: limits.tenants.queries.maxEntriesLimitPerQuery
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
+  - description: MaxQuerySeries defines the maximum number of unique series
+      returned by a metric query.
+ displayName: Max Query Series
+ path: limits.tenants.queries.maxQuerySeries
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
+ - description: ManagementState defines if the CR should be managed by the operator
+ or not. Default is managed.
+ displayName: Management State
+ path: managementState
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:select:Managed
+ - urn:alm:descriptor:com.tectonic.ui:select:Unmanaged
+ - description: ReplicationFactor defines the policy for log stream replication.
+ displayName: Replication Factor
+ path: replicationFactor
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
+ - description: Rules defines the spec for the ruler component
+ displayName: Rules
+ path: rules
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:advanced
+ - description: Enabled defines a flag to enable/disable the ruler component
+ displayName: Enable
+ path: rules.enabled
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+ - description: Namespaces to be selected for PrometheusRules discovery. If unspecified,
+      only the namespace of the LokiStack object is used.
+ displayName: Namespace Selector
+ path: rules.namespaceSelector
+  - description: A selector that determines which LokiRules to mount for loading
+      alerting/recording rules.
+ displayName: Selector
+ path: rules.selector
+  - description: Size defines one of the supported Loki deployment scale out sizes.
+ displayName: LokiStack Size
+ path: size
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:select:1x.extra-small
+ - urn:alm:descriptor:com.tectonic.ui:select:1x.small
+ - urn:alm:descriptor:com.tectonic.ui:select:1x.medium
+ - description: Storage defines the spec for the object storage endpoint to store
+ logs.
+ displayName: Object Storage
+ path: storage
+ - description: Version for writing and reading logs.
+ displayName: Version
+ path: storage.schemas[0].version
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:select:v11
+ - urn:alm:descriptor:com.tectonic.ui:select:v12
+ - description: Name of a secret in the namespace configured for object storage
+ secrets.
+ displayName: Object Storage Secret Name
+ path: storage.secret.name
+ x-descriptors:
+ - urn:alm:descriptor:io.kubernetes:Secret
+ - description: Type of object storage that should be used
+ displayName: Object Storage Secret Type
+ path: storage.secret.type
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:select:azure
+ - urn:alm:descriptor:com.tectonic.ui:select:gcs
+ - urn:alm:descriptor:com.tectonic.ui:select:s3
+ - urn:alm:descriptor:com.tectonic.ui:select:swift
+ - description: TLS configuration for reaching the object storage endpoint.
+ displayName: TLS Config
+ path: storage.tls
+ - description: CA is the name of a ConfigMap containing a CA certificate. It
+ needs to be in the same namespace as the LokiStack custom resource.
+ displayName: CA ConfigMap Name
+ path: storage.tls.caName
+ x-descriptors:
+ - urn:alm:descriptor:io.kubernetes:ConfigMap
+ - description: Storage class name defines the storage class for ingester/querier
+ PVCs.
+ displayName: Storage Class Name
+ path: storageClassName
+ x-descriptors:
+ - urn:alm:descriptor:io.kubernetes:StorageClass
+ - description: Template defines the resource/limits/tolerations/nodeselectors
+ per component
+ displayName: Node Placement
+ path: template
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:advanced
+ - description: Compactor defines the compaction component spec.
+ displayName: Compactor pods
+ path: template.compactor
+ - description: Replicas defines the number of replica pods of the component.
+ displayName: Replicas
+ path: template.compactor.replicas
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:hidden
+ - description: Distributor defines the distributor component spec.
+ displayName: Distributor pods
+ path: template.distributor
+ - description: Replicas defines the number of replica pods of the component.
+ displayName: Replicas
+ path: template.distributor.replicas
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:hidden
+ - description: Gateway defines the lokistack gateway component spec.
+ displayName: Gateway pods
+ path: template.gateway
+ - description: Replicas defines the number of replica pods of the component.
+ displayName: Replicas
+ path: template.gateway.replicas
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:hidden
+ - description: IndexGateway defines the index gateway component spec.
+ displayName: Index Gateway pods
+ path: template.indexGateway
+ - description: Replicas defines the number of replica pods of the component.
+ displayName: Replicas
+ path: template.indexGateway.replicas
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:hidden
+ - description: Ingester defines the ingester component spec.
+ displayName: Ingester pods
+ path: template.ingester
+ - description: Replicas defines the number of replica pods of the component.
+ displayName: Replicas
+ path: template.ingester.replicas
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:hidden
+ - description: Querier defines the querier component spec.
+ displayName: Querier pods
+ path: template.querier
+ - description: Replicas defines the number of replica pods of the component.
+ displayName: Replicas
+ path: template.querier.replicas
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:hidden
+ - description: QueryFrontend defines the query frontend component spec.
+ displayName: Query Frontend pods
+ path: template.queryFrontend
+ - description: Replicas defines the number of replica pods of the component.
+ displayName: Replicas
+ path: template.queryFrontend.replicas
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:hidden
+ - description: Ruler defines the ruler component spec.
+ displayName: Ruler pods
+ path: template.ruler
+ - description: Replicas defines the number of replica pods of the component.
+ displayName: Replicas
+ path: template.ruler.replicas
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:hidden
+ - description: Tenants defines the per-tenant authentication and authorization
+ spec for the lokistack-gateway component.
+ displayName: Tenants Configuration
+ path: tenants
+ - description: Authentication defines the lokistack-gateway component authentication
+ configuration spec per tenant.
+ displayName: Authentication
+ path: tenants.authentication
+ - description: OIDC defines the spec for the OIDC tenant's authentication.
+ displayName: OIDC Configuration
+ path: tenants.authentication[0].oidc
+ - description: IssuerURL defines the URL for issuer.
+ displayName: Issuer URL
+ path: tenants.authentication[0].oidc.issuerURL
+ - description: RedirectURL defines the URL for redirect.
+ displayName: Redirect URL
+ path: tenants.authentication[0].oidc.redirectURL
+ - description: Secret defines the spec for the clientID, clientSecret and issuerCAPath
+ for tenant's authentication.
+ displayName: Tenant Secret
+ path: tenants.authentication[0].oidc.secret
+ - description: Name of a secret in the namespace configured for tenant secrets.
+ displayName: Tenant Secret Name
+ path: tenants.authentication[0].oidc.secret.name
+ x-descriptors:
+ - urn:alm:descriptor:io.kubernetes:Secret
+ - description: TenantID defines the id of the tenant.
+ displayName: Tenant ID
+ path: tenants.authentication[0].tenantId
+ - description: TenantName defines the name of the tenant.
+ displayName: Tenant Name
+ path: tenants.authentication[0].tenantName
+ - description: Authorization defines the lokistack-gateway component authorization
+ configuration spec per tenant.
+ displayName: Authorization
+ path: tenants.authorization
+ - description: OPA defines the spec for the third-party endpoint for tenant's
+ authorization.
+ displayName: OPA Configuration
+ path: tenants.authorization.opa
+ - description: URL defines the third-party endpoint for authorization.
+ displayName: OpenPolicyAgent URL
+ path: tenants.authorization.opa.url
+ - description: RoleBindings defines configuration to bind a set of roles to
+ a set of subjects.
+ displayName: Static Role Bindings
+ path: tenants.authorization.roleBindings
+ - description: Roles defines a set of permissions to interact with a tenant.
+ displayName: Static Roles
+ path: tenants.authorization.roles
+ - description: Mode defines the mode in which lokistack-gateway component will
+ be configured.
+ displayName: Mode
+ path: tenants.mode
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:select:static
+ - urn:alm:descriptor:com.tectonic.ui:select:dynamic
+ - urn:alm:descriptor:com.tectonic.ui:select:openshift-logging
+ statusDescriptors:
- description: Distributor is a map to the per pod status of the distributor
deployment
displayName: Distributor
@@ -515,7 +934,7 @@ spec:
resources:
- kind: LokiStack
name: ""
- version: v1beta1
+ version: v1
specDescriptors:
- description: List of groups for recording rules.
displayName: Groups
@@ -563,7 +982,7 @@ spec:
resources:
- kind: LokiStack
name: ""
- version: v1beta1
+ version: v1
specDescriptors:
- description: Defines alert manager configuration to notify on firing alerts.
displayName: Alert Manager Configuration
@@ -777,7 +1196,7 @@ spec:
deployments: null
strategy: ""
installModes:
- - supported: true
+ - supported: false
type: OwnNamespace
- supported: false
type: SingleNamespace
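The limits descriptors above map one-to-one onto the LokiStack spec; a short sketch of what the corresponding fields could look like in a CR, with purely illustrative numbers (units per the descriptors: MB for the ingestion fields, bytes for maxLineSize):

    spec:
      limits:
        global:
          ingestion:
            ingestionRate: 4              # MB per second
            ingestionBurstSize: 6         # MB, at least the largest expected push
            maxGlobalStreamsPerTenant: 10000
          queries:
            maxChunksPerQuery: 2000000
            maxEntriesLimitPerQuery: 5000
            maxQuerySeries: 500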
diff --git a/operator/config/overlays/openshift/kustomization.yaml b/operator/config/overlays/openshift/kustomization.yaml
index 7e21f4ff6013e..032f2aaef5678 100644
--- a/operator/config/overlays/openshift/kustomization.yaml
+++ b/operator/config/overlays/openshift/kustomization.yaml
@@ -6,7 +6,7 @@ resources:
- ../../prometheus
# Adds namespace to all resources.
-namespace: openshift-logging
+namespace: openshift-operators-redhat
# Value of this field is prepended to the
# names of all resources, e.g. a deployment named
diff --git a/operator/config/samples/kustomization.yaml b/operator/config/samples/kustomization.yaml
index bd14a02d9b59d..65e177f8b0ffe 100644
--- a/operator/config/samples/kustomization.yaml
+++ b/operator/config/samples/kustomization.yaml
@@ -1,7 +1,7 @@
## Append samples you want in your CSV to this file as resources ##
resources:
-- loki_v1beta1_lokistack.yaml
- loki_v1beta1_alertingrule.yaml
- loki_v1beta1_recordingrule.yaml
- loki_v1beta1_rulerconfig.yaml
+- loki_v1_lokistack.yaml
# +kubebuilder:scaffold:manifestskustomizesamples
diff --git a/operator/config/samples/loki_v1beta1_lokistack.yaml b/operator/config/samples/loki_v1_lokistack.yaml
similarity index 79%
rename from operator/config/samples/loki_v1beta1_lokistack.yaml
rename to operator/config/samples/loki_v1_lokistack.yaml
index 5839d2e80b00a..9529053a9bf9e 100644
--- a/operator/config/samples/loki_v1beta1_lokistack.yaml
+++ b/operator/config/samples/loki_v1_lokistack.yaml
@@ -1,4 +1,4 @@
-apiVersion: loki.grafana.com/v1beta1
+apiVersion: loki.grafana.com/v1
kind: LokiStack
metadata:
name: lokistack-sample
diff --git a/operator/config/webhook/manifests.yaml b/operator/config/webhook/manifests.yaml
index f8a127bb20546..c50dd95b5ca13 100644
--- a/operator/config/webhook/manifests.yaml
+++ b/operator/config/webhook/manifests.yaml
@@ -11,19 +11,19 @@ webhooks:
service:
name: webhook-service
namespace: system
- path: /validate-loki-grafana-com-v1beta1-alertingrule
+ path: /validate-loki-grafana-com-v1-lokistack
failurePolicy: Fail
- name: valertingrule.kb.io
+ name: vlokistack.loki.grafana.com
rules:
- apiGroups:
- loki.grafana.com
apiVersions:
- - v1beta1
+ - v1
operations:
- CREATE
- UPDATE
resources:
- - alertingrules
+ - lokistacks
sideEffects: None
- admissionReviewVersions:
- v1
@@ -31,9 +31,9 @@ webhooks:
service:
name: webhook-service
namespace: system
- path: /validate-loki-grafana-com-v1beta1-lokistack
+ path: /validate-loki-grafana-com-v1beta1-alertingrule
failurePolicy: Fail
- name: vlokistack.kb.io
+ name: valertingrule.kb.io
rules:
- apiGroups:
- loki.grafana.com
@@ -43,7 +43,7 @@ webhooks:
- CREATE
- UPDATE
resources:
- - lokistacks
+ - alertingrules
sideEffects: None
- admissionReviewVersions:
- v1
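The renamed vlokistack.loki.grafana.com entry above is the kind of manifest controller-gen derives from a kubebuilder marker on the API type. A sketch of what such a marker could look like, reconstructed from the manifest fields; the exact marker in the operator source is not part of this diff:

    // +kubebuilder:webhook:path=/validate-loki-grafana-com-v1-lokistack,mutating=false,failurePolicy=fail,sideEffects=None,groups=loki.grafana.com,resources=lokistacks,verbs=create;update,versions=v1,name=vlokistack.loki.grafana.com,admissionReviewVersions=v1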
diff --git a/operator/controllers/loki/internal/lokistack/ruler_config_discovery.go b/operator/controllers/loki/internal/lokistack/ruler_config_discovery.go
index 71d6bd8f6a8b7..f4f2e00d9aef8 100644
--- a/operator/controllers/loki/internal/lokistack/ruler_config_discovery.go
+++ b/operator/controllers/loki/internal/lokistack/ruler_config_discovery.go
@@ -5,7 +5,7 @@ import (
"time"
"github.com/ViaQ/logerr/v2/kverrors"
- lokistackv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -15,7 +15,7 @@ import (
// to the named Lokistack in the same namespace of the RulerConfig. If no LokiStack is found, then
// skip reconciliation.
func AnnotateForRulerConfig(ctx context.Context, k k8s.Client, name, namespace string) error {
- var s lokistackv1beta1.LokiStack
+ var s lokiv1.LokiStack
key := client.ObjectKey{Name: name, Namespace: namespace}
if err := k.Get(ctx, key, &s); err != nil {
diff --git a/operator/controllers/loki/internal/lokistack/rules_discovery.go b/operator/controllers/loki/internal/lokistack/rules_discovery.go
index 6b75ba4f17842..f5082d7884def 100644
--- a/operator/controllers/loki/internal/lokistack/rules_discovery.go
+++ b/operator/controllers/loki/internal/lokistack/rules_discovery.go
@@ -5,7 +5,7 @@ import (
"time"
"github.com/ViaQ/logerr/v2/kverrors"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -14,7 +14,7 @@ import (
// AnnotateForDiscoveredRules adds/updates the `loki.grafana.com/rulesDiscoveredAt` annotation
// to all instance of LokiStack on all namespaces to trigger the reconciliation loop.
func AnnotateForDiscoveredRules(ctx context.Context, k k8s.Client) error {
- var stacks lokiv1beta1.LokiStackList
+ var stacks lokiv1.LokiStackList
err := k.List(ctx, &stacks, client.MatchingLabelsSelector{Selector: labels.Everything()})
if err != nil {
return kverrors.Wrap(err, "failed to list any lokistack instances", "req")
diff --git a/operator/controllers/loki/internal/management/state/state.go b/operator/controllers/loki/internal/management/state/state.go
index 459387c1d5ace..33a31b36dd333 100644
--- a/operator/controllers/loki/internal/management/state/state.go
+++ b/operator/controllers/loki/internal/management/state/state.go
@@ -3,7 +3,7 @@ package state
import (
"context"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/ViaQ/logerr/v2/kverrors"
@@ -13,12 +13,12 @@ import (
// IsManaged checks if the custom resource is configured with ManagementState Managed.
func IsManaged(ctx context.Context, req ctrl.Request, k k8s.Client) (bool, error) {
- var stack lokiv1beta1.LokiStack
+ var stack lokiv1.LokiStack
if err := k.Get(ctx, req.NamespacedName, &stack); err != nil {
if apierrors.IsNotFound(err) {
return false, nil
}
return false, kverrors.Wrap(err, "failed to lookup lokistack", "name", req.NamespacedName)
}
- return stack.Spec.ManagementState == lokiv1beta1.ManagementStateManaged, nil
+ return stack.Spec.ManagementState == lokiv1.ManagementStateManaged, nil
}
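A sketch of how a reconciler could gate its work on IsManaged; the Reconcile signature follows controller-runtime conventions and the r.Client field is an assumption, not code from this diff:

    func (r *LokiStackReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
        managed, err := state.IsManaged(ctx, req, r.Client)
        if err != nil {
            return ctrl.Result{}, err
        }
        if !managed {
            // ManagementState is Unmanaged, or the stack no longer exists: nothing to do.
            return ctrl.Result{}, nil
        }
        // ... reconcile the managed LokiStack ...
        return ctrl.Result{}, nil
    }

Note that IsManaged deliberately returns (false, nil) on a NotFound error, so a deleted stack short-circuits the loop the same way an Unmanaged one does.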
diff --git a/operator/controllers/loki/internal/management/state/state_test.go b/operator/controllers/loki/internal/management/state/state_test.go
index bebf7a3265e6c..9f200800c1e05 100644
--- a/operator/controllers/loki/internal/management/state/state_test.go
+++ b/operator/controllers/loki/internal/management/state/state_test.go
@@ -4,7 +4,7 @@ import (
"context"
"testing"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/controllers/loki/internal/management/state"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
@@ -21,7 +21,7 @@ import (
func TestIsManaged(t *testing.T) {
type test struct {
name string
- stack lokiv1beta1.LokiStack
+ stack lokiv1.LokiStack
wantOk bool
}
@@ -35,7 +35,7 @@ func TestIsManaged(t *testing.T) {
table := []test{
{
name: "managed",
- stack: lokiv1beta1.LokiStack{
+ stack: lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -44,15 +44,15 @@ func TestIsManaged(t *testing.T) {
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- ManagementState: lokiv1beta1.ManagementStateManaged,
+ Spec: lokiv1.LokiStackSpec{
+ ManagementState: lokiv1.ManagementStateManaged,
},
},
wantOk: true,
},
{
name: "unmanaged",
- stack: lokiv1beta1.LokiStack{
+ stack: lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -61,8 +61,8 @@ func TestIsManaged(t *testing.T) {
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- ManagementState: lokiv1beta1.ManagementStateUnmanaged,
+ Spec: lokiv1.LokiStackSpec{
+ ManagementState: lokiv1.ManagementStateUnmanaged,
},
},
},
diff --git a/operator/controllers/loki/lokistack_controller.go b/operator/controllers/loki/lokistack_controller.go
index f286277f6554c..4f412563c3139 100644
--- a/operator/controllers/loki/lokistack_controller.go
+++ b/operator/controllers/loki/lokistack_controller.go
@@ -28,7 +28,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/predicate"
configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)
var (
@@ -152,7 +152,7 @@ func (r *LokiStackReconciler) SetupWithManager(mgr manager.Manager) error {
func (r *LokiStackReconciler) buildController(bld k8s.Builder) error {
bld = bld.
- For(&lokiv1beta1.LokiStack{}, createOrUpdateOnlyPred).
+ For(&lokiv1.LokiStack{}, createOrUpdateOnlyPred).
Owns(&corev1.ConfigMap{}, updateOrDeleteOnlyPred).
Owns(&corev1.ServiceAccount{}, updateOrDeleteOnlyPred).
Owns(&corev1.Service{}, updateOrDeleteOnlyPred).
diff --git a/operator/controllers/loki/lokistack_controller_test.go b/operator/controllers/loki/lokistack_controller_test.go
index 2b60252ae347a..7ab6481393a67 100644
--- a/operator/controllers/loki/lokistack_controller_test.go
+++ b/operator/controllers/loki/lokistack_controller_test.go
@@ -7,7 +7,7 @@ import (
"testing"
configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/ViaQ/logerr/v2/log"
@@ -44,7 +44,7 @@ func TestMain(m *testing.M) {
// Register the clientgo and CRD schemes
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(routev1.AddToScheme(scheme))
- utilruntime.Must(lokiv1beta1.AddToScheme(scheme))
+ utilruntime.Must(lokiv1.AddToScheme(scheme))
os.Exit(m.Run())
}
@@ -65,7 +65,7 @@ func TestLokiStackController_RegistersCustomResourceForCreateOrUpdate(t *testing
// Require For-call options to have create and update predicates
obj, opts := b.ForArgsForCall(0)
- require.Equal(t, &lokiv1beta1.LokiStack{}, obj)
+ require.Equal(t, &lokiv1.LokiStack{}, obj)
require.Equal(t, opts[0], createOrUpdateOnlyPred)
}
diff --git a/operator/hack/lokistack_dev.yaml b/operator/hack/lokistack_dev.yaml
index 0bcabe5a645ff..9c935ffeb81c7 100644
--- a/operator/hack/lokistack_dev.yaml
+++ b/operator/hack/lokistack_dev.yaml
@@ -1,10 +1,13 @@
-apiVersion: loki.grafana.com/v1beta1
+apiVersion: loki.grafana.com/v1
kind: LokiStack
metadata:
name: lokistack-dev
spec:
size: 1x.extra-small
storage:
+ schemas:
+ - version: v12
+ effectiveDate: 2022-06-01
secret:
name: test
type: s3
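The schemas list added here is what drives index-format migrations; entries are date-ordered and, per the CRD description, each effectiveDate should already lie in the past when it is served. A sketch of a stack that started on v11 and later moved to v12, with illustrative dates:

    storage:
      schemas:
      - version: v11
        effectiveDate: "2020-10-01"   # assumed date the stack was created
      - version: v12
        effectiveDate: "2022-06-01"   # matches the date used in these hack manifests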
diff --git a/operator/hack/lokistack_gateway_dev.yaml b/operator/hack/lokistack_gateway_dev.yaml
index 9620f2840de5c..c644d45af2834 100644
--- a/operator/hack/lokistack_gateway_dev.yaml
+++ b/operator/hack/lokistack_gateway_dev.yaml
@@ -6,13 +6,16 @@ stringData:
metadata:
name: test-oidc
---
-apiVersion: loki.grafana.com/v1beta1
+apiVersion: loki.grafana.com/v1
kind: LokiStack
metadata:
name: lokistack-dev
spec:
size: 1x.extra-small
storage:
+ schemas:
+ - version: v12
+ effectiveDate: 2022-06-01
secret:
name: test
type: s3
diff --git a/operator/hack/lokistack_gateway_ocp.yaml b/operator/hack/lokistack_gateway_ocp.yaml
index a46c1e22db26b..34d9a8ba23af6 100644
--- a/operator/hack/lokistack_gateway_ocp.yaml
+++ b/operator/hack/lokistack_gateway_ocp.yaml
@@ -1,4 +1,4 @@
-apiVersion: loki.grafana.com/v1beta1
+apiVersion: loki.grafana.com/v1
kind: LokiStack
metadata:
name: lokistack-dev
@@ -6,6 +6,9 @@ metadata:
spec:
size: 1x.extra-small
storage:
+ schemas:
+ - version: v12
+ effectiveDate: 2022-06-01
secret:
name: test
type: s3
diff --git a/operator/internal/handlers/internal/gateway/base_domain.go b/operator/internal/handlers/internal/gateway/base_domain.go
index ce4f66ca58685..656ff20a18172 100644
--- a/operator/internal/handlers/internal/gateway/base_domain.go
+++ b/operator/internal/handlers/internal/gateway/base_domain.go
@@ -4,7 +4,7 @@ import (
"context"
"github.com/ViaQ/logerr/v2/kverrors"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/status"
configv1 "github.com/openshift/api/config/v1"
@@ -25,7 +25,7 @@ func GetOpenShiftBaseDomain(ctx context.Context, k k8s.Client, req ctrl.Request)
if apierrors.IsNotFound(err) {
return "", &status.DegradedError{
Message: "Missing cluster DNS configuration to read base domain",
- Reason: lokiv1beta1.ReasonMissingGatewayOpenShiftBaseDomain,
+ Reason: lokiv1.ReasonMissingGatewayOpenShiftBaseDomain,
Requeue: true,
}
}
diff --git a/operator/internal/handlers/internal/gateway/modes.go b/operator/internal/handlers/internal/gateway/modes.go
index 56c839bbb2b39..b44294981bbf4 100644
--- a/operator/internal/handlers/internal/gateway/modes.go
+++ b/operator/internal/handlers/internal/gateway/modes.go
@@ -2,12 +2,12 @@ package gateway
import (
"github.com/ViaQ/logerr/v2/kverrors"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)
// ValidateModes validates the tenants mode specification.
-func ValidateModes(stack lokiv1beta1.LokiStack) error {
- if stack.Spec.Tenants.Mode == lokiv1beta1.Static {
+func ValidateModes(stack lokiv1.LokiStack) error {
+ if stack.Spec.Tenants.Mode == lokiv1.Static {
if stack.Spec.Tenants.Authentication == nil {
return kverrors.New("mandatory configuration - missing tenants' authentication configuration")
}
@@ -25,7 +25,7 @@ func ValidateModes(stack lokiv1beta1.LokiStack) error {
}
}
- if stack.Spec.Tenants.Mode == lokiv1beta1.Dynamic {
+ if stack.Spec.Tenants.Mode == lokiv1.Dynamic {
if stack.Spec.Tenants.Authentication == nil {
return kverrors.New("mandatory configuration - missing tenants configuration")
}
@@ -43,7 +43,7 @@ func ValidateModes(stack lokiv1beta1.LokiStack) error {
}
}
- if stack.Spec.Tenants.Mode == lokiv1beta1.OpenshiftLogging {
+ if stack.Spec.Tenants.Mode == lokiv1.OpenshiftLogging {
if stack.Spec.Tenants.Authentication != nil {
return kverrors.New("incompatible configuration - custom tenants configuration not required")
}
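ValidateModes is pure over the CR, so it is easy to exercise directly with the new v1 types; a minimal sketch with illustrative values:

    stack := lokiv1.LokiStack{
        Spec: lokiv1.LokiStackSpec{
            Tenants: &lokiv1.TenantsSpec{
                Mode:           lokiv1.OpenshiftLogging,
                Authentication: nil, // custom tenant configuration is rejected in this mode
            },
        },
    }
    if err := gateway.ValidateModes(stack); err != nil {
        // handle the misconfiguration, e.g. surface it as a degraded condition
    }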
diff --git a/operator/internal/handlers/internal/gateway/modes_test.go b/operator/internal/handlers/internal/gateway/modes_test.go
index 12386987e8274..2b79f6383d7f5 100644
--- a/operator/internal/handlers/internal/gateway/modes_test.go
+++ b/operator/internal/handlers/internal/gateway/modes_test.go
@@ -3,7 +3,7 @@ package gateway
import (
"testing"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -12,13 +12,13 @@ func TestValidateModes_StaticMode(t *testing.T) {
type test struct {
name string
wantErr string
- stack lokiv1beta1.LokiStack
+ stack lokiv1.LokiStack
}
table := []test{
{
name: "missing authentication spec",
wantErr: "mandatory configuration - missing tenants' authentication configuration",
- stack: lokiv1beta1.LokiStack{
+ stack: lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -27,9 +27,9 @@ func TestValidateModes_StaticMode(t *testing.T) {
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Tenants: &lokiv1beta1.TenantsSpec{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Tenants: &lokiv1.TenantsSpec{
Mode: "static",
},
},
@@ -38,7 +38,7 @@ func TestValidateModes_StaticMode(t *testing.T) {
{
name: "missing roles spec",
wantErr: "mandatory configuration - missing roles configuration",
- stack: lokiv1beta1.LokiStack{
+ stack: lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -47,15 +47,15 @@ func TestValidateModes_StaticMode(t *testing.T) {
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Tenants: &lokiv1beta1.TenantsSpec{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Tenants: &lokiv1.TenantsSpec{
Mode: "static",
- Authentication: []lokiv1beta1.AuthenticationSpec{
+ Authentication: []lokiv1.AuthenticationSpec{
{
TenantName: "test",
TenantID: "1234",
- OIDC: &lokiv1beta1.OIDCSpec{
+ OIDC: &lokiv1.OIDCSpec{
IssuerURL: "some-url",
RedirectURL: "some-other-url",
GroupClaim: "test",
@@ -63,7 +63,7 @@ func TestValidateModes_StaticMode(t *testing.T) {
},
},
},
- Authorization: &lokiv1beta1.AuthorizationSpec{
+ Authorization: &lokiv1.AuthorizationSpec{
Roles: nil,
},
},
@@ -73,7 +73,7 @@ func TestValidateModes_StaticMode(t *testing.T) {
{
name: "missing role bindings spec",
wantErr: "mandatory configuration - missing role bindings configuration",
- stack: lokiv1beta1.LokiStack{
+ stack: lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -82,15 +82,15 @@ func TestValidateModes_StaticMode(t *testing.T) {
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Tenants: &lokiv1beta1.TenantsSpec{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Tenants: &lokiv1.TenantsSpec{
Mode: "static",
- Authentication: []lokiv1beta1.AuthenticationSpec{
+ Authentication: []lokiv1.AuthenticationSpec{
{
TenantName: "test",
TenantID: "1234",
- OIDC: &lokiv1beta1.OIDCSpec{
+ OIDC: &lokiv1.OIDCSpec{
IssuerURL: "some-url",
RedirectURL: "some-other-url",
GroupClaim: "test",
@@ -98,13 +98,13 @@ func TestValidateModes_StaticMode(t *testing.T) {
},
},
},
- Authorization: &lokiv1beta1.AuthorizationSpec{
- Roles: []lokiv1beta1.RoleSpec{
+ Authorization: &lokiv1.AuthorizationSpec{
+ Roles: []lokiv1.RoleSpec{
{
Name: "some-name",
Resources: []string{"test"},
Tenants: []string{"test"},
- Permissions: []lokiv1beta1.PermissionType{"read"},
+ Permissions: []lokiv1.PermissionType{"read"},
},
},
RoleBindings: nil,
@@ -116,7 +116,7 @@ func TestValidateModes_StaticMode(t *testing.T) {
{
name: "incompatible OPA URL provided",
wantErr: "incompatible configuration - OPA URL not required for mode static",
- stack: lokiv1beta1.LokiStack{
+ stack: lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -125,15 +125,15 @@ func TestValidateModes_StaticMode(t *testing.T) {
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Tenants: &lokiv1beta1.TenantsSpec{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Tenants: &lokiv1.TenantsSpec{
Mode: "static",
- Authentication: []lokiv1beta1.AuthenticationSpec{
+ Authentication: []lokiv1.AuthenticationSpec{
{
TenantName: "test",
TenantID: "1234",
- OIDC: &lokiv1beta1.OIDCSpec{
+ OIDC: &lokiv1.OIDCSpec{
IssuerURL: "some-url",
RedirectURL: "some-other-url",
GroupClaim: "test",
@@ -141,22 +141,22 @@ func TestValidateModes_StaticMode(t *testing.T) {
},
},
},
- Authorization: &lokiv1beta1.AuthorizationSpec{
- OPA: &lokiv1beta1.OPASpec{
+ Authorization: &lokiv1.AuthorizationSpec{
+ OPA: &lokiv1.OPASpec{
URL: "some-url",
},
- Roles: []lokiv1beta1.RoleSpec{
+ Roles: []lokiv1.RoleSpec{
{
Name: "some-name",
Resources: []string{"test"},
Tenants: []string{"test"},
- Permissions: []lokiv1beta1.PermissionType{"read"},
+ Permissions: []lokiv1.PermissionType{"read"},
},
},
- RoleBindings: []lokiv1beta1.RoleBindingsSpec{
+ RoleBindings: []lokiv1.RoleBindingsSpec{
{
Name: "some-name",
- Subjects: []lokiv1beta1.Subject{
+ Subjects: []lokiv1.Subject{
{
Name: "sub-1",
Kind: "user",
@@ -173,7 +173,7 @@ func TestValidateModes_StaticMode(t *testing.T) {
{
name: "all set",
wantErr: "",
- stack: lokiv1beta1.LokiStack{
+ stack: lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -182,15 +182,15 @@ func TestValidateModes_StaticMode(t *testing.T) {
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Tenants: &lokiv1beta1.TenantsSpec{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Tenants: &lokiv1.TenantsSpec{
Mode: "static",
- Authentication: []lokiv1beta1.AuthenticationSpec{
+ Authentication: []lokiv1.AuthenticationSpec{
{
TenantName: "test",
TenantID: "1234",
- OIDC: &lokiv1beta1.OIDCSpec{
+ OIDC: &lokiv1.OIDCSpec{
IssuerURL: "some-url",
RedirectURL: "some-other-url",
GroupClaim: "test",
@@ -198,19 +198,19 @@ func TestValidateModes_StaticMode(t *testing.T) {
},
},
},
- Authorization: &lokiv1beta1.AuthorizationSpec{
- Roles: []lokiv1beta1.RoleSpec{
+ Authorization: &lokiv1.AuthorizationSpec{
+ Roles: []lokiv1.RoleSpec{
{
Name: "some-name",
Resources: []string{"test"},
Tenants: []string{"test"},
- Permissions: []lokiv1beta1.PermissionType{"read"},
+ Permissions: []lokiv1.PermissionType{"read"},
},
},
- RoleBindings: []lokiv1beta1.RoleBindingsSpec{
+ RoleBindings: []lokiv1.RoleBindingsSpec{
{
Name: "some-name",
- Subjects: []lokiv1beta1.Subject{
+ Subjects: []lokiv1.Subject{
{
Name: "sub-1",
Kind: "user",
@@ -242,13 +242,13 @@ func TestValidateModes_DynamicMode(t *testing.T) {
type test struct {
name string
wantErr string
- stack lokiv1beta1.LokiStack
+ stack lokiv1.LokiStack
}
table := []test{
{
name: "missing authentication spec",
wantErr: "mandatory configuration - missing tenants configuration",
- stack: lokiv1beta1.LokiStack{
+ stack: lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -257,9 +257,9 @@ func TestValidateModes_DynamicMode(t *testing.T) {
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Tenants: &lokiv1beta1.TenantsSpec{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Tenants: &lokiv1.TenantsSpec{
Mode: "dynamic",
},
},
@@ -268,7 +268,7 @@ func TestValidateModes_DynamicMode(t *testing.T) {
{
name: "missing OPA URL spec",
wantErr: "mandatory configuration - missing OPA Url",
- stack: lokiv1beta1.LokiStack{
+ stack: lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -277,15 +277,15 @@ func TestValidateModes_DynamicMode(t *testing.T) {
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Tenants: &lokiv1beta1.TenantsSpec{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Tenants: &lokiv1.TenantsSpec{
Mode: "dynamic",
- Authentication: []lokiv1beta1.AuthenticationSpec{
+ Authentication: []lokiv1.AuthenticationSpec{
{
TenantName: "test",
TenantID: "1234",
- OIDC: &lokiv1beta1.OIDCSpec{
+ OIDC: &lokiv1.OIDCSpec{
IssuerURL: "some-url",
RedirectURL: "some-other-url",
GroupClaim: "test",
@@ -293,7 +293,7 @@ func TestValidateModes_DynamicMode(t *testing.T) {
},
},
},
- Authorization: &lokiv1beta1.AuthorizationSpec{
+ Authorization: &lokiv1.AuthorizationSpec{
OPA: nil,
},
},
@@ -303,7 +303,7 @@ func TestValidateModes_DynamicMode(t *testing.T) {
{
name: "incompatible roles configuration provided",
wantErr: "incompatible configuration - static roles not required for mode dynamic",
- stack: lokiv1beta1.LokiStack{
+ stack: lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -312,15 +312,15 @@ func TestValidateModes_DynamicMode(t *testing.T) {
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Tenants: &lokiv1beta1.TenantsSpec{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Tenants: &lokiv1.TenantsSpec{
Mode: "dynamic",
- Authentication: []lokiv1beta1.AuthenticationSpec{
+ Authentication: []lokiv1.AuthenticationSpec{
{
TenantName: "test",
TenantID: "1234",
- OIDC: &lokiv1beta1.OIDCSpec{
+ OIDC: &lokiv1.OIDCSpec{
IssuerURL: "some-url",
RedirectURL: "some-other-url",
GroupClaim: "test",
@@ -328,16 +328,16 @@ func TestValidateModes_DynamicMode(t *testing.T) {
},
},
},
- Authorization: &lokiv1beta1.AuthorizationSpec{
- OPA: &lokiv1beta1.OPASpec{
+ Authorization: &lokiv1.AuthorizationSpec{
+ OPA: &lokiv1.OPASpec{
URL: "some-url",
},
- Roles: []lokiv1beta1.RoleSpec{
+ Roles: []lokiv1.RoleSpec{
{
Name: "some-name",
Resources: []string{"test"},
Tenants: []string{"test"},
- Permissions: []lokiv1beta1.PermissionType{"read"},
+ Permissions: []lokiv1.PermissionType{"read"},
},
},
},
@@ -348,7 +348,7 @@ func TestValidateModes_DynamicMode(t *testing.T) {
{
name: "incompatible roleBindings configuration provided",
wantErr: "incompatible configuration - static roleBindings not required for mode dynamic",
- stack: lokiv1beta1.LokiStack{
+ stack: lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -357,15 +357,15 @@ func TestValidateModes_DynamicMode(t *testing.T) {
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Tenants: &lokiv1beta1.TenantsSpec{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Tenants: &lokiv1.TenantsSpec{
Mode: "dynamic",
- Authentication: []lokiv1beta1.AuthenticationSpec{
+ Authentication: []lokiv1.AuthenticationSpec{
{
TenantName: "test",
TenantID: "1234",
- OIDC: &lokiv1beta1.OIDCSpec{
+ OIDC: &lokiv1.OIDCSpec{
IssuerURL: "some-url",
RedirectURL: "some-other-url",
GroupClaim: "test",
@@ -373,14 +373,14 @@ func TestValidateModes_DynamicMode(t *testing.T) {
},
},
},
- Authorization: &lokiv1beta1.AuthorizationSpec{
- OPA: &lokiv1beta1.OPASpec{
+ Authorization: &lokiv1.AuthorizationSpec{
+ OPA: &lokiv1.OPASpec{
URL: "some-url",
},
- RoleBindings: []lokiv1beta1.RoleBindingsSpec{
+ RoleBindings: []lokiv1.RoleBindingsSpec{
{
Name: "some-name",
- Subjects: []lokiv1beta1.Subject{
+ Subjects: []lokiv1.Subject{
{
Name: "sub-1",
Kind: "user",
@@ -397,7 +397,7 @@ func TestValidateModes_DynamicMode(t *testing.T) {
{
name: "all set",
wantErr: "",
- stack: lokiv1beta1.LokiStack{
+ stack: lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -406,15 +406,15 @@ func TestValidateModes_DynamicMode(t *testing.T) {
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Tenants: &lokiv1beta1.TenantsSpec{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Tenants: &lokiv1.TenantsSpec{
Mode: "dynamic",
- Authentication: []lokiv1beta1.AuthenticationSpec{
+ Authentication: []lokiv1.AuthenticationSpec{
{
TenantName: "test",
TenantID: "1234",
- OIDC: &lokiv1beta1.OIDCSpec{
+ OIDC: &lokiv1.OIDCSpec{
IssuerURL: "some-url",
RedirectURL: "some-other-url",
GroupClaim: "test",
@@ -422,8 +422,8 @@ func TestValidateModes_DynamicMode(t *testing.T) {
},
},
},
- Authorization: &lokiv1beta1.AuthorizationSpec{
- OPA: &lokiv1beta1.OPASpec{
+ Authorization: &lokiv1.AuthorizationSpec{
+ OPA: &lokiv1.OPASpec{
URL: "some-url",
},
},
@@ -449,13 +449,13 @@ func TestValidateModes_OpenshiftLoggingMode(t *testing.T) {
type test struct {
name string
wantErr string
- stack lokiv1beta1.LokiStack
+ stack lokiv1.LokiStack
}
table := []test{
{
name: "incompatible authentication spec provided",
wantErr: "incompatible configuration - custom tenants configuration not required",
- stack: lokiv1beta1.LokiStack{
+ stack: lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -464,15 +464,15 @@ func TestValidateModes_OpenshiftLoggingMode(t *testing.T) {
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Tenants: &lokiv1beta1.TenantsSpec{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Tenants: &lokiv1.TenantsSpec{
Mode: "openshift-logging",
- Authentication: []lokiv1beta1.AuthenticationSpec{
+ Authentication: []lokiv1.AuthenticationSpec{
{
TenantName: "test",
TenantID: "1234",
- OIDC: &lokiv1beta1.OIDCSpec{
+ OIDC: &lokiv1.OIDCSpec{
IssuerURL: "some-url",
RedirectURL: "some-other-url",
GroupClaim: "test",
@@ -487,7 +487,7 @@ func TestValidateModes_OpenshiftLoggingMode(t *testing.T) {
{
name: "incompatible authorization spec provided",
wantErr: "incompatible configuration - custom tenants configuration not required",
- stack: lokiv1beta1.LokiStack{
+ stack: lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -496,13 +496,13 @@ func TestValidateModes_OpenshiftLoggingMode(t *testing.T) {
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Tenants: &lokiv1beta1.TenantsSpec{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Tenants: &lokiv1.TenantsSpec{
Mode: "openshift-logging",
Authentication: nil,
- Authorization: &lokiv1beta1.AuthorizationSpec{
- OPA: &lokiv1beta1.OPASpec{
+ Authorization: &lokiv1.AuthorizationSpec{
+ OPA: &lokiv1.OPASpec{
URL: "some-url",
},
},
@@ -513,7 +513,7 @@ func TestValidateModes_OpenshiftLoggingMode(t *testing.T) {
{
name: "all set",
wantErr: "",
- stack: lokiv1beta1.LokiStack{
+ stack: lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -522,9 +522,9 @@ func TestValidateModes_OpenshiftLoggingMode(t *testing.T) {
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Tenants: &lokiv1beta1.TenantsSpec{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Tenants: &lokiv1.TenantsSpec{
Mode: "openshift-logging",
},
},
diff --git a/operator/internal/handlers/internal/gateway/tenant_secrets.go b/operator/internal/handlers/internal/gateway/tenant_secrets.go
index 6396e132293fc..398742e2f1372 100644
--- a/operator/internal/handlers/internal/gateway/tenant_secrets.go
+++ b/operator/internal/handlers/internal/gateway/tenant_secrets.go
@@ -6,7 +6,7 @@ import (
"github.com/ViaQ/logerr/v2/kverrors"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/manifests"
"github.com/grafana/loki/operator/internal/status"
@@ -25,7 +25,7 @@ func GetTenantSecrets(
ctx context.Context,
k k8s.Client,
req ctrl.Request,
- stack *lokiv1beta1.LokiStack,
+ stack *lokiv1.LokiStack,
) ([]*manifests.TenantSecrets, error) {
var (
tenantSecrets []*manifests.TenantSecrets
@@ -38,7 +38,7 @@ func GetTenantSecrets(
if apierrors.IsNotFound(err) {
return nil, &status.DegradedError{
Message: fmt.Sprintf("Missing secrets for tenant %s", tenant.TenantName),
- Reason: lokiv1beta1.ReasonMissingGatewayTenantSecret,
+ Reason: lokiv1.ReasonMissingGatewayTenantSecret,
Requeue: true,
}
}
@@ -51,7 +51,7 @@ func GetTenantSecrets(
if err != nil {
return nil, &status.DegradedError{
Message: "Invalid gateway tenant secret contents",
- Reason: lokiv1beta1.ReasonInvalidGatewayTenantSecret,
+ Reason: lokiv1.ReasonInvalidGatewayTenantSecret,
Requeue: true,
}
}
diff --git a/operator/internal/handlers/internal/gateway/tenant_secrets_test.go b/operator/internal/handlers/internal/gateway/tenant_secrets_test.go
index b443bbd86239a..1af09e0192c83 100644
--- a/operator/internal/handlers/internal/gateway/tenant_secrets_test.go
+++ b/operator/internal/handlers/internal/gateway/tenant_secrets_test.go
@@ -6,7 +6,7 @@ import (
"github.com/stretchr/testify/require"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/grafana/loki/operator/internal/manifests"
@@ -26,20 +26,20 @@ func TestGetTenantSecrets_StaticMode(t *testing.T) {
},
}
- s := &lokiv1beta1.LokiStack{
+ s := &lokiv1.LokiStack{
ObjectMeta: metav1.ObjectMeta{
Name: "mystack",
Namespace: "some-ns",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Tenants: &lokiv1beta1.TenantsSpec{
- Mode: lokiv1beta1.Static,
- Authentication: []lokiv1beta1.AuthenticationSpec{
+ Spec: lokiv1.LokiStackSpec{
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.Static,
+ Authentication: []lokiv1.AuthenticationSpec{
{
TenantName: "test",
TenantID: "test",
- OIDC: &lokiv1beta1.OIDCSpec{
- Secret: &lokiv1beta1.TenantSecretSpec{
+ OIDC: &lokiv1.OIDCSpec{
+ Secret: &lokiv1.TenantSecretSpec{
Name: "test",
},
},
@@ -89,20 +89,20 @@ func TestGetTenantSecrets_DynamicMode(t *testing.T) {
},
}
- s := &lokiv1beta1.LokiStack{
+ s := &lokiv1.LokiStack{
ObjectMeta: metav1.ObjectMeta{
Name: "mystack",
Namespace: "some-ns",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Tenants: &lokiv1beta1.TenantsSpec{
- Mode: lokiv1beta1.Dynamic,
- Authentication: []lokiv1beta1.AuthenticationSpec{
+ Spec: lokiv1.LokiStackSpec{
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.Dynamic,
+ Authentication: []lokiv1.AuthenticationSpec{
{
TenantName: "test",
TenantID: "test",
- OIDC: &lokiv1beta1.OIDCSpec{
- Secret: &lokiv1beta1.TenantSecretSpec{
+ OIDC: &lokiv1.OIDCSpec{
+ Secret: &lokiv1.TenantSecretSpec{
Name: "test",
},
},
diff --git a/operator/internal/handlers/internal/rules/rules.go b/operator/internal/handlers/internal/rules/rules.go
index 89cddaea56b04..ee14a49a47925 100644
--- a/operator/internal/handlers/internal/rules/rules.go
+++ b/operator/internal/handlers/internal/rules/rules.go
@@ -4,6 +4,7 @@ import (
"context"
"github.com/ViaQ/logerr/v2/kverrors"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
"github.com/grafana/loki/operator/internal/external/k8s"
corev1 "k8s.io/api/core/v1"
@@ -16,7 +17,7 @@ import (
// - Return only matching rules in the stack namespace if no namespace selector given.
// - Return only matching rules in the stack namespace and in namespaces matching the namespace selector.
// - Return no rules if rules selector does not apply at all.
-func List(ctx context.Context, k k8s.Client, stackNs string, rs *lokiv1beta1.RulesSpec) ([]lokiv1beta1.AlertingRule, []lokiv1beta1.RecordingRule, error) {
+func List(ctx context.Context, k k8s.Client, stackNs string, rs *lokiv1.RulesSpec) ([]lokiv1beta1.AlertingRule, []lokiv1beta1.RecordingRule, error) {
nsl, err := selectRulesNamespaces(ctx, k, stackNs, rs)
if err != nil {
return nil, nil, err
@@ -55,7 +56,7 @@ func List(ctx context.Context, k k8s.Client, stackNs string, rs *lokiv1beta1.Rul
return alerts, recs, nil
}
-func selectRulesNamespaces(ctx context.Context, k k8s.Client, stackNs string, rs *lokiv1beta1.RulesSpec) (corev1.NamespaceList, error) {
+func selectRulesNamespaces(ctx context.Context, k k8s.Client, stackNs string, rs *lokiv1.RulesSpec) (corev1.NamespaceList, error) {
var stackNamespace corev1.Namespace
key := client.ObjectKey{Name: stackNs}
@@ -88,7 +89,7 @@ func selectRulesNamespaces(ctx context.Context, k k8s.Client, stackNs string, rs
return nsList, nil
}
-func selectAlertingRules(ctx context.Context, k k8s.Client, rs *lokiv1beta1.RulesSpec) (lokiv1beta1.AlertingRuleList, error) {
+func selectAlertingRules(ctx context.Context, k k8s.Client, rs *lokiv1.RulesSpec) (lokiv1beta1.AlertingRuleList, error) {
rulesSelector, err := metav1.LabelSelectorAsSelector(rs.Selector)
if err != nil {
return lokiv1beta1.AlertingRuleList{}, kverrors.Wrap(err, "failed to create AlertingRules selector", "selector", rs.Selector)
@@ -103,7 +104,7 @@ func selectAlertingRules(ctx context.Context, k k8s.Client, rs *lokiv1beta1.Rule
return rl, nil
}
-func selectRecordingRules(ctx context.Context, k k8s.Client, rs *lokiv1beta1.RulesSpec) (lokiv1beta1.RecordingRuleList, error) {
+func selectRecordingRules(ctx context.Context, k k8s.Client, rs *lokiv1.RulesSpec) (lokiv1beta1.RecordingRuleList, error) {
rulesSelector, err := metav1.LabelSelectorAsSelector(rs.Selector)
if err != nil {
return lokiv1beta1.RecordingRuleList{}, kverrors.Wrap(err, "failed to create RecordingRules selector", "selector", rs.Selector)
diff --git a/operator/internal/handlers/internal/rules/rules_test.go b/operator/internal/handlers/internal/rules/rules_test.go
index 5cb2da4c302db..9255241477e93 100644
--- a/operator/internal/handlers/internal/rules/rules_test.go
+++ b/operator/internal/handlers/internal/rules/rules_test.go
@@ -4,6 +4,7 @@ import (
"context"
"testing"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/grafana/loki/operator/internal/handlers/internal/rules"
@@ -21,7 +22,7 @@ func TestList_AlertingRulesMatchSelector_WithDefaultStackNamespaceRules(t *testi
const stackNs = "some-ns"
k := &k8sfakes.FakeClient{}
- rs := &lokiv1beta1.RulesSpec{
+ rs := &lokiv1.RulesSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"labelname": "labelvalue",
@@ -93,7 +94,7 @@ func TestList_AlertingRulesMatchSelector_FilteredByNamespaceSelector(t *testing.
const stackNs = "some-ns"
k := &k8sfakes.FakeClient{}
- rs := &lokiv1beta1.RulesSpec{
+ rs := &lokiv1.RulesSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"labelname": "labelvalue",
@@ -195,7 +196,7 @@ func TestList_RecordingRulesMatchSelector_WithDefaultStackNamespaceRules(t *test
const stackNs = "some-ns"
k := &k8sfakes.FakeClient{}
- rs := &lokiv1beta1.RulesSpec{
+ rs := &lokiv1.RulesSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"labelname": "labelvalue",
@@ -267,7 +268,7 @@ func TestList_RecordingRulesMatchSelector_FilteredByNamespaceSelector(t *testing
const stackNs = "some-ns"
k := &k8sfakes.FakeClient{}
- rs := &lokiv1beta1.RulesSpec{
+ rs := &lokiv1.RulesSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"labelname": "labelvalue",
diff --git a/operator/internal/handlers/internal/storage/secrets.go b/operator/internal/handlers/internal/storage/secrets.go
index edf0f5871640e..d7603123fc5a8 100644
--- a/operator/internal/handlers/internal/storage/secrets.go
+++ b/operator/internal/handlers/internal/storage/secrets.go
@@ -2,14 +2,14 @@ package storage
import (
"github.com/ViaQ/logerr/v2/kverrors"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/storage"
corev1 "k8s.io/api/core/v1"
)
// ExtractSecret reads a k8s secret into a manifest object storage struct if valid.
-func ExtractSecret(s *corev1.Secret, secretType lokiv1beta1.ObjectStorageSecretType) (*storage.Options, error) {
+func ExtractSecret(s *corev1.Secret, secretType lokiv1.ObjectStorageSecretType) (*storage.Options, error) {
var err error
storageOpts := storage.Options{
SecretName: s.Name,
@@ -17,13 +17,13 @@ func ExtractSecret(s *corev1.Secret, secretType lokiv1beta1.ObjectStorageSecretT
}
switch secretType {
- case lokiv1beta1.ObjectStorageSecretAzure:
+ case lokiv1.ObjectStorageSecretAzure:
storageOpts.Azure, err = extractAzureConfigSecret(s)
- case lokiv1beta1.ObjectStorageSecretGCS:
+ case lokiv1.ObjectStorageSecretGCS:
storageOpts.GCS, err = extractGCSConfigSecret(s)
- case lokiv1beta1.ObjectStorageSecretS3:
+ case lokiv1.ObjectStorageSecretS3:
storageOpts.S3, err = extractS3ConfigSecret(s)
- case lokiv1beta1.ObjectStorageSecretSwift:
+ case lokiv1.ObjectStorageSecretSwift:
storageOpts.Swift, err = extractSwiftConfigSecret(s)
default:
return nil, kverrors.New("unknown secret type", "type", secretType)
diff --git a/operator/internal/handlers/internal/storage/secrets_test.go b/operator/internal/handlers/internal/storage/secrets_test.go
index d6579a49b34fa..df69e2180b75d 100644
--- a/operator/internal/handlers/internal/storage/secrets_test.go
+++ b/operator/internal/handlers/internal/storage/secrets_test.go
@@ -3,7 +3,7 @@ package storage_test
import (
"testing"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/handlers/internal/storage"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
@@ -68,7 +68,7 @@ func TestAzureExtract(t *testing.T) {
t.Run(tst.name, func(t *testing.T) {
t.Parallel()
- _, err := storage.ExtractSecret(tst.secret, lokiv1beta1.ObjectStorageSecretAzure)
+ _, err := storage.ExtractSecret(tst.secret, lokiv1.ObjectStorageSecretAzure)
if !tst.wantErr {
require.NoError(t, err)
}
@@ -115,7 +115,7 @@ func TestGCSExtract(t *testing.T) {
t.Run(tst.name, func(t *testing.T) {
t.Parallel()
- _, err := storage.ExtractSecret(tst.secret, lokiv1beta1.ObjectStorageSecretGCS)
+ _, err := storage.ExtractSecret(tst.secret, lokiv1.ObjectStorageSecretGCS)
if !tst.wantErr {
require.NoError(t, err)
}
@@ -185,7 +185,7 @@ func TestS3Extract(t *testing.T) {
t.Run(tst.name, func(t *testing.T) {
t.Parallel()
- _, err := storage.ExtractSecret(tst.secret, lokiv1beta1.ObjectStorageSecretS3)
+ _, err := storage.ExtractSecret(tst.secret, lokiv1.ObjectStorageSecretS3)
if !tst.wantErr {
require.NoError(t, err)
}
@@ -330,7 +330,7 @@ func TestSwiftExtract(t *testing.T) {
t.Run(tst.name, func(t *testing.T) {
t.Parallel()
- _, err := storage.ExtractSecret(tst.secret, lokiv1beta1.ObjectStorageSecretSwift)
+ _, err := storage.ExtractSecret(tst.secret, lokiv1.ObjectStorageSecretSwift)
if !tst.wantErr {
require.NoError(t, err)
}
diff --git a/operator/internal/handlers/lokistack_create_or_update.go b/operator/internal/handlers/lokistack_create_or_update.go
index 001d3308a12c1..2ed40b3efc31e 100644
--- a/operator/internal/handlers/lokistack_create_or_update.go
+++ b/operator/internal/handlers/lokistack_create_or_update.go
@@ -7,6 +7,7 @@ import (
"time"
configv1 "github.com/grafana/loki/operator/apis/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/handlers/internal/gateway"
@@ -38,7 +39,7 @@ func CreateOrUpdateLokiStack(
) error {
ll := log.WithValues("lokistack", req.NamespacedName, "event", "createOrUpdate")
- var stack lokiv1beta1.LokiStack
+ var stack lokiv1.LokiStack
if err := k.Get(ctx, req.NamespacedName, &stack); err != nil {
if apierrors.IsNotFound(err) {
			// Maybe the user deleted it before we could react? Either way this isn't an issue.
@@ -64,7 +65,7 @@ func CreateOrUpdateLokiStack(
if apierrors.IsNotFound(err) {
return &status.DegradedError{
Message: "Missing object storage secret",
- Reason: lokiv1beta1.ReasonMissingObjectStorageSecret,
+ Reason: lokiv1.ReasonMissingObjectStorageSecret,
Requeue: false,
}
}
@@ -75,7 +76,7 @@ func CreateOrUpdateLokiStack(
if err != nil {
return &status.DegradedError{
Message: fmt.Sprintf("Invalid object storage secret contents: %s", err),
- Reason: lokiv1beta1.ReasonInvalidObjectStorageSecret,
+ Reason: lokiv1.ReasonInvalidObjectStorageSecret,
Requeue: false,
}
}
@@ -88,7 +89,7 @@ func CreateOrUpdateLokiStack(
if err != nil {
return &status.DegradedError{
Message: fmt.Sprintf("Invalid object storage schema contents: %s", err),
- Reason: lokiv1beta1.ReasonInvalidObjectStorageSchema,
+ Reason: lokiv1.ReasonInvalidObjectStorageSchema,
Requeue: false,
}
}
@@ -102,7 +103,7 @@ func CreateOrUpdateLokiStack(
if apierrors.IsNotFound(err) {
return &status.DegradedError{
Message: "Missing object storage CA config map",
- Reason: lokiv1beta1.ReasonMissingObjectStorageCAConfigMap,
+ Reason: lokiv1.ReasonMissingObjectStorageCAConfigMap,
Requeue: false,
}
}
@@ -112,7 +113,7 @@ func CreateOrUpdateLokiStack(
if !storage.IsValidCAConfigMap(&cm) {
return &status.DegradedError{
Message: "Invalid object storage CA configmap contents: missing key `service-ca.crt` or no contents",
- Reason: lokiv1beta1.ReasonInvalidObjectStorageCAConfigMap,
+ Reason: lokiv1.ReasonInvalidObjectStorageCAConfigMap,
Requeue: false,
}
}
@@ -128,26 +129,26 @@ func CreateOrUpdateLokiStack(
if fg.LokiStackGateway && stack.Spec.Tenants == nil {
return &status.DegradedError{
Message: "Invalid tenants configuration - TenantsSpec cannot be nil when gateway flag is enabled",
- Reason: lokiv1beta1.ReasonInvalidTenantsConfiguration,
+ Reason: lokiv1.ReasonInvalidTenantsConfiguration,
Requeue: false,
}
} else if fg.LokiStackGateway && stack.Spec.Tenants != nil {
if err = gateway.ValidateModes(stack); err != nil {
return &status.DegradedError{
Message: fmt.Sprintf("Invalid tenants configuration: %s", err),
- Reason: lokiv1beta1.ReasonInvalidTenantsConfiguration,
+ Reason: lokiv1.ReasonInvalidTenantsConfiguration,
Requeue: false,
}
}
- if stack.Spec.Tenants.Mode != lokiv1beta1.OpenshiftLogging {
+ if stack.Spec.Tenants.Mode != lokiv1.OpenshiftLogging {
tenantSecrets, err = gateway.GetTenantSecrets(ctx, k, req, &stack)
if err != nil {
return err
}
}
- if stack.Spec.Tenants.Mode == lokiv1beta1.OpenshiftLogging {
+ if stack.Spec.Tenants.Mode == lokiv1.OpenshiftLogging {
baseDomain, err = gateway.GetOpenShiftBaseDomain(ctx, k, req)
if err != nil {
return err
@@ -185,7 +186,7 @@ func CreateOrUpdateLokiStack(
if apierrors.IsNotFound(err) {
return &status.DegradedError{
Message: "Missing ruler remote write authorization secret",
- Reason: lokiv1beta1.ReasonMissingRulerSecret,
+ Reason: lokiv1.ReasonMissingRulerSecret,
Requeue: false,
}
}
@@ -196,14 +197,14 @@ func CreateOrUpdateLokiStack(
if err != nil {
return &status.DegradedError{
Message: "Invalid ruler remote write authorization secret contents",
- Reason: lokiv1beta1.ReasonInvalidRulerSecret,
+ Reason: lokiv1.ReasonInvalidRulerSecret,
Requeue: false,
}
}
}
}
- // Here we will translate the lokiv1beta1.LokiStack options into manifest options
+ // Here we will translate the lokiv1.LokiStack options into manifest options
opts := manifests.Options{
Name: req.Name,
Namespace: req.Namespace,
@@ -294,7 +295,7 @@ func CreateOrUpdateLokiStack(
// 1x.extra-small is used only for development, so the metrics will not
// be collected.
- if opts.Stack.Size != lokiv1beta1.SizeOneXExtraSmall {
+ if opts.Stack.Size != lokiv1.SizeOneXExtraSmall {
metrics.Collect(&opts.Stack, opts.Name)
}
diff --git a/operator/internal/handlers/lokistack_create_or_update_test.go b/operator/internal/handlers/lokistack_create_or_update_test.go
index 5d5f5c6ccc1f9..c04c926586267 100644
--- a/operator/internal/handlers/lokistack_create_or_update_test.go
+++ b/operator/internal/handlers/lokistack_create_or_update_test.go
@@ -9,7 +9,7 @@ import (
"testing"
configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/grafana/loki/operator/internal/handlers"
"github.com/grafana/loki/operator/internal/status"
@@ -100,7 +100,7 @@ func TestMain(m *testing.M) {
// Register the clientgo and CRD schemes
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(routev1.AddToScheme(scheme))
- utilruntime.Must(lokiv1beta1.AddToScheme(scheme))
+ utilruntime.Must(lokiv1.AddToScheme(scheme))
os.Exit(m.Run())
}
@@ -163,7 +163,7 @@ func TestCreateOrUpdateLokiStack_SetsNamespaceOnAllObjects(t *testing.T) {
},
}
- stack := lokiv1beta1.LokiStack{
+ stack := lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -172,35 +172,35 @@ func TestCreateOrUpdateLokiStack_SetsNamespaceOnAllObjects(t *testing.T) {
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Storage: lokiv1beta1.ObjectStorageSpec{
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
},
- Secret: lokiv1beta1.ObjectStorageSecretSpec{
+ Secret: lokiv1.ObjectStorageSecretSpec{
Name: defaultSecret.Name,
- Type: lokiv1beta1.ObjectStorageSecretS3,
+ Type: lokiv1.ObjectStorageSecretS3,
},
},
- Tenants: &lokiv1beta1.TenantsSpec{
+ Tenants: &lokiv1.TenantsSpec{
Mode: "dynamic",
- Authentication: []lokiv1beta1.AuthenticationSpec{
+ Authentication: []lokiv1.AuthenticationSpec{
{
TenantName: "test",
TenantID: "1234",
- OIDC: &lokiv1beta1.OIDCSpec{
- Secret: &lokiv1beta1.TenantSecretSpec{
+ OIDC: &lokiv1.OIDCSpec{
+ Secret: &lokiv1.TenantSecretSpec{
Name: defaultGatewaySecret.Name,
},
},
},
},
- Authorization: &lokiv1beta1.AuthorizationSpec{
- OPA: &lokiv1beta1.OPASpec{
+ Authorization: &lokiv1.AuthorizationSpec{
+ OPA: &lokiv1.OPASpec{
URL: "some-url",
},
},
@@ -248,7 +248,7 @@ func TestCreateOrUpdateLokiStack_SetsOwnerRefOnAllObjects(t *testing.T) {
},
}
- stack := lokiv1beta1.LokiStack{
+ stack := lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -257,35 +257,35 @@ func TestCreateOrUpdateLokiStack_SetsOwnerRefOnAllObjects(t *testing.T) {
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Storage: lokiv1beta1.ObjectStorageSpec{
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
},
- Secret: lokiv1beta1.ObjectStorageSecretSpec{
+ Secret: lokiv1.ObjectStorageSecretSpec{
Name: defaultSecret.Name,
- Type: lokiv1beta1.ObjectStorageSecretS3,
+ Type: lokiv1.ObjectStorageSecretS3,
},
},
- Tenants: &lokiv1beta1.TenantsSpec{
+ Tenants: &lokiv1.TenantsSpec{
Mode: "dynamic",
- Authentication: []lokiv1beta1.AuthenticationSpec{
+ Authentication: []lokiv1.AuthenticationSpec{
{
TenantName: "test",
TenantID: "1234",
- OIDC: &lokiv1beta1.OIDCSpec{
- Secret: &lokiv1beta1.TenantSecretSpec{
+ OIDC: &lokiv1.OIDCSpec{
+ Secret: &lokiv1.TenantSecretSpec{
Name: defaultGatewaySecret.Name,
},
},
},
},
- Authorization: &lokiv1beta1.AuthorizationSpec{
- OPA: &lokiv1beta1.OPASpec{
+ Authorization: &lokiv1.AuthorizationSpec{
+ OPA: &lokiv1.OPASpec{
URL: "some-url",
},
},
@@ -311,7 +311,7 @@ func TestCreateOrUpdateLokiStack_SetsOwnerRefOnAllObjects(t *testing.T) {
}
expected := metav1.OwnerReference{
- APIVersion: lokiv1beta1.GroupVersion.String(),
+ APIVersion: lokiv1.GroupVersion.String(),
Kind: stack.Kind,
Name: stack.Name,
UID: stack.UID,
@@ -355,7 +355,7 @@ func TestCreateOrUpdateLokiStack_WhenSetControllerRefInvalid_ContinueWithOtherOb
},
}
- stack := lokiv1beta1.LokiStack{
+ stack := lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -367,18 +367,18 @@ func TestCreateOrUpdateLokiStack_WhenSetControllerRefInvalid_ContinueWithOtherOb
Namespace: "invalid-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Storage: lokiv1beta1.ObjectStorageSpec{
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
},
- Secret: lokiv1beta1.ObjectStorageSecretSpec{
+ Secret: lokiv1.ObjectStorageSecretSpec{
Name: defaultSecret.Name,
- Type: lokiv1beta1.ObjectStorageSecretS3,
+ Type: lokiv1.ObjectStorageSecretS3,
},
},
},
@@ -413,7 +413,7 @@ func TestCreateOrUpdateLokiStack_WhenGetReturnsNoError_UpdateObjects(t *testing.
},
}
- stack := lokiv1beta1.LokiStack{
+ stack := lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -422,18 +422,18 @@ func TestCreateOrUpdateLokiStack_WhenGetReturnsNoError_UpdateObjects(t *testing.
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Storage: lokiv1beta1.ObjectStorageSpec{
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
},
- Secret: lokiv1beta1.ObjectStorageSecretSpec{
+ Secret: lokiv1.ObjectStorageSecretSpec{
Name: defaultSecret.Name,
- Type: lokiv1beta1.ObjectStorageSecretS3,
+ Type: lokiv1.ObjectStorageSecretS3,
},
},
},
@@ -457,7 +457,7 @@ func TestCreateOrUpdateLokiStack_WhenGetReturnsNoError_UpdateObjects(t *testing.
},
OwnerReferences: []metav1.OwnerReference{
{
- APIVersion: "loki.grafana.com/v1beta1",
+ APIVersion: "loki.grafana.com/v1",
Kind: "LokiStack",
Name: "my-stack",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
@@ -519,7 +519,7 @@ func TestCreateOrUpdateLokiStack_WhenCreateReturnsError_ContinueWithOtherObjects
},
}
- stack := lokiv1beta1.LokiStack{
+ stack := lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -528,18 +528,18 @@ func TestCreateOrUpdateLokiStack_WhenCreateReturnsError_ContinueWithOtherObjects
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Storage: lokiv1beta1.ObjectStorageSpec{
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
},
- Secret: lokiv1beta1.ObjectStorageSecretSpec{
+ Secret: lokiv1.ObjectStorageSecretSpec{
Name: defaultSecret.Name,
- Type: lokiv1beta1.ObjectStorageSecretS3,
+ Type: lokiv1.ObjectStorageSecretS3,
},
},
},
@@ -582,7 +582,7 @@ func TestCreateOrUpdateLokiStack_WhenUpdateReturnsError_ContinueWithOtherObjects
},
}
- stack := lokiv1beta1.LokiStack{
+ stack := lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -591,18 +591,18 @@ func TestCreateOrUpdateLokiStack_WhenUpdateReturnsError_ContinueWithOtherObjects
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Storage: lokiv1beta1.ObjectStorageSpec{
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
},
- Secret: lokiv1beta1.ObjectStorageSecretSpec{
+ Secret: lokiv1.ObjectStorageSecretSpec{
Name: defaultSecret.Name,
- Type: lokiv1beta1.ObjectStorageSecretS3,
+ Type: lokiv1.ObjectStorageSecretS3,
},
},
},
@@ -626,7 +626,7 @@ func TestCreateOrUpdateLokiStack_WhenUpdateReturnsError_ContinueWithOtherObjects
},
OwnerReferences: []metav1.OwnerReference{
{
- APIVersion: "loki.grafana.com/v1beta1",
+ APIVersion: "loki.grafana.com/v1",
Kind: "LokiStack",
Name: "someStack",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
@@ -692,11 +692,11 @@ func TestCreateOrUpdateLokiStack_WhenMissingSecret_SetDegraded(t *testing.T) {
degradedErr := &status.DegradedError{
Message: "Missing object storage secret",
- Reason: lokiv1beta1.ReasonMissingObjectStorageSecret,
+ Reason: lokiv1.ReasonMissingObjectStorageSecret,
Requeue: false,
}
- stack := &lokiv1beta1.LokiStack{
+ stack := &lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -705,18 +705,18 @@ func TestCreateOrUpdateLokiStack_WhenMissingSecret_SetDegraded(t *testing.T) {
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Storage: lokiv1beta1.ObjectStorageSpec{
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
},
- Secret: lokiv1beta1.ObjectStorageSecretSpec{
+ Secret: lokiv1.ObjectStorageSecretSpec{
Name: defaultSecret.Name,
- Type: lokiv1beta1.ObjectStorageSecretS3,
+ Type: lokiv1.ObjectStorageSecretS3,
},
},
},
@@ -753,11 +753,11 @@ func TestCreateOrUpdateLokiStack_WhenInvalidSecret_SetDegraded(t *testing.T) {
degradedErr := &status.DegradedError{
Message: "Invalid object storage secret contents: missing secret field",
- Reason: lokiv1beta1.ReasonInvalidObjectStorageSecret,
+ Reason: lokiv1.ReasonInvalidObjectStorageSecret,
Requeue: false,
}
- stack := &lokiv1beta1.LokiStack{
+ stack := &lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -766,18 +766,18 @@ func TestCreateOrUpdateLokiStack_WhenInvalidSecret_SetDegraded(t *testing.T) {
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Storage: lokiv1beta1.ObjectStorageSpec{
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
},
- Secret: lokiv1beta1.ObjectStorageSecretSpec{
+ Secret: lokiv1.ObjectStorageSecretSpec{
Name: invalidSecret.Name,
- Type: lokiv1beta1.ObjectStorageSecretS3,
+ Type: lokiv1.ObjectStorageSecretS3,
},
},
},
@@ -818,11 +818,11 @@ func TestCreateOrUpdateLokiStack_WithInvalidStorageSchema_SetDegraded(t *testing
degradedErr := &status.DegradedError{
Message: "Invalid object storage schema contents: spec does not contain any schemas",
- Reason: lokiv1beta1.ReasonInvalidObjectStorageSchema,
+ Reason: lokiv1.ReasonInvalidObjectStorageSchema,
Requeue: false,
}
- stack := &lokiv1beta1.LokiStack{
+ stack := &lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -831,25 +831,25 @@ func TestCreateOrUpdateLokiStack_WithInvalidStorageSchema_SetDegraded(t *testing
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Storage: lokiv1beta1.ObjectStorageSpec{
- Schemas: []lokiv1beta1.ObjectStorageSchema{},
- Secret: lokiv1beta1.ObjectStorageSecretSpec{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{},
+ Secret: lokiv1.ObjectStorageSecretSpec{
Name: defaultSecret.Name,
- Type: lokiv1beta1.ObjectStorageSecretS3,
+ Type: lokiv1.ObjectStorageSecretS3,
},
},
},
- Status: lokiv1beta1.LokiStackStatus{
- Storage: lokiv1beta1.LokiStackStorageStatus{
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ Status: lokiv1.LokiStackStatus{
+ Storage: lokiv1.LokiStackStorageStatus{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
{
- Version: lokiv1beta1.ObjectStorageSchemaV12,
+ Version: lokiv1.ObjectStorageSchemaV12,
EffectiveDate: "2021-10-11",
},
},
@@ -892,11 +892,11 @@ func TestCreateOrUpdateLokiStack_WhenMissingCAConfigMap_SetDegraded(t *testing.T
degradedErr := &status.DegradedError{
Message: "Missing object storage CA config map",
- Reason: lokiv1beta1.ReasonMissingObjectStorageCAConfigMap,
+ Reason: lokiv1.ReasonMissingObjectStorageCAConfigMap,
Requeue: false,
}
- stack := &lokiv1beta1.LokiStack{
+ stack := &lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -905,20 +905,20 @@ func TestCreateOrUpdateLokiStack_WhenMissingCAConfigMap_SetDegraded(t *testing.T
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Storage: lokiv1beta1.ObjectStorageSpec{
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
},
- Secret: lokiv1beta1.ObjectStorageSecretSpec{
+ Secret: lokiv1.ObjectStorageSecretSpec{
Name: defaultSecret.Name,
- Type: lokiv1beta1.ObjectStorageSecretS3,
+ Type: lokiv1.ObjectStorageSecretS3,
},
- TLS: &lokiv1beta1.ObjectStorageTLSSpec{
+ TLS: &lokiv1.ObjectStorageTLSSpec{
CA: "not-existing",
},
},
@@ -962,11 +962,11 @@ func TestCreateOrUpdateLokiStack_WhenInvalidCAConfigMap_SetDegraded(t *testing.T
degradedErr := &status.DegradedError{
Message: "Invalid object storage CA configmap contents: missing key `service-ca.crt` or no contents",
- Reason: lokiv1beta1.ReasonInvalidObjectStorageCAConfigMap,
+ Reason: lokiv1.ReasonInvalidObjectStorageCAConfigMap,
Requeue: false,
}
- stack := &lokiv1beta1.LokiStack{
+ stack := &lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -975,20 +975,20 @@ func TestCreateOrUpdateLokiStack_WhenInvalidCAConfigMap_SetDegraded(t *testing.T
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Storage: lokiv1beta1.ObjectStorageSpec{
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
},
- Secret: lokiv1beta1.ObjectStorageSecretSpec{
+ Secret: lokiv1.ObjectStorageSecretSpec{
Name: defaultSecret.Name,
- Type: lokiv1beta1.ObjectStorageSecretS3,
+ Type: lokiv1.ObjectStorageSecretS3,
},
- TLS: &lokiv1beta1.ObjectStorageTLSSpec{
+ TLS: &lokiv1.ObjectStorageTLSSpec{
CA: invalidCAConfigMap.Name,
},
},
@@ -1035,7 +1035,7 @@ func TestCreateOrUpdateLokiStack_WhenInvalidTenantsConfiguration_SetDegraded(t *
degradedErr := &status.DegradedError{
Message: "Invalid tenants configuration: mandatory configuration - missing OPA Url",
- Reason: lokiv1beta1.ReasonInvalidTenantsConfiguration,
+ Reason: lokiv1.ReasonInvalidTenantsConfiguration,
Requeue: false,
}
@@ -1043,7 +1043,7 @@ func TestCreateOrUpdateLokiStack_WhenInvalidTenantsConfiguration_SetDegraded(t *
LokiStackGateway: true,
}
- stack := &lokiv1beta1.LokiStack{
+ stack := &lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -1052,28 +1052,28 @@ func TestCreateOrUpdateLokiStack_WhenInvalidTenantsConfiguration_SetDegraded(t *
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Storage: lokiv1beta1.ObjectStorageSpec{
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
},
- Secret: lokiv1beta1.ObjectStorageSecretSpec{
+ Secret: lokiv1.ObjectStorageSecretSpec{
Name: defaultSecret.Name,
- Type: lokiv1beta1.ObjectStorageSecretS3,
+ Type: lokiv1.ObjectStorageSecretS3,
},
},
- Tenants: &lokiv1beta1.TenantsSpec{
+ Tenants: &lokiv1.TenantsSpec{
Mode: "dynamic",
- Authentication: []lokiv1beta1.AuthenticationSpec{
+ Authentication: []lokiv1.AuthenticationSpec{
{
TenantName: "test",
TenantID: "1234",
- OIDC: &lokiv1beta1.OIDCSpec{
- Secret: &lokiv1beta1.TenantSecretSpec{
+ OIDC: &lokiv1.OIDCSpec{
+ Secret: &lokiv1.TenantSecretSpec{
Name: defaultGatewaySecret.Name,
},
},
@@ -1119,7 +1119,7 @@ func TestCreateOrUpdateLokiStack_WhenMissingGatewaySecret_SetDegraded(t *testing
degradedErr := &status.DegradedError{
Message: "Missing secrets for tenant test",
- Reason: lokiv1beta1.ReasonMissingGatewayTenantSecret,
+ Reason: lokiv1.ReasonMissingGatewayTenantSecret,
Requeue: true,
}
@@ -1127,7 +1127,7 @@ func TestCreateOrUpdateLokiStack_WhenMissingGatewaySecret_SetDegraded(t *testing
LokiStackGateway: true,
}
- stack := &lokiv1beta1.LokiStack{
+ stack := &lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -1136,35 +1136,35 @@ func TestCreateOrUpdateLokiStack_WhenMissingGatewaySecret_SetDegraded(t *testing
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Storage: lokiv1beta1.ObjectStorageSpec{
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
},
- Secret: lokiv1beta1.ObjectStorageSecretSpec{
+ Secret: lokiv1.ObjectStorageSecretSpec{
Name: defaultSecret.Name,
- Type: lokiv1beta1.ObjectStorageSecretS3,
+ Type: lokiv1.ObjectStorageSecretS3,
},
},
- Tenants: &lokiv1beta1.TenantsSpec{
+ Tenants: &lokiv1.TenantsSpec{
Mode: "dynamic",
- Authentication: []lokiv1beta1.AuthenticationSpec{
+ Authentication: []lokiv1.AuthenticationSpec{
{
TenantName: "test",
TenantID: "1234",
- OIDC: &lokiv1beta1.OIDCSpec{
- Secret: &lokiv1beta1.TenantSecretSpec{
+ OIDC: &lokiv1.OIDCSpec{
+ Secret: &lokiv1.TenantSecretSpec{
Name: defaultGatewaySecret.Name,
},
},
},
},
- Authorization: &lokiv1beta1.AuthorizationSpec{
- OPA: &lokiv1beta1.OPASpec{
+ Authorization: &lokiv1.AuthorizationSpec{
+ OPA: &lokiv1.OPASpec{
URL: "some-url",
},
},
@@ -1175,7 +1175,7 @@ func TestCreateOrUpdateLokiStack_WhenMissingGatewaySecret_SetDegraded(t *testing
	// GetStub looks up the CR first, so we need to return our fake stack and
	// return NotFound for everything else to trigger create.
k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error {
- o, ok := object.(*lokiv1beta1.LokiStack)
+ o, ok := object.(*lokiv1.LokiStack)
if r.Name == name.Name && r.Namespace == name.Namespace && ok {
k.SetClientObject(o, stack)
return nil
@@ -1208,7 +1208,7 @@ func TestCreateOrUpdateLokiStack_WhenInvalidGatewaySecret_SetDegraded(t *testing
degradedErr := &status.DegradedError{
Message: "Invalid gateway tenant secret contents",
- Reason: lokiv1beta1.ReasonInvalidGatewayTenantSecret,
+ Reason: lokiv1.ReasonInvalidGatewayTenantSecret,
Requeue: true,
}
@@ -1216,7 +1216,7 @@ func TestCreateOrUpdateLokiStack_WhenInvalidGatewaySecret_SetDegraded(t *testing
LokiStackGateway: true,
}
- stack := &lokiv1beta1.LokiStack{
+ stack := &lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -1225,35 +1225,35 @@ func TestCreateOrUpdateLokiStack_WhenInvalidGatewaySecret_SetDegraded(t *testing
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Storage: lokiv1beta1.ObjectStorageSpec{
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
},
- Secret: lokiv1beta1.ObjectStorageSecretSpec{
+ Secret: lokiv1.ObjectStorageSecretSpec{
Name: defaultSecret.Name,
- Type: lokiv1beta1.ObjectStorageSecretS3,
+ Type: lokiv1.ObjectStorageSecretS3,
},
},
- Tenants: &lokiv1beta1.TenantsSpec{
+ Tenants: &lokiv1.TenantsSpec{
Mode: "dynamic",
- Authentication: []lokiv1beta1.AuthenticationSpec{
+ Authentication: []lokiv1.AuthenticationSpec{
{
TenantName: "test",
TenantID: "1234",
- OIDC: &lokiv1beta1.OIDCSpec{
- Secret: &lokiv1beta1.TenantSecretSpec{
+ OIDC: &lokiv1.OIDCSpec{
+ Secret: &lokiv1.TenantSecretSpec{
Name: invalidSecret.Name,
},
},
},
},
- Authorization: &lokiv1beta1.AuthorizationSpec{
- OPA: &lokiv1beta1.OPASpec{
+ Authorization: &lokiv1.AuthorizationSpec{
+ OPA: &lokiv1.OPASpec{
URL: "some-url",
},
},
@@ -1264,7 +1264,7 @@ func TestCreateOrUpdateLokiStack_WhenInvalidGatewaySecret_SetDegraded(t *testing
	// GetStub looks up the CR first, so we need to return our fake stack and
	// return NotFound for everything else to trigger create.
k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error {
- o, ok := object.(*lokiv1beta1.LokiStack)
+ o, ok := object.(*lokiv1.LokiStack)
if r.Name == name.Name && r.Namespace == name.Namespace && ok {
k.SetClientObject(o, stack)
return nil
@@ -1301,7 +1301,7 @@ func TestCreateOrUpdateLokiStack_MissingTenantsSpec_SetDegraded(t *testing.T) {
degradedErr := &status.DegradedError{
Message: "Invalid tenants configuration - TenantsSpec cannot be nil when gateway flag is enabled",
- Reason: lokiv1beta1.ReasonInvalidTenantsConfiguration,
+ Reason: lokiv1.ReasonInvalidTenantsConfiguration,
Requeue: false,
}
@@ -1309,7 +1309,7 @@ func TestCreateOrUpdateLokiStack_MissingTenantsSpec_SetDegraded(t *testing.T) {
LokiStackGateway: true,
}
- stack := &lokiv1beta1.LokiStack{
+ stack := &lokiv1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
},
@@ -1318,18 +1318,18 @@ func TestCreateOrUpdateLokiStack_MissingTenantsSpec_SetDegraded(t *testing.T) {
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Storage: lokiv1beta1.ObjectStorageSpec{
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
},
- Secret: lokiv1beta1.ObjectStorageSecretSpec{
+ Secret: lokiv1.ObjectStorageSecretSpec{
Name: defaultSecret.Name,
- Type: lokiv1beta1.ObjectStorageSecretS3,
+ Type: lokiv1.ObjectStorageSecretS3,
},
},
Tenants: nil,
@@ -1339,7 +1339,7 @@ func TestCreateOrUpdateLokiStack_MissingTenantsSpec_SetDegraded(t *testing.T) {
	// GetStub looks up the CR first, so we need to return our fake stack and
	// return NotFound for everything else to trigger create.
k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error {
- o, ok := object.(*lokiv1beta1.LokiStack)
+ o, ok := object.(*lokiv1.LokiStack)
if r.Name == name.Name && r.Namespace == name.Namespace && ok {
k.SetClientObject(o, stack)
return nil
diff --git a/operator/internal/manifests/build.go b/operator/internal/manifests/build.go
index e0300ac422912..9cea54d65e1b5 100644
--- a/operator/internal/manifests/build.go
+++ b/operator/internal/manifests/build.go
@@ -2,7 +2,7 @@ package manifests
import (
"github.com/ViaQ/logerr/v2/kverrors"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/internal"
"github.com/imdario/mergo"
@@ -104,7 +104,7 @@ func BuildAll(opts Options) ([]client.Object, error) {
// DefaultLokiStackSpec returns the default configuration for a LokiStack of
// the specified size
-func DefaultLokiStackSpec(size lokiv1beta1.LokiStackSizeType) *lokiv1beta1.LokiStackSpec {
+func DefaultLokiStackSpec(size lokiv1.LokiStackSizeType) *lokiv1.LokiStackSpec {
defaults := internal.StackSizeTable[size]
return (&defaults).DeepCopy()
}
@@ -118,9 +118,9 @@ func ApplyDefaultSettings(opts *Options) error {
return kverrors.Wrap(err, "failed merging stack user options", "name", opts.Name)
}
- strictOverrides := lokiv1beta1.LokiStackSpec{
- Template: &lokiv1beta1.LokiTemplateSpec{
- Compactor: &lokiv1beta1.LokiComponentSpec{
+ strictOverrides := lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
   			// Compactor is a singleton application.
// Only one replica allowed!!!
Replicas: 1,
diff --git a/operator/internal/manifests/build_test.go b/operator/internal/manifests/build_test.go
index 1158414998190..01429bb87cc3f 100644
--- a/operator/internal/manifests/build_test.go
+++ b/operator/internal/manifests/build_test.go
@@ -9,25 +9,25 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/internal"
"github.com/stretchr/testify/require"
)
func TestApplyUserOptions_OverrideDefaults(t *testing.T) {
- allSizes := []lokiv1beta1.LokiStackSizeType{
- lokiv1beta1.SizeOneXExtraSmall,
- lokiv1beta1.SizeOneXSmall,
- lokiv1beta1.SizeOneXMedium,
+ allSizes := []lokiv1.LokiStackSizeType{
+ lokiv1.SizeOneXExtraSmall,
+ lokiv1.SizeOneXSmall,
+ lokiv1.SizeOneXMedium,
}
for _, size := range allSizes {
opt := Options{
Name: "abcd",
Namespace: "efgh",
- Stack: lokiv1beta1.LokiStackSpec{
+ Stack: lokiv1.LokiStackSpec{
Size: size,
- Template: &lokiv1beta1.LokiTemplateSpec{
- Distributor: &lokiv1beta1.LokiComponentSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Distributor: &lokiv1.LokiComponentSpec{
Replicas: 42,
},
},
@@ -55,19 +55,19 @@ func TestApplyUserOptions_OverrideDefaults(t *testing.T) {
}
func TestApplyUserOptions_AlwaysSetCompactorReplicasToOne(t *testing.T) {
- allSizes := []lokiv1beta1.LokiStackSizeType{
- lokiv1beta1.SizeOneXExtraSmall,
- lokiv1beta1.SizeOneXSmall,
- lokiv1beta1.SizeOneXMedium,
+ allSizes := []lokiv1.LokiStackSizeType{
+ lokiv1.SizeOneXExtraSmall,
+ lokiv1.SizeOneXSmall,
+ lokiv1.SizeOneXMedium,
}
for _, size := range allSizes {
opt := Options{
Name: "abcd",
Namespace: "efgh",
- Stack: lokiv1beta1.LokiStackSpec{
+ Stack: lokiv1.LokiStackSpec{
Size: size,
- Template: &lokiv1beta1.LokiTemplateSpec{
- Compactor: &lokiv1beta1.LokiComponentSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
Replicas: 2,
},
},
@@ -97,9 +97,9 @@ func TestBuildAll_WithFeatureGates_ServiceMonitors(t *testing.T) {
BuildOptions: Options{
Name: "test",
Namespace: "test",
- Stack: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXSmall,
- Rules: &lokiv1beta1.RulesSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXSmall,
+ Rules: &lokiv1.RulesSpec{
Enabled: true,
},
},
@@ -118,8 +118,8 @@ func TestBuildAll_WithFeatureGates_ServiceMonitors(t *testing.T) {
BuildOptions: Options{
Name: "test",
Namespace: "test",
- Stack: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXSmall,
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXSmall,
},
Gates: configv1.FeatureGates{
ServiceMonitors: true,
@@ -160,8 +160,8 @@ func TestBuildAll_WithFeatureGates_OpenShift_ServingCertsService(t *testing.T) {
BuildOptions: Options{
Name: "test",
Namespace: "test",
- Stack: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXSmall,
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXSmall,
},
Gates: configv1.FeatureGates{
ServiceMonitors: false,
@@ -177,8 +177,8 @@ func TestBuildAll_WithFeatureGates_OpenShift_ServingCertsService(t *testing.T) {
BuildOptions: Options{
Name: "test",
Namespace: "test",
- Stack: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXSmall,
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXSmall,
},
Gates: configv1.FeatureGates{
ServiceMonitors: false,
@@ -232,9 +232,9 @@ func TestBuildAll_WithFeatureGates_HTTPEncryption(t *testing.T) {
opts := Options{
Name: "test",
Namespace: "test",
- Stack: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXSmall,
- Rules: &lokiv1beta1.RulesSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXSmall,
+ Rules: &lokiv1.RulesSpec{
Enabled: true,
},
},
@@ -306,9 +306,9 @@ func TestBuildAll_WithFeatureGates_ServiceMonitorTLSEndpoints(t *testing.T) {
opts := Options{
Name: "test",
Namespace: "test",
- Stack: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXSmall,
- Rules: &lokiv1beta1.RulesSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXSmall,
+ Rules: &lokiv1.RulesSpec{
Enabled: true,
},
},
@@ -391,34 +391,34 @@ func TestBuildAll_WithFeatureGates_GRPCEncryption(t *testing.T) {
BuildOptions: Options{
Name: "test",
Namespace: "test",
- Stack: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXSmall,
- Rules: &lokiv1beta1.RulesSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXSmall,
+ Rules: &lokiv1.RulesSpec{
Enabled: true,
},
- Template: &lokiv1beta1.LokiTemplateSpec{
- Compactor: &lokiv1beta1.LokiComponentSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Distributor: &lokiv1beta1.LokiComponentSpec{
+ Distributor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Ingester: &lokiv1beta1.LokiComponentSpec{
+ Ingester: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Querier: &lokiv1beta1.LokiComponentSpec{
+ Querier: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- QueryFrontend: &lokiv1beta1.LokiComponentSpec{
+ QueryFrontend: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Gateway: &lokiv1beta1.LokiComponentSpec{
+ Gateway: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- IndexGateway: &lokiv1beta1.LokiComponentSpec{
+ IndexGateway: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Ruler: &lokiv1beta1.LokiComponentSpec{
+ Ruler: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
@@ -433,34 +433,34 @@ func TestBuildAll_WithFeatureGates_GRPCEncryption(t *testing.T) {
BuildOptions: Options{
Name: "test",
Namespace: "test",
- Stack: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXSmall,
- Rules: &lokiv1beta1.RulesSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXSmall,
+ Rules: &lokiv1.RulesSpec{
Enabled: true,
},
- Template: &lokiv1beta1.LokiTemplateSpec{
- Compactor: &lokiv1beta1.LokiComponentSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Distributor: &lokiv1beta1.LokiComponentSpec{
+ Distributor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Ingester: &lokiv1beta1.LokiComponentSpec{
+ Ingester: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Querier: &lokiv1beta1.LokiComponentSpec{
+ Querier: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- QueryFrontend: &lokiv1beta1.LokiComponentSpec{
+ QueryFrontend: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Gateway: &lokiv1beta1.LokiComponentSpec{
+ Gateway: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- IndexGateway: &lokiv1beta1.LokiComponentSpec{
+ IndexGateway: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Ruler: &lokiv1beta1.LokiComponentSpec{
+ Ruler: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
@@ -560,34 +560,34 @@ func TestBuildAll_WithFeatureGates_RuntimeSeccompProfile(t *testing.T) {
BuildOptions: Options{
Name: "test",
Namespace: "test",
- Stack: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXSmall,
- Rules: &lokiv1beta1.RulesSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXSmall,
+ Rules: &lokiv1.RulesSpec{
Enabled: true,
},
- Template: &lokiv1beta1.LokiTemplateSpec{
- Compactor: &lokiv1beta1.LokiComponentSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Distributor: &lokiv1beta1.LokiComponentSpec{
+ Distributor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Ingester: &lokiv1beta1.LokiComponentSpec{
+ Ingester: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Querier: &lokiv1beta1.LokiComponentSpec{
+ Querier: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- QueryFrontend: &lokiv1beta1.LokiComponentSpec{
+ QueryFrontend: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Gateway: &lokiv1beta1.LokiComponentSpec{
+ Gateway: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- IndexGateway: &lokiv1beta1.LokiComponentSpec{
+ IndexGateway: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Ruler: &lokiv1beta1.LokiComponentSpec{
+ Ruler: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
@@ -602,34 +602,34 @@ func TestBuildAll_WithFeatureGates_RuntimeSeccompProfile(t *testing.T) {
BuildOptions: Options{
Name: "test",
Namespace: "test",
- Stack: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXSmall,
- Rules: &lokiv1beta1.RulesSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXSmall,
+ Rules: &lokiv1.RulesSpec{
Enabled: true,
},
- Template: &lokiv1beta1.LokiTemplateSpec{
- Compactor: &lokiv1beta1.LokiComponentSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Distributor: &lokiv1beta1.LokiComponentSpec{
+ Distributor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Ingester: &lokiv1beta1.LokiComponentSpec{
+ Ingester: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Querier: &lokiv1beta1.LokiComponentSpec{
+ Querier: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- QueryFrontend: &lokiv1beta1.LokiComponentSpec{
+ QueryFrontend: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Gateway: &lokiv1beta1.LokiComponentSpec{
+ Gateway: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- IndexGateway: &lokiv1beta1.LokiComponentSpec{
+ IndexGateway: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Ruler: &lokiv1beta1.LokiComponentSpec{
+ Ruler: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
@@ -692,8 +692,8 @@ func TestBuildAll_WithFeatureGates_LokiStackGateway(t *testing.T) {
BuildOptions: Options{
Name: "test",
Namespace: "test",
- Stack: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXSmall,
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXSmall,
},
Gates: configv1.FeatureGates{
LokiStackGateway: false,
@@ -707,16 +707,16 @@ func TestBuildAll_WithFeatureGates_LokiStackGateway(t *testing.T) {
BuildOptions: Options{
Name: "test",
Namespace: "test",
- Stack: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXSmall,
- Tenants: &lokiv1beta1.TenantsSpec{
- Mode: lokiv1beta1.Dynamic,
- Authentication: []lokiv1beta1.AuthenticationSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXSmall,
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.Dynamic,
+ Authentication: []lokiv1.AuthenticationSpec{
{
TenantName: "test",
TenantID: "1234",
- OIDC: &lokiv1beta1.OIDCSpec{
- Secret: &lokiv1beta1.TenantSecretSpec{
+ OIDC: &lokiv1.OIDCSpec{
+ Secret: &lokiv1.TenantSecretSpec{
Name: "test",
},
IssuerURL: "https://127.0.0.1:5556/dex",
@@ -726,8 +726,8 @@ func TestBuildAll_WithFeatureGates_LokiStackGateway(t *testing.T) {
},
},
},
- Authorization: &lokiv1beta1.AuthorizationSpec{
- OPA: &lokiv1beta1.OPASpec{
+ Authorization: &lokiv1.AuthorizationSpec{
+ OPA: &lokiv1.OPASpec{
URL: "http://127.0.0.1:8181/v1/data/observatorium/allow",
},
},
@@ -769,8 +769,8 @@ func TestBuildAll_WithFeatureGates_LokiStackAlerts(t *testing.T) {
BuildOptions: Options{
Name: "test",
Namespace: "test",
- Stack: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXSmall,
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXSmall,
},
Gates: configv1.FeatureGates{
ServiceMonitors: false,
@@ -783,8 +783,8 @@ func TestBuildAll_WithFeatureGates_LokiStackAlerts(t *testing.T) {
BuildOptions: Options{
Name: "test",
Namespace: "test",
- Stack: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXSmall,
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXSmall,
},
Gates: configv1.FeatureGates{
ServiceMonitors: true,
diff --git a/operator/internal/manifests/compactor_test.go b/operator/internal/manifests/compactor_test.go
index 1a3e2d95a760b..81fc8c7562830 100644
--- a/operator/internal/manifests/compactor_test.go
+++ b/operator/internal/manifests/compactor_test.go
@@ -3,7 +3,7 @@ package manifests_test
import (
"testing"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests"
"github.com/stretchr/testify/require"
)
@@ -18,10 +18,10 @@ func TestNewCompactorStatefulSet_SelectorMatchesLabels(t *testing.T) {
sts := manifests.NewCompactorStatefulSet(manifests.Options{
Name: "abcd",
Namespace: "efgh",
- Stack: lokiv1beta1.LokiStackSpec{
+ Stack: lokiv1.LokiStackSpec{
StorageClassName: "standard",
- Template: &lokiv1beta1.LokiTemplateSpec{
- Compactor: &lokiv1beta1.LokiComponentSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
@@ -40,10 +40,10 @@ func TestNewCompactorStatefulSet_HasTemplateConfigHashAnnotation(t *testing.T) {
Name: "abcd",
Namespace: "efgh",
ConfigSHA1: "deadbeef",
- Stack: lokiv1beta1.LokiStackSpec{
+ Stack: lokiv1.LokiStackSpec{
StorageClassName: "standard",
- Template: &lokiv1beta1.LokiTemplateSpec{
- Compactor: &lokiv1beta1.LokiComponentSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
diff --git a/operator/internal/manifests/config_test.go b/operator/internal/manifests/config_test.go
index 567274845493e..3a66f57d3f6d1 100644
--- a/operator/internal/manifests/config_test.go
+++ b/operator/internal/manifests/config_test.go
@@ -6,7 +6,7 @@ import (
"testing"
"github.com/google/uuid"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -44,14 +44,14 @@ func randomConfigOptions() manifests.Options {
Name: uuid.New().String(),
Namespace: uuid.New().String(),
Image: uuid.New().String(),
- Stack: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Storage: lokiv1beta1.ObjectStorageSpec{},
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Storage: lokiv1.ObjectStorageSpec{},
StorageClassName: uuid.New().String(),
ReplicationFactor: rand.Int31(),
- Limits: &lokiv1beta1.LimitsSpec{
- Global: &lokiv1beta1.LimitsTemplateSpec{
- IngestionLimits: &lokiv1beta1.IngestionLimitSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ IngestionLimits: &lokiv1.IngestionLimitSpec{
IngestionRate: rand.Int31(),
IngestionBurstSize: rand.Int31(),
MaxLabelNameLength: rand.Int31(),
@@ -60,15 +60,15 @@ func randomConfigOptions() manifests.Options {
MaxGlobalStreamsPerTenant: rand.Int31(),
MaxLineSize: rand.Int31(),
},
- QueryLimits: &lokiv1beta1.QueryLimitSpec{
+ QueryLimits: &lokiv1.QueryLimitSpec{
MaxEntriesLimitPerQuery: rand.Int31(),
MaxChunksPerQuery: rand.Int31(),
MaxQuerySeries: rand.Int31(),
},
},
- Tenants: map[string]lokiv1beta1.LimitsTemplateSpec{
+ Tenants: map[string]lokiv1.LimitsTemplateSpec{
uuid.New().String(): {
- IngestionLimits: &lokiv1beta1.IngestionLimitSpec{
+ IngestionLimits: &lokiv1.IngestionLimitSpec{
IngestionRate: rand.Int31(),
IngestionBurstSize: rand.Int31(),
MaxLabelNameLength: rand.Int31(),
@@ -77,7 +77,7 @@ func randomConfigOptions() manifests.Options {
MaxGlobalStreamsPerTenant: rand.Int31(),
MaxLineSize: rand.Int31(),
},
- QueryLimits: &lokiv1beta1.QueryLimitSpec{
+ QueryLimits: &lokiv1.QueryLimitSpec{
MaxEntriesLimitPerQuery: rand.Int31(),
MaxChunksPerQuery: rand.Int31(),
MaxQuerySeries: rand.Int31(),
@@ -85,8 +85,8 @@ func randomConfigOptions() manifests.Options {
},
},
},
- Template: &lokiv1beta1.LokiTemplateSpec{
- Compactor: &lokiv1beta1.LokiComponentSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
Replicas: 1,
NodeSelector: map[string]string{
uuid.New().String(): uuid.New().String(),
@@ -101,7 +101,7 @@ func randomConfigOptions() manifests.Options {
},
},
},
- Distributor: &lokiv1beta1.LokiComponentSpec{
+ Distributor: &lokiv1.LokiComponentSpec{
Replicas: rand.Int31(),
NodeSelector: map[string]string{
uuid.New().String(): uuid.New().String(),
@@ -116,7 +116,7 @@ func randomConfigOptions() manifests.Options {
},
},
},
- Ingester: &lokiv1beta1.LokiComponentSpec{
+ Ingester: &lokiv1.LokiComponentSpec{
Replicas: rand.Int31(),
NodeSelector: map[string]string{
uuid.New().String(): uuid.New().String(),
@@ -131,7 +131,7 @@ func randomConfigOptions() manifests.Options {
},
},
},
- Querier: &lokiv1beta1.LokiComponentSpec{
+ Querier: &lokiv1.LokiComponentSpec{
Replicas: rand.Int31(),
NodeSelector: map[string]string{
uuid.New().String(): uuid.New().String(),
@@ -146,7 +146,7 @@ func randomConfigOptions() manifests.Options {
},
},
},
- QueryFrontend: &lokiv1beta1.LokiComponentSpec{
+ QueryFrontend: &lokiv1.LokiComponentSpec{
Replicas: rand.Int31(),
NodeSelector: map[string]string{
uuid.New().String(): uuid.New().String(),
@@ -161,7 +161,7 @@ func randomConfigOptions() manifests.Options {
},
},
},
- IndexGateway: &lokiv1beta1.LokiComponentSpec{
+ IndexGateway: &lokiv1.LokiComponentSpec{
Replicas: rand.Int31(),
NodeSelector: map[string]string{
uuid.New().String(): uuid.New().String(),
diff --git a/operator/internal/manifests/distributor_test.go b/operator/internal/manifests/distributor_test.go
index bc5e96cf59ea1..acbdfc2cc55ae 100644
--- a/operator/internal/manifests/distributor_test.go
+++ b/operator/internal/manifests/distributor_test.go
@@ -3,7 +3,7 @@ package manifests_test
import (
"testing"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests"
"github.com/stretchr/testify/require"
)
@@ -12,9 +12,9 @@ func TestNewDistributorDeployment_SelectorMatchesLabels(t *testing.T) {
dpl := manifests.NewDistributorDeployment(manifests.Options{
Name: "abcd",
Namespace: "efgh",
- Stack: lokiv1beta1.LokiStackSpec{
- Template: &lokiv1beta1.LokiTemplateSpec{
- Distributor: &lokiv1beta1.LokiComponentSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Distributor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
@@ -33,9 +33,9 @@ func TestNewDistributorDeployme_HasTemplateConfigHashAnnotation(t *testing.T) {
Name: "abcd",
Namespace: "efgh",
ConfigSHA1: "deadbeef",
- Stack: lokiv1beta1.LokiStackSpec{
- Template: &lokiv1beta1.LokiTemplateSpec{
- Distributor: &lokiv1beta1.LokiComponentSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Distributor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
diff --git a/operator/internal/manifests/gateway_tenants.go b/operator/internal/manifests/gateway_tenants.go
index fec51ebcb5df1..27280af674e40 100644
--- a/operator/internal/manifests/gateway_tenants.go
+++ b/operator/internal/manifests/gateway_tenants.go
@@ -2,8 +2,9 @@ package manifests
import (
"github.com/ViaQ/logerr/v2/kverrors"
+
configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/openshift"
"github.com/imdario/mergo"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
@@ -23,10 +24,10 @@ func ApplyGatewayDefaultOptions(opts *Options) error {
}
switch opts.Stack.Tenants.Mode {
- case lokiv1beta1.Static, lokiv1beta1.Dynamic:
+ case lokiv1.Static, lokiv1.Dynamic:
return nil // continue using user input
- case lokiv1beta1.OpenshiftLogging:
+ case lokiv1.OpenshiftLogging:
tenantData := make(map[string]openshift.TenantData)
for name, tenant := range opts.Tenants.Configs {
tenantData[name] = openshift.TenantData{
@@ -54,11 +55,11 @@ func ApplyGatewayDefaultOptions(opts *Options) error {
return nil
}
-func configureDeploymentForMode(d *appsv1.Deployment, mode lokiv1beta1.ModeType, fg configv1.FeatureGates, stackName, stackNs string) error {
+func configureDeploymentForMode(d *appsv1.Deployment, mode lokiv1.ModeType, fg configv1.FeatureGates, stackName, stackNs string) error {
switch mode {
- case lokiv1beta1.Static, lokiv1beta1.Dynamic:
+ case lokiv1.Static, lokiv1.Dynamic:
return nil // nothing to configure
- case lokiv1beta1.OpenshiftLogging:
+ case lokiv1.OpenshiftLogging:
caBundleName := signingCABundleName(stackName)
serviceName := serviceNameGatewayHTTP(stackName)
secretName := signingServiceSecretName(serviceName)
@@ -84,11 +85,11 @@ func configureDeploymentForMode(d *appsv1.Deployment, mode lokiv1beta1.ModeType,
return nil
}
-func configureServiceForMode(s *corev1.ServiceSpec, mode lokiv1beta1.ModeType) error {
+func configureServiceForMode(s *corev1.ServiceSpec, mode lokiv1.ModeType) error {
switch mode {
- case lokiv1beta1.Static, lokiv1beta1.Dynamic:
+ case lokiv1.Static, lokiv1.Dynamic:
return nil // nothing to configure
- case lokiv1beta1.OpenshiftLogging:
+ case lokiv1.OpenshiftLogging:
return openshift.ConfigureGatewayService(s)
}
@@ -97,9 +98,9 @@ func configureServiceForMode(s *corev1.ServiceSpec, mode lokiv1beta1.ModeType) e
func configureLokiStackObjsForMode(objs []client.Object, opts Options) []client.Object {
switch opts.Stack.Tenants.Mode {
- case lokiv1beta1.Static, lokiv1beta1.Dynamic:
+ case lokiv1.Static, lokiv1.Dynamic:
// nothing to configure
- case lokiv1beta1.OpenshiftLogging:
+ case lokiv1.OpenshiftLogging:
openShiftObjs := openshift.BuildLokiStackObjects(opts.OpenShiftOptions)
objs = append(objs, openShiftObjs...)
}
@@ -109,9 +110,9 @@ func configureLokiStackObjsForMode(objs []client.Object, opts Options) []client.
func configureGatewayObjsForMode(objs []client.Object, opts Options) []client.Object {
switch opts.Stack.Tenants.Mode {
- case lokiv1beta1.Static, lokiv1beta1.Dynamic:
+ case lokiv1.Static, lokiv1.Dynamic:
// nothing to configure
- case lokiv1beta1.OpenshiftLogging:
+ case lokiv1.OpenshiftLogging:
openShiftObjs := openshift.BuildGatewayObjects(opts.OpenShiftOptions)
var cObjs []client.Object
@@ -133,11 +134,11 @@ func configureGatewayObjsForMode(objs []client.Object, opts Options) []client.Ob
return objs
}
-func configureServiceMonitorForMode(sm *monitoringv1.ServiceMonitor, mode lokiv1beta1.ModeType, fg configv1.FeatureGates) error {
+func configureServiceMonitorForMode(sm *monitoringv1.ServiceMonitor, mode lokiv1.ModeType, fg configv1.FeatureGates) error {
switch mode {
- case lokiv1beta1.Static, lokiv1beta1.Dynamic:
+ case lokiv1.Static, lokiv1.Dynamic:
return nil // nothing to configure
- case lokiv1beta1.OpenshiftLogging:
+ case lokiv1.OpenshiftLogging:
return openshift.ConfigureGatewayServiceMonitor(sm, fg.ServiceMonitorTLSEndpoints)
}
diff --git a/operator/internal/manifests/gateway_tenants_test.go b/operator/internal/manifests/gateway_tenants_test.go
index 69d754dc621ff..87f0ceabbc690 100644
--- a/operator/internal/manifests/gateway_tenants_test.go
+++ b/operator/internal/manifests/gateway_tenants_test.go
@@ -8,7 +8,7 @@ import (
"github.com/stretchr/testify/require"
configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/openshift"
appsv1 "k8s.io/api/apps/v1"
@@ -28,16 +28,16 @@ func TestApplyGatewayDefaultsOptions(t *testing.T) {
{
desc: "static mode",
opts: &Options{
- Stack: lokiv1beta1.LokiStackSpec{
- Tenants: &lokiv1beta1.TenantsSpec{
- Mode: lokiv1beta1.Static,
+ Stack: lokiv1.LokiStackSpec{
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.Static,
},
},
},
want: &Options{
- Stack: lokiv1beta1.LokiStackSpec{
- Tenants: &lokiv1beta1.TenantsSpec{
- Mode: lokiv1beta1.Static,
+ Stack: lokiv1.LokiStackSpec{
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.Static,
},
},
},
@@ -45,16 +45,16 @@ func TestApplyGatewayDefaultsOptions(t *testing.T) {
{
desc: "dynamic mode",
opts: &Options{
- Stack: lokiv1beta1.LokiStackSpec{
- Tenants: &lokiv1beta1.TenantsSpec{
- Mode: lokiv1beta1.Dynamic,
+ Stack: lokiv1.LokiStackSpec{
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.Dynamic,
},
},
},
want: &Options{
- Stack: lokiv1beta1.LokiStackSpec{
- Tenants: &lokiv1beta1.TenantsSpec{
- Mode: lokiv1beta1.Dynamic,
+ Stack: lokiv1.LokiStackSpec{
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.Dynamic,
},
},
},
@@ -65,9 +65,9 @@ func TestApplyGatewayDefaultsOptions(t *testing.T) {
Name: "lokistack-ocp",
Namespace: "stack-ns",
GatewayBaseDomain: "example.com",
- Stack: lokiv1beta1.LokiStackSpec{
- Tenants: &lokiv1beta1.TenantsSpec{
- Mode: lokiv1beta1.OpenshiftLogging,
+ Stack: lokiv1.LokiStackSpec{
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.OpenshiftLogging,
},
},
Tenants: Tenants{
@@ -94,9 +94,9 @@ func TestApplyGatewayDefaultsOptions(t *testing.T) {
Name: "lokistack-ocp",
Namespace: "stack-ns",
GatewayBaseDomain: "example.com",
- Stack: lokiv1beta1.LokiStackSpec{
- Tenants: &lokiv1beta1.TenantsSpec{
- Mode: lokiv1beta1.OpenshiftLogging,
+ Stack: lokiv1.LokiStackSpec{
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.OpenshiftLogging,
},
},
Tenants: Tenants{
@@ -180,7 +180,7 @@ func TestApplyGatewayDefaultsOptions(t *testing.T) {
func TestConfigureDeploymentForMode(t *testing.T) {
type tt struct {
desc string
- mode lokiv1beta1.ModeType
+ mode lokiv1.ModeType
stackName string
stackNs string
featureGates configv1.FeatureGates
@@ -191,20 +191,20 @@ func TestConfigureDeploymentForMode(t *testing.T) {
tc := []tt{
{
desc: "static mode",
- mode: lokiv1beta1.Static,
+ mode: lokiv1.Static,
dpl: &appsv1.Deployment{},
want: &appsv1.Deployment{},
},
{
desc: "dynamic mode",
- mode: lokiv1beta1.Dynamic,
+ mode: lokiv1.Dynamic,
dpl: &appsv1.Deployment{},
want: &appsv1.Deployment{},
},
{
desc: "openshift-logging mode",
- mode: lokiv1beta1.OpenshiftLogging,
+ mode: lokiv1.OpenshiftLogging,
stackName: "test",
stackNs: "test-ns",
dpl: &appsv1.Deployment{
@@ -354,7 +354,7 @@ func TestConfigureDeploymentForMode(t *testing.T) {
},
{
desc: "openshift-logging mode with-tls-service-monitor-config",
- mode: lokiv1beta1.OpenshiftLogging,
+ mode: lokiv1.OpenshiftLogging,
stackName: "test",
stackNs: "test-ns",
featureGates: configv1.FeatureGates{
@@ -534,7 +534,7 @@ func TestConfigureDeploymentForMode(t *testing.T) {
},
{
desc: "openshift-logging mode with-cert-signing-service",
- mode: lokiv1beta1.OpenshiftLogging,
+ mode: lokiv1.OpenshiftLogging,
stackName: "test",
stackNs: "test-ns",
featureGates: configv1.FeatureGates{
@@ -742,7 +742,7 @@ func TestConfigureDeploymentForMode(t *testing.T) {
func TestConfigureServiceForMode(t *testing.T) {
type tt struct {
desc string
- mode lokiv1beta1.ModeType
+ mode lokiv1.ModeType
svc *corev1.ServiceSpec
want *corev1.ServiceSpec
}
@@ -750,19 +750,19 @@ func TestConfigureServiceForMode(t *testing.T) {
tc := []tt{
{
desc: "static mode",
- mode: lokiv1beta1.Static,
+ mode: lokiv1.Static,
svc: &corev1.ServiceSpec{},
want: &corev1.ServiceSpec{},
},
{
desc: "dynamic mode",
- mode: lokiv1beta1.Dynamic,
+ mode: lokiv1.Dynamic,
svc: &corev1.ServiceSpec{},
want: &corev1.ServiceSpec{},
},
{
desc: "openshift-logging mode",
- mode: lokiv1beta1.OpenshiftLogging,
+ mode: lokiv1.OpenshiftLogging,
svc: &corev1.ServiceSpec{},
want: &corev1.ServiceSpec{
Ports: []corev1.ServicePort{
@@ -788,7 +788,7 @@ func TestConfigureServiceForMode(t *testing.T) {
func TestConfigureServiceMonitorForMode(t *testing.T) {
type tt struct {
desc string
- mode lokiv1beta1.ModeType
+ mode lokiv1.ModeType
featureGates configv1.FeatureGates
sm *monitoringv1.ServiceMonitor
want *monitoringv1.ServiceMonitor
@@ -797,19 +797,19 @@ func TestConfigureServiceMonitorForMode(t *testing.T) {
tc := []tt{
{
desc: "static mode",
- mode: lokiv1beta1.Static,
+ mode: lokiv1.Static,
sm: &monitoringv1.ServiceMonitor{},
want: &monitoringv1.ServiceMonitor{},
},
{
desc: "dynamic mode",
- mode: lokiv1beta1.Dynamic,
+ mode: lokiv1.Dynamic,
sm: &monitoringv1.ServiceMonitor{},
want: &monitoringv1.ServiceMonitor{},
},
{
desc: "openshift-logging mode",
- mode: lokiv1beta1.OpenshiftLogging,
+ mode: lokiv1.OpenshiftLogging,
sm: &monitoringv1.ServiceMonitor{},
want: &monitoringv1.ServiceMonitor{
Spec: monitoringv1.ServiceMonitorSpec{
@@ -825,7 +825,7 @@ func TestConfigureServiceMonitorForMode(t *testing.T) {
},
{
desc: "openshift-logging mode with-tls-service-monitor-config",
- mode: lokiv1beta1.OpenshiftLogging,
+ mode: lokiv1.OpenshiftLogging,
featureGates: configv1.FeatureGates{
HTTPEncryption: true,
ServiceMonitorTLSEndpoints: true,
diff --git a/operator/internal/manifests/gateway_test.go b/operator/internal/manifests/gateway_test.go
index a01e29a494188..04e78c8e6593f 100644
--- a/operator/internal/manifests/gateway_test.go
+++ b/operator/internal/manifests/gateway_test.go
@@ -6,7 +6,7 @@ import (
"testing"
configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/openshift"
"github.com/google/uuid"
@@ -22,21 +22,21 @@ func TestNewGatewayDeployment_HasTemplateConfigHashAnnotation(t *testing.T) {
ss := NewGatewayDeployment(Options{
Name: "abcd",
Namespace: "efgh",
- Stack: lokiv1beta1.LokiStackSpec{
- Template: &lokiv1beta1.LokiTemplateSpec{
- Compactor: &lokiv1beta1.LokiComponentSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
Replicas: rand.Int31(),
},
- Distributor: &lokiv1beta1.LokiComponentSpec{
+ Distributor: &lokiv1.LokiComponentSpec{
Replicas: rand.Int31(),
},
- Ingester: &lokiv1beta1.LokiComponentSpec{
+ Ingester: &lokiv1.LokiComponentSpec{
Replicas: rand.Int31(),
},
- Querier: &lokiv1beta1.LokiComponentSpec{
+ Querier: &lokiv1.LokiComponentSpec{
Replicas: rand.Int31(),
},
- QueryFrontend: &lokiv1beta1.LokiComponentSpec{
+ QueryFrontend: &lokiv1.LokiComponentSpec{
Replicas: rand.Int31(),
},
},
@@ -54,32 +54,32 @@ func TestGatewayConfigMap_ReturnsSHA1OfBinaryContents(t *testing.T) {
Name: uuid.New().String(),
Namespace: uuid.New().String(),
Image: uuid.New().String(),
- Stack: lokiv1beta1.LokiStackSpec{
- Template: &lokiv1beta1.LokiTemplateSpec{
- Compactor: &lokiv1beta1.LokiComponentSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
Replicas: rand.Int31(),
},
- Distributor: &lokiv1beta1.LokiComponentSpec{
+ Distributor: &lokiv1.LokiComponentSpec{
Replicas: rand.Int31(),
},
- Ingester: &lokiv1beta1.LokiComponentSpec{
+ Ingester: &lokiv1.LokiComponentSpec{
Replicas: rand.Int31(),
},
- Querier: &lokiv1beta1.LokiComponentSpec{
+ Querier: &lokiv1.LokiComponentSpec{
Replicas: rand.Int31(),
},
- QueryFrontend: &lokiv1beta1.LokiComponentSpec{
+ QueryFrontend: &lokiv1.LokiComponentSpec{
Replicas: rand.Int31(),
},
},
- Tenants: &lokiv1beta1.TenantsSpec{
- Mode: lokiv1beta1.Dynamic,
- Authentication: []lokiv1beta1.AuthenticationSpec{
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.Dynamic,
+ Authentication: []lokiv1.AuthenticationSpec{
{
TenantName: "test",
TenantID: "1234",
- OIDC: &lokiv1beta1.OIDCSpec{
- Secret: &lokiv1beta1.TenantSecretSpec{
+ OIDC: &lokiv1.OIDCSpec{
+ Secret: &lokiv1.TenantSecretSpec{
Name: "test",
},
IssuerURL: "https://127.0.0.1:5556/dex",
@@ -89,8 +89,8 @@ func TestGatewayConfigMap_ReturnsSHA1OfBinaryContents(t *testing.T) {
},
},
},
- Authorization: &lokiv1beta1.AuthorizationSpec{
- OPA: &lokiv1beta1.OPASpec{
+ Authorization: &lokiv1.AuthorizationSpec{
+ OPA: &lokiv1.OPASpec{
URL: "http://127.0.0.1:8181/v1/data/observatorium/allow",
},
},
@@ -120,14 +120,14 @@ func TestBuildGateway_HasConfigForTenantMode(t *testing.T) {
Gates: configv1.FeatureGates{
LokiStackGateway: true,
},
- Stack: lokiv1beta1.LokiStackSpec{
- Template: &lokiv1beta1.LokiTemplateSpec{
- Gateway: &lokiv1beta1.LokiComponentSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Gateway: &lokiv1.LokiComponentSpec{
Replicas: rand.Int31(),
},
},
- Tenants: &lokiv1beta1.TenantsSpec{
- Mode: lokiv1beta1.OpenshiftLogging,
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.OpenshiftLogging,
},
},
})
@@ -153,14 +153,14 @@ func TestBuildGateway_HasExtraObjectsForTenantMode(t *testing.T) {
LokiStackNamespace: "efgh",
},
},
- Stack: lokiv1beta1.LokiStackSpec{
- Template: &lokiv1beta1.LokiTemplateSpec{
- Gateway: &lokiv1beta1.LokiComponentSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Gateway: &lokiv1.LokiComponentSpec{
Replicas: rand.Int31(),
},
},
- Tenants: &lokiv1beta1.TenantsSpec{
- Mode: lokiv1beta1.OpenshiftLogging,
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.OpenshiftLogging,
},
},
})
@@ -185,14 +185,14 @@ func TestBuildGateway_WithExtraObjectsForTenantMode_RouteSvcMatches(t *testing.T
LokiStackNamespace: "efgh",
},
},
- Stack: lokiv1beta1.LokiStackSpec{
- Template: &lokiv1beta1.LokiTemplateSpec{
- Gateway: &lokiv1beta1.LokiComponentSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Gateway: &lokiv1.LokiComponentSpec{
Replicas: rand.Int31(),
},
},
- Tenants: &lokiv1beta1.TenantsSpec{
- Mode: lokiv1beta1.OpenshiftLogging,
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.OpenshiftLogging,
},
},
})
@@ -222,14 +222,14 @@ func TestBuildGateway_WithExtraObjectsForTenantMode_ServiceAccountNameMatches(t
LokiStackNamespace: "efgh",
},
},
- Stack: lokiv1beta1.LokiStackSpec{
- Template: &lokiv1beta1.LokiTemplateSpec{
- Gateway: &lokiv1beta1.LokiComponentSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Gateway: &lokiv1.LokiComponentSpec{
Replicas: rand.Int31(),
},
},
- Tenants: &lokiv1beta1.TenantsSpec{
- Mode: lokiv1beta1.OpenshiftLogging,
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.OpenshiftLogging,
},
},
})
@@ -257,14 +257,14 @@ func TestBuildGateway_WithExtraObjectsForTenantMode_ReplacesIngressWithRoute(t *
LokiStackNamespace: "efgh",
},
},
- Stack: lokiv1beta1.LokiStackSpec{
- Template: &lokiv1beta1.LokiTemplateSpec{
- Gateway: &lokiv1beta1.LokiComponentSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Gateway: &lokiv1.LokiComponentSpec{
Replicas: rand.Int31(),
},
},
- Tenants: &lokiv1beta1.TenantsSpec{
- Mode: lokiv1beta1.OpenshiftLogging,
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.OpenshiftLogging,
},
},
})
diff --git a/operator/internal/manifests/indexgateway_test.go b/operator/internal/manifests/indexgateway_test.go
index 595852bfd80ae..aec2df5cbf98d 100644
--- a/operator/internal/manifests/indexgateway_test.go
+++ b/operator/internal/manifests/indexgateway_test.go
@@ -3,7 +3,7 @@ package manifests_test
import (
"testing"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests"
"github.com/stretchr/testify/require"
)
@@ -13,10 +13,10 @@ func TestNewIndexGatewayStatefulSet_HasTemplateConfigHashAnnotation(t *testing.T
Name: "abcd",
Namespace: "efgh",
ConfigSHA1: "deadbeef",
- Stack: lokiv1beta1.LokiStackSpec{
+ Stack: lokiv1.LokiStackSpec{
StorageClassName: "standard",
- Template: &lokiv1beta1.LokiTemplateSpec{
- IndexGateway: &lokiv1beta1.LokiComponentSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ IndexGateway: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
@@ -39,10 +39,10 @@ func TestNewIndexGatewayStatefulSet_SelectorMatchesLabels(t *testing.T) {
ss := manifests.NewIndexGatewayStatefulSet(manifests.Options{
Name: "abcd",
Namespace: "efgh",
- Stack: lokiv1beta1.LokiStackSpec{
+ Stack: lokiv1.LokiStackSpec{
StorageClassName: "standard",
- Template: &lokiv1beta1.LokiTemplateSpec{
- IndexGateway: &lokiv1beta1.LokiComponentSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ IndexGateway: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
diff --git a/operator/internal/manifests/ingester_test.go b/operator/internal/manifests/ingester_test.go
index fd854352550aa..84314226b59b4 100644
--- a/operator/internal/manifests/ingester_test.go
+++ b/operator/internal/manifests/ingester_test.go
@@ -3,7 +3,7 @@ package manifests_test
import (
"testing"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests"
"github.com/stretchr/testify/require"
)
@@ -13,10 +13,10 @@ func TestNewIngesterStatefulSet_HasTemplateConfigHashAnnotation(t *testing.T) {
Name: "abcd",
Namespace: "efgh",
ConfigSHA1: "deadbeef",
- Stack: lokiv1beta1.LokiStackSpec{
+ Stack: lokiv1.LokiStackSpec{
StorageClassName: "standard",
- Template: &lokiv1beta1.LokiTemplateSpec{
- Ingester: &lokiv1beta1.LokiComponentSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Ingester: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
@@ -39,10 +39,10 @@ func TestNewIngesterStatefulSet_SelectorMatchesLabels(t *testing.T) {
sts := manifests.NewIngesterStatefulSet(manifests.Options{
Name: "abcd",
Namespace: "efgh",
- Stack: lokiv1beta1.LokiStackSpec{
+ Stack: lokiv1.LokiStackSpec{
StorageClassName: "standard",
- Template: &lokiv1beta1.LokiTemplateSpec{
- Ingester: &lokiv1beta1.LokiComponentSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Ingester: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
diff --git a/operator/internal/manifests/internal/config/build_test.go b/operator/internal/manifests/internal/config/build_test.go
index fe814615699ea..1a98943f98951 100644
--- a/operator/internal/manifests/internal/config/build_test.go
+++ b/operator/internal/manifests/internal/config/build_test.go
@@ -3,7 +3,7 @@ package config
import (
"testing"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/storage"
"github.com/stretchr/testify/require"
)
@@ -166,11 +166,11 @@ analytics:
overrides:
`
opts := Options{
- Stack: lokiv1beta1.LokiStackSpec{
+ Stack: lokiv1.LokiStackSpec{
ReplicationFactor: 1,
- Limits: &lokiv1beta1.LimitsSpec{
- Global: &lokiv1beta1.LimitsTemplateSpec{
- IngestionLimits: &lokiv1beta1.IngestionLimitSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ IngestionLimits: &lokiv1.IngestionLimitSpec{
IngestionRate: 4,
IngestionBurstSize: 6,
MaxLabelNameLength: 1024,
@@ -179,7 +179,7 @@ overrides:
MaxGlobalStreamsPerTenant: 0,
MaxLineSize: 256000,
},
- QueryLimits: &lokiv1beta1.QueryLimitSpec{
+ QueryLimits: &lokiv1.QueryLimitSpec{
MaxEntriesLimitPerQuery: 5000,
MaxChunksPerQuery: 2000000,
MaxQuerySeries: 500,
@@ -214,7 +214,7 @@ overrides:
IngesterMemoryRequest: 5000,
},
ObjectStorage: storage.Options{
- SharedStore: lokiv1beta1.ObjectStorageSecretS3,
+ SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
Endpoint: "http://test.default.svc.cluster.local.:9000",
Region: "us-east",
@@ -222,9 +222,9 @@ overrides:
AccessKeyID: "test",
AccessKeySecret: "test123",
},
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-01",
},
},
@@ -400,11 +400,11 @@ overrides:
max_chunks_per_query: 1000000
`
opts := Options{
- Stack: lokiv1beta1.LokiStackSpec{
+ Stack: lokiv1.LokiStackSpec{
ReplicationFactor: 1,
- Limits: &lokiv1beta1.LimitsSpec{
- Global: &lokiv1beta1.LimitsTemplateSpec{
- IngestionLimits: &lokiv1beta1.IngestionLimitSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ IngestionLimits: &lokiv1.IngestionLimitSpec{
IngestionRate: 4,
IngestionBurstSize: 6,
MaxLabelNameLength: 1024,
@@ -413,20 +413,20 @@ overrides:
MaxGlobalStreamsPerTenant: 0,
MaxLineSize: 256000,
},
- QueryLimits: &lokiv1beta1.QueryLimitSpec{
+ QueryLimits: &lokiv1.QueryLimitSpec{
MaxEntriesLimitPerQuery: 5000,
MaxChunksPerQuery: 2000000,
MaxQuerySeries: 500,
},
},
- Tenants: map[string]lokiv1beta1.LimitsTemplateSpec{
+ Tenants: map[string]lokiv1.LimitsTemplateSpec{
"test-a": {
- IngestionLimits: &lokiv1beta1.IngestionLimitSpec{
+ IngestionLimits: &lokiv1.IngestionLimitSpec{
IngestionRate: 2,
IngestionBurstSize: 5,
MaxGlobalStreamsPerTenant: 1,
},
- QueryLimits: &lokiv1beta1.QueryLimitSpec{
+ QueryLimits: &lokiv1.QueryLimitSpec{
MaxChunksPerQuery: 1000000,
},
},
@@ -460,7 +460,7 @@ overrides:
IngesterMemoryRequest: 5000,
},
ObjectStorage: storage.Options{
- SharedStore: lokiv1beta1.ObjectStorageSecretS3,
+ SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
Endpoint: "http://test.default.svc.cluster.local.:9000",
Region: "us-east",
@@ -468,9 +468,9 @@ overrides:
AccessKeyID: "test",
AccessKeySecret: "test123",
},
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-01",
},
},
@@ -484,11 +484,11 @@ overrides:
func TestBuild_ConfigAndRuntimeConfig_CreateLokiConfigFailed(t *testing.T) {
opts := Options{
- Stack: lokiv1beta1.LokiStackSpec{
+ Stack: lokiv1.LokiStackSpec{
ReplicationFactor: 1,
- Limits: &lokiv1beta1.LimitsSpec{
- Global: &lokiv1beta1.LimitsTemplateSpec{
- IngestionLimits: &lokiv1beta1.IngestionLimitSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ IngestionLimits: &lokiv1.IngestionLimitSpec{
IngestionRate: 4,
IngestionBurstSize: 6,
MaxLabelNameLength: 1024,
@@ -529,7 +529,7 @@ func TestBuild_ConfigAndRuntimeConfig_CreateLokiConfigFailed(t *testing.T) {
IngesterMemoryRequest: 5000,
},
ObjectStorage: storage.Options{
- SharedStore: lokiv1beta1.ObjectStorageSecretS3,
+ SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
Endpoint: "http://test.default.svc.cluster.local.:9000",
Region: "us-east",
@@ -537,9 +537,9 @@ func TestBuild_ConfigAndRuntimeConfig_CreateLokiConfigFailed(t *testing.T) {
AccessKeyID: "test",
AccessKeySecret: "test123",
},
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-01",
},
},
@@ -763,11 +763,11 @@ analytics:
overrides:
`
opts := Options{
- Stack: lokiv1beta1.LokiStackSpec{
+ Stack: lokiv1.LokiStackSpec{
ReplicationFactor: 1,
- Limits: &lokiv1beta1.LimitsSpec{
- Global: &lokiv1beta1.LimitsTemplateSpec{
- IngestionLimits: &lokiv1beta1.IngestionLimitSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ IngestionLimits: &lokiv1.IngestionLimitSpec{
IngestionRate: 4,
IngestionBurstSize: 6,
MaxLabelNameLength: 1024,
@@ -776,7 +776,7 @@ overrides:
MaxGlobalStreamsPerTenant: 0,
MaxLineSize: 256000,
},
- QueryLimits: &lokiv1beta1.QueryLimitSpec{
+ QueryLimits: &lokiv1.QueryLimitSpec{
MaxEntriesLimitPerQuery: 5000,
MaxChunksPerQuery: 2000000,
MaxQuerySeries: 500,
@@ -858,7 +858,7 @@ overrides:
IngesterMemoryRequest: 5000,
},
ObjectStorage: storage.Options{
- SharedStore: lokiv1beta1.ObjectStorageSecretS3,
+ SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
Endpoint: "http://test.default.svc.cluster.local.:9000",
Region: "us-east",
@@ -866,9 +866,9 @@ overrides:
AccessKeyID: "test",
AccessKeySecret: "test123",
},
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-01",
},
},
@@ -1093,11 +1093,11 @@ analytics:
overrides:
`
opts := Options{
- Stack: lokiv1beta1.LokiStackSpec{
+ Stack: lokiv1.LokiStackSpec{
ReplicationFactor: 1,
- Limits: &lokiv1beta1.LimitsSpec{
- Global: &lokiv1beta1.LimitsTemplateSpec{
- IngestionLimits: &lokiv1beta1.IngestionLimitSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ IngestionLimits: &lokiv1.IngestionLimitSpec{
IngestionRate: 4,
IngestionBurstSize: 6,
MaxLabelNameLength: 1024,
@@ -1106,7 +1106,7 @@ overrides:
MaxGlobalStreamsPerTenant: 0,
MaxLineSize: 256000,
},
- QueryLimits: &lokiv1beta1.QueryLimitSpec{
+ QueryLimits: &lokiv1.QueryLimitSpec{
MaxEntriesLimitPerQuery: 5000,
MaxChunksPerQuery: 2000000,
MaxQuerySeries: 500,
@@ -1189,7 +1189,7 @@ overrides:
IngesterMemoryRequest: 5000,
},
ObjectStorage: storage.Options{
- SharedStore: lokiv1beta1.ObjectStorageSecretS3,
+ SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
Endpoint: "http://test.default.svc.cluster.local.:9000",
Region: "us-east",
@@ -1197,9 +1197,9 @@ overrides:
AccessKeyID: "test",
AccessKeySecret: "test123",
},
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-01",
},
},
@@ -1437,11 +1437,11 @@ analytics:
overrides:
`
opts := Options{
- Stack: lokiv1beta1.LokiStackSpec{
+ Stack: lokiv1.LokiStackSpec{
ReplicationFactor: 1,
- Limits: &lokiv1beta1.LimitsSpec{
- Global: &lokiv1beta1.LimitsTemplateSpec{
- IngestionLimits: &lokiv1beta1.IngestionLimitSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ IngestionLimits: &lokiv1.IngestionLimitSpec{
IngestionRate: 4,
IngestionBurstSize: 6,
MaxLabelNameLength: 1024,
@@ -1450,7 +1450,7 @@ overrides:
MaxGlobalStreamsPerTenant: 0,
MaxLineSize: 256000,
},
- QueryLimits: &lokiv1beta1.QueryLimitSpec{
+ QueryLimits: &lokiv1.QueryLimitSpec{
MaxEntriesLimitPerQuery: 5000,
MaxChunksPerQuery: 2000000,
MaxQuerySeries: 500,
@@ -1550,7 +1550,7 @@ overrides:
IngesterMemoryRequest: 5000,
},
ObjectStorage: storage.Options{
- SharedStore: lokiv1beta1.ObjectStorageSecretS3,
+ SharedStore: lokiv1.ObjectStorageSecretS3,
S3: &storage.S3StorageConfig{
Endpoint: "http://test.default.svc.cluster.local.:9000",
Region: "us-east",
@@ -1558,9 +1558,9 @@ overrides:
AccessKeyID: "test",
AccessKeySecret: "test123",
},
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-01",
},
},
diff --git a/operator/internal/manifests/internal/config/options.go b/operator/internal/manifests/internal/config/options.go
index ab9e9990609b0..ef07ae5103d7a 100644
--- a/operator/internal/manifests/internal/config/options.go
+++ b/operator/internal/manifests/internal/config/options.go
@@ -5,13 +5,13 @@ import (
"math"
"strings"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/storage"
)
// Options is used to render the loki-config.yaml file template
type Options struct {
- Stack lokiv1beta1.LokiStackSpec
+ Stack lokiv1.LokiStackSpec
Namespace string
Name string
diff --git a/operator/internal/manifests/internal/gateway/build.go b/operator/internal/manifests/internal/gateway/build.go
index 6b3da6acd6fe8..0b5e308411204 100644
--- a/operator/internal/manifests/internal/gateway/build.go
+++ b/operator/internal/manifests/internal/gateway/build.go
@@ -6,7 +6,7 @@ import (
"io/ioutil"
"text/template"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/ViaQ/logerr/v2/kverrors"
)
@@ -62,7 +62,7 @@ func Build(opts Options) (rbacCfg []byte, tenantsCfg []byte, regoCfg []byte, err
return nil, nil, nil, kverrors.Wrap(err, "failed to read configuration from buffer")
}
// Build loki gateway observatorium rego for static mode
- if opts.Stack.Tenants.Mode == lokiv1beta1.Static {
+ if opts.Stack.Tenants.Mode == lokiv1.Static {
w = bytes.NewBuffer(nil)
err = lokiStackGatewayRegoTmpl.Execute(w, opts)
if err != nil {
diff --git a/operator/internal/manifests/internal/gateway/build_test.go b/operator/internal/manifests/internal/gateway/build_test.go
index dd07988066bd1..5c31c83b2e317 100644
--- a/operator/internal/manifests/internal/gateway/build_test.go
+++ b/operator/internal/manifests/internal/gateway/build_test.go
@@ -3,7 +3,7 @@ package gateway
import (
"testing"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/openshift"
"github.com/stretchr/testify/require"
)
@@ -45,15 +45,15 @@ roles:
- test-a
`
opts := Options{
- Stack: lokiv1beta1.LokiStackSpec{
- Tenants: &lokiv1beta1.TenantsSpec{
- Mode: lokiv1beta1.Static,
- Authentication: []lokiv1beta1.AuthenticationSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.Static,
+ Authentication: []lokiv1.AuthenticationSpec{
{
TenantName: "test-a",
TenantID: "test",
- OIDC: &lokiv1beta1.OIDCSpec{
- Secret: &lokiv1beta1.TenantSecretSpec{
+ OIDC: &lokiv1.OIDCSpec{
+ Secret: &lokiv1.TenantSecretSpec{
Name: "test",
},
IssuerURL: "https://127.0.0.1:5556/dex",
@@ -63,19 +63,19 @@ roles:
},
},
},
- Authorization: &lokiv1beta1.AuthorizationSpec{
- Roles: []lokiv1beta1.RoleSpec{
+ Authorization: &lokiv1.AuthorizationSpec{
+ Roles: []lokiv1.RoleSpec{
{
Name: "some-name",
Resources: []string{"metrics"},
Tenants: []string{"test-a"},
- Permissions: []lokiv1beta1.PermissionType{"read"},
+ Permissions: []lokiv1.PermissionType{"read"},
},
},
- RoleBindings: []lokiv1beta1.RoleBindingsSpec{
+ RoleBindings: []lokiv1.RoleBindingsSpec{
{
Name: "test-a",
- Subjects: []lokiv1beta1.Subject{
+ Subjects: []lokiv1.Subject{
{
Name: "[email protected]",
Kind: "user",
@@ -122,15 +122,15 @@ tenants:
url: http://127.0.0.1:8181/v1/data/observatorium/allow
`
opts := Options{
- Stack: lokiv1beta1.LokiStackSpec{
- Tenants: &lokiv1beta1.TenantsSpec{
- Mode: lokiv1beta1.Dynamic,
- Authentication: []lokiv1beta1.AuthenticationSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.Dynamic,
+ Authentication: []lokiv1.AuthenticationSpec{
{
TenantName: "test-a",
TenantID: "test",
- OIDC: &lokiv1beta1.OIDCSpec{
- Secret: &lokiv1beta1.TenantSecretSpec{
+ OIDC: &lokiv1.OIDCSpec{
+ Secret: &lokiv1.TenantSecretSpec{
Name: "test",
},
IssuerURL: "https://127.0.0.1:5556/dex",
@@ -140,8 +140,8 @@ tenants:
},
},
},
- Authorization: &lokiv1beta1.AuthorizationSpec{
- OPA: &lokiv1beta1.OPASpec{
+ Authorization: &lokiv1.AuthorizationSpec{
+ OPA: &lokiv1.OPASpec{
URL: "http://127.0.0.1:8181/v1/data/observatorium/allow",
},
},
@@ -197,9 +197,9 @@ tenants:
withAccessToken: true
`
opts := Options{
- Stack: lokiv1beta1.LokiStackSpec{
- Tenants: &lokiv1beta1.TenantsSpec{
- Mode: lokiv1beta1.OpenshiftLogging,
+ Stack: lokiv1.LokiStackSpec{
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.OpenshiftLogging,
},
},
OpenShiftOptions: openshift.Options{
diff --git a/operator/internal/manifests/internal/gateway/options.go b/operator/internal/manifests/internal/gateway/options.go
index 806d889ca47af..f3f5d1769ee04 100644
--- a/operator/internal/manifests/internal/gateway/options.go
+++ b/operator/internal/manifests/internal/gateway/options.go
@@ -1,13 +1,13 @@
package gateway
import (
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/openshift"
)
// Options is used to render the rbac.yaml and tenants.yaml file template
type Options struct {
- Stack lokiv1beta1.LokiStackSpec
+ Stack lokiv1.LokiStackSpec
Namespace string
Name string
diff --git a/operator/internal/manifests/internal/sizes.go b/operator/internal/manifests/internal/sizes.go
index 8b9550fa8e08b..3b6e9eea2035e 100644
--- a/operator/internal/manifests/internal/sizes.go
+++ b/operator/internal/manifests/internal/sizes.go
@@ -1,7 +1,7 @@
package internal
import (
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
)
@@ -28,8 +28,8 @@ type ResourceRequirements struct {
}
// ResourceRequirementsTable defines the default resource requests and limits for each size
-var ResourceRequirementsTable = map[lokiv1beta1.LokiStackSizeType]ComponentResources{
- lokiv1beta1.SizeOneXExtraSmall: {
+var ResourceRequirementsTable = map[lokiv1.LokiStackSizeType]ComponentResources{
+ lokiv1.SizeOneXExtraSmall: {
Querier: corev1.ResourceRequirements{
Requests: map[corev1.ResourceName]resource.Quantity{
corev1.ResourceCPU: resource.MustParse("1"),
@@ -86,7 +86,7 @@ var ResourceRequirementsTable = map[lokiv1beta1.LokiStackSizeType]ComponentResou
PVCSize: resource.MustParse("150Gi"),
},
},
- lokiv1beta1.SizeOneXSmall: {
+ lokiv1.SizeOneXSmall: {
Querier: corev1.ResourceRequirements{
Requests: map[corev1.ResourceName]resource.Quantity{
corev1.ResourceCPU: resource.MustParse("4"),
@@ -143,7 +143,7 @@ var ResourceRequirementsTable = map[lokiv1beta1.LokiStackSizeType]ComponentResou
PVCSize: resource.MustParse("150Gi"),
},
},
- lokiv1beta1.SizeOneXMedium: {
+ lokiv1.SizeOneXMedium: {
Querier: corev1.ResourceRequirements{
Requests: map[corev1.ResourceName]resource.Quantity{
corev1.ResourceCPU: resource.MustParse("6"),
@@ -203,14 +203,14 @@ var ResourceRequirementsTable = map[lokiv1beta1.LokiStackSizeType]ComponentResou
}
// StackSizeTable defines the default configurations for each size
-var StackSizeTable = map[lokiv1beta1.LokiStackSizeType]lokiv1beta1.LokiStackSpec{
+var StackSizeTable = map[lokiv1.LokiStackSizeType]lokiv1.LokiStackSpec{
- lokiv1beta1.SizeOneXExtraSmall: {
- Size: lokiv1beta1.SizeOneXExtraSmall,
+ lokiv1.SizeOneXExtraSmall: {
+ Size: lokiv1.SizeOneXExtraSmall,
ReplicationFactor: 1,
- Limits: &lokiv1beta1.LimitsSpec{
- Global: &lokiv1beta1.LimitsTemplateSpec{
- IngestionLimits: &lokiv1beta1.IngestionLimitSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ IngestionLimits: &lokiv1.IngestionLimitSpec{
// Defaults from Loki docs
IngestionRate: 4,
IngestionBurstSize: 6,
@@ -219,7 +219,7 @@ var StackSizeTable = map[lokiv1beta1.LokiStackSizeType]lokiv1beta1.LokiStackSpec
MaxLabelNamesPerSeries: 30,
MaxLineSize: 256000,
},
- QueryLimits: &lokiv1beta1.QueryLimitSpec{
+ QueryLimits: &lokiv1.QueryLimitSpec{
// Defaults from Loki docs
MaxEntriesLimitPerQuery: 5000,
MaxChunksPerQuery: 2000000,
@@ -227,40 +227,40 @@ var StackSizeTable = map[lokiv1beta1.LokiStackSizeType]lokiv1beta1.LokiStackSpec
},
},
},
- Template: &lokiv1beta1.LokiTemplateSpec{
- Compactor: &lokiv1beta1.LokiComponentSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Distributor: &lokiv1beta1.LokiComponentSpec{
+ Distributor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Ingester: &lokiv1beta1.LokiComponentSpec{
+ Ingester: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Querier: &lokiv1beta1.LokiComponentSpec{
+ Querier: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- QueryFrontend: &lokiv1beta1.LokiComponentSpec{
+ QueryFrontend: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Gateway: &lokiv1beta1.LokiComponentSpec{
+ Gateway: &lokiv1.LokiComponentSpec{
Replicas: 2,
},
- IndexGateway: &lokiv1beta1.LokiComponentSpec{
+ IndexGateway: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Ruler: &lokiv1beta1.LokiComponentSpec{
+ Ruler: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
},
- lokiv1beta1.SizeOneXSmall: {
- Size: lokiv1beta1.SizeOneXSmall,
+ lokiv1.SizeOneXSmall: {
+ Size: lokiv1.SizeOneXSmall,
ReplicationFactor: 2,
- Limits: &lokiv1beta1.LimitsSpec{
- Global: &lokiv1beta1.LimitsTemplateSpec{
- IngestionLimits: &lokiv1beta1.IngestionLimitSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ IngestionLimits: &lokiv1.IngestionLimitSpec{
// Custom for 1x.small
IngestionRate: 10,
IngestionBurstSize: 20,
@@ -271,7 +271,7 @@ var StackSizeTable = map[lokiv1beta1.LokiStackSizeType]lokiv1beta1.LokiStackSpec
MaxLabelNamesPerSeries: 30,
MaxLineSize: 256000,
},
- QueryLimits: &lokiv1beta1.QueryLimitSpec{
+ QueryLimits: &lokiv1.QueryLimitSpec{
// Defaults from Loki docs
MaxEntriesLimitPerQuery: 5000,
MaxChunksPerQuery: 2000000,
@@ -279,40 +279,40 @@ var StackSizeTable = map[lokiv1beta1.LokiStackSizeType]lokiv1beta1.LokiStackSpec
},
},
},
- Template: &lokiv1beta1.LokiTemplateSpec{
- Compactor: &lokiv1beta1.LokiComponentSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Distributor: &lokiv1beta1.LokiComponentSpec{
+ Distributor: &lokiv1.LokiComponentSpec{
Replicas: 2,
},
- Ingester: &lokiv1beta1.LokiComponentSpec{
+ Ingester: &lokiv1.LokiComponentSpec{
Replicas: 2,
},
- Querier: &lokiv1beta1.LokiComponentSpec{
+ Querier: &lokiv1.LokiComponentSpec{
Replicas: 2,
},
- QueryFrontend: &lokiv1beta1.LokiComponentSpec{
+ QueryFrontend: &lokiv1.LokiComponentSpec{
Replicas: 2,
},
- Gateway: &lokiv1beta1.LokiComponentSpec{
+ Gateway: &lokiv1.LokiComponentSpec{
Replicas: 2,
},
- IndexGateway: &lokiv1beta1.LokiComponentSpec{
+ IndexGateway: &lokiv1.LokiComponentSpec{
Replicas: 2,
},
- Ruler: &lokiv1beta1.LokiComponentSpec{
+ Ruler: &lokiv1.LokiComponentSpec{
Replicas: 2,
},
},
},
- lokiv1beta1.SizeOneXMedium: {
- Size: lokiv1beta1.SizeOneXMedium,
+ lokiv1.SizeOneXMedium: {
+ Size: lokiv1.SizeOneXMedium,
ReplicationFactor: 3,
- Limits: &lokiv1beta1.LimitsSpec{
- Global: &lokiv1beta1.LimitsTemplateSpec{
- IngestionLimits: &lokiv1beta1.IngestionLimitSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ IngestionLimits: &lokiv1.IngestionLimitSpec{
// Custom for 1x.medium
IngestionRate: 10,
IngestionBurstSize: 20,
@@ -323,7 +323,7 @@ var StackSizeTable = map[lokiv1beta1.LokiStackSizeType]lokiv1beta1.LokiStackSpec
MaxLabelNamesPerSeries: 30,
MaxLineSize: 256000,
},
- QueryLimits: &lokiv1beta1.QueryLimitSpec{
+ QueryLimits: &lokiv1.QueryLimitSpec{
// Defaults from Loki docs
MaxEntriesLimitPerQuery: 5000,
MaxChunksPerQuery: 2000000,
@@ -331,29 +331,29 @@ var StackSizeTable = map[lokiv1beta1.LokiStackSizeType]lokiv1beta1.LokiStackSpec
},
},
},
- Template: &lokiv1beta1.LokiTemplateSpec{
- Compactor: &lokiv1beta1.LokiComponentSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Distributor: &lokiv1beta1.LokiComponentSpec{
+ Distributor: &lokiv1.LokiComponentSpec{
Replicas: 2,
},
- Ingester: &lokiv1beta1.LokiComponentSpec{
+ Ingester: &lokiv1.LokiComponentSpec{
Replicas: 3,
},
- Querier: &lokiv1beta1.LokiComponentSpec{
+ Querier: &lokiv1.LokiComponentSpec{
Replicas: 3,
},
- QueryFrontend: &lokiv1beta1.LokiComponentSpec{
+ QueryFrontend: &lokiv1.LokiComponentSpec{
Replicas: 2,
},
- Gateway: &lokiv1beta1.LokiComponentSpec{
+ Gateway: &lokiv1.LokiComponentSpec{
Replicas: 2,
},
- IndexGateway: &lokiv1beta1.LokiComponentSpec{
+ IndexGateway: &lokiv1.LokiComponentSpec{
Replicas: 2,
},
- Ruler: &lokiv1beta1.LokiComponentSpec{
+ Ruler: &lokiv1.LokiComponentSpec{
Replicas: 2,
},
},
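For orientation: the entries above are the per-size defaults the operator falls back to when a LokiStack spec leaves limits or replica counts unset. A minimal sketch of resolving them, assuming DefaultLokiStackSpec (referenced later in this diff, in metrics.go) is a thin lookup over StackSizeTable:

	// Illustrative only: resolve the built-in defaults for a 1x.small stack.
	spec := manifests.DefaultLokiStackSpec(lokiv1.SizeOneXSmall)
	// Per the table above, 1x.small runs with a replication factor of 2.
	_ = spec.ReplicationFactor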
diff --git a/operator/internal/manifests/node_placement_test.go b/operator/internal/manifests/node_placement_test.go
index 62612c7baefc6..fb03b945bdf3d 100644
--- a/operator/internal/manifests/node_placement_test.go
+++ b/operator/internal/manifests/node_placement_test.go
@@ -3,7 +3,7 @@ package manifests
import (
"testing"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/storage"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
@@ -17,33 +17,33 @@ func TestTolerationsAreSetForEachComponent(t *testing.T) {
Effect: corev1.TaintEffectNoSchedule,
}}
optsWithTolerations := Options{
- Stack: lokiv1beta1.LokiStackSpec{
- Template: &lokiv1beta1.LokiTemplateSpec{
- Compactor: &lokiv1beta1.LokiComponentSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
Tolerations: tolerations,
Replicas: 1,
},
- Distributor: &lokiv1beta1.LokiComponentSpec{
+ Distributor: &lokiv1.LokiComponentSpec{
Tolerations: tolerations,
Replicas: 1,
},
- Ingester: &lokiv1beta1.LokiComponentSpec{
+ Ingester: &lokiv1.LokiComponentSpec{
Tolerations: tolerations,
Replicas: 1,
},
- Querier: &lokiv1beta1.LokiComponentSpec{
+ Querier: &lokiv1.LokiComponentSpec{
Tolerations: tolerations,
Replicas: 1,
},
- QueryFrontend: &lokiv1beta1.LokiComponentSpec{
+ QueryFrontend: &lokiv1.LokiComponentSpec{
Tolerations: tolerations,
Replicas: 1,
},
- IndexGateway: &lokiv1beta1.LokiComponentSpec{
+ IndexGateway: &lokiv1.LokiComponentSpec{
Tolerations: tolerations,
Replicas: 1,
},
- Ruler: &lokiv1beta1.LokiComponentSpec{
+ Ruler: &lokiv1.LokiComponentSpec{
Tolerations: tolerations,
Replicas: 1,
},
@@ -53,27 +53,27 @@ func TestTolerationsAreSetForEachComponent(t *testing.T) {
}
optsWithoutTolerations := Options{
- Stack: lokiv1beta1.LokiStackSpec{
- Template: &lokiv1beta1.LokiTemplateSpec{
- Compactor: &lokiv1beta1.LokiComponentSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Distributor: &lokiv1beta1.LokiComponentSpec{
+ Distributor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Ingester: &lokiv1beta1.LokiComponentSpec{
+ Ingester: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Querier: &lokiv1beta1.LokiComponentSpec{
+ Querier: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- QueryFrontend: &lokiv1beta1.LokiComponentSpec{
+ QueryFrontend: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- IndexGateway: &lokiv1beta1.LokiComponentSpec{
+ IndexGateway: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Ruler: &lokiv1beta1.LokiComponentSpec{
+ Ruler: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
@@ -120,33 +120,33 @@ func TestTolerationsAreSetForEachComponent(t *testing.T) {
func TestNodeSelectorsAreSetForEachComponent(t *testing.T) {
nodeSelectors := map[string]string{"type": "storage"}
optsWithNodeSelectors := Options{
- Stack: lokiv1beta1.LokiStackSpec{
- Template: &lokiv1beta1.LokiTemplateSpec{
- Compactor: &lokiv1beta1.LokiComponentSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
NodeSelector: nodeSelectors,
Replicas: 1,
},
- Distributor: &lokiv1beta1.LokiComponentSpec{
+ Distributor: &lokiv1.LokiComponentSpec{
NodeSelector: nodeSelectors,
Replicas: 1,
},
- Ingester: &lokiv1beta1.LokiComponentSpec{
+ Ingester: &lokiv1.LokiComponentSpec{
NodeSelector: nodeSelectors,
Replicas: 1,
},
- Querier: &lokiv1beta1.LokiComponentSpec{
+ Querier: &lokiv1.LokiComponentSpec{
NodeSelector: nodeSelectors,
Replicas: 1,
},
- QueryFrontend: &lokiv1beta1.LokiComponentSpec{
+ QueryFrontend: &lokiv1.LokiComponentSpec{
NodeSelector: nodeSelectors,
Replicas: 1,
},
- IndexGateway: &lokiv1beta1.LokiComponentSpec{
+ IndexGateway: &lokiv1.LokiComponentSpec{
NodeSelector: nodeSelectors,
Replicas: 1,
},
- Ruler: &lokiv1beta1.LokiComponentSpec{
+ Ruler: &lokiv1.LokiComponentSpec{
NodeSelector: nodeSelectors,
Replicas: 1,
},
@@ -156,27 +156,27 @@ func TestNodeSelectorsAreSetForEachComponent(t *testing.T) {
}
optsWithoutNodeSelectors := Options{
- Stack: lokiv1beta1.LokiStackSpec{
- Template: &lokiv1beta1.LokiTemplateSpec{
- Compactor: &lokiv1beta1.LokiComponentSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Distributor: &lokiv1beta1.LokiComponentSpec{
+ Distributor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Ingester: &lokiv1beta1.LokiComponentSpec{
+ Ingester: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Querier: &lokiv1beta1.LokiComponentSpec{
+ Querier: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- QueryFrontend: &lokiv1beta1.LokiComponentSpec{
+ QueryFrontend: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- IndexGateway: &lokiv1beta1.LokiComponentSpec{
+ IndexGateway: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Ruler: &lokiv1beta1.LokiComponentSpec{
+ Ruler: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
diff --git a/operator/internal/manifests/options.go b/operator/internal/manifests/options.go
index b666060415f16..c61c86b2fd815 100644
--- a/operator/internal/manifests/options.go
+++ b/operator/internal/manifests/options.go
@@ -2,6 +2,7 @@ package manifests
import (
configv1 "github.com/grafana/loki/operator/apis/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
"github.com/grafana/loki/operator/internal/manifests/internal"
"github.com/grafana/loki/operator/internal/manifests/openshift"
@@ -19,7 +20,7 @@ type Options struct {
ConfigSHA1 string
Gates configv1.FeatureGates
- Stack lokiv1beta1.LokiStackSpec
+ Stack lokiv1.LokiStackSpec
ResourceRequirements internal.ComponentResources
AlertingRules []lokiv1beta1.AlertingRule
diff --git a/operator/internal/manifests/querier_test.go b/operator/internal/manifests/querier_test.go
index d432c0563e2c8..a452f0906825d 100644
--- a/operator/internal/manifests/querier_test.go
+++ b/operator/internal/manifests/querier_test.go
@@ -3,7 +3,7 @@ package manifests_test
import (
"testing"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests"
"github.com/stretchr/testify/require"
)
@@ -13,10 +13,10 @@ func TestNewQuerierDeployment_HasTemplateConfigHashAnnotation(t *testing.T) {
Name: "abcd",
Namespace: "efgh",
ConfigSHA1: "deadbeef",
- Stack: lokiv1beta1.LokiStackSpec{
+ Stack: lokiv1.LokiStackSpec{
StorageClassName: "standard",
- Template: &lokiv1beta1.LokiTemplateSpec{
- Querier: &lokiv1beta1.LokiComponentSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Querier: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
@@ -39,10 +39,10 @@ func TestNewQuerierDeployment_SelectorMatchesLabels(t *testing.T) {
ss := manifests.NewQuerierDeployment(manifests.Options{
Name: "abcd",
Namespace: "efgh",
- Stack: lokiv1beta1.LokiStackSpec{
+ Stack: lokiv1.LokiStackSpec{
StorageClassName: "standard",
- Template: &lokiv1beta1.LokiTemplateSpec{
- Querier: &lokiv1beta1.LokiComponentSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Querier: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
diff --git a/operator/internal/manifests/query-frontend_test.go b/operator/internal/manifests/query-frontend_test.go
index f890ae165b45d..7bf30a42366ab 100644
--- a/operator/internal/manifests/query-frontend_test.go
+++ b/operator/internal/manifests/query-frontend_test.go
@@ -3,7 +3,7 @@ package manifests_test
import (
"testing"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests"
"github.com/stretchr/testify/require"
)
@@ -12,9 +12,9 @@ func TestNewQueryFrontendDeployment_SelectorMatchesLabels(t *testing.T) {
ss := manifests.NewQueryFrontendDeployment(manifests.Options{
Name: "abcd",
Namespace: "efgh",
- Stack: lokiv1beta1.LokiStackSpec{
- Template: &lokiv1beta1.LokiTemplateSpec{
- QueryFrontend: &lokiv1beta1.LokiComponentSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ QueryFrontend: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
@@ -32,9 +32,9 @@ func TestNewQueryFrontendDeployment_HasTemplateConfigHashAnnotation(t *testing.T
Name: "abcd",
Namespace: "efgh",
ConfigSHA1: "deadbeef",
- Stack: lokiv1beta1.LokiStackSpec{
- Template: &lokiv1beta1.LokiTemplateSpec{
- QueryFrontend: &lokiv1beta1.LokiComponentSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ QueryFrontend: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
diff --git a/operator/internal/manifests/ruler_test.go b/operator/internal/manifests/ruler_test.go
index a71704a202bc4..ff35916375406 100644
--- a/operator/internal/manifests/ruler_test.go
+++ b/operator/internal/manifests/ruler_test.go
@@ -3,7 +3,7 @@ package manifests_test
import (
"testing"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
@@ -14,10 +14,10 @@ func TestNewRulerStatefulSet_HasTemplateConfigHashAnnotation(t *testing.T) {
Name: "abcd",
Namespace: "efgh",
ConfigSHA1: "deadbeef",
- Stack: lokiv1beta1.LokiStackSpec{
+ Stack: lokiv1.LokiStackSpec{
StorageClassName: "standard",
- Template: &lokiv1beta1.LokiTemplateSpec{
- Ruler: &lokiv1beta1.LokiComponentSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Ruler: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
@@ -40,10 +40,10 @@ func TestNewRulerStatefulSet_SelectorMatchesLabels(t *testing.T) {
sts := manifests.NewRulerStatefulSet(manifests.Options{
Name: "abcd",
Namespace: "efgh",
- Stack: lokiv1beta1.LokiStackSpec{
+ Stack: lokiv1.LokiStackSpec{
StorageClassName: "standard",
- Template: &lokiv1beta1.LokiTemplateSpec{
- Ruler: &lokiv1beta1.LokiComponentSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Ruler: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
@@ -61,10 +61,10 @@ func TestNewRulerStatefulSet_MountsRulesInPerTenantIDSubDirectories(t *testing.T
sts := manifests.NewRulerStatefulSet(manifests.Options{
Name: "abcd",
Namespace: "efgh",
- Stack: lokiv1beta1.LokiStackSpec{
+ Stack: lokiv1.LokiStackSpec{
StorageClassName: "standard",
- Template: &lokiv1beta1.LokiTemplateSpec{
- Ruler: &lokiv1beta1.LokiComponentSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Ruler: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
diff --git a/operator/internal/manifests/service_monitor_test.go b/operator/internal/manifests/service_monitor_test.go
index ce9224ca6a60f..34661dce10aab 100644
--- a/operator/internal/manifests/service_monitor_test.go
+++ b/operator/internal/manifests/service_monitor_test.go
@@ -7,7 +7,7 @@ import (
corev1 "k8s.io/api/core/v1"
configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -34,31 +34,31 @@ func TestServiceMonitorMatchLabels(t *testing.T) {
Namespace: "test",
Image: "test",
Gates: featureGates,
- Stack: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Template: &lokiv1beta1.LokiTemplateSpec{
- Compactor: &lokiv1beta1.LokiComponentSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Distributor: &lokiv1beta1.LokiComponentSpec{
+ Distributor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Ingester: &lokiv1beta1.LokiComponentSpec{
+ Ingester: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Querier: &lokiv1beta1.LokiComponentSpec{
+ Querier: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- QueryFrontend: &lokiv1beta1.LokiComponentSpec{
+ QueryFrontend: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Gateway: &lokiv1beta1.LokiComponentSpec{
+ Gateway: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- IndexGateway: &lokiv1beta1.LokiComponentSpec{
+ IndexGateway: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Ruler: &lokiv1beta1.LokiComponentSpec{
+ Ruler: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
@@ -129,13 +129,13 @@ func TestServiceMonitorEndpoints_ForOpenShiftLoggingMode(t *testing.T) {
Namespace: "test",
Image: "test",
Gates: featureGates,
- Stack: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Tenants: &lokiv1beta1.TenantsSpec{
- Mode: lokiv1beta1.OpenshiftLogging,
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.OpenshiftLogging,
},
- Template: &lokiv1beta1.LokiTemplateSpec{
- Gateway: &lokiv1beta1.LokiComponentSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Gateway: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
diff --git a/operator/internal/manifests/service_test.go b/operator/internal/manifests/service_test.go
index fa07982a0d5b5..41a197650ba78 100644
--- a/operator/internal/manifests/service_test.go
+++ b/operator/internal/manifests/service_test.go
@@ -4,7 +4,7 @@ import (
"fmt"
"testing"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -20,31 +20,31 @@ func TestServicesMatchPorts(t *testing.T) {
Name: "test",
Namespace: "test",
Image: "test",
- Stack: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Template: &lokiv1beta1.LokiTemplateSpec{
- Compactor: &lokiv1beta1.LokiComponentSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Distributor: &lokiv1beta1.LokiComponentSpec{
+ Distributor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Ingester: &lokiv1beta1.LokiComponentSpec{
+ Ingester: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Querier: &lokiv1beta1.LokiComponentSpec{
+ Querier: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- QueryFrontend: &lokiv1beta1.LokiComponentSpec{
+ QueryFrontend: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Gateway: &lokiv1beta1.LokiComponentSpec{
+ Gateway: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- IndexGateway: &lokiv1beta1.LokiComponentSpec{
+ IndexGateway: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Ruler: &lokiv1beta1.LokiComponentSpec{
+ Ruler: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
@@ -149,31 +149,31 @@ func TestServicesMatchLabels(t *testing.T) {
Name: "test",
Namespace: "test",
Image: "test",
- Stack: lokiv1beta1.LokiStackSpec{
- Size: lokiv1beta1.SizeOneXExtraSmall,
- Template: &lokiv1beta1.LokiTemplateSpec{
- Compactor: &lokiv1beta1.LokiComponentSpec{
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Distributor: &lokiv1beta1.LokiComponentSpec{
+ Distributor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Ingester: &lokiv1beta1.LokiComponentSpec{
+ Ingester: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Querier: &lokiv1beta1.LokiComponentSpec{
+ Querier: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- QueryFrontend: &lokiv1beta1.LokiComponentSpec{
+ QueryFrontend: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Gateway: &lokiv1beta1.LokiComponentSpec{
+ Gateway: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- IndexGateway: &lokiv1beta1.LokiComponentSpec{
+ IndexGateway: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
- Ruler: &lokiv1beta1.LokiComponentSpec{
+ Ruler: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
},
diff --git a/operator/internal/manifests/storage/configure.go b/operator/internal/manifests/storage/configure.go
index 8413c3d10ce62..14ce2cd2972fe 100644
--- a/operator/internal/manifests/storage/configure.go
+++ b/operator/internal/manifests/storage/configure.go
@@ -5,7 +5,7 @@ import (
"path"
"github.com/ViaQ/logerr/v2/kverrors"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/imdario/mergo"
appsv1 "k8s.io/api/apps/v1"
@@ -28,9 +28,9 @@ const (
// - S3: Ensure mounting custom CA configmap if any TLSConfig given
func ConfigureDeployment(d *appsv1.Deployment, opts Options) error {
switch opts.SharedStore {
- case lokiv1beta1.ObjectStorageSecretGCS:
+ case lokiv1.ObjectStorageSecretGCS:
return configureDeployment(d, opts.SecretName)
- case lokiv1beta1.ObjectStorageSecretS3:
+ case lokiv1.ObjectStorageSecretS3:
if opts.TLS == nil {
return nil
}
@@ -46,9 +46,9 @@ func ConfigureDeployment(d *appsv1.Deployment, opts Options) error {
// - S3: Ensure mounting custom CA configmap if any TLSConfig given
func ConfigureStatefulSet(d *appsv1.StatefulSet, opts Options) error {
switch opts.SharedStore {
- case lokiv1beta1.ObjectStorageSecretGCS:
+ case lokiv1.ObjectStorageSecretGCS:
return configureStatefulSet(d, opts.SecretName)
- case lokiv1beta1.ObjectStorageSecretS3:
+ case lokiv1.ObjectStorageSecretS3:
if opts.TLS == nil {
return nil
}
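For context, the switches above mean ConfigureDeployment and ConfigureStatefulSet mutate the workload only for GCS (mounting the credentials secret) or for S3 when a TLSConfig is present (mounting the CA configmap); every other store is a no-op. A hedged sketch of a caller, where dpl is an *appsv1.Deployment built by the manifests package and the secret name is illustrative:

	opts := storage.Options{
		SecretName:  "gcs-secret", // illustrative name
		SharedStore: lokiv1.ObjectStorageSecretGCS,
	}
	if err := storage.ConfigureDeployment(dpl, opts); err != nil {
		return err
	}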
diff --git a/operator/internal/manifests/storage/configure_test.go b/operator/internal/manifests/storage/configure_test.go
index ced3cb2b0c3ee..e33d37ffe18d4 100644
--- a/operator/internal/manifests/storage/configure_test.go
+++ b/operator/internal/manifests/storage/configure_test.go
@@ -3,7 +3,7 @@ package storage_test
import (
"testing"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/storage"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
@@ -23,7 +23,7 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
desc: "object storage other than GCS",
opts: storage.Options{
SecretName: "test",
- SharedStore: lokiv1beta1.ObjectStorageSecretS3,
+ SharedStore: lokiv1.ObjectStorageSecretS3,
},
dpl: &appsv1.Deployment{
Spec: appsv1.DeploymentSpec{
@@ -56,7 +56,7 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
desc: "object storage GCS",
opts: storage.Options{
SecretName: "test",
- SharedStore: lokiv1beta1.ObjectStorageSecretGCS,
+ SharedStore: lokiv1.ObjectStorageSecretGCS,
},
dpl: &appsv1.Deployment{
Spec: appsv1.DeploymentSpec{
@@ -135,7 +135,7 @@ func TestConfigureStatefulSetForStorageType(t *testing.T) {
desc: "object storage other than GCS",
opts: storage.Options{
SecretName: "test",
- SharedStore: lokiv1beta1.ObjectStorageSecretS3,
+ SharedStore: lokiv1.ObjectStorageSecretS3,
},
sts: &appsv1.StatefulSet{
Spec: appsv1.StatefulSetSpec{
@@ -168,7 +168,7 @@ func TestConfigureStatefulSetForStorageType(t *testing.T) {
desc: "object storage GCS",
opts: storage.Options{
SecretName: "test",
- SharedStore: lokiv1beta1.ObjectStorageSecretGCS,
+ SharedStore: lokiv1.ObjectStorageSecretGCS,
},
sts: &appsv1.StatefulSet{
Spec: appsv1.StatefulSetSpec{
@@ -246,7 +246,7 @@ func TestConfigureDeploymentForStorageCA(t *testing.T) {
desc: "object storage other than S3",
opts: storage.Options{
SecretName: "test",
- SharedStore: lokiv1beta1.ObjectStorageSecretAzure,
+ SharedStore: lokiv1.ObjectStorageSecretAzure,
},
dpl: &appsv1.Deployment{
Spec: appsv1.DeploymentSpec{
@@ -279,7 +279,7 @@ func TestConfigureDeploymentForStorageCA(t *testing.T) {
desc: "object storage S3",
opts: storage.Options{
SecretName: "test",
- SharedStore: lokiv1beta1.ObjectStorageSecretS3,
+ SharedStore: lokiv1.ObjectStorageSecretS3,
TLS: &storage.TLSConfig{
CA: "test",
},
@@ -359,7 +359,7 @@ func TestConfigureStatefulSetForStorageCA(t *testing.T) {
desc: "object storage other than S3",
opts: storage.Options{
SecretName: "test",
- SharedStore: lokiv1beta1.ObjectStorageSecretAzure,
+ SharedStore: lokiv1.ObjectStorageSecretAzure,
TLS: &storage.TLSConfig{
CA: "test",
},
@@ -395,7 +395,7 @@ func TestConfigureStatefulSetForStorageCA(t *testing.T) {
desc: "object storage S3",
opts: storage.Options{
SecretName: "test",
- SharedStore: lokiv1beta1.ObjectStorageSecretS3,
+ SharedStore: lokiv1.ObjectStorageSecretS3,
TLS: &storage.TLSConfig{
CA: "test",
},
diff --git a/operator/internal/manifests/storage/options.go b/operator/internal/manifests/storage/options.go
index 8d50f4f2ec654..1984590273017 100644
--- a/operator/internal/manifests/storage/options.go
+++ b/operator/internal/manifests/storage/options.go
@@ -1,14 +1,14 @@
package storage
import (
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)
// Options is used to configure Loki to integrate with
// supported object storages.
type Options struct {
- Schemas []lokiv1beta1.ObjectStorageSchema
- SharedStore lokiv1beta1.ObjectStorageSecretType
+ Schemas []lokiv1.ObjectStorageSchema
+ SharedStore lokiv1.ObjectStorageSecretType
Azure *AzureStorageConfig
GCS *GCSStorageConfig
diff --git a/operator/internal/manifests/storage/schema.go b/operator/internal/manifests/storage/schema.go
index 4c6783407248c..27a6cd8caae62 100644
--- a/operator/internal/manifests/storage/schema.go
+++ b/operator/internal/manifests/storage/schema.go
@@ -5,7 +5,7 @@ import (
"time"
"github.com/ViaQ/logerr/v2/kverrors"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)
// BuildSchemaConfig creates a list of schemas to be used to configure
@@ -17,9 +17,9 @@ import (
//
func BuildSchemaConfig(
utcTime time.Time,
- spec lokiv1beta1.ObjectStorageSpec,
- status lokiv1beta1.LokiStackStorageStatus,
-) ([]lokiv1beta1.ObjectStorageSchema, error) {
+ spec lokiv1.ObjectStorageSpec,
+ status lokiv1.LokiStackStorageStatus,
+) ([]lokiv1.ObjectStorageSchema, error) {
if len(spec.Schemas) == 0 {
return nil, kverrors.New("spec does not contain any schemas")
}
@@ -35,8 +35,8 @@ func BuildSchemaConfig(
}
// buildSchemas creates a sorted and reduced list of schemaConfigs
-func buildSchemas(schemas []lokiv1beta1.ObjectStorageSchema) []lokiv1beta1.ObjectStorageSchema {
- sortedSchemas := make([]lokiv1beta1.ObjectStorageSchema, len(schemas))
+func buildSchemas(schemas []lokiv1.ObjectStorageSchema) []lokiv1.ObjectStorageSchema {
+ sortedSchemas := make([]lokiv1.ObjectStorageSchema, len(schemas))
copy(sortedSchemas, schemas)
sort.SliceStable(sortedSchemas, func(i, j int) bool {
@@ -50,9 +50,9 @@ func buildSchemas(schemas []lokiv1beta1.ObjectStorageSchema) []lokiv1beta1.Objec
}
// reduceSortedSchemas returns a list of schemas that have removed redundant entries.
-func reduceSortedSchemas(schemas []lokiv1beta1.ObjectStorageSchema) []lokiv1beta1.ObjectStorageSchema {
+func reduceSortedSchemas(schemas []lokiv1.ObjectStorageSchema) []lokiv1.ObjectStorageSchema {
version := ""
- reduced := []lokiv1beta1.ObjectStorageSchema{}
+ reduced := []lokiv1.ObjectStorageSchema{}
for _, schema := range schemas {
strSchemaVersion := string(schema.Version)
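The intended behavior of buildSchemas and reduceSortedSchemas is easiest to read off the tests that follow: entries are sorted by effective date, then consecutive entries with an unchanged version collapse into the earliest one. A sketch in the spirit of those tests (in-package, since both helpers are unexported):

	got := reduceSortedSchemas([]lokiv1.ObjectStorageSchema{
		{Version: lokiv1.ObjectStorageSchemaV11, EffectiveDate: "2020-10-01"},
		{Version: lokiv1.ObjectStorageSchemaV11, EffectiveDate: "2021-02-01"}, // redundant, same version
		{Version: lokiv1.ObjectStorageSchemaV12, EffectiveDate: "2021-06-01"},
	})
	// got keeps only the first v11 entry and the v12 entry.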
diff --git a/operator/internal/manifests/storage/schema_test.go b/operator/internal/manifests/storage/schema_test.go
index 3b6de444280f2..3663a5c0ddf2f 100644
--- a/operator/internal/manifests/storage/schema_test.go
+++ b/operator/internal/manifests/storage/schema_test.go
@@ -4,14 +4,14 @@ import (
"testing"
"time"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/stretchr/testify/require"
)
func TestBuildSchemaConfig_NoSchemas(t *testing.T) {
- spec := lokiv1beta1.ObjectStorageSpec{}
- status := lokiv1beta1.LokiStackStorageStatus{}
+ spec := lokiv1.ObjectStorageSpec{}
+ status := lokiv1.LokiStackStorageStatus{}
expected, err := BuildSchemaConfig(time.Now().UTC(), spec, status)
@@ -20,20 +20,20 @@ func TestBuildSchemaConfig_NoSchemas(t *testing.T) {
}
func TestBuildSchemaConfig_AddSchema_NoStatuses(t *testing.T) {
- spec := lokiv1beta1.ObjectStorageSpec{
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ spec := lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-01",
},
},
}
- status := lokiv1beta1.LokiStackStorageStatus{}
+ status := lokiv1.LokiStackStorageStatus{}
actual, err := BuildSchemaConfig(time.Now().UTC(), spec, status)
- expected := []lokiv1beta1.ObjectStorageSchema{
+ expected := []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-01",
},
}
@@ -44,35 +44,35 @@ func TestBuildSchemaConfig_AddSchema_NoStatuses(t *testing.T) {
func TestBuildSchemaConfig_AddSchema_WithStatuses_WithValidDate(t *testing.T) {
utcTime := time.Date(2021, 9, 1, 0, 0, 0, 0, time.UTC)
- spec := lokiv1beta1.ObjectStorageSpec{
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ spec := lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-01",
},
{
- Version: lokiv1beta1.ObjectStorageSchemaV12,
+ Version: lokiv1.ObjectStorageSchemaV12,
EffectiveDate: "2021-10-01",
},
},
}
- status := lokiv1beta1.LokiStackStorageStatus{
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ status := lokiv1.LokiStackStorageStatus{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-01",
},
},
}
actual, err := BuildSchemaConfig(utcTime, spec, status)
- expected := []lokiv1beta1.ObjectStorageSchema{
+ expected := []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-01",
},
{
- Version: lokiv1beta1.ObjectStorageSchemaV12,
+ Version: lokiv1.ObjectStorageSchemaV12,
EffectiveDate: "2021-10-01",
},
}
@@ -83,23 +83,23 @@ func TestBuildSchemaConfig_AddSchema_WithStatuses_WithValidDate(t *testing.T) {
func TestBuildSchemaConfig_AddSchema_WithStatuses_WithInvalidDate(t *testing.T) {
utcTime := time.Date(2021, 10, 1, 0, 0, 0, 0, time.UTC)
- updateWindow := utcTime.Add(lokiv1beta1.StorageSchemaUpdateBuffer).Format(lokiv1beta1.StorageSchemaEffectiveDateFormat)
- spec := lokiv1beta1.ObjectStorageSpec{
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ updateWindow := utcTime.Add(lokiv1.StorageSchemaUpdateBuffer).Format(lokiv1.StorageSchemaEffectiveDateFormat)
+ spec := lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-01",
},
{
- Version: lokiv1beta1.ObjectStorageSchemaV12,
- EffectiveDate: lokiv1beta1.StorageSchemaEffectiveDate(updateWindow),
+ Version: lokiv1.ObjectStorageSchemaV12,
+ EffectiveDate: lokiv1.StorageSchemaEffectiveDate(updateWindow),
},
},
}
- status := lokiv1beta1.LokiStackStorageStatus{
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ status := lokiv1.LokiStackStorageStatus{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-01",
},
},
@@ -112,48 +112,48 @@ func TestBuildSchemaConfig_AddSchema_WithStatuses_WithInvalidDate(t *testing.T)
}
func TestBuildSchemas(t *testing.T) {
- schemas := []lokiv1beta1.ObjectStorageSchema{
+ schemas := []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV12,
+ Version: lokiv1.ObjectStorageSchemaV12,
EffectiveDate: "2021-11-01",
},
{
- Version: lokiv1beta1.ObjectStorageSchemaV12,
+ Version: lokiv1.ObjectStorageSchemaV12,
EffectiveDate: "2021-06-01",
},
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-01",
},
{
- Version: lokiv1beta1.ObjectStorageSchemaV12,
+ Version: lokiv1.ObjectStorageSchemaV12,
EffectiveDate: "2021-12-01",
},
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2021-10-01",
},
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2021-02-01",
},
}
- expected := []lokiv1beta1.ObjectStorageSchema{
+ expected := []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-01",
},
{
- Version: lokiv1beta1.ObjectStorageSchemaV12,
+ Version: lokiv1.ObjectStorageSchemaV12,
EffectiveDate: "2021-06-01",
},
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2021-10-01",
},
{
- Version: lokiv1beta1.ObjectStorageSchemaV12,
+ Version: lokiv1.ObjectStorageSchemaV12,
EffectiveDate: "2021-11-01",
},
}
@@ -163,48 +163,48 @@ func TestBuildSchemas(t *testing.T) {
}
func TestReduceSortedSchemas(t *testing.T) {
- schemas := []lokiv1beta1.ObjectStorageSchema{
+ schemas := []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-01",
},
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2021-02-01",
},
{
- Version: lokiv1beta1.ObjectStorageSchemaV12,
+ Version: lokiv1.ObjectStorageSchemaV12,
EffectiveDate: "2021-06-01",
},
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2021-10-01",
},
{
- Version: lokiv1beta1.ObjectStorageSchemaV12,
+ Version: lokiv1.ObjectStorageSchemaV12,
EffectiveDate: "2021-11-01",
},
{
- Version: lokiv1beta1.ObjectStorageSchemaV12,
+ Version: lokiv1.ObjectStorageSchemaV12,
EffectiveDate: "2021-12-01",
},
}
- expected := []lokiv1beta1.ObjectStorageSchema{
+ expected := []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-01",
},
{
- Version: lokiv1beta1.ObjectStorageSchemaV12,
+ Version: lokiv1.ObjectStorageSchemaV12,
EffectiveDate: "2021-06-01",
},
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2021-10-01",
},
{
- Version: lokiv1beta1.ObjectStorageSchemaV12,
+ Version: lokiv1.ObjectStorageSchemaV12,
EffectiveDate: "2021-11-01",
},
}
diff --git a/operator/internal/metrics/metrics.go b/operator/internal/metrics/metrics.go
index 3fd97855c81f6..a461bd38e14b0 100644
--- a/operator/internal/metrics/metrics.go
+++ b/operator/internal/metrics/metrics.go
@@ -6,7 +6,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"sigs.k8s.io/controller-runtime/pkg/metrics"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests"
)
@@ -68,9 +68,9 @@ func RegisterMetricCollectors() {
}
// Collect takes metrics based on the spec
-func Collect(spec *lokiv1beta1.LokiStackSpec, stackName string) {
+func Collect(spec *lokiv1.LokiStackSpec, stackName string) {
defaultSpec := manifests.DefaultLokiStackSpec(spec.Size)
- sizes := []lokiv1beta1.LokiStackSizeType{lokiv1beta1.SizeOneXSmall, lokiv1beta1.SizeOneXMedium}
+ sizes := []lokiv1.LokiStackSizeType{lokiv1.SizeOneXSmall, lokiv1.SizeOneXMedium}
for _, size := range sizes {
var (
@@ -106,14 +106,14 @@ func Collect(spec *lokiv1beta1.LokiStackSpec, stackName string) {
}
}
-func setDeploymentMetric(size lokiv1beta1.LokiStackSizeType, identifier string, active bool) {
+func setDeploymentMetric(size lokiv1.LokiStackSizeType, identifier string, active bool) {
deploymentMetric.With(prometheus.Labels{
"size": string(size),
"stack_id": identifier,
}).Set(boolValue(active))
}
-func setUserDefinedLimitsMetric(size lokiv1beta1.LokiStackSizeType, identifier string, limitType UserDefinedLimitsType, active bool) {
+func setUserDefinedLimitsMetric(size lokiv1.LokiStackSizeType, identifier string, limitType UserDefinedLimitsType, active bool) {
userDefinedLimitsMetric.With(prometheus.Labels{
"size": string(size),
"stack_id": identifier,
@@ -121,14 +121,14 @@ func setUserDefinedLimitsMetric(size lokiv1beta1.LokiStackSizeType, identifier s
}).Set(boolValue(active))
}
-func setGlobalStreamLimitMetric(size lokiv1beta1.LokiStackSizeType, identifier string, rate float64) {
+func setGlobalStreamLimitMetric(size lokiv1.LokiStackSizeType, identifier string, rate float64) {
globalStreamLimitMetric.With(prometheus.Labels{
"size": string(size),
"stack_id": identifier,
}).Set(rate)
}
-func setAverageTenantStreamLimitMetric(size lokiv1beta1.LokiStackSizeType, identifier string, rate float64) {
+func setAverageTenantStreamLimitMetric(size lokiv1.LokiStackSizeType, identifier string, rate float64) {
averageTenantStreamLimitMetric.With(prometheus.Labels{
"size": string(size),
"stack_id": identifier,
@@ -142,7 +142,7 @@ func boolValue(value bool) float64 {
return 0
}
-func streamRate(tenantLimits map[string]lokiv1beta1.LimitsTemplateSpec, ingesters int32) float64 {
+func streamRate(tenantLimits map[string]lokiv1.LimitsTemplateSpec, ingesters int32) float64 {
var tenants, tenantStreamLimit int32 = 0, 0
for _, tenant := range tenantLimits {
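Collect is the public entry point for these gauges; a reconciler would typically call it once per stack after defaults are resolved. A minimal sketch, where stack is an assumed, already-fetched lokiv1.LokiStack:

	metrics.Collect(&stack.Spec, stack.Name)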
diff --git a/operator/internal/status/components.go b/operator/internal/status/components.go
index 8b73fabad5c79..032f6d3e71fe5 100644
--- a/operator/internal/status/components.go
+++ b/operator/internal/status/components.go
@@ -4,7 +4,7 @@ import (
"context"
"github.com/ViaQ/logerr/v2/kverrors"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/manifests"
@@ -16,7 +16,7 @@ import (
// SetComponentsStatus updates the pod status map component
func SetComponentsStatus(ctx context.Context, k k8s.Client, req ctrl.Request) error {
- var s lokiv1beta1.LokiStack
+ var s lokiv1.LokiStack
if err := k.Get(ctx, req.NamespacedName, &s); err != nil {
if apierrors.IsNotFound(err) {
return nil
@@ -25,7 +25,7 @@ func SetComponentsStatus(ctx context.Context, k k8s.Client, req ctrl.Request) er
}
var err error
- s.Status.Components = lokiv1beta1.LokiStackComponentStatus{}
+ s.Status.Components = lokiv1.LokiStackComponentStatus{}
s.Status.Components.Compactor, err = appendPodStatus(ctx, k, manifests.LabelCompactorComponent, s.Name, s.Namespace)
if err != nil {
 		return kverrors.Wrap(err, "failed to lookup LokiStack component pods status", "name", manifests.LabelCompactorComponent)
@@ -69,8 +69,8 @@ func SetComponentsStatus(ctx context.Context, k k8s.Client, req ctrl.Request) er
return k.Status().Update(ctx, &s, &client.UpdateOptions{})
}
-func appendPodStatus(ctx context.Context, k k8s.Client, component, stack, ns string) (lokiv1beta1.PodStatusMap, error) {
- psm := lokiv1beta1.PodStatusMap{}
+func appendPodStatus(ctx context.Context, k k8s.Client, component, stack, ns string) (lokiv1.PodStatusMap, error) {
+ psm := lokiv1.PodStatusMap{}
pods := &corev1.PodList{}
opts := []client.ListOption{
client.MatchingLabels(manifests.ComponentLabels(component, stack)),
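appendPodStatus buckets pod names by pod phase, so the PodStatusMap it returns has the shape used as the fixture in the tests below. A sketch of that shape (values are illustrative):

	psm := lokiv1.PodStatusMap{
		"Pending": []string{"pod-a"},
		"Running": []string{"pod-b"},
	}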
diff --git a/operator/internal/status/components_test.go b/operator/internal/status/components_test.go
index 8ceb645ae1495..f2e5834837377 100644
--- a/operator/internal/status/components_test.go
+++ b/operator/internal/status/components_test.go
@@ -4,7 +4,7 @@ import (
"context"
"testing"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/grafana/loki/operator/internal/status"
"github.com/stretchr/testify/require"
@@ -60,7 +60,7 @@ func TestSetComponentsStatus_WhenListReturnError_ReturnError(t *testing.T) {
k.StatusStub = func() client.StatusWriter { return sw }
- s := lokiv1beta1.LokiStack{
+ s := lokiv1.LokiStack{
ObjectMeta: metav1.ObjectMeta{
Name: "my-stack",
Namespace: "some-ns",
@@ -96,7 +96,7 @@ func TestSetComponentsStatus_WhenPodListExisting_SetPodStatusMap(t *testing.T) {
k.StatusStub = func() client.StatusWriter { return sw }
- s := lokiv1beta1.LokiStack{
+ s := lokiv1.LokiStack{
ObjectMeta: metav1.ObjectMeta{
Name: "my-stack",
Namespace: "some-ns",
@@ -143,13 +143,13 @@ func TestSetComponentsStatus_WhenPodListExisting_SetPodStatusMap(t *testing.T) {
return nil
}
- expected := lokiv1beta1.PodStatusMap{
+ expected := lokiv1.PodStatusMap{
"Pending": []string{"pod-a"},
"Running": []string{"pod-b"},
}
sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error {
- stack := obj.(*lokiv1beta1.LokiStack)
+ stack := obj.(*lokiv1.LokiStack)
require.Equal(t, expected, stack.Status.Components.Compactor)
return nil
}
@@ -167,13 +167,13 @@ func TestSetComponentsStatus_WhenRulerEnabled_SetPodStatusMap(t *testing.T) {
k.StatusStub = func() client.StatusWriter { return sw }
- s := lokiv1beta1.LokiStack{
+ s := lokiv1.LokiStack{
ObjectMeta: metav1.ObjectMeta{
Name: "my-stack",
Namespace: "some-ns",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Rules: &lokiv1beta1.RulesSpec{
+ Spec: lokiv1.LokiStackSpec{
+ Rules: &lokiv1.RulesSpec{
Enabled: true,
},
},
@@ -219,13 +219,13 @@ func TestSetComponentsStatus_WhenRulerEnabled_SetPodStatusMap(t *testing.T) {
return nil
}
- expected := lokiv1beta1.PodStatusMap{
+ expected := lokiv1.PodStatusMap{
"Pending": []string{"pod-a"},
"Running": []string{"pod-b"},
}
sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error {
- stack := obj.(*lokiv1beta1.LokiStack)
+ stack := obj.(*lokiv1.LokiStack)
require.Equal(t, expected, stack.Status.Components.Ruler)
return nil
}
@@ -243,13 +243,13 @@ func TestSetComponentsStatus_WhenRulerNotEnabled_DoNothing(t *testing.T) {
k.StatusStub = func() client.StatusWriter { return sw }
- s := lokiv1beta1.LokiStack{
+ s := lokiv1.LokiStack{
ObjectMeta: metav1.ObjectMeta{
Name: "my-stack",
Namespace: "some-ns",
},
- Spec: lokiv1beta1.LokiStackSpec{
- Rules: &lokiv1beta1.RulesSpec{
+ Spec: lokiv1.LokiStackSpec{
+ Rules: &lokiv1.RulesSpec{
Enabled: false,
},
},
@@ -303,8 +303,8 @@ func TestSetComponentsStatus_WhenRulerNotEnabled_DoNothing(t *testing.T) {
}
sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error {
- stack := obj.(*lokiv1beta1.LokiStack)
- require.Equal(t, stack.Status.Components.Ruler, lokiv1beta1.PodStatusMap{})
+ stack := obj.(*lokiv1.LokiStack)
+ require.Equal(t, stack.Status.Components.Ruler, lokiv1.PodStatusMap{})
return nil
}
diff --git a/operator/internal/status/lokistack.go b/operator/internal/status/lokistack.go
index e99e2a2c858cb..0a8304719feb5 100644
--- a/operator/internal/status/lokistack.go
+++ b/operator/internal/status/lokistack.go
@@ -5,7 +5,7 @@ import (
"fmt"
"github.com/ViaQ/logerr/v2/kverrors"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -17,7 +17,7 @@ import (
// DegradedError contains information about why the managed LokiStack has an invalid configuration.
type DegradedError struct {
Message string
- Reason lokiv1beta1.LokiStackConditionReason
+ Reason lokiv1.LokiStackConditionReason
Requeue bool
}
@@ -28,7 +28,7 @@ func (e *DegradedError) Error() string {
// SetReadyCondition updates or appends the condition Ready to the lokistack status conditions.
// In addition it resets all other Status conditions to false.
func SetReadyCondition(ctx context.Context, k k8s.Client, req ctrl.Request) error {
- var s lokiv1beta1.LokiStack
+ var s lokiv1.LokiStack
if err := k.Get(ctx, req.NamespacedName, &s); err != nil {
if apierrors.IsNotFound(err) {
return nil
@@ -37,17 +37,17 @@ func SetReadyCondition(ctx context.Context, k k8s.Client, req ctrl.Request) erro
}
for _, cond := range s.Status.Conditions {
- if cond.Type == string(lokiv1beta1.ConditionReady) && cond.Status == metav1.ConditionTrue {
+ if cond.Type == string(lokiv1.ConditionReady) && cond.Status == metav1.ConditionTrue {
return nil
}
}
ready := metav1.Condition{
- Type: string(lokiv1beta1.ConditionReady),
+ Type: string(lokiv1.ConditionReady),
Status: metav1.ConditionTrue,
LastTransitionTime: metav1.Now(),
Message: "All components ready",
- Reason: string(lokiv1beta1.ReasonReadyComponents),
+ Reason: string(lokiv1.ReasonReadyComponents),
}
index := -1
@@ -57,7 +57,7 @@ func SetReadyCondition(ctx context.Context, k k8s.Client, req ctrl.Request) erro
s.Status.Conditions[i].LastTransitionTime = metav1.Now()
// Locate existing ready condition if any
- if s.Status.Conditions[i].Type == string(lokiv1beta1.ConditionReady) {
+ if s.Status.Conditions[i].Type == string(lokiv1.ConditionReady) {
index = i
}
}
@@ -74,7 +74,7 @@ func SetReadyCondition(ctx context.Context, k k8s.Client, req ctrl.Request) erro
// SetFailedCondition updates or appends the condition Failed to the lokistack status conditions.
// In addition it resets all other Status conditions to false.
func SetFailedCondition(ctx context.Context, k k8s.Client, req ctrl.Request) error {
- var s lokiv1beta1.LokiStack
+ var s lokiv1.LokiStack
if err := k.Get(ctx, req.NamespacedName, &s); err != nil {
if apierrors.IsNotFound(err) {
return nil
@@ -83,17 +83,17 @@ func SetFailedCondition(ctx context.Context, k k8s.Client, req ctrl.Request) err
}
for _, cond := range s.Status.Conditions {
- if cond.Type == string(lokiv1beta1.ConditionFailed) && cond.Status == metav1.ConditionTrue {
+ if cond.Type == string(lokiv1.ConditionFailed) && cond.Status == metav1.ConditionTrue {
return nil
}
}
failed := metav1.Condition{
- Type: string(lokiv1beta1.ConditionFailed),
+ Type: string(lokiv1.ConditionFailed),
Status: metav1.ConditionTrue,
LastTransitionTime: metav1.Now(),
Message: "Some LokiStack components failed",
- Reason: string(lokiv1beta1.ReasonFailedComponents),
+ Reason: string(lokiv1.ReasonFailedComponents),
}
index := -1
@@ -103,7 +103,7 @@ func SetFailedCondition(ctx context.Context, k k8s.Client, req ctrl.Request) err
s.Status.Conditions[i].LastTransitionTime = metav1.Now()
// Locate existing failed condition if any
- if s.Status.Conditions[i].Type == string(lokiv1beta1.ConditionFailed) {
+ if s.Status.Conditions[i].Type == string(lokiv1.ConditionFailed) {
index = i
}
}
@@ -120,7 +120,7 @@ func SetFailedCondition(ctx context.Context, k k8s.Client, req ctrl.Request) err
// SetPendingCondition updates or appends the condition Pending to the lokistack status conditions.
// In addition it resets all other Status conditions to false.
func SetPendingCondition(ctx context.Context, k k8s.Client, req ctrl.Request) error {
- var s lokiv1beta1.LokiStack
+ var s lokiv1.LokiStack
if err := k.Get(ctx, req.NamespacedName, &s); err != nil {
if apierrors.IsNotFound(err) {
return nil
@@ -129,17 +129,17 @@ func SetPendingCondition(ctx context.Context, k k8s.Client, req ctrl.Request) er
}
for _, cond := range s.Status.Conditions {
- if cond.Type == string(lokiv1beta1.ConditionPending) && cond.Status == metav1.ConditionTrue {
+ if cond.Type == string(lokiv1.ConditionPending) && cond.Status == metav1.ConditionTrue {
return nil
}
}
pending := metav1.Condition{
- Type: string(lokiv1beta1.ConditionPending),
+ Type: string(lokiv1.ConditionPending),
Status: metav1.ConditionTrue,
LastTransitionTime: metav1.Now(),
 		Message:            "Some LokiStack components pending on dependencies",
- Reason: string(lokiv1beta1.ReasonPendingComponents),
+ Reason: string(lokiv1.ReasonPendingComponents),
}
index := -1
@@ -149,7 +149,7 @@ func SetPendingCondition(ctx context.Context, k k8s.Client, req ctrl.Request) er
s.Status.Conditions[i].LastTransitionTime = metav1.Now()
// Locate existing pending condition if any
- if s.Status.Conditions[i].Type == string(lokiv1beta1.ConditionPending) {
+ if s.Status.Conditions[i].Type == string(lokiv1.ConditionPending) {
index = i
}
}
@@ -164,8 +164,8 @@ func SetPendingCondition(ctx context.Context, k k8s.Client, req ctrl.Request) er
}
// SetDegradedCondition appends the condition Degraded to the lokistack status conditions.
-func SetDegradedCondition(ctx context.Context, k k8s.Client, req ctrl.Request, msg string, reason lokiv1beta1.LokiStackConditionReason) error {
- var s lokiv1beta1.LokiStack
+func SetDegradedCondition(ctx context.Context, k k8s.Client, req ctrl.Request, msg string, reason lokiv1.LokiStackConditionReason) error {
+ var s lokiv1.LokiStack
if err := k.Get(ctx, req.NamespacedName, &s); err != nil {
if apierrors.IsNotFound(err) {
return nil
@@ -175,13 +175,13 @@ func SetDegradedCondition(ctx context.Context, k k8s.Client, req ctrl.Request, m
reasonStr := string(reason)
for _, cond := range s.Status.Conditions {
- if cond.Type == string(lokiv1beta1.ConditionDegraded) && cond.Reason == reasonStr && cond.Status == metav1.ConditionTrue {
+ if cond.Type == string(lokiv1.ConditionDegraded) && cond.Reason == reasonStr && cond.Status == metav1.ConditionTrue {
return nil
}
}
degraded := metav1.Condition{
- Type: string(lokiv1beta1.ConditionDegraded),
+ Type: string(lokiv1.ConditionDegraded),
Status: metav1.ConditionTrue,
LastTransitionTime: metav1.Now(),
Reason: reasonStr,
@@ -195,7 +195,7 @@ func SetDegradedCondition(ctx context.Context, k k8s.Client, req ctrl.Request, m
s.Status.Conditions[i].LastTransitionTime = metav1.Now()
 		// Locate existing degraded condition if any
- if s.Status.Conditions[i].Type == string(lokiv1beta1.ConditionDegraded) {
+ if s.Status.Conditions[i].Type == string(lokiv1.ConditionDegraded) {
index = i
}
}
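All of these setters share one pattern: bail out early if the condition is already true, otherwise refresh timestamps, reset the other conditions to false, and locate-or-append the target condition before pushing a status update. A hedged sketch of reporting a degraded stack from reconcile code, reusing the reason constant exercised in the tests below:

	return status.SetDegradedCondition(ctx, k, req,
		"Missing object storage secret", // illustrative message
		lokiv1.ReasonMissingObjectStorageSecret)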
diff --git a/operator/internal/status/lokistack_test.go b/operator/internal/status/lokistack_test.go
index a395bce12ba80..e83f568800ffb 100644
--- a/operator/internal/status/lokistack_test.go
+++ b/operator/internal/status/lokistack_test.go
@@ -4,7 +4,7 @@ import (
"context"
"testing"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/grafana/loki/operator/internal/status"
@@ -57,15 +57,15 @@ func TestSetReadyCondition_WhenGetLokiStackReturnsNotFound_DoNothing(t *testing.
func TestSetReadyCondition_WhenExisting_DoNothing(t *testing.T) {
k := &k8sfakes.FakeClient{}
- s := lokiv1beta1.LokiStack{
+ s := lokiv1.LokiStack{
ObjectMeta: metav1.ObjectMeta{
Name: "my-stack",
Namespace: "some-ns",
},
- Status: lokiv1beta1.LokiStackStatus{
+ Status: lokiv1.LokiStackStatus{
Conditions: []metav1.Condition{
{
- Type: string(lokiv1beta1.ConditionReady),
+ Type: string(lokiv1.ConditionReady),
Status: metav1.ConditionTrue,
},
},
@@ -98,15 +98,15 @@ func TestSetReadyCondition_WhenExisting_SetReadyConditionTrue(t *testing.T) {
k.StatusStub = func() client.StatusWriter { return sw }
- s := lokiv1beta1.LokiStack{
+ s := lokiv1.LokiStack{
ObjectMeta: metav1.ObjectMeta{
Name: "my-stack",
Namespace: "some-ns",
},
- Status: lokiv1beta1.LokiStackStatus{
+ Status: lokiv1.LokiStackStatus{
Conditions: []metav1.Condition{
{
- Type: string(lokiv1beta1.ConditionReady),
+ Type: string(lokiv1.ConditionReady),
Status: metav1.ConditionFalse,
},
},
@@ -129,7 +129,7 @@ func TestSetReadyCondition_WhenExisting_SetReadyConditionTrue(t *testing.T) {
}
sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error {
- actual := obj.(*lokiv1beta1.LokiStack)
+ actual := obj.(*lokiv1.LokiStack)
require.NotEmpty(t, actual.Status.Conditions)
require.Equal(t, metav1.ConditionTrue, actual.Status.Conditions[0].Status)
return nil
@@ -148,7 +148,7 @@ func TestSetReadyCondition_WhenNoneExisting_AppendReadyCondition(t *testing.T) {
k.StatusStub = func() client.StatusWriter { return sw }
- s := lokiv1beta1.LokiStack{
+ s := lokiv1.LokiStack{
ObjectMeta: metav1.ObjectMeta{
Name: "my-stack",
Namespace: "some-ns",
@@ -171,7 +171,7 @@ func TestSetReadyCondition_WhenNoneExisting_AppendReadyCondition(t *testing.T) {
}
sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error {
- actual := obj.(*lokiv1beta1.LokiStack)
+ actual := obj.(*lokiv1.LokiStack)
require.NotEmpty(t, actual.Status.Conditions)
return nil
}
@@ -222,15 +222,15 @@ func TestSetFailedCondition_WhenGetLokiStackReturnsNotFound_DoNothing(t *testing
func TestSetFailedCondition_WhenExisting_DoNothing(t *testing.T) {
k := &k8sfakes.FakeClient{}
- s := lokiv1beta1.LokiStack{
+ s := lokiv1.LokiStack{
ObjectMeta: metav1.ObjectMeta{
Name: "my-stack",
Namespace: "some-ns",
},
- Status: lokiv1beta1.LokiStackStatus{
+ Status: lokiv1.LokiStackStatus{
Conditions: []metav1.Condition{
{
- Type: string(lokiv1beta1.ConditionFailed),
+ Type: string(lokiv1.ConditionFailed),
Status: metav1.ConditionTrue,
},
},
@@ -263,15 +263,15 @@ func TestSetFailedCondition_WhenExisting_SetFailedConditionTrue(t *testing.T) {
k.StatusStub = func() client.StatusWriter { return sw }
- s := lokiv1beta1.LokiStack{
+ s := lokiv1.LokiStack{
ObjectMeta: metav1.ObjectMeta{
Name: "my-stack",
Namespace: "some-ns",
},
- Status: lokiv1beta1.LokiStackStatus{
+ Status: lokiv1.LokiStackStatus{
Conditions: []metav1.Condition{
{
- Type: string(lokiv1beta1.ConditionFailed),
+ Type: string(lokiv1.ConditionFailed),
Status: metav1.ConditionFalse,
},
},
@@ -294,7 +294,7 @@ func TestSetFailedCondition_WhenExisting_SetFailedConditionTrue(t *testing.T) {
}
sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error {
- actual := obj.(*lokiv1beta1.LokiStack)
+ actual := obj.(*lokiv1.LokiStack)
require.NotEmpty(t, actual.Status.Conditions)
require.Equal(t, metav1.ConditionTrue, actual.Status.Conditions[0].Status)
return nil
@@ -313,7 +313,7 @@ func TestSetFailedCondition_WhenNoneExisting_AppendFailedCondition(t *testing.T)
k.StatusStub = func() client.StatusWriter { return sw }
- s := lokiv1beta1.LokiStack{
+ s := lokiv1.LokiStack{
ObjectMeta: metav1.ObjectMeta{
Name: "my-stack",
Namespace: "some-ns",
@@ -336,7 +336,7 @@ func TestSetFailedCondition_WhenNoneExisting_AppendFailedCondition(t *testing.T)
}
sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error {
- actual := obj.(*lokiv1beta1.LokiStack)
+ actual := obj.(*lokiv1.LokiStack)
require.NotEmpty(t, actual.Status.Conditions)
return nil
}
@@ -352,7 +352,7 @@ func TestSetDegradedCondition_WhenGetLokiStackReturnsError_ReturnError(t *testin
k := &k8sfakes.FakeClient{}
msg := "tell me nothing"
- reason := lokiv1beta1.ReasonMissingObjectStorageSecret
+ reason := lokiv1.ReasonMissingObjectStorageSecret
r := ctrl.Request{
NamespacedName: types.NamespacedName{
@@ -408,15 +408,15 @@ func TestSetPendingCondition_WhenGetLokiStackReturnsNotFound_DoNothing(t *testin
func TestSetPendingCondition_WhenExisting_DoNothing(t *testing.T) {
k := &k8sfakes.FakeClient{}
- s := lokiv1beta1.LokiStack{
+ s := lokiv1.LokiStack{
ObjectMeta: metav1.ObjectMeta{
Name: "my-stack",
Namespace: "some-ns",
},
- Status: lokiv1beta1.LokiStackStatus{
+ Status: lokiv1.LokiStackStatus{
Conditions: []metav1.Condition{
{
- Type: string(lokiv1beta1.ConditionPending),
+ Type: string(lokiv1.ConditionPending),
Status: metav1.ConditionTrue,
},
},
@@ -449,15 +449,15 @@ func TestSetPendingCondition_WhenExisting_SetPendingConditionTrue(t *testing.T)
k.StatusStub = func() client.StatusWriter { return sw }
- s := lokiv1beta1.LokiStack{
+ s := lokiv1.LokiStack{
ObjectMeta: metav1.ObjectMeta{
Name: "my-stack",
Namespace: "some-ns",
},
- Status: lokiv1beta1.LokiStackStatus{
+ Status: lokiv1.LokiStackStatus{
Conditions: []metav1.Condition{
{
- Type: string(lokiv1beta1.ConditionPending),
+ Type: string(lokiv1.ConditionPending),
Status: metav1.ConditionFalse,
},
},
@@ -480,7 +480,7 @@ func TestSetPendingCondition_WhenExisting_SetPendingConditionTrue(t *testing.T)
}
sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error {
- actual := obj.(*lokiv1beta1.LokiStack)
+ actual := obj.(*lokiv1.LokiStack)
require.NotEmpty(t, actual.Status.Conditions)
require.Equal(t, metav1.ConditionTrue, actual.Status.Conditions[0].Status)
return nil
@@ -498,7 +498,7 @@ func TestSetPendingCondition_WhenNoneExisting_AppendPendingCondition(t *testing.
k.StatusStub = func() client.StatusWriter { return sw }
- s := lokiv1beta1.LokiStack{
+ s := lokiv1.LokiStack{
ObjectMeta: metav1.ObjectMeta{
Name: "my-stack",
Namespace: "some-ns",
@@ -521,7 +521,7 @@ func TestSetPendingCondition_WhenNoneExisting_AppendPendingCondition(t *testing.
}
sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error {
- actual := obj.(*lokiv1beta1.LokiStack)
+ actual := obj.(*lokiv1.LokiStack)
require.NotEmpty(t, actual.Status.Conditions)
return nil
}
@@ -537,7 +537,7 @@ func TestSetDegradedCondition_WhenGetLokiStackReturnsNotFound_DoNothing(t *testi
k := &k8sfakes.FakeClient{}
msg := "tell me nothing"
- reason := lokiv1beta1.ReasonMissingObjectStorageSecret
+ reason := lokiv1.ReasonMissingObjectStorageSecret
r := ctrl.Request{
NamespacedName: types.NamespacedName{
@@ -558,16 +558,16 @@ func TestSetDegradedCondition_WhenExisting_DoNothing(t *testing.T) {
k := &k8sfakes.FakeClient{}
msg := "tell me nothing"
- reason := lokiv1beta1.ReasonMissingObjectStorageSecret
- s := lokiv1beta1.LokiStack{
+ reason := lokiv1.ReasonMissingObjectStorageSecret
+ s := lokiv1.LokiStack{
ObjectMeta: metav1.ObjectMeta{
Name: "my-stack",
Namespace: "some-ns",
},
- Status: lokiv1beta1.LokiStackStatus{
+ Status: lokiv1.LokiStackStatus{
Conditions: []metav1.Condition{
{
- Type: string(lokiv1beta1.ConditionDegraded),
+ Type: string(lokiv1.ConditionDegraded),
Reason: string(reason),
Status: metav1.ConditionTrue,
},
@@ -602,16 +602,16 @@ func TestSetDegradedCondition_WhenExisting_SetDegradedConditionTrue(t *testing.T
k.StatusStub = func() client.StatusWriter { return sw }
msg := "tell me something"
- reason := lokiv1beta1.ReasonMissingObjectStorageSecret
- s := lokiv1beta1.LokiStack{
+ reason := lokiv1.ReasonMissingObjectStorageSecret
+ s := lokiv1.LokiStack{
ObjectMeta: metav1.ObjectMeta{
Name: "my-stack",
Namespace: "some-ns",
},
- Status: lokiv1beta1.LokiStackStatus{
+ Status: lokiv1.LokiStackStatus{
Conditions: []metav1.Condition{
{
- Type: string(lokiv1beta1.ConditionDegraded),
+ Type: string(lokiv1.ConditionDegraded),
Reason: string(reason),
Status: metav1.ConditionFalse,
},
@@ -635,7 +635,7 @@ func TestSetDegradedCondition_WhenExisting_SetDegradedConditionTrue(t *testing.T
}
sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error {
- actual := obj.(*lokiv1beta1.LokiStack)
+ actual := obj.(*lokiv1.LokiStack)
require.NotEmpty(t, actual.Status.Conditions)
require.Equal(t, metav1.ConditionTrue, actual.Status.Conditions[0].Status)
return nil
@@ -654,8 +654,8 @@ func TestSetDegradedCondition_WhenNoneExisting_AppendDegradedCondition(t *testin
k.StatusStub = func() client.StatusWriter { return sw }
msg := "tell me something"
- reason := lokiv1beta1.ReasonMissingObjectStorageSecret
- s := lokiv1beta1.LokiStack{
+ reason := lokiv1.ReasonMissingObjectStorageSecret
+ s := lokiv1.LokiStack{
ObjectMeta: metav1.ObjectMeta{
Name: "my-stack",
Namespace: "some-ns",
@@ -678,7 +678,7 @@ func TestSetDegradedCondition_WhenNoneExisting_AppendDegradedCondition(t *testin
}
sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error {
- actual := obj.(*lokiv1beta1.LokiStack)
+ actual := obj.(*lokiv1.LokiStack)
require.NotEmpty(t, actual.Status.Conditions)
return nil
}
diff --git a/operator/internal/status/status.go b/operator/internal/status/status.go
index 3793fd09208f8..247aea4e325a9 100644
--- a/operator/internal/status/status.go
+++ b/operator/internal/status/status.go
@@ -4,7 +4,7 @@ import (
"context"
"github.com/ViaQ/logerr/v2/kverrors"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
corev1 "k8s.io/api/core/v1"
@@ -20,7 +20,7 @@ func Refresh(ctx context.Context, k k8s.Client, req ctrl.Request) error {
return err
}
- var s lokiv1beta1.LokiStack
+ var s lokiv1.LokiStack
if err := k.Get(ctx, req.NamespacedName, &s); err != nil {
if apierrors.IsNotFound(err) {
return nil
diff --git a/operator/internal/status/storage.go b/operator/internal/status/storage.go
index ebfe1d283bb7f..b8a4d1630996c 100644
--- a/operator/internal/status/storage.go
+++ b/operator/internal/status/storage.go
@@ -4,7 +4,7 @@ import (
"context"
"github.com/ViaQ/logerr/v2/kverrors"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -13,8 +13,8 @@ import (
)
// SetStorageSchemaStatus updates the storage status component
-func SetStorageSchemaStatus(ctx context.Context, k k8s.Client, req ctrl.Request, schemas []lokiv1beta1.ObjectStorageSchema) error {
- var s lokiv1beta1.LokiStack
+func SetStorageSchemaStatus(ctx context.Context, k k8s.Client, req ctrl.Request, schemas []lokiv1.ObjectStorageSchema) error {
+ var s lokiv1.LokiStack
if err := k.Get(ctx, req.NamespacedName, &s); err != nil {
if apierrors.IsNotFound(err) {
return nil
@@ -22,7 +22,7 @@ func SetStorageSchemaStatus(ctx context.Context, k k8s.Client, req ctrl.Request,
return kverrors.Wrap(err, "failed to lookup lokistack", "name", req.NamespacedName)
}
- s.Status.Storage = lokiv1beta1.LokiStackStorageStatus{
+ s.Status.Storage = lokiv1.LokiStackStorageStatus{
Schemas: schemas,
}
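
SetStorageSchemaStatus above follows the reconciler idiom used throughout this package: tolerate NotFound on lookup (the stack may have been deleted mid-reconcile), wrap any other error, and overwrite the status field rather than merge it. A minimal self-contained sketch of that idiom, with a hypothetical in-memory client standing in for controller-runtime's client.Client:

package main

import (
	"context"
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// fakeClient is a stand-in for the Kubernetes client; the real code goes
// through controller-runtime's client.Client and apierrors.IsNotFound.
type fakeClient struct{ stacks map[string][]string }

func (c *fakeClient) get(_ context.Context, name string) ([]string, error) {
	s, ok := c.stacks[name]
	if !ok {
		return nil, errNotFound
	}
	return s, nil
}

func (c *fakeClient) updateStatus(_ context.Context, name string, schemas []string) error {
	c.stacks[name] = schemas
	return nil
}

// setStorageSchemaStatus mirrors the idiom above: a missing object is not an
// error, and the new schema list replaces the old one instead of merging.
func setStorageSchemaStatus(ctx context.Context, c *fakeClient, name string, schemas []string) error {
	if _, err := c.get(ctx, name); err != nil {
		if errors.Is(err, errNotFound) {
			return nil // object gone: nothing to do
		}
		return fmt.Errorf("failed to lookup lokistack %q: %w", name, err)
	}
	return c.updateStatus(ctx, name, schemas)
}

func main() {
	c := &fakeClient{stacks: map[string][]string{"my-stack": {"v11"}}}
	fmt.Println(setStorageSchemaStatus(context.Background(), c, "my-stack", []string{"v11", "v12"})) // <nil>
	fmt.Println(c.stacks["my-stack"])                                                               // [v11 v12]
	fmt.Println(setStorageSchemaStatus(context.Background(), c, "gone", nil))                       // <nil>: NotFound tolerated
}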
diff --git a/operator/internal/status/storage_test.go b/operator/internal/status/storage_test.go
index 64458f85bbada..6a8da6529a118 100644
--- a/operator/internal/status/storage_test.go
+++ b/operator/internal/status/storage_test.go
@@ -4,7 +4,7 @@ import (
"context"
"testing"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/grafana/loki/operator/internal/status"
"github.com/stretchr/testify/require"
@@ -31,7 +31,7 @@ func TestSetStorageSchemaStatus_WhenGetLokiStackReturnsError_ReturnError(t *test
return apierrors.NewBadRequest("something wasn't found")
}
- err := status.SetStorageSchemaStatus(context.TODO(), k, r, []lokiv1beta1.ObjectStorageSchema{})
+ err := status.SetStorageSchemaStatus(context.TODO(), k, r, []lokiv1.ObjectStorageSchema{})
require.Error(t, err)
}
@@ -49,7 +49,7 @@ func TestSetStorageSchemaStatus_WhenGetLokiStackReturnsNotFound_DoNothing(t *tes
return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found")
}
- err := status.SetStorageSchemaStatus(context.TODO(), k, r, []lokiv1beta1.ObjectStorageSchema{})
+ err := status.SetStorageSchemaStatus(context.TODO(), k, r, []lokiv1.ObjectStorageSchema{})
require.NoError(t, err)
}
@@ -59,16 +59,16 @@ func TestSetStorageSchemaStatus_WhenStorageStatusExists_OverwriteStorageStatus(t
k.StatusStub = func() client.StatusWriter { return sw }
- s := lokiv1beta1.LokiStack{
+ s := lokiv1.LokiStack{
ObjectMeta: metav1.ObjectMeta{
Name: "my-stack",
Namespace: "some-ns",
},
- Status: lokiv1beta1.LokiStackStatus{
- Storage: lokiv1beta1.LokiStackStorageStatus{
- Schemas: []lokiv1beta1.ObjectStorageSchema{
+ Status: lokiv1.LokiStackStatus{
+ Storage: lokiv1.LokiStackStorageStatus{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
},
@@ -83,26 +83,26 @@ func TestSetStorageSchemaStatus_WhenStorageStatusExists_OverwriteStorageStatus(t
},
}
- schemas := []lokiv1beta1.ObjectStorageSchema{
+ schemas := []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
{
- Version: lokiv1beta1.ObjectStorageSchemaV12,
+ Version: lokiv1.ObjectStorageSchemaV12,
EffectiveDate: "2021-10-11",
},
}
- expected := []lokiv1beta1.ObjectStorageSchema{
+ expected := []lokiv1.ObjectStorageSchema{
{
- Version: lokiv1beta1.ObjectStorageSchemaV11,
+ Version: lokiv1.ObjectStorageSchemaV11,
EffectiveDate: "2020-10-11",
},
{
- Version: lokiv1beta1.ObjectStorageSchemaV12,
+ Version: lokiv1.ObjectStorageSchemaV12,
EffectiveDate: "2021-10-11",
},
}
@@ -116,7 +116,7 @@ func TestSetStorageSchemaStatus_WhenStorageStatusExists_OverwriteStorageStatus(t
}
sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error {
- stack := obj.(*lokiv1beta1.LokiStack)
+ stack := obj.(*lokiv1.LokiStack)
require.Equal(t, expected, stack.Status.Storage.Schemas)
return nil
}
diff --git a/operator/main.go b/operator/main.go
index b3a07f01d698e..b26a68b0dba29 100644
--- a/operator/main.go
+++ b/operator/main.go
@@ -18,6 +18,7 @@ import (
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
ctrlconfigv1 "github.com/grafana/loki/operator/apis/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
lokictrl "github.com/grafana/loki/operator/controllers/loki"
"github.com/grafana/loki/operator/internal/metrics"
@@ -37,7 +38,10 @@ func init() {
utilruntime.Must(lokiv1beta1.AddToScheme(scheme))
+ utilruntime.Must(lokiv1.AddToScheme(scheme))
+
utilruntime.Must(ctrlconfigv1.AddToScheme(scheme))
+
// +kubebuilder:scaffold:scheme
}
@@ -103,7 +107,7 @@ func main() {
os.Exit(1)
}
if ctrlCfg.Gates.LokiStackWebhook {
- if err = (&lokiv1beta1.LokiStack{}).SetupWebhookWithManager(mgr); err != nil {
+ if err = (&lokiv1.LokiStack{}).SetupWebhookWithManager(mgr); err != nil {
logger.Error(err, "unable to create webhook", "webhook", "LokiStack")
os.Exit(1)
}
| operator | Bump loki.grafana.com/LokiStack from v1beta to v1 (#6474) |
| 5978f1344c84525e6b8bda45869b867b7e878956 | 2024-09-25 04:43:52 | Daniel Longeuay | feat(helm): :sparkles: add additional service annotations for components in distributed mode (#14131) | false |
diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md
index 31b3006a84eb9..a2f8abbbc945b 100644
--- a/docs/sources/setup/install/helm/reference.md
+++ b/docs/sources/setup/install/helm/reference.md
@@ -753,6 +753,7 @@ null
"priorityClassName": null,
"replicas": 0,
"resources": {},
+ "serviceAnnotations": {},
"serviceLabels": {},
"terminationGracePeriodSeconds": 30,
"tolerations": []
@@ -1030,6 +1031,15 @@ null
<td><pre lang="json">
{}
</pre>
+</td>
+ </tr>
+ <tr>
+ <td>bloomBuilder.serviceAnnotations</td>
+ <td>object</td>
+ <td>Annotations for bloom-builder service</td>
+ <td><pre lang="json">
+{}
+</pre>
</td>
</tr>
<tr>
@@ -1125,6 +1135,7 @@ null
"imagePullSecrets": [],
"name": null
},
+ "serviceAnnotations": {},
"serviceLabels": {},
"terminationGracePeriodSeconds": 30,
"tolerations": []
@@ -1411,6 +1422,15 @@ true
<td><pre lang="json">
null
</pre>
+</td>
+ </tr>
+ <tr>
+ <td>bloomGateway.serviceAnnotations</td>
+ <td>object</td>
+ <td>Annotations for bloom-gateway service</td>
+ <td><pre lang="json">
+{}
+</pre>
</td>
</tr>
<tr>
@@ -1506,6 +1526,7 @@ null
"imagePullSecrets": [],
"name": null
},
+ "serviceAnnotations": {},
"serviceLabels": {},
"terminationGracePeriodSeconds": 30,
"tolerations": []
@@ -1792,6 +1813,15 @@ true
<td><pre lang="json">
null
</pre>
+</td>
+ </tr>
+ <tr>
+ <td>bloomPlanner.serviceAnnotations</td>
+ <td>object</td>
+ <td>Annotations for bloom-planner service</td>
+ <td><pre lang="json">
+{}
+</pre>
</td>
</tr>
<tr>
@@ -2252,6 +2282,7 @@ null
"imagePullSecrets": [],
"name": null
},
+ "serviceAnnotations": {},
"serviceLabels": {},
"terminationGracePeriodSeconds": 30,
"tolerations": []
@@ -2547,6 +2578,15 @@ true
<td><pre lang="json">
null
</pre>
+</td>
+ </tr>
+ <tr>
+ <td>compactor.serviceAnnotations</td>
+ <td>object</td>
+ <td>Annotations for compactor service</td>
+ <td><pre lang="json">
+{}
+</pre>
</td>
</tr>
<tr>
@@ -2642,6 +2682,7 @@ null
"priorityClassName": null,
"replicas": 0,
"resources": {},
+ "serviceAnnotations": {},
"serviceLabels": {},
"terminationGracePeriodSeconds": 30,
"tolerations": []
@@ -2928,6 +2969,15 @@ null
<td><pre lang="json">
{}
</pre>
+</td>
+ </tr>
+ <tr>
+ <td>distributor.serviceAnnotations</td>
+ <td>object</td>
+ <td>Annotations for distributor service</td>
+ <td><pre lang="json">
+{}
+</pre>
</td>
</tr>
<tr>
@@ -4520,6 +4570,7 @@ null
"priorityClassName": null,
"replicas": 0,
"resources": {},
+ "serviceAnnotations": {},
"serviceLabels": {},
"terminationGracePeriodSeconds": 300,
"tolerations": []
@@ -4770,6 +4821,15 @@ null
<td><pre lang="json">
{}
</pre>
+</td>
+ </tr>
+ <tr>
+ <td>indexGateway.serviceAnnotations</td>
+ <td>object</td>
+ <td>Annotations for index-gateway service</td>
+ <td><pre lang="json">
+{}
+</pre>
</td>
</tr>
<tr>
@@ -4873,6 +4933,7 @@ null
"readinessProbe": {},
"replicas": 0,
"resources": {},
+ "serviceAnnotations": {},
"serviceLabels": {},
"terminationGracePeriodSeconds": 300,
"tolerations": [],
@@ -5253,6 +5314,15 @@ false
<td><pre lang="json">
{}
</pre>
+</td>
+ </tr>
+ <tr>
+ <td>ingester.serviceAnnotations</td>
+ <td>object</td>
<td>Annotations for ingester service</td>
+ <td><pre lang="json">
+{}
+</pre>
</td>
</tr>
<tr>
@@ -7457,6 +7527,7 @@ false
"imagePullSecrets": [],
"name": null
},
+ "serviceAnnotations": {},
"serviceLabels": {},
"terminationGracePeriodSeconds": 30,
"tolerations": []
@@ -7752,6 +7823,15 @@ true
<td><pre lang="json">
null
</pre>
+</td>
+ </tr>
+ <tr>
+ <td>patternIngester.serviceAnnotations</td>
+ <td>object</td>
+ <td>Annotations for pattern ingester service</td>
+ <td><pre lang="json">
+{}
+</pre>
</td>
</tr>
<tr>
@@ -7846,6 +7926,7 @@ null
"priorityClassName": null,
"replicas": 0,
"resources": {},
+ "serviceAnnotations": {},
"serviceLabels": {},
"terminationGracePeriodSeconds": 30,
"tolerations": [],
@@ -8198,6 +8279,15 @@ null
<td><pre lang="json">
{}
</pre>
+</td>
+ </tr>
+ <tr>
+ <td>querier.serviceAnnotations</td>
+ <td>object</td>
+ <td>Annotations for querier service</td>
+ <td><pre lang="json">
+{}
+</pre>
</td>
</tr>
<tr>
@@ -8292,6 +8382,7 @@ Defaults to allow skew no more then 1 node
"priorityClassName": null,
"replicas": 0,
"resources": {},
+ "serviceAnnotations": {},
"serviceLabels": {},
"terminationGracePeriodSeconds": 30,
"tolerations": []
@@ -8569,6 +8660,15 @@ null
<td><pre lang="json">
{}
</pre>
+</td>
+ </tr>
+ <tr>
+ <td>queryFrontend.serviceAnnotations</td>
+ <td>object</td>
+ <td>Annotations for query-frontend service</td>
+ <td><pre lang="json">
+{}
+</pre>
</td>
</tr>
<tr>
@@ -8640,6 +8740,7 @@ null
"priorityClassName": null,
"replicas": 0,
"resources": {},
+ "serviceAnnotations": {},
"serviceLabels": {},
"terminationGracePeriodSeconds": 30,
"tolerations": []
@@ -8818,6 +8919,15 @@ null
<td><pre lang="json">
{}
</pre>
+</td>
+ </tr>
+ <tr>
+ <td>queryScheduler.serviceAnnotations</td>
+ <td>object</td>
+ <td>Annotations for query-scheduler service</td>
+ <td><pre lang="json">
+{}
+</pre>
</td>
</tr>
<tr>
@@ -9739,6 +9849,7 @@ null
"priorityClassName": null,
"replicas": 0,
"resources": {},
+ "serviceAnnotations": {},
"serviceLabels": {},
"terminationGracePeriodSeconds": 300,
"tolerations": []
@@ -9998,6 +10109,15 @@ null
<td><pre lang="json">
{}
</pre>
+</td>
+ </tr>
+ <tr>
+ <td>ruler.serviceAnnotations</td>
+ <td>object</td>
+ <td>Annotations for ruler service</td>
+ <td><pre lang="json">
+{}
+</pre>
</td>
</tr>
<tr>
diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md
index 6052b517068ae..3525bc396547b 100644
--- a/production/helm/loki/CHANGELOG.md
+++ b/production/helm/loki/CHANGELOG.md
@@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang
[//]: # (<AUTOMATED_UPDATES_LOCATOR> : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.)
+## 6.14.0
+
+- [FEATURE] Add additional service annotations for components in distributed mode
+
## 6.13.0
- [CHANGE] Correctly wrap ClusterRoleBinding around `rbac/namespaced` conditional.
diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml
index e3143c14fa041..b05f8ff0fdd04 100644
--- a/production/helm/loki/Chart.yaml
+++ b/production/helm/loki/Chart.yaml
@@ -3,7 +3,7 @@ name: loki
description: Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes.
type: application
appVersion: 3.1.1
-version: 6.13.0
+version: 6.14.0
home: https://grafana.github.io/helm-charts
sources:
- https://github.com/grafana/loki
diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md
index 1781a0043b854..f55b4186debf4 100644
--- a/production/helm/loki/README.md
+++ b/production/helm/loki/README.md
@@ -1,6 +1,6 @@
# loki
- [badges: version 6.13.0 | appVersion 3.1.1]
+ [badges: version 6.14.0 | appVersion 3.1.1]
Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes.
diff --git a/production/helm/loki/templates/bloom-builder/service-bloom-builder-headless.yaml b/production/helm/loki/templates/bloom-builder/service-bloom-builder-headless.yaml
index e863a2c27866c..938925291a44c 100644
--- a/production/helm/loki/templates/bloom-builder/service-bloom-builder-headless.yaml
+++ b/production/helm/loki/templates/bloom-builder/service-bloom-builder-headless.yaml
@@ -11,10 +11,13 @@ metadata:
{{- toYaml . | nindent 4 }}
{{- end }}
prometheus.io/service-monitor: "false"
- {{- with .Values.loki.serviceAnnotations }}
annotations:
- {{- toYaml . | nindent 4 }}
- {{- end }}
+ {{- with .Values.loki.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
+ {{- with .Values.bloomBuilder.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
spec:
clusterIP: None
type: ClusterIP
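
The restructure above, repeated for every component service file below, moves the annotations: key outside the {{- with }} guard: the key is rendered once, unconditionally, and the global .Values.loki.serviceAnnotations map and the per-component map both nest beneath it. A rough analogue in plain Go text/template (illustration only: the chart itself uses sprig's toYaml and nindent helpers, and the annotation keys here are invented):

package main

import (
	"os"
	"text/template"
)

// Emitting the annotations: key once, then ranging over each optional map,
// lets two independent sources contribute to the same block. Guarding the
// key inside a single `with` block (the old layout) leaves a second
// optional map with no key to attach to.
const svc = `metadata:
  annotations:
{{- range $k, $v := .GlobalAnnotations }}
    {{ $k }}: {{ $v }}
{{- end }}
{{- range $k, $v := .ComponentAnnotations }}
    {{ $k }}: {{ $v }}
{{- end }}
`

func main() {
	t := template.Must(template.New("svc").Parse(svc))
	data := map[string]map[string]string{
		"GlobalAnnotations":    {"team": "observability"},             // like .Values.loki.serviceAnnotations
		"ComponentAnnotations": {"example.com/lb-scheme": "internal"}, // like .Values.bloomBuilder.serviceAnnotations
	}
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}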
diff --git a/production/helm/loki/templates/bloom-builder/service-bloom-builder.yaml b/production/helm/loki/templates/bloom-builder/service-bloom-builder.yaml
index 6351bbdae5664..b3debb08893a1 100644
--- a/production/helm/loki/templates/bloom-builder/service-bloom-builder.yaml
+++ b/production/helm/loki/templates/bloom-builder/service-bloom-builder.yaml
@@ -10,10 +10,13 @@ metadata:
{{- with .Values.bloomBuilder.serviceLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
- {{- with .Values.loki.serviceAnnotations }}
annotations:
- {{- toYaml . | nindent 4 }}
- {{- end }}
+ {{- with .Values.loki.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
+ {{- with .Values.bloomBuilder.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
spec:
type: ClusterIP
publishNotReadyAddresses: true
diff --git a/production/helm/loki/templates/bloom-gateway/service-bloom-gateway-headless.yaml b/production/helm/loki/templates/bloom-gateway/service-bloom-gateway-headless.yaml
index daa61c64ab5e3..852e4cb10006e 100644
--- a/production/helm/loki/templates/bloom-gateway/service-bloom-gateway-headless.yaml
+++ b/production/helm/loki/templates/bloom-gateway/service-bloom-gateway-headless.yaml
@@ -11,10 +11,13 @@ metadata:
{{- with .Values.bloomGateway.serviceLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
- {{- with .Values.loki.serviceAnnotations }}
annotations:
- {{- toYaml . | nindent 4 }}
- {{- end }}
+ {{- with .Values.loki.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
+ {{- with .Values.bloomGateway.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
spec:
type: ClusterIP
clusterIP: None
diff --git a/production/helm/loki/templates/bloom-planner/service-bloom-planner-headless.yaml b/production/helm/loki/templates/bloom-planner/service-bloom-planner-headless.yaml
index 1ad970746226a..78e26336f39fd 100644
--- a/production/helm/loki/templates/bloom-planner/service-bloom-planner-headless.yaml
+++ b/production/helm/loki/templates/bloom-planner/service-bloom-planner-headless.yaml
@@ -10,10 +10,13 @@ metadata:
{{- with .Values.bloomPlanner.serviceLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
- {{- with .Values.loki.serviceAnnotations }}
annotations:
- {{- toYaml . | nindent 4 }}
- {{- end }}
+ {{- with .Values.loki.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
+ {{- with .Values.bloomPlanner.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
spec:
type: ClusterIP
clusterIP: None
diff --git a/production/helm/loki/templates/compactor/service-compactor.yaml b/production/helm/loki/templates/compactor/service-compactor.yaml
index c75e1cee5ae18..f118b6cc9b825 100644
--- a/production/helm/loki/templates/compactor/service-compactor.yaml
+++ b/production/helm/loki/templates/compactor/service-compactor.yaml
@@ -11,10 +11,13 @@ metadata:
{{- toYaml . | nindent 4 }}
{{- end }}
app.kubernetes.io/component: compactor
- {{- with .Values.loki.serviceAnnotations }}
annotations:
- {{- toYaml . | nindent 4 }}
- {{- end }}
+ {{- with .Values.loki.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
+ {{- with .Values.compactor.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
spec:
type: ClusterIP
ports:
diff --git a/production/helm/loki/templates/distributor/service-distributor-headless.yaml b/production/helm/loki/templates/distributor/service-distributor-headless.yaml
index c69bb0add37ed..650b62959d97f 100644
--- a/production/helm/loki/templates/distributor/service-distributor-headless.yaml
+++ b/production/helm/loki/templates/distributor/service-distributor-headless.yaml
@@ -12,10 +12,13 @@ metadata:
{{- end }}
variant: headless
prometheus.io/service-monitor: "false"
- {{- with .Values.loki.serviceAnnotations }}
annotations:
- {{- toYaml . | nindent 4 }}
- {{- end }}
+ {{- with .Values.loki.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
+ {{- with .Values.distributor.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
spec:
type: ClusterIP
clusterIP: None
diff --git a/production/helm/loki/templates/distributor/service-distributor.yaml b/production/helm/loki/templates/distributor/service-distributor.yaml
index 8145834d35097..6a8995677c149 100644
--- a/production/helm/loki/templates/distributor/service-distributor.yaml
+++ b/production/helm/loki/templates/distributor/service-distributor.yaml
@@ -10,10 +10,13 @@ metadata:
{{- with .Values.distributor.serviceLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
- {{- with .Values.loki.serviceAnnotations }}
annotations:
- {{- toYaml . | nindent 4 }}
- {{- end }}
+ {{- with .Values.loki.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
+ {{- with .Values.distributor.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
spec:
type: ClusterIP
ports:
diff --git a/production/helm/loki/templates/index-gateway/service-index-gateway-headless.yaml b/production/helm/loki/templates/index-gateway/service-index-gateway-headless.yaml
index b0c90dc35fd90..06506582f9e36 100644
--- a/production/helm/loki/templates/index-gateway/service-index-gateway-headless.yaml
+++ b/production/helm/loki/templates/index-gateway/service-index-gateway-headless.yaml
@@ -7,6 +7,13 @@ metadata:
labels:
{{- include "loki.indexGatewaySelectorLabels" . | nindent 4 }}
prometheus.io/service-monitor: "false"
+ annotations:
+ {{- with .Values.loki.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
+ {{- with .Values.indexGateway.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
spec:
type: ClusterIP
clusterIP: None
diff --git a/production/helm/loki/templates/index-gateway/service-index-gateway.yaml b/production/helm/loki/templates/index-gateway/service-index-gateway.yaml
index 2d43bb0ed5e9e..822a0ce692d3b 100644
--- a/production/helm/loki/templates/index-gateway/service-index-gateway.yaml
+++ b/production/helm/loki/templates/index-gateway/service-index-gateway.yaml
@@ -9,10 +9,13 @@ metadata:
{{- with .Values.indexGateway.serviceLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
- {{- with .Values.loki.serviceAnnotations }}
annotations:
- {{- toYaml . | nindent 4 }}
- {{- end }}
+ {{- with .Values.loki.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
+ {{- with .Values.indexGateway.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
spec:
type: ClusterIP
ports:
diff --git a/production/helm/loki/templates/ingester/service-ingester-headless.yaml b/production/helm/loki/templates/ingester/service-ingester-headless.yaml
index e83dcf7be4fe6..8a8b92f2ebc5a 100644
--- a/production/helm/loki/templates/ingester/service-ingester-headless.yaml
+++ b/production/helm/loki/templates/ingester/service-ingester-headless.yaml
@@ -8,10 +8,13 @@ metadata:
labels:
{{- include "loki.ingesterSelectorLabels" . | nindent 4 }}
prometheus.io/service-monitor: "false"
- {{- with .Values.loki.serviceAnnotations }}
annotations:
- {{- toYaml . | nindent 4 }}
- {{- end }}
+ {{- with .Values.loki.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
+ {{- with .Values.ingester.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
spec:
type: ClusterIP
clusterIP: None
diff --git a/production/helm/loki/templates/ingester/service-ingester-zone-a-headless.yaml b/production/helm/loki/templates/ingester/service-ingester-zone-a-headless.yaml
index 478ea8c89eff8..03add3b286fc7 100644
--- a/production/helm/loki/templates/ingester/service-ingester-zone-a-headless.yaml
+++ b/production/helm/loki/templates/ingester/service-ingester-zone-a-headless.yaml
@@ -10,10 +10,13 @@ metadata:
{{- with .Values.ingester.serviceLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
- {{- with .Values.loki.serviceAnnotations }}
annotations:
- {{- toYaml . | nindent 4 }}
- {{- end }}
+ {{- with .Values.loki.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
+ {{- with .Values.ingester.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
spec:
clusterIP: None
ports:
diff --git a/production/helm/loki/templates/ingester/service-ingester-zone-b-headless.yaml b/production/helm/loki/templates/ingester/service-ingester-zone-b-headless.yaml
index c19ed4cb1f654..607221922a661 100644
--- a/production/helm/loki/templates/ingester/service-ingester-zone-b-headless.yaml
+++ b/production/helm/loki/templates/ingester/service-ingester-zone-b-headless.yaml
@@ -10,10 +10,13 @@ metadata:
{{- with .Values.ingester.serviceLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
- {{- with .Values.loki.serviceAnnotations }}
annotations:
- {{- toYaml . | nindent 4 }}
- {{- end }}
+ {{- with .Values.loki.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
+ {{- with .Values.ingester.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
spec:
clusterIP: None
ports:
diff --git a/production/helm/loki/templates/ingester/service-ingester-zone-c-headless.yaml b/production/helm/loki/templates/ingester/service-ingester-zone-c-headless.yaml
index 2757fcef94002..554144746ae02 100644
--- a/production/helm/loki/templates/ingester/service-ingester-zone-c-headless.yaml
+++ b/production/helm/loki/templates/ingester/service-ingester-zone-c-headless.yaml
@@ -10,10 +10,13 @@ metadata:
{{- with .Values.ingester.serviceLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
- {{- with .Values.loki.serviceAnnotations }}
annotations:
- {{- toYaml . | nindent 4 }}
- {{- end }}
+ {{- with .Values.loki.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
+ {{- with .Values.ingester.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
spec:
clusterIP: None
ports:
diff --git a/production/helm/loki/templates/ingester/service-ingester.yaml b/production/helm/loki/templates/ingester/service-ingester.yaml
index d762cbf65d95f..94d6f835332b7 100644
--- a/production/helm/loki/templates/ingester/service-ingester.yaml
+++ b/production/helm/loki/templates/ingester/service-ingester.yaml
@@ -10,10 +10,13 @@ metadata:
{{- with .Values.ingester.serviceLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
- {{- with .Values.loki.serviceAnnotations }}
annotations:
- {{- toYaml . | nindent 4 }}
- {{- end }}
+ {{- with .Values.loki.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
+ {{- with .Values.ingester.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
spec:
type: ClusterIP
ports:
diff --git a/production/helm/loki/templates/querier/service-querier.yaml b/production/helm/loki/templates/querier/service-querier.yaml
index ca5a23bbffb26..15c9c6a06c98c 100644
--- a/production/helm/loki/templates/querier/service-querier.yaml
+++ b/production/helm/loki/templates/querier/service-querier.yaml
@@ -10,10 +10,13 @@ metadata:
{{- with .Values.querier.serviceLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
- {{- with .Values.loki.serviceAnnotations }}
annotations:
- {{- toYaml . | nindent 4 }}
- {{- end }}
+ {{- with .Values.loki.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
+ {{- with .Values.querier.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
spec:
type: ClusterIP
ports:
diff --git a/production/helm/loki/templates/query-frontend/service-query-frontend-headless.yaml b/production/helm/loki/templates/query-frontend/service-query-frontend-headless.yaml
index b168ce6ce9520..8da9054155972 100644
--- a/production/helm/loki/templates/query-frontend/service-query-frontend-headless.yaml
+++ b/production/helm/loki/templates/query-frontend/service-query-frontend-headless.yaml
@@ -11,10 +11,13 @@ metadata:
{{- toYaml . | nindent 4 }}
{{- end }}
prometheus.io/service-monitor: "false"
- {{- with .Values.loki.serviceAnnotations }}
annotations:
- {{- toYaml . | nindent 4 }}
- {{- end }}
+ {{- with .Values.loki.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
+ {{- with .Values.queryFrontend.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
spec:
clusterIP: None
type: ClusterIP
diff --git a/production/helm/loki/templates/query-frontend/service-query-frontend.yaml b/production/helm/loki/templates/query-frontend/service-query-frontend.yaml
index b017c5d54aaf2..a2396950d94de 100644
--- a/production/helm/loki/templates/query-frontend/service-query-frontend.yaml
+++ b/production/helm/loki/templates/query-frontend/service-query-frontend.yaml
@@ -10,10 +10,13 @@ metadata:
{{- with .Values.queryFrontend.serviceLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
- {{- with .Values.loki.serviceAnnotations }}
annotations:
- {{- toYaml . | nindent 4 }}
- {{- end }}
+ {{- with .Values.loki.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
+ {{- with .Values.queryFrontend.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
spec:
type: ClusterIP
publishNotReadyAddresses: true
diff --git a/production/helm/loki/templates/query-scheduler/service-query-scheduler.yaml b/production/helm/loki/templates/query-scheduler/service-query-scheduler.yaml
index 2b3f1b2300609..746c7bdfdfb27 100644
--- a/production/helm/loki/templates/query-scheduler/service-query-scheduler.yaml
+++ b/production/helm/loki/templates/query-scheduler/service-query-scheduler.yaml
@@ -10,10 +10,13 @@ metadata:
{{- with .Values.queryScheduler.serviceLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
- {{- with .Values.loki.serviceAnnotations }}
annotations:
- {{- toYaml . | nindent 4 }}
- {{- end }}
+ {{- with .Values.loki.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
+ {{- with .Values.queryScheduler.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
spec:
type: ClusterIP
clusterIP: None
diff --git a/production/helm/loki/templates/ruler/service-ruler.yaml b/production/helm/loki/templates/ruler/service-ruler.yaml
index 1a1f0f4d2e91a..4d58ec85b42ad 100644
--- a/production/helm/loki/templates/ruler/service-ruler.yaml
+++ b/production/helm/loki/templates/ruler/service-ruler.yaml
@@ -9,10 +9,13 @@ metadata:
{{- with .Values.ruler.serviceLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
- {{- with .Values.loki.serviceAnnotations }}
annotations:
- {{- toYaml . | nindent 4 }}
- {{- end }}
+ {{- with .Values.loki.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
+ {{- with .Values.ruler.serviceAnnotations }}
+ {{- toYaml . | nindent 4}}
+ {{- end }}
spec:
type: ClusterIP
clusterIP: None
diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml
index 0f0e4e2afd18c..6c72cc6b38914 100644
--- a/production/helm/loki/values.yaml
+++ b/production/helm/loki/values.yaml
@@ -1733,6 +1733,8 @@ ingester:
# -- The name of the PriorityClass for ingester pods
# -- Labels for ingester service
serviceLabels: {}
+ # -- Annotations for ingester service
+ serviceAnnotations: {}
# -- Additional CLI args for the ingester
extraArgs: []
# -- Environment variables to add to the ingester pods
@@ -1908,6 +1910,8 @@ distributor:
podAnnotations: {}
# -- Labels for distributor service
serviceLabels: {}
+ # -- Annotations for distributor service
+ serviceAnnotations: {}
# -- Additional CLI args for the distributor
extraArgs: []
# -- Environment variables to add to the distributor pods
@@ -1998,6 +2002,8 @@ querier:
podAnnotations: {}
# -- Labels for querier service
serviceLabels: {}
+ # -- Annotations for querier service
+ serviceAnnotations: {}
# -- Additional CLI args for the querier
extraArgs: []
# -- Environment variables to add to the querier pods
@@ -2114,6 +2120,8 @@ queryFrontend:
podAnnotations: {}
# -- Labels for query-frontend service
serviceLabels: {}
+ # -- Annotations for query-frontend service
+ serviceAnnotations: {}
# -- Additional CLI args for the query-frontend
extraArgs: []
# -- Environment variables to add to the query-frontend pods
@@ -2175,6 +2183,8 @@ queryScheduler:
podAnnotations: {}
# -- Labels for query-scheduler service
serviceLabels: {}
+ # -- Annotations for query-scheduler service
+ serviceAnnotations: {}
# -- Additional CLI args for the query-scheduler
extraArgs: []
# -- Environment variables to add to the query-scheduler pods
@@ -2235,6 +2245,8 @@ indexGateway:
podAnnotations: {}
# -- Labels for index-gateway service
serviceLabels: {}
+ # -- Annotations for index-gateway service
+ serviceAnnotations: {}
# -- Additional CLI args for the index-gateway
extraArgs: []
# -- Environment variables to add to the index-gateway pods
@@ -2325,6 +2337,8 @@ compactor:
topologyKey: kubernetes.io/hostname
# -- Labels for compactor service
serviceLabels: {}
+ # -- Annotations for compactor service
+ serviceAnnotations: {}
# -- Additional CLI args for the compactor
extraArgs: []
# -- Environment variables to add to the compactor pods
@@ -2431,6 +2445,8 @@ bloomGateway:
topologyKey: kubernetes.io/hostname
# -- Labels for bloom-gateway service
serviceLabels: {}
+ # -- Annotations for bloom-gateway service
+ serviceAnnotations: {}
# -- Additional CLI args for the bloom-gateway
extraArgs: []
# -- Environment variables to add to the bloom-gateway pods
@@ -2528,6 +2544,8 @@ bloomPlanner:
topologyKey: kubernetes.io/hostname
# -- Labels for bloom-planner service
serviceLabels: {}
+ # -- Annotations for bloom-planner service
+ serviceAnnotations: {}
# -- Additional CLI args for the bloom-planner
extraArgs: []
# -- Environment variables to add to the bloom-planner pods
@@ -2643,6 +2661,8 @@ bloomBuilder:
podAnnotations: {}
# -- Labels for bloom-builder service
serviceLabels: {}
+ # -- Annotations for bloom-builder service
+ serviceAnnotations: {}
# -- Additional CLI args for the bloom-builder
extraArgs: []
# -- Environment variables to add to the bloom-builder pods
@@ -2713,6 +2733,8 @@ patternIngester:
topologyKey: kubernetes.io/hostname
# -- Labels for pattern ingester service
serviceLabels: {}
+ # -- Annotations for pattern ingester service
+ serviceAnnotations: {}
# -- Additional CLI args for the pattern ingester
extraArgs: []
# -- Environment variables to add to the pattern ingester pods
@@ -2812,6 +2834,8 @@ ruler:
podAnnotations: {}
# -- Labels for ruler service
serviceLabels: {}
+ # -- Annotations for ruler service
+ serviceAnnotations: {}
# -- Additional CLI args for the ruler
extraArgs: []
# -- Environment variables to add to the ruler pods
| feat | :sparkles: add additional service annotations for components in distributed mode (#14131) |
| 99d9f1cffd8f3ec68d4e02ec1afe7bdb3cd2421e | 2025-01-29 14:04:01 | honganan | fix: Export `exportTSInSecs` field in TSDB identifier to make sure correct gap calculation (#13116) | false |
diff --git a/pkg/storage/stores/shipper/indexshipper/tsdb/identifier.go b/pkg/storage/stores/shipper/indexshipper/tsdb/identifier.go
index eab26fe643d54..a758478b93da1 100644
--- a/pkg/storage/stores/shipper/indexshipper/tsdb/identifier.go
+++ b/pkg/storage/stores/shipper/indexshipper/tsdb/identifier.go
@@ -64,11 +64,11 @@ func (p prefixedIdentifier) Name() string {
// Identifier has all the information needed to resolve a TSDB index
// Notably this abstracts away OS path separators, etc.
type SingleTenantTSDBIdentifier struct {
- // exportTSInSecs tells whether creation timestamp should be exported in unix seconds instead of nanoseconds.
+ // ExportTSInSecs tells whether creation timestamp should be exported in unix seconds instead of nanoseconds.
// timestamp in filename could be a unix second or a unix nanosecond so
// helps us to be able to reproduce filename back from parsed identifier.
// Should be true ideally for older files with creation timestamp in seconds.
- exportTSInSecs bool
+ ExportTSInSecs bool
TS time.Time
From, Through model.Time
Checksum uint32
@@ -83,7 +83,7 @@ func (i SingleTenantTSDBIdentifier) Hash(h hash.Hash32) (err error) {
// str builds filename with format <file-creation-ts> + `-` + `compactor` + `-` + <oldest-chunk-start-ts> + `-` + <latest-chunk-end-ts> `-` + <index-checksum>
func (i SingleTenantTSDBIdentifier) str() string {
ts := int64(0)
- if i.exportTSInSecs {
+ if i.ExportTSInSecs {
ts = i.TS.Unix()
} else {
ts = i.TS.UnixNano()
@@ -151,7 +151,7 @@ func ParseSingleTenantTSDBPath(p string) (id SingleTenantTSDBIdentifier, ok bool
parsedTS = time.Unix(0, ts)
}
return SingleTenantTSDBIdentifier{
- exportTSInSecs: len(elems[0]) <= 10,
+ ExportTSInSecs: len(elems[0]) <= 10,
TS: parsedTS,
From: model.Time(from),
Through: model.Time(through),
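
The parser above keys off the width of the leading filename field: ten digits or fewer is read as unix seconds, anything longer as nanoseconds, and ExportTSInSecs records which form was seen so str() can reproduce the original filename byte for byte. A standalone sketch of that heuristic (hypothetical helper names, not the package's API):

package main

import (
	"fmt"
	"strconv"
	"time"
)

// parseCreationTS applies the same rule as ParseSingleTenantTSDBPath:
// a field of at most 10 digits is a unix-seconds timestamp, anything
// longer is unix nanoseconds.
func parseCreationTS(field string) (ts time.Time, inSecs bool, err error) {
	n, err := strconv.ParseInt(field, 10, 64)
	if err != nil {
		return time.Time{}, false, err
	}
	if len(field) <= 10 { // e.g. "1" or "1712534400"
		return time.Unix(n, 0), true, nil
	}
	return time.Unix(0, n), false, nil
}

// formatCreationTS emits the timestamp in whichever unit it was parsed
// from, so the reconstructed filename matches the original.
func formatCreationTS(ts time.Time, inSecs bool) string {
	if inSecs {
		return strconv.FormatInt(ts.Unix(), 10)
	}
	return strconv.FormatInt(ts.UnixNano(), 10)
}

func main() {
	for _, f := range []string{"1", "1712534400000000000"} {
		ts, secs, _ := parseCreationTS(f)
		fmt.Printf("%s -> %v (seconds=%v) -> %s\n", f, ts.UTC(), secs, formatCreationTS(ts, secs))
	}
}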
diff --git a/pkg/storage/stores/shipper/indexshipper/tsdb/identifier_test.go b/pkg/storage/stores/shipper/indexshipper/tsdb/identifier_test.go
index 41e202be5e467..cdfcffa2e8ef7 100644
--- a/pkg/storage/stores/shipper/indexshipper/tsdb/identifier_test.go
+++ b/pkg/storage/stores/shipper/indexshipper/tsdb/identifier_test.go
@@ -1,6 +1,7 @@
package tsdb
import (
+ "encoding/json"
"fmt"
"math"
"testing"
@@ -20,7 +21,7 @@ func TestParseSingleTenantTSDBPath(t *testing.T) {
desc: "simple_works",
input: "1-compactor-1-10-ff.tsdb",
id: SingleTenantTSDBIdentifier{
- exportTSInSecs: true,
+ ExportTSInSecs: true,
TS: time.Unix(1, 0),
From: 1,
Through: 10,
@@ -32,7 +33,7 @@ func TestParseSingleTenantTSDBPath(t *testing.T) {
desc: "simple_works_with_nanosecond",
input: "1712534400000000000-compactor-1-10-ff.tsdb",
id: SingleTenantTSDBIdentifier{
- exportTSInSecs: false,
+ ExportTSInSecs: false,
TS: time.Unix(0, 1712534400000000000),
From: 1,
Through: 10,
@@ -44,7 +45,7 @@ func TestParseSingleTenantTSDBPath(t *testing.T) {
desc: "uint32_max_checksum_works",
input: fmt.Sprintf("1-compactor-1-10-%x.tsdb", math.MaxUint32),
id: SingleTenantTSDBIdentifier{
- exportTSInSecs: true,
+ ExportTSInSecs: true,
TS: time.Unix(1, 0),
From: 1,
Through: 10,
@@ -78,3 +79,29 @@ func TestParseSingleTenantTSDBPath(t *testing.T) {
})
}
}
+
+func TestSingleTenantTSDBIdentifierSerialization(t *testing.T) {
+ for _, tc := range []struct {
+ desc string
+ input SingleTenantTSDBIdentifier
+ }{
+ {
+ desc: "simple_works",
+ input: SingleTenantTSDBIdentifier{ExportTSInSecs: true, TS: time.Unix(1, 0).UTC(), From: 1, Through: 10, Checksum: 255},
+ },
+ {
+ desc: "simple_works_with_nanosecond",
+ input: SingleTenantTSDBIdentifier{ExportTSInSecs: false, TS: time.Unix(0, 1712534400000000000).UTC(), From: 1, Through: 10, Checksum: 255},
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ b, err := json.Marshal(tc.input)
+ require.NoError(t, err)
+
+ var id SingleTenantTSDBIdentifier
+ require.NoError(t, json.Unmarshal(b, &id))
+ require.Equal(t, tc.input.Name(), id.Name())
+ require.Equal(t, tc.input, id)
+ })
+ }
+}
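
The new round-trip test hints at why the field had to be exported: encoding/json silently skips unexported struct fields, so an identifier that crossed a process boundary as JSON came back with the flag zeroed and was re-rendered with the wrong timestamp unit, throwing off gap calculation. A minimal demonstration of that Go behavior (toy structs, not the real identifier):

package main

import (
	"encoding/json"
	"fmt"
)

// before mimics the old identifier: the lowercase field is invisible
// to encoding/json and is dropped on Marshal.
type before struct {
	exportTSInSecs bool
	TS             int64
}

// after mimics the fixed identifier: the exported field survives
// a Marshal/Unmarshal round trip.
type after struct {
	ExportTSInSecs bool
	TS             int64
}

func main() {
	b, _ := json.Marshal(before{exportTSInSecs: true, TS: 1})
	fmt.Println(string(b)) // {"TS":1}: the flag is gone

	a, _ := json.Marshal(after{ExportTSInSecs: true, TS: 1})
	fmt.Println(string(a)) // {"ExportTSInSecs":true,"TS":1}
}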
| fix | Export `exportTSInSecs` field in TSDB identifier to make sure correct gap calculation (#13116) |
| bc7f2f5adb3891f67a8c057a9b15845213cce769 | 2022-10-07 18:21:16 | Gerard Vanloo | operator: Add TLS profile support for Loki server and client HTTP and GRPC TLS options (#7322) | false |
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index ca4d894820c91..5c15b483b94b5 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,5 +1,6 @@
## Main
+- [7322](https://github.com/grafana/loki/pull/7322) **Red-GV**: Configuring server and client HTTP and GRPC TLS options
- [7272](https://github.com/grafana/loki/pull/7272) **aminesnow**: Use cluster monitoring alertmanager by default on openshift clusters
- [7295](https://github.com/grafana/loki/pull/7295) **xperimental**: Add extended-validation for rules on OpenShift
- [6951](https://github.com/grafana/loki/pull/6951) **Red-GV**: Adding operational Lokistack alerts
diff --git a/operator/apis/config/v1/projectconfig_types.go b/operator/apis/config/v1/projectconfig_types.go
index 194916493243c..8b553b6f20bbc 100644
--- a/operator/apis/config/v1/projectconfig_types.go
+++ b/operator/apis/config/v1/projectconfig_types.go
@@ -21,6 +21,10 @@ type OpenShiftFeatureGates struct {
// ExtendedRuleValidation enables extended validation of AlertingRule and RecordingRule
// to enforce tenancy in an OpenShift context.
ExtendedRuleValidation bool `json:"ruleExtendedValidation,omitempty"`
+
+ // ClusterTLSPolicy enables usage of TLS policies set in the API Server.
+ // More details: https://docs.openshift.com/container-platform/4.11/security/tls-security-profiles.html
+ ClusterTLSPolicy bool `json:"clusterTLSPolicy,omitempty"`
}
// FeatureGates is the supported set of all operator feature gates.
@@ -78,7 +82,8 @@ type FeatureGates struct {
// OpenShift contains a set of feature gates supported only on OpenShift.
OpenShift OpenShiftFeatureGates `json:"openshift,omitempty"`
- // TLSProfile allows choosing a TLS security profile.
+ // TLSProfile allows choosing a TLS security profile. Enforced
+ // when using HTTPEncryption or GRPCEncryption.
TLSProfile string `json:"tlsProfile,omitempty"`
}
@@ -98,16 +103,6 @@ const (
TLSProfileModernType TLSProfileType = "Modern"
)
-// TLSProfileSpec is the desired behavior of a TLSProfileType.
-type TLSProfileSpec struct {
- // ciphers is used to specify the cipher algorithms that are negotiated
- // during the TLS handshake.
- Ciphers []string
- // minTLSVersion is used to specify the minimal version of the TLS protocol
- // that is negotiated during the TLS handshake.
- MinTLSVersion string
-}
-
//+kubebuilder:object:root=true
// ProjectConfig is the Schema for the projectconfigs API
diff --git a/operator/apis/config/v1/zz_generated.deepcopy.go b/operator/apis/config/v1/zz_generated.deepcopy.go
index 316a973a93d25..3ff10850cd1ad 100644
--- a/operator/apis/config/v1/zz_generated.deepcopy.go
+++ b/operator/apis/config/v1/zz_generated.deepcopy.go
@@ -65,23 +65,3 @@ func (in *ProjectConfig) DeepCopyObject() runtime.Object {
}
return nil
}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TLSProfileSpec) DeepCopyInto(out *TLSProfileSpec) {
- *out = *in
- if in.Ciphers != nil {
- in, out := &in.Ciphers, &out.Ciphers
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSProfileSpec.
-func (in *TLSProfileSpec) DeepCopy() *TLSProfileSpec {
- if in == nil {
- return nil
- }
- out := new(TLSProfileSpec)
- in.DeepCopyInto(out)
- return out
-}
diff --git a/operator/bundle/manifests/loki-operator-manager-config_v1_configmap.yaml b/operator/bundle/manifests/loki-operator-manager-config_v1_configmap.yaml
index 96699b3327f32..7b03313b8e93f 100644
--- a/operator/bundle/manifests/loki-operator-manager-config_v1_configmap.yaml
+++ b/operator/bundle/manifests/loki-operator-manager-config_v1_configmap.yaml
@@ -46,6 +46,7 @@ data:
servingCertsService: true
gatewayRoute: true
ruleExtendedValidation: true
+ clusterTLSPolicy: true
kind: ConfigMap
metadata:
labels:
diff --git a/operator/cmd/loki-broker/main.go b/operator/cmd/loki-broker/main.go
index c4c7722be9bc3..b282e33866c91 100644
--- a/operator/cmd/loki-broker/main.go
+++ b/operator/cmd/loki-broker/main.go
@@ -7,13 +7,14 @@ import (
"path"
"strings"
- "github.com/ViaQ/logerr/v2/log"
- "github.com/go-logr/logr"
configv1 "github.com/grafana/loki/operator/apis/config/v1"
- projectconfigv1 "github.com/grafana/loki/operator/apis/config/v1"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests"
"github.com/grafana/loki/operator/internal/manifests/storage"
+
+ "github.com/ViaQ/logerr/v2/log"
+ "github.com/go-logr/logr"
+ openshiftv1 "github.com/openshift/api/config/v1"
"sigs.k8s.io/yaml"
)
@@ -129,22 +130,21 @@ func main() {
}
if cfg.featureFlags.TLSProfile != "" &&
- cfg.featureFlags.TLSProfile != string(projectconfigv1.TLSProfileOldType) &&
- cfg.featureFlags.TLSProfile != string(projectconfigv1.TLSProfileIntermediateType) &&
- cfg.featureFlags.TLSProfile != string(projectconfigv1.TLSProfileModernType) {
+ cfg.featureFlags.TLSProfile != string(configv1.TLSProfileOldType) &&
+ cfg.featureFlags.TLSProfile != string(configv1.TLSProfileIntermediateType) &&
+ cfg.featureFlags.TLSProfile != string(configv1.TLSProfileModernType) {
logger.Error(err, "failed to parse TLS profile. Allowed values: 'Old', 'Intermediate', 'Modern'", "value", cfg.featureFlags.TLSProfile)
os.Exit(1)
}
// Convert config to manifest.Options
opts := manifests.Options{
- Name: cfg.Name,
- Namespace: cfg.Namespace,
- Image: cfg.Image,
- Stack: ls.Spec,
- Gates: cfg.featureFlags,
- ObjectStorage: cfg.objectStorage,
- TLSProfileType: projectconfigv1.TLSProfileType(cfg.featureFlags.TLSProfile),
+ Name: cfg.Name,
+ Namespace: cfg.Namespace,
+ Image: cfg.Image,
+ Stack: ls.Spec,
+ Gates: cfg.featureFlags,
+ ObjectStorage: cfg.objectStorage,
}
if optErr := manifests.ApplyDefaultSettings(&opts); optErr != nil {
@@ -152,6 +152,18 @@ func main() {
os.Exit(1)
}
+ var tlsSecurityProfile *openshiftv1.TLSSecurityProfile = nil
+ if cfg.featureFlags.TLSProfile != "" {
+ tlsSecurityProfile = &openshiftv1.TLSSecurityProfile{
+ Type: openshiftv1.TLSProfileType(cfg.featureFlags.TLSProfile),
+ }
+ }
+
+ if optErr := manifests.ApplyTLSSettings(&opts, tlsSecurityProfile); optErr != nil {
+ logger.Error(optErr, "failed to conform options to tls profile settings")
+ os.Exit(1)
+ }
+
objects, err := manifests.BuildAll(opts)
if err != nil {
logger.Error(err, "failed to build manifests")
diff --git a/operator/config/overlays/openshift/controller_manager_config.yaml b/operator/config/overlays/openshift/controller_manager_config.yaml
index 12c0760257ec8..c69ada2e8edae 100644
--- a/operator/config/overlays/openshift/controller_manager_config.yaml
+++ b/operator/config/overlays/openshift/controller_manager_config.yaml
@@ -43,3 +43,4 @@ featureGates:
servingCertsService: true
gatewayRoute: true
ruleExtendedValidation: true
+ clusterTLSPolicy: true
diff --git a/operator/controllers/loki/lokistack_controller.go b/operator/controllers/loki/lokistack_controller.go
index 9acd6c8b08250..d1afaa31a7901 100644
--- a/operator/controllers/loki/lokistack_controller.go
+++ b/operator/controllers/loki/lokistack_controller.go
@@ -15,6 +15,7 @@ import (
"github.com/grafana/loki/operator/internal/status"
routev1 "github.com/openshift/api/route/v1"
+ openshiftconfigv1 "github.com/openshift/api/config/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
@@ -174,5 +175,9 @@ func (r *LokiStackReconciler) buildController(bld k8s.Builder) error {
bld = bld.Owns(&networkingv1.Ingress{}, updateOrDeleteOnlyPred)
}
+ if r.FeatureGates.OpenShift.ClusterTLSPolicy {
+ bld = bld.Owns(&openshiftconfigv1.APIServer{}, updateOrDeleteOnlyPred)
+ }
+
return bld.Complete(r)
}
diff --git a/operator/controllers/loki/lokistack_controller_test.go b/operator/controllers/loki/lokistack_controller_test.go
index b098f262955ba..91f2c8bb18ee2 100644
--- a/operator/controllers/loki/lokistack_controller_test.go
+++ b/operator/controllers/loki/lokistack_controller_test.go
@@ -12,6 +12,7 @@ import (
"github.com/ViaQ/logerr/v2/log"
"github.com/go-logr/logr"
+ openshiftconfigv1 "github.com/openshift/api/config/v1"
routev1 "github.com/openshift/api/route/v1"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
@@ -74,63 +75,74 @@ func TestLokiStackController_RegisterOwnedResourcesForUpdateOrDeleteOnly(t *test
// Require owned resources
type test struct {
- obj client.Object
- index int
- featureGates configv1.FeatureGates
- pred builder.OwnsOption
+ obj client.Object
+ index int
+ ownCallsCount int
+ featureGates configv1.FeatureGates
+ pred builder.OwnsOption
}
table := []test{
{
- obj: &corev1.ConfigMap{},
- index: 0,
- pred: updateOrDeleteOnlyPred,
+ obj: &corev1.ConfigMap{},
+ index: 0,
+ ownCallsCount: 10,
+ pred: updateOrDeleteOnlyPred,
},
{
- obj: &corev1.ServiceAccount{},
- index: 1,
- pred: updateOrDeleteOnlyPred,
+ obj: &corev1.ServiceAccount{},
+ index: 1,
+ ownCallsCount: 10,
+ pred: updateOrDeleteOnlyPred,
},
{
- obj: &corev1.Service{},
- index: 2,
- pred: updateOrDeleteOnlyPred,
+ obj: &corev1.Service{},
+ index: 2,
+ ownCallsCount: 10,
+ pred: updateOrDeleteOnlyPred,
},
{
- obj: &appsv1.Deployment{},
- index: 3,
- pred: updateOrDeleteOnlyPred,
+ obj: &appsv1.Deployment{},
+ index: 3,
+ ownCallsCount: 10,
+ pred: updateOrDeleteOnlyPred,
},
{
- obj: &appsv1.StatefulSet{},
- index: 4,
- pred: updateOrDeleteOnlyPred,
+ obj: &appsv1.StatefulSet{},
+ index: 4,
+ ownCallsCount: 10,
+ pred: updateOrDeleteOnlyPred,
},
{
- obj: &rbacv1.ClusterRole{},
- index: 5,
- pred: updateOrDeleteOnlyPred,
+ obj: &rbacv1.ClusterRole{},
+ index: 5,
+ ownCallsCount: 10,
+ pred: updateOrDeleteOnlyPred,
},
{
- obj: &rbacv1.ClusterRoleBinding{},
- index: 6,
- pred: updateOrDeleteOnlyPred,
+ obj: &rbacv1.ClusterRoleBinding{},
+ index: 6,
+ ownCallsCount: 10,
+ pred: updateOrDeleteOnlyPred,
},
{
- obj: &rbacv1.Role{},
- index: 7,
- pred: updateOrDeleteOnlyPred,
+ obj: &rbacv1.Role{},
+ index: 7,
+ ownCallsCount: 10,
+ pred: updateOrDeleteOnlyPred,
},
{
- obj: &rbacv1.RoleBinding{},
- index: 8,
- pred: updateOrDeleteOnlyPred,
+ obj: &rbacv1.RoleBinding{},
+ index: 8,
+ ownCallsCount: 10,
+ pred: updateOrDeleteOnlyPred,
},
// The next two share the same index, because the
// controller either reconciles an Ingress (i.e. Kubernetes)
// or a Route (i.e. OpenShift).
{
- obj: &networkingv1.Ingress{},
- index: 9,
+ obj: &networkingv1.Ingress{},
+ index: 9,
+ ownCallsCount: 10,
featureGates: configv1.FeatureGates{
OpenShift: configv1.OpenShiftFeatureGates{
GatewayRoute: false,
@@ -139,8 +151,9 @@ func TestLokiStackController_RegisterOwnedResourcesForUpdateOrDeleteOnly(t *test
pred: updateOrDeleteOnlyPred,
},
{
- obj: &routev1.Route{},
- index: 9,
+ obj: &routev1.Route{},
+ index: 9,
+ ownCallsCount: 10,
featureGates: configv1.FeatureGates{
OpenShift: configv1.OpenShiftFeatureGates{
GatewayRoute: true,
@@ -148,6 +161,17 @@ func TestLokiStackController_RegisterOwnedResourcesForUpdateOrDeleteOnly(t *test
},
pred: updateOrDeleteOnlyPred,
},
+ {
+ obj: &openshiftconfigv1.APIServer{},
+ index: 10,
+ ownCallsCount: 11,
+ featureGates: configv1.FeatureGates{
+ OpenShift: configv1.OpenShiftFeatureGates{
+ ClusterTLSPolicy: true,
+ },
+ },
+ pred: updateOrDeleteOnlyPred,
+ },
}
for _, tst := range table {
b := &k8sfakes.FakeBuilder{}
@@ -159,7 +183,7 @@ func TestLokiStackController_RegisterOwnedResourcesForUpdateOrDeleteOnly(t *test
require.NoError(t, err)
// Require Owns-Calls for all owned resources
- require.Equal(t, 10, b.OwnsCallCount())
+ require.Equal(t, tst.ownCallsCount, b.OwnsCallCount())
// Require Owns-call options to have delete predicate only
obj, opts := b.OwnsArgsForCall(tst.index)
diff --git a/operator/internal/handlers/internal/tlsprofile/tlsprofile.go b/operator/internal/handlers/internal/tlsprofile/tlsprofile.go
index 6ba42da1c82bd..9fb1413881ef0 100644
--- a/operator/internal/handlers/internal/tlsprofile/tlsprofile.go
+++ b/operator/internal/handlers/internal/tlsprofile/tlsprofile.go
@@ -3,69 +3,37 @@ package tlsprofile
import (
"context"
- "github.com/go-logr/logr"
- projectconfigv1 "github.com/grafana/loki/operator/apis/config/v1"
+ configv1 "github.com/grafana/loki/operator/apis/config/v1"
+
+ "github.com/ViaQ/logerr/v2/kverrors"
"github.com/grafana/loki/operator/internal/external/k8s"
- openshiftv1 "github.com/openshift/api/config/v1"
- "github.com/openshift/library-go/pkg/crypto"
+ openshiftconfigv1 "github.com/openshift/api/config/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// APIServerName is the apiserver resource name used to fetch it.
const APIServerName = "cluster"
-// GetSecurityProfileInfo gets the tls profile info to apply.
-func GetSecurityProfileInfo(ctx context.Context, k k8s.Client, log logr.Logger, tlsProfileType projectconfigv1.TLSProfileType) (projectconfigv1.TLSProfileSpec, error) {
- var tlsProfile openshiftv1.TLSSecurityProfile
-
- if tlsProfileType != "" {
- tlsProfile = openshiftv1.TLSSecurityProfile{
- Type: openshiftv1.TLSProfileType(tlsProfileType),
- }
- } else {
- tlsProfile = openshiftv1.TLSSecurityProfile{
- Type: openshiftv1.TLSProfileIntermediateType,
- }
-
- var apiServer openshiftv1.APIServer
+// GetTLSSecurityProfile gets the tls profile info to apply.
+func GetTLSSecurityProfile(ctx context.Context, k k8s.Client, tlsProfileType configv1.TLSProfileType) (*openshiftconfigv1.TLSSecurityProfile, error) {
+ switch tlsProfileType {
+ case configv1.TLSProfileOldType:
+ return &openshiftconfigv1.TLSSecurityProfile{
+ Type: openshiftconfigv1.TLSProfileOldType,
+ }, nil
+ case configv1.TLSProfileIntermediateType:
+ return &openshiftconfigv1.TLSSecurityProfile{
+ Type: openshiftconfigv1.TLSProfileIntermediateType,
+ }, nil
+ case configv1.TLSProfileModernType:
+ return &openshiftconfigv1.TLSSecurityProfile{
+ Type: openshiftconfigv1.TLSProfileModernType,
+ }, nil
+ default:
+ var apiServer openshiftconfigv1.APIServer
if err := k.Get(ctx, client.ObjectKey{Name: APIServerName}, &apiServer); err != nil {
- log.Error(err, "failed to lookup apiServer. Using Intermediate profile")
- }
-
- if apiServer.Spec.TLSSecurityProfile != nil {
- tlsProfile = *apiServer.Spec.TLSSecurityProfile
+ return nil, kverrors.Wrap(err, "failed to lookup openshift apiServer")
}
+ return apiServer.Spec.TLSSecurityProfile, nil
}
-
- tlsMinVersion, ciphers := extractInfoFromTLSProfile(&tlsProfile)
- return projectconfigv1.TLSProfileSpec{
- MinTLSVersion: tlsMinVersion,
- Ciphers: ciphers,
- }, nil
-}
-
-func extractInfoFromTLSProfile(profile *openshiftv1.TLSSecurityProfile) (string, []string) {
- var profileType openshiftv1.TLSProfileType
- if profile == nil {
- profileType = openshiftv1.TLSProfileIntermediateType
- } else {
- profileType = profile.Type
- }
-
- var profileSpec *openshiftv1.TLSProfileSpec
- if profileType == openshiftv1.TLSProfileCustomType {
- if profile.Custom != nil {
- profileSpec = &profile.Custom.TLSProfileSpec
- }
- } else {
- profileSpec = openshiftv1.TLSProfiles[profileType]
- }
-
- // nothing found / custom type set but no actual custom spec
- if profileSpec == nil {
- profileSpec = openshiftv1.TLSProfiles[openshiftv1.TLSProfileIntermediateType]
- }
-
- // need to remap all Ciphers to their respective IANA names used by Go
- return string(profileSpec.MinTLSVersion), crypto.OpenSSLToIANACipherSuites(profileSpec.Ciphers)
}
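
The rewritten lookup gives explicit operator configuration precedence over the cluster-wide setting: a recognized profile type is returned directly, and only the empty or unknown case falls through to the APIServer object, which now surfaces a wrapped error instead of silently defaulting to Intermediate. A trimmed-down sketch of that precedence logic with stand-in types (not the OpenShift API):

package main

import (
	"errors"
	"fmt"
)

type tlsProfileType string

const (
	profileOld          tlsProfileType = "Old"
	profileIntermediate tlsProfileType = "Intermediate"
	profileModern       tlsProfileType = "Modern"
)

// resolveProfile prefers the operator's configured profile; clusterLookup
// stands in for the k8s GET on the APIServer object named "cluster".
func resolveProfile(configured tlsProfileType, clusterLookup func() (tlsProfileType, error)) (tlsProfileType, error) {
	switch configured {
	case profileOld, profileIntermediate, profileModern:
		return configured, nil // explicit config wins
	default:
		p, err := clusterLookup()
		if err != nil {
			return "", fmt.Errorf("failed to lookup openshift apiserver: %w", err)
		}
		return p, nil
	}
}

func main() {
	p, _ := resolveProfile("", func() (tlsProfileType, error) { return profileIntermediate, nil })
	fmt.Println(p) // Intermediate: taken from the cluster

	_, err := resolveProfile("", func() (tlsProfileType, error) { return "", errors.New("not found") })
	fmt.Println(err) // the error now propagates instead of being logged and ignored
}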
diff --git a/operator/internal/handlers/internal/tlsprofile/tlsprofile_test.go b/operator/internal/handlers/internal/tlsprofile/tlsprofile_test.go
index 2bf61a518f1ca..ac3a4f7be74ba 100644
--- a/operator/internal/handlers/internal/tlsprofile/tlsprofile_test.go
+++ b/operator/internal/handlers/internal/tlsprofile/tlsprofile_test.go
@@ -4,11 +4,11 @@ import (
"context"
"testing"
- "github.com/go-logr/logr"
- projectconfigv1 "github.com/grafana/loki/operator/apis/config/v1"
+ configv1 "github.com/grafana/loki/operator/apis/config/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/grafana/loki/operator/internal/handlers/internal/tlsprofile"
- openshiftv1 "github.com/openshift/api/config/v1"
+
+ openshiftconfigv1 "github.com/openshift/api/config/v1"
"github.com/stretchr/testify/assert"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -17,78 +17,43 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
-var (
- apiServer = openshiftv1.APIServer{
- ObjectMeta: metav1.ObjectMeta{
- Name: "cluster",
- },
- }
- ciphersOld = []string{
- "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
- "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
- "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
- "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
- "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
- "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256",
- "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
- "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
- "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
- "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
- "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
- "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
- "TLS_RSA_WITH_AES_128_GCM_SHA256",
- "TLS_RSA_WITH_AES_256_GCM_SHA384",
- "TLS_RSA_WITH_AES_128_CBC_SHA256",
- "TLS_RSA_WITH_AES_128_CBC_SHA",
- "TLS_RSA_WITH_AES_256_CBC_SHA",
- "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
- }
- ciphersIntermediate = []string{
- "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
- "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
- "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
- "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
- "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
- "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256",
- }
-)
-
-func TestGetSecurityProfileInfo(t *testing.T) {
+func TestGetTLSSecurityProfile(t *testing.T) {
type tt struct {
desc string
- profile projectconfigv1.TLSProfileType
- expected projectconfigv1.TLSProfileSpec
+ profile configv1.TLSProfileType
+ expected openshiftconfigv1.TLSSecurityProfile
}
tc := []tt{
{
desc: "Old profile",
- profile: projectconfigv1.TLSProfileOldType,
- expected: projectconfigv1.TLSProfileSpec{
- MinTLSVersion: "VersionTLS10",
- Ciphers: ciphersOld,
+ profile: configv1.TLSProfileOldType,
+ expected: openshiftconfigv1.TLSSecurityProfile{
+ Type: openshiftconfigv1.TLSProfileOldType,
},
},
{
desc: "Intermediate profile",
- profile: projectconfigv1.TLSProfileIntermediateType,
- expected: projectconfigv1.TLSProfileSpec{
- MinTLSVersion: "VersionTLS12",
- Ciphers: ciphersIntermediate,
+ profile: configv1.TLSProfileIntermediateType,
+ expected: openshiftconfigv1.TLSSecurityProfile{
+ Type: openshiftconfigv1.TLSProfileIntermediateType,
},
},
{
desc: "Modern profile",
- profile: projectconfigv1.TLSProfileModernType,
- expected: projectconfigv1.TLSProfileSpec{
- MinTLSVersion: "VersionTLS13",
- // Go lib crypto doesn't allow ciphers to be configured for TLS 1.3
- // (Read this and weep: https://github.com/golang/go/issues/29349)
- Ciphers: []string{},
+ profile: configv1.TLSProfileModernType,
+ expected: openshiftconfigv1.TLSSecurityProfile{
+ Type: openshiftconfigv1.TLSProfileModernType,
},
},
}
+ apiServer := openshiftconfigv1.APIServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "cluster",
+ },
+ }
+
sw := &k8sfakes.FakeStatusWriter{}
k := &k8sfakes.FakeClient{}
@@ -106,10 +71,68 @@ func TestGetSecurityProfileInfo(t *testing.T) {
tc := tc
t.Run(tc.desc, func(t *testing.T) {
t.Parallel()
- info, err := tlsprofile.GetSecurityProfileInfo(context.TODO(), k, logr.Logger{}, tc.profile)
+
+ profile, err := tlsprofile.GetTLSSecurityProfile(context.TODO(), k, tc.profile)
+
assert.Nil(t, err)
- assert.NotNil(t, info)
- assert.EqualValues(t, tc.expected, info)
+ assert.NotNil(t, profile)
+ assert.EqualValues(t, &tc.expected, profile)
})
}
}
+
+func TestGetTLSSecurityProfile_CustomProfile(t *testing.T) {
+ sw := &k8sfakes.FakeStatusWriter{}
+ k := &k8sfakes.FakeClient{}
+
+ tlsCustomProfile := &openshiftconfigv1.TLSSecurityProfile{
+ Type: openshiftconfigv1.TLSProfileCustomType,
+ Custom: &openshiftconfigv1.CustomTLSProfile{
+ TLSProfileSpec: openshiftconfigv1.TLSProfileSpec{
+ Ciphers: []string{"custom-cipher"},
+ MinTLSVersion: "VersionTLS12",
+ },
+ },
+ }
+
+ apiServer := openshiftconfigv1.APIServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "cluster",
+ },
+ Spec: openshiftconfigv1.APIServerSpec{
+ TLSSecurityProfile: tlsCustomProfile,
+ },
+ }
+
+ k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error {
+ if apiServer.Name == name.Name {
+ k.SetClientObject(object, &apiServer)
+ return nil
+ }
+ return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found")
+ }
+
+ k.StatusStub = func() client.StatusWriter { return sw }
+
+ profile, err := tlsprofile.GetTLSSecurityProfile(context.TODO(), k, configv1.TLSProfileType("custom"))
+
+ assert.Nil(t, err)
+ assert.NotNil(t, profile)
+ assert.EqualValues(t, tlsCustomProfile, profile)
+}
+
+func TestGetTLSSecurityProfile_APIServerNotFound(t *testing.T) {
+ sw := &k8sfakes.FakeStatusWriter{}
+ k := &k8sfakes.FakeClient{}
+
+	k.GetStub = func(_ context.Context, _ types.NamespacedName, _ client.Object) error {
+ return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found")
+ }
+
+ k.StatusStub = func() client.StatusWriter { return sw }
+
+ profile, err := tlsprofile.GetTLSSecurityProfile(context.TODO(), k, "")
+
+ assert.NotNil(t, err)
+ assert.Nil(t, profile)
+}
diff --git a/operator/internal/handlers/lokistack_create_or_update.go b/operator/internal/handlers/lokistack_create_or_update.go
index fda5cf3e65228..52df276ab01e3 100644
--- a/operator/internal/handlers/lokistack_create_or_update.go
+++ b/operator/internal/handlers/lokistack_create_or_update.go
@@ -7,7 +7,6 @@ import (
"time"
configv1 "github.com/grafana/loki/operator/apis/config/v1"
- projectconfigv1 "github.com/grafana/loki/operator/apis/config/v1"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
"github.com/grafana/loki/operator/internal/external/k8s"
@@ -175,12 +174,12 @@ func CreateOrUpdateLokiStack(
if stack.Spec.Rules != nil && stack.Spec.Rules.Enabled {
alertingRules, recordingRules, err = rules.List(ctx, k, req.Namespace, stack.Spec.Rules)
if err != nil {
- log.Error(err, "failed to lookup rules", "spec", stack.Spec.Rules)
+ ll.Error(err, "failed to lookup rules", "spec", stack.Spec.Rules)
}
rulerConfig, err = rules.GetRulerConfig(ctx, k, req)
if err != nil {
- log.Error(err, "failed to lookup ruler config", "key", req.NamespacedName)
+ ll.Error(err, "failed to lookup ruler config", "key", req.NamespacedName)
}
if rulerConfig != nil && rulerConfig.RemoteWriteSpec != nil && rulerConfig.RemoteWriteSpec.ClientSpec != nil {
@@ -235,7 +234,6 @@ func CreateOrUpdateLokiStack(
Secrets: tenantSecrets,
Configs: tenantConfigs,
},
- TLSProfileType: projectconfigv1.TLSProfileType(fg.TLSProfile),
OpenShiftOptions: manifests_openshift.Options{
BuildOpts: manifests_openshift.BuildOptions{
AlertManagerEnabled: ocpAmEnabled,
@@ -252,18 +250,27 @@ func CreateOrUpdateLokiStack(
if fg.LokiStackGateway {
if optErr := manifests.ApplyGatewayDefaultOptions(&opts); optErr != nil {
- ll.Error(optErr, "failed to apply defaults options to gateway settings ")
+ ll.Error(optErr, "failed to apply defaults options to gateway settings")
return optErr
}
}
- spec, err := tlsprofile.GetSecurityProfileInfo(ctx, k, ll, opts.TLSProfileType)
+ tlsProfileType := configv1.TLSProfileType(fg.TLSProfile)
+ // Overwrite the profile from the flags and use the profile from the apiserver instead
+ if fg.OpenShift.ClusterTLSPolicy {
+ tlsProfileType = configv1.TLSProfileType("")
+ }
+
+ tlsProfile, err := tlsprofile.GetTLSSecurityProfile(ctx, k, tlsProfileType)
if err != nil {
- ll.Error(err, "failed to get security profile info")
- return err
+ // The API server is not guaranteed to be there nor have a result.
+ ll.Error(err, "failed to get security profile. will use default tls profile.")
}
- opts.TLSProfileSpec = spec
+ if optErr := manifests.ApplyTLSSettings(&opts, tlsProfile); optErr != nil {
+ ll.Error(optErr, "failed to conform options to tls profile settings")
+ return optErr
+ }
objects, err := manifests.BuildAll(opts)
if err != nil {
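
A minimal sketch of the resolution order implemented above, assuming the configv1, tlsprofile and manifests packages from this diff: the flag-provided profile wins unless the OpenShift cluster TLS policy gate is set, in which case an empty profile type makes GetTLSSecurityProfile consult the cluster's APIServer object; a lookup failure is only logged, because ApplyTLSSettings falls back to the intermediate profile when handed nil.

	// Sketch, not the exact handler code: resolve the effective TLS profile.
	tlsProfileType := configv1.TLSProfileType(fg.TLSProfile)
	if fg.OpenShift.ClusterTLSPolicy {
		tlsProfileType = "" // defer to the cluster-wide APIServer setting
	}
	tlsProfile, err := tlsprofile.GetTLSSecurityProfile(ctx, k, tlsProfileType)
	if err != nil {
		tlsProfile = nil // logged only; ApplyTLSSettings(nil) falls back to intermediate
	}
	if optErr := manifests.ApplyTLSSettings(&opts, tlsProfile); optErr != nil {
		return optErr
	}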
diff --git a/operator/internal/manifests/build.go b/operator/internal/manifests/build.go
index 9cea54d65e1b5..f27d350d9d93c 100644
--- a/operator/internal/manifests/build.go
+++ b/operator/internal/manifests/build.go
@@ -1,11 +1,13 @@
package manifests
import (
- "github.com/ViaQ/logerr/v2/kverrors"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/internal"
+ "github.com/ViaQ/logerr/v2/kverrors"
"github.com/imdario/mergo"
+ openshiftconfigv1 "github.com/openshift/api/config/v1"
+ "github.com/openshift/library-go/pkg/crypto"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -137,3 +139,33 @@ func ApplyDefaultSettings(opts *Options) error {
return nil
}
+
+// ApplyTLSSettings manipulates the options to conform to the
+// TLS profile specifications
+func ApplyTLSSettings(opts *Options, profile *openshiftconfigv1.TLSSecurityProfile) error {
+ tlsSecurityProfile := &openshiftconfigv1.TLSSecurityProfile{
+ Type: openshiftconfigv1.TLSProfileIntermediateType,
+ }
+
+ if profile != nil {
+ tlsSecurityProfile = profile
+ }
+
+ profileSpec, ok := openshiftconfigv1.TLSProfiles[tlsSecurityProfile.Type]
+
+ if !ok {
+ return kverrors.New("unable to determine tls profile settings")
+ }
+
+ if tlsSecurityProfile.Type == openshiftconfigv1.TLSProfileCustomType && tlsSecurityProfile.Custom != nil {
+ profileSpec = &tlsSecurityProfile.Custom.TLSProfileSpec
+ }
+
+ // need to remap all ciphers to their respective IANA names used by Go
+ opts.TLSProfile = TLSProfileSpec{
+ MinTLSVersion: string(profileSpec.MinTLSVersion),
+ Ciphers: crypto.OpenSSLToIANACipherSuites(profileSpec.Ciphers),
+ }
+
+ return nil
+}
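
As a hedged usage sketch (names taken from this diff, profile values from openshift/api's built-in TLSProfiles table, assumed imports: fmt and openshiftconfigv1 "github.com/openshift/api/config/v1"), resolving the modern profile yields TLS 1.3 with an empty cipher list, matching the tests below:

func ExampleApplyTLSSettings() {
	opts := Options{}
	profile := &openshiftconfigv1.TLSSecurityProfile{Type: openshiftconfigv1.TLSProfileModernType}
	if err := ApplyTLSSettings(&opts, profile); err != nil {
		// only reachable for a profile type missing from openshiftconfigv1.TLSProfiles
		panic(err)
	}
	fmt.Println(opts.TLSProfile.MinTLSVersion)
	// Output: VersionTLS13
	// Ciphers stays empty: Go's crypto/tls does not allow configuring TLS 1.3 suites.
}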
diff --git a/operator/internal/manifests/build_test.go b/operator/internal/manifests/build_test.go
index e22338499eaf6..5d8a042446795 100644
--- a/operator/internal/manifests/build_test.go
+++ b/operator/internal/manifests/build_test.go
@@ -2,8 +2,10 @@ package manifests
import (
"fmt"
+ "strings"
"testing"
+ openshiftconfigv1 "github.com/openshift/api/config/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -11,6 +13,7 @@ import (
configv1 "github.com/grafana/loki/operator/apis/config/v1"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/internal"
+
"github.com/stretchr/testify/require"
)
@@ -83,6 +86,88 @@ func TestApplyUserOptions_AlwaysSetCompactorReplicasToOne(t *testing.T) {
}
}
+func TestApplyTLSSettings_OverrideDefaults(t *testing.T) {
+ type tt struct {
+ desc string
+ profile openshiftconfigv1.TLSSecurityProfile
+ expected TLSProfileSpec
+ }
+
+ tc := []tt{
+ {
+ desc: "Old profile",
+ profile: openshiftconfigv1.TLSSecurityProfile{
+ Type: openshiftconfigv1.TLSProfileOldType,
+ },
+ expected: TLSProfileSpec{
+ MinTLSVersion: "VersionTLS10",
+ Ciphers: []string{
+ "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
+ "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+ "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
+ "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
+ "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256",
+ "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
+ "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
+ "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
+ "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
+ "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
+ "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
+ "TLS_RSA_WITH_AES_128_GCM_SHA256",
+ "TLS_RSA_WITH_AES_256_GCM_SHA384",
+ "TLS_RSA_WITH_AES_128_CBC_SHA256",
+ "TLS_RSA_WITH_AES_128_CBC_SHA",
+ "TLS_RSA_WITH_AES_256_CBC_SHA",
+ "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
+ },
+ },
+ },
+ {
+ desc: "Intermediate profile",
+ profile: openshiftconfigv1.TLSSecurityProfile{
+ Type: openshiftconfigv1.TLSProfileIntermediateType,
+ },
+ expected: TLSProfileSpec{
+ MinTLSVersion: "VersionTLS12",
+ Ciphers: []string{
+ "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
+ "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+ "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
+ "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
+ "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256",
+ },
+ },
+ },
+ {
+ desc: "Modern profile",
+ profile: openshiftconfigv1.TLSSecurityProfile{
+ Type: openshiftconfigv1.TLSProfileModernType,
+ },
+ expected: TLSProfileSpec{
+ MinTLSVersion: "VersionTLS13",
+ // Go lib crypto doesn't allow ciphers to be configured for TLS 1.3
+ // (Read this and weep: https://github.com/golang/go/issues/29349)
+ Ciphers: []string{},
+ },
+ },
+ }
+
+ for _, tc := range tc {
+ tc := tc
+ t.Run(tc.desc, func(t *testing.T) {
+ t.Parallel()
+
+ opts := Options{}
+ err := ApplyTLSSettings(&opts, &tc.profile)
+
+ require.Nil(t, err)
+ require.EqualValues(t, tc.expected, opts.TLSProfile)
+ })
+ }
+}
+
func TestBuildAll_WithFeatureGates_ServiceMonitors(t *testing.T) {
type test struct {
desc string
@@ -242,9 +327,19 @@ func TestBuildAll_WithFeatureGates_HTTPEncryption(t *testing.T) {
HTTPEncryption: true,
},
}
+ ciphers := strings.Join([]string{
+ "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
+ "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+ "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
+ "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
+ "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256",
+ }, ",")
err := ApplyDefaultSettings(&opts)
require.NoError(t, err)
+ err = ApplyTLSSettings(&opts, nil)
+ require.NoError(t, err)
objects, buildErr := BuildAll(opts)
require.NoError(t, buildErr)
@@ -295,6 +390,8 @@ func TestBuildAll_WithFeatureGates_HTTPEncryption(t *testing.T) {
}
require.Contains(t, vms, expVolumeMount)
+ require.Contains(t, args, "-server.tls-min-version=VersionTLS12")
+ require.Contains(t, args, fmt.Sprintf("-server.tls-cipher-suites=%s", ciphers))
require.Contains(t, args, "-server.http-tls-cert-path=/var/run/tls/http/tls.crt")
require.Contains(t, args, "-server.http-tls-key-path=/var/run/tls/http/tls.key")
require.Equal(t, corev1.URISchemeHTTPS, rps)
@@ -484,6 +581,15 @@ func TestBuildAll_WithFeatureGates_GRPCEncryption(t *testing.T) {
"test-ruler": "test-ruler-grpc",
}
+ ciphers := strings.Join([]string{
+ "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
+ "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+ "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
+ "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
+ "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256",
+ }, ",")
+
for _, tst := range table {
tst := tst
t.Run(tst.desc, func(t *testing.T) {
@@ -492,6 +598,9 @@ func TestBuildAll_WithFeatureGates_GRPCEncryption(t *testing.T) {
err := ApplyDefaultSettings(&tst.BuildOptions)
require.NoError(t, err)
+ err = ApplyTLSSettings(&tst.BuildOptions, nil)
+ require.NoError(t, err)
+
objs, err := BuildAll(tst.BuildOptions)
require.NoError(t, err)
@@ -516,6 +625,8 @@ func TestBuildAll_WithFeatureGates_GRPCEncryption(t *testing.T) {
args := []string{
"-server.grpc-tls-cert-path=/var/run/tls/grpc/tls.crt",
"-server.grpc-tls-key-path=/var/run/tls/grpc/tls.key",
+ "-server.tls-min-version=VersionTLS12",
+ fmt.Sprintf("-server.tls-cipher-suites=%s", ciphers),
}
vm := corev1.VolumeMount{
diff --git a/operator/internal/manifests/compactor.go b/operator/internal/manifests/compactor.go
index 6b5a9d178322d..11874f36baede 100644
--- a/operator/internal/manifests/compactor.go
+++ b/operator/internal/manifests/compactor.go
@@ -6,6 +6,7 @@ import (
"github.com/grafana/loki/operator/internal/manifests/internal/config"
"github.com/grafana/loki/operator/internal/manifests/storage"
+
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
@@ -13,7 +14,6 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/pointer"
-
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -21,7 +21,7 @@ import (
func BuildCompactor(opts Options) ([]client.Object, error) {
statefulSet := NewCompactorStatefulSet(opts)
if opts.Gates.HTTPEncryption {
- if err := configureCompactorHTTPServicePKI(statefulSet, opts.Name); err != nil {
+ if err := configureCompactorHTTPServicePKI(statefulSet, opts); err != nil {
return nil, err
}
}
@@ -31,7 +31,7 @@ func BuildCompactor(opts Options) ([]client.Object, error) {
}
if opts.Gates.GRPCEncryption {
- if err := configureCompactorGRPCServicePKI(statefulSet, opts.Name); err != nil {
+ if err := configureCompactorGRPCServicePKI(statefulSet, opts); err != nil {
return nil, err
}
}
@@ -108,6 +108,13 @@ func NewCompactorStatefulSet(opts Options) *appsv1.StatefulSet {
SecurityContext: podSecurityContext(opts.Gates.RuntimeSeccompProfile),
}
+ if opts.Gates.HTTPEncryption || opts.Gates.GRPCEncryption {
+ podSpec.Containers[0].Args = append(podSpec.Containers[0].Args,
+ fmt.Sprintf("-server.tls-cipher-suites=%s", opts.TLSCipherSuites()),
+ fmt.Sprintf("-server.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
+ )
+ }
+
if opts.Stack.Template != nil && opts.Stack.Template.Compactor != nil {
podSpec.Tolerations = opts.Stack.Template.Compactor.Tolerations
podSpec.NodeSelector = opts.Stack.Template.Compactor.NodeSelector
@@ -223,12 +230,12 @@ func NewCompactorHTTPService(opts Options) *corev1.Service {
}
}
-func configureCompactorHTTPServicePKI(statefulSet *appsv1.StatefulSet, stackName string) error {
- serviceName := serviceNameCompactorHTTP(stackName)
+func configureCompactorHTTPServicePKI(statefulSet *appsv1.StatefulSet, opts Options) error {
+ serviceName := serviceNameCompactorHTTP(opts.Name)
return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName)
}
-func configureCompactorGRPCServicePKI(sts *appsv1.StatefulSet, stackName string) error {
- serviceName := serviceNameCompactorGRPC(stackName)
+func configureCompactorGRPCServicePKI(sts *appsv1.StatefulSet, opts Options) error {
+ serviceName := serviceNameCompactorGRPC(opts.Name)
return configureGRPCServicePKI(&sts.Spec.Template.Spec, serviceName)
}
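
The two server TLS flags appended above recur nearly verbatim in the distributor, ingester, index gateway, querier, query frontend and ruler changes below. A hypothetical helper (not part of this change) that would capture the repeated pattern:

// appendServerTLSArgs is a sketch of consolidating the per-component duplication.
func appendServerTLSArgs(c *corev1.Container, opts Options) {
	c.Args = append(c.Args,
		fmt.Sprintf("-server.tls-cipher-suites=%s", opts.TLSCipherSuites()),
		fmt.Sprintf("-server.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
	)
}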
diff --git a/operator/internal/manifests/distributor.go b/operator/internal/manifests/distributor.go
index 46ffb7b4332b9..174bb6dde33bb 100644
--- a/operator/internal/manifests/distributor.go
+++ b/operator/internal/manifests/distributor.go
@@ -4,8 +4,9 @@ import (
"fmt"
"path"
- "github.com/ViaQ/logerr/v2/kverrors"
"github.com/grafana/loki/operator/internal/manifests/internal/config"
+
+ "github.com/ViaQ/logerr/v2/kverrors"
"github.com/imdario/mergo"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -20,13 +21,13 @@ import (
func BuildDistributor(opts Options) ([]client.Object, error) {
deployment := NewDistributorDeployment(opts)
if opts.Gates.HTTPEncryption {
- if err := configureDistributorHTTPServicePKI(deployment, opts.Name); err != nil {
+ if err := configureDistributorHTTPServicePKI(deployment, opts); err != nil {
return nil, err
}
}
if opts.Gates.GRPCEncryption {
- if err := configureDistributorGRPCServicePKI(deployment, opts.Name, opts.Namespace); err != nil {
+ if err := configureDistributorGRPCServicePKI(deployment, opts); err != nil {
return nil, err
}
}
@@ -103,6 +104,13 @@ func NewDistributorDeployment(opts Options) *appsv1.Deployment {
SecurityContext: podSecurityContext(opts.Gates.RuntimeSeccompProfile),
}
+ if opts.Gates.HTTPEncryption || opts.Gates.GRPCEncryption {
+ podSpec.Containers[0].Args = append(podSpec.Containers[0].Args,
+ fmt.Sprintf("-server.tls-cipher-suites=%s", opts.TLSCipherSuites()),
+ fmt.Sprintf("-server.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
+ )
+ }
+
if opts.Stack.Template != nil && opts.Stack.Template.Distributor != nil {
podSpec.Tolerations = opts.Stack.Template.Distributor.Tolerations
podSpec.NodeSelector = opts.Stack.Template.Distributor.NodeSelector
@@ -199,13 +207,13 @@ func NewDistributorHTTPService(opts Options) *corev1.Service {
}
}
-func configureDistributorHTTPServicePKI(deployment *appsv1.Deployment, stackName string) error {
- serviceName := serviceNameDistributorHTTP(stackName)
+func configureDistributorHTTPServicePKI(deployment *appsv1.Deployment, opts Options) error {
+ serviceName := serviceNameDistributorHTTP(opts.Name)
return configureHTTPServicePKI(&deployment.Spec.Template.Spec, serviceName)
}
-func configureDistributorGRPCServicePKI(deployment *appsv1.Deployment, stackName, stackNS string) error {
- caBundleName := signingCABundleName(stackName)
+func configureDistributorGRPCServicePKI(deployment *appsv1.Deployment, opts Options) error {
+ caBundleName := signingCABundleName(opts.Name)
secretVolumeSpec := corev1.PodSpec{
Volumes: []corev1.Volume{
{
@@ -232,8 +240,10 @@ func configureDistributorGRPCServicePKI(deployment *appsv1.Deployment, stackName
Args: []string{
// Enable GRPC over TLS for ingester client
"-ingester.client.tls-enabled=true",
+ fmt.Sprintf("-ingester.client.tls-cipher-suites=%s", opts.TLSCipherSuites()),
+ fmt.Sprintf("-ingester.client.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
fmt.Sprintf("-ingester.client.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(stackName), stackNS)),
+ fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(opts.Name), opts.Namespace)),
},
}
@@ -245,6 +255,6 @@ func configureDistributorGRPCServicePKI(deployment *appsv1.Deployment, stackName
return kverrors.Wrap(err, "failed to merge container")
}
- serviceName := serviceNameDistributorGRPC(stackName)
+ serviceName := serviceNameDistributorGRPC(opts.Name)
return configureGRPCServicePKI(&deployment.Spec.Template.Spec, serviceName)
}
diff --git a/operator/internal/manifests/gateway.go b/operator/internal/manifests/gateway.go
index f8b80b77f461e..39e323d85525e 100644
--- a/operator/internal/manifests/gateway.go
+++ b/operator/internal/manifests/gateway.go
@@ -42,8 +42,8 @@ func BuildGateway(opts Options) ([]client.Object, error) {
objs := []client.Object{cm, dpl, svc, ing}
- minTLSVersion := opts.TLSProfileSpec.MinTLSVersion
- ciphersList := opts.TLSProfileSpec.Ciphers
+ minTLSVersion := opts.TLSProfile.MinTLSVersion
+ ciphersList := opts.TLSProfile.Ciphers
ciphers := strings.Join(ciphersList, `,`)
if opts.Gates.HTTPEncryption {
diff --git a/operator/internal/manifests/gateway_test.go b/operator/internal/manifests/gateway_test.go
index bc47528a4ea80..82cd8003a234c 100644
--- a/operator/internal/manifests/gateway_test.go
+++ b/operator/internal/manifests/gateway_test.go
@@ -296,7 +296,7 @@ func TestBuildGateway_WithTLSProfile(t *testing.T) {
HTTPEncryption: true,
TLSProfile: string(configv1.TLSProfileOldType),
},
- TLSProfileSpec: configv1.TLSProfileSpec{
+ TLSProfile: TLSProfileSpec{
MinTLSVersion: "min-version",
Ciphers: []string{"cipher1", "cipher2"},
},
@@ -348,7 +348,7 @@ func TestBuildGateway_WithTLSProfile(t *testing.T) {
HTTPEncryption: true,
TLSProfile: string(configv1.TLSProfileOldType),
},
- TLSProfileSpec: configv1.TLSProfileSpec{
+ TLSProfile: TLSProfileSpec{
MinTLSVersion: "min-version",
Ciphers: []string{"cipher1", "cipher2"},
},
@@ -378,7 +378,7 @@ func TestBuildGateway_WithTLSProfile(t *testing.T) {
HTTPEncryption: true,
TLSProfile: string(configv1.TLSProfileOldType),
},
- TLSProfileSpec: configv1.TLSProfileSpec{
+ TLSProfile: TLSProfileSpec{
MinTLSVersion: "min-version",
Ciphers: []string{"cipher1", "cipher2"},
},
diff --git a/operator/internal/manifests/indexgateway.go b/operator/internal/manifests/indexgateway.go
index 63fa4e68761ea..5c3a685930526 100644
--- a/operator/internal/manifests/indexgateway.go
+++ b/operator/internal/manifests/indexgateway.go
@@ -6,6 +6,7 @@ import (
"github.com/grafana/loki/operator/internal/manifests/internal/config"
"github.com/grafana/loki/operator/internal/manifests/storage"
+
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
@@ -20,7 +21,7 @@ import (
func BuildIndexGateway(opts Options) ([]client.Object, error) {
statefulSet := NewIndexGatewayStatefulSet(opts)
if opts.Gates.HTTPEncryption {
- if err := configureIndexGatewayHTTPServicePKI(statefulSet, opts.Name); err != nil {
+ if err := configureIndexGatewayHTTPServicePKI(statefulSet, opts); err != nil {
return nil, err
}
}
@@ -30,7 +31,7 @@ func BuildIndexGateway(opts Options) ([]client.Object, error) {
}
if opts.Gates.GRPCEncryption {
- if err := configureIndexGatewayGRPCServicePKI(statefulSet, opts.Name); err != nil {
+ if err := configureIndexGatewayGRPCServicePKI(statefulSet, opts); err != nil {
return nil, err
}
}
@@ -107,6 +108,13 @@ func NewIndexGatewayStatefulSet(opts Options) *appsv1.StatefulSet {
SecurityContext: podSecurityContext(opts.Gates.RuntimeSeccompProfile),
}
+ if opts.Gates.HTTPEncryption || opts.Gates.GRPCEncryption {
+ podSpec.Containers[0].Args = append(podSpec.Containers[0].Args,
+ fmt.Sprintf("-server.tls-cipher-suites=%s", opts.TLSCipherSuites()),
+ fmt.Sprintf("-server.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
+ )
+ }
+
if opts.Stack.Template != nil && opts.Stack.Template.IndexGateway != nil {
podSpec.Tolerations = opts.Stack.Template.IndexGateway.Tolerations
podSpec.NodeSelector = opts.Stack.Template.IndexGateway.NodeSelector
@@ -223,12 +231,12 @@ func NewIndexGatewayHTTPService(opts Options) *corev1.Service {
}
}
-func configureIndexGatewayHTTPServicePKI(statefulSet *appsv1.StatefulSet, stackName string) error {
- serviceName := serviceNameIndexGatewayHTTP(stackName)
+func configureIndexGatewayHTTPServicePKI(statefulSet *appsv1.StatefulSet, opts Options) error {
+ serviceName := serviceNameIndexGatewayHTTP(opts.Name)
return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName)
}
-func configureIndexGatewayGRPCServicePKI(sts *appsv1.StatefulSet, stackName string) error {
- serviceName := serviceNameIndexGatewayGRPC(stackName)
+func configureIndexGatewayGRPCServicePKI(sts *appsv1.StatefulSet, opts Options) error {
+ serviceName := serviceNameIndexGatewayGRPC(opts.Name)
return configureGRPCServicePKI(&sts.Spec.Template.Spec, serviceName)
}
diff --git a/operator/internal/manifests/ingester.go b/operator/internal/manifests/ingester.go
index 58cd7e9a78ae1..58ab9c98b853d 100644
--- a/operator/internal/manifests/ingester.go
+++ b/operator/internal/manifests/ingester.go
@@ -4,12 +4,11 @@ import (
"fmt"
"path"
- "github.com/ViaQ/logerr/v2/kverrors"
"github.com/grafana/loki/operator/internal/manifests/internal/config"
"github.com/grafana/loki/operator/internal/manifests/storage"
+ "github.com/ViaQ/logerr/v2/kverrors"
"github.com/imdario/mergo"
-
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
@@ -24,7 +23,7 @@ import (
func BuildIngester(opts Options) ([]client.Object, error) {
statefulSet := NewIngesterStatefulSet(opts)
if opts.Gates.HTTPEncryption {
- if err := configureIngesterHTTPServicePKI(statefulSet, opts.Name); err != nil {
+ if err := configureIngesterHTTPServicePKI(statefulSet, opts); err != nil {
return nil, err
}
}
@@ -34,7 +33,7 @@ func BuildIngester(opts Options) ([]client.Object, error) {
}
if opts.Gates.GRPCEncryption {
- if err := configureIngesterGRPCServicePKI(statefulSet, opts.Name, opts.Namespace); err != nil {
+ if err := configureIngesterGRPCServicePKI(statefulSet, opts); err != nil {
return nil, err
}
}
@@ -121,6 +120,13 @@ func NewIngesterStatefulSet(opts Options) *appsv1.StatefulSet {
SecurityContext: podSecurityContext(opts.Gates.RuntimeSeccompProfile),
}
+ if opts.Gates.HTTPEncryption || opts.Gates.GRPCEncryption {
+ podSpec.Containers[0].Args = append(podSpec.Containers[0].Args,
+ fmt.Sprintf("-server.tls-cipher-suites=%s", opts.TLSCipherSuites()),
+ fmt.Sprintf("-server.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
+ )
+ }
+
if opts.Stack.Template != nil && opts.Stack.Template.Ingester != nil {
podSpec.Tolerations = opts.Stack.Template.Ingester.Tolerations
podSpec.NodeSelector = opts.Stack.Template.Ingester.NodeSelector
@@ -255,13 +261,13 @@ func NewIngesterHTTPService(opts Options) *corev1.Service {
}
}
-func configureIngesterHTTPServicePKI(statefulSet *appsv1.StatefulSet, stackName string) error {
- serviceName := serviceNameIngesterHTTP(stackName)
+func configureIngesterHTTPServicePKI(statefulSet *appsv1.StatefulSet, opts Options) error {
+ serviceName := serviceNameIngesterHTTP(opts.Name)
return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName)
}
-func configureIngesterGRPCServicePKI(sts *appsv1.StatefulSet, stackName, stackNS string) error {
- caBundleName := signingCABundleName(stackName)
+func configureIngesterGRPCServicePKI(sts *appsv1.StatefulSet, opts Options) error {
+ caBundleName := signingCABundleName(opts.Name)
secretVolumeSpec := corev1.PodSpec{
Volumes: []corev1.Volume{
{
@@ -288,12 +294,16 @@ func configureIngesterGRPCServicePKI(sts *appsv1.StatefulSet, stackName, stackNS
Args: []string{
// Enable GRPC over TLS for ingester client
"-ingester.client.tls-enabled=true",
+ fmt.Sprintf("-ingester.client.tls-cipher-suites=%s", opts.TLSCipherSuites()),
+ fmt.Sprintf("-ingester.client.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
fmt.Sprintf("-ingester.client.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(stackName), stackNS)),
+ fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(opts.Name), opts.Namespace)),
   // Enable GRPC over TLS for boltdb-shipper index-gateway client
"-boltdb.shipper.index-gateway-client.grpc.tls-enabled=true",
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-cipher-suites=%s", opts.TLSCipherSuites()),
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-server-name=%s", fqdn(serviceNameIndexGatewayGRPC(stackName), stackNS)),
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-server-name=%s", fqdn(serviceNameIndexGatewayGRPC(opts.Name), opts.Namespace)),
},
}
@@ -305,6 +315,6 @@ func configureIngesterGRPCServicePKI(sts *appsv1.StatefulSet, stackName, stackNS
return kverrors.Wrap(err, "failed to merge container")
}
- serviceName := serviceNameIngesterGRPC(stackName)
+ serviceName := serviceNameIngesterGRPC(opts.Name)
return configureGRPCServicePKI(&sts.Spec.Template.Spec, serviceName)
}
diff --git a/operator/internal/manifests/options.go b/operator/internal/manifests/options.go
index fdf60a769544c..ea7c48638d0cd 100644
--- a/operator/internal/manifests/options.go
+++ b/operator/internal/manifests/options.go
@@ -1,8 +1,9 @@
package manifests
import (
+ "strings"
+
configv1 "github.com/grafana/loki/operator/apis/config/v1"
- projectconfigv1 "github.com/grafana/loki/operator/apis/config/v1"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
"github.com/grafana/loki/operator/internal/manifests/internal"
@@ -20,9 +21,6 @@ type Options struct {
GatewayBaseDomain string
ConfigSHA1 string
- TLSProfileType projectconfigv1.TLSProfileType
- TLSProfileSpec projectconfigv1.TLSProfileSpec
-
Gates configv1.FeatureGates
Stack lokiv1.LokiStackSpec
ResourceRequirements internal.ComponentResources
@@ -36,6 +34,8 @@ type Options struct {
OpenShiftOptions openshift.Options
Tenants Tenants
+
+ TLSProfile TLSProfileSpec
}
// Tenants contains the configuration per tenant and secrets for authn/authz.
@@ -88,3 +88,19 @@ type RulerSecret struct {
// BearerToken contains the token used for bearer authentication.
BearerToken string
}
+
+// TLSProfileSpec is the desired behavior of a TLSProfileType.
+type TLSProfileSpec struct {
+ // Ciphers is used to specify the cipher algorithms that are negotiated
+ // during the TLS handshake.
+ Ciphers []string
+ // MinTLSVersion is used to specify the minimal version of the TLS protocol
+ // that is negotiated during the TLS handshake.
+ MinTLSVersion string
+}
+
+// TLSCipherSuites transforms TLSProfileSpec.Ciphers from a slice
+// to a string of elements joined with a comma.
+func (o Options) TLSCipherSuites() string {
+ return strings.Join(o.TLSProfile.Ciphers, ",")
+}
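
A quick sketch of the joined form (cipher names illustrative, assumed import: fmt):

func ExampleOptions_TLSCipherSuites() {
	opts := Options{TLSProfile: TLSProfileSpec{
		Ciphers: []string{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"},
	}}
	// TLSCipherSuites renders the slice as a single comma-separated flag value.
	fmt.Println(opts.TLSCipherSuites())
	// Output: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
}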
diff --git a/operator/internal/manifests/querier.go b/operator/internal/manifests/querier.go
index d212588cd3ccf..a0a3cc98c7725 100644
--- a/operator/internal/manifests/querier.go
+++ b/operator/internal/manifests/querier.go
@@ -4,11 +4,11 @@ import (
"fmt"
"path"
- "github.com/ViaQ/logerr/v2/kverrors"
"github.com/grafana/loki/operator/internal/manifests/internal/config"
"github.com/grafana/loki/operator/internal/manifests/storage"
- "github.com/imdario/mergo"
+ "github.com/ViaQ/logerr/v2/kverrors"
+ "github.com/imdario/mergo"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -22,7 +22,7 @@ import (
func BuildQuerier(opts Options) ([]client.Object, error) {
deployment := NewQuerierDeployment(opts)
if opts.Gates.HTTPEncryption {
- if err := configureQuerierHTTPServicePKI(deployment, opts.Name); err != nil {
+ if err := configureQuerierHTTPServicePKI(deployment, opts); err != nil {
return nil, err
}
}
@@ -32,7 +32,7 @@ func BuildQuerier(opts Options) ([]client.Object, error) {
}
if opts.Gates.GRPCEncryption {
- if err := configureQuerierGRPCServicePKI(deployment, opts.Name, opts.Namespace); err != nil {
+ if err := configureQuerierGRPCServicePKI(deployment, opts); err != nil {
return nil, err
}
}
@@ -109,6 +109,13 @@ func NewQuerierDeployment(opts Options) *appsv1.Deployment {
SecurityContext: podSecurityContext(opts.Gates.RuntimeSeccompProfile),
}
+ if opts.Gates.HTTPEncryption || opts.Gates.GRPCEncryption {
+ podSpec.Containers[0].Args = append(podSpec.Containers[0].Args,
+ fmt.Sprintf("-server.tls-cipher-suites=%s", opts.TLSCipherSuites()),
+ fmt.Sprintf("-server.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
+ )
+ }
+
if opts.Stack.Template != nil && opts.Stack.Template.Querier != nil {
podSpec.Tolerations = opts.Stack.Template.Querier.Tolerations
podSpec.NodeSelector = opts.Stack.Template.Querier.NodeSelector
@@ -205,13 +212,13 @@ func NewQuerierHTTPService(opts Options) *corev1.Service {
}
}
-func configureQuerierHTTPServicePKI(deployment *appsv1.Deployment, stackName string) error {
- serviceName := serviceNameQuerierHTTP(stackName)
+func configureQuerierHTTPServicePKI(deployment *appsv1.Deployment, opts Options) error {
+ serviceName := serviceNameQuerierHTTP(opts.Name)
return configureHTTPServicePKI(&deployment.Spec.Template.Spec, serviceName)
}
-func configureQuerierGRPCServicePKI(deployment *appsv1.Deployment, stackName, stackNS string) error {
- caBundleName := signingCABundleName(stackName)
+func configureQuerierGRPCServicePKI(deployment *appsv1.Deployment, opts Options) error {
+ caBundleName := signingCABundleName(opts.Name)
secretVolumeSpec := corev1.PodSpec{
Volumes: []corev1.Volume{
{
@@ -238,16 +245,22 @@ func configureQuerierGRPCServicePKI(deployment *appsv1.Deployment, stackName, st
Args: []string{
// Enable GRPC over TLS for ingester client
"-ingester.client.tls-enabled=true",
+ fmt.Sprintf("-ingester.client.tls-cipher-suites=%s", opts.TLSCipherSuites()),
+ fmt.Sprintf("-ingester.client.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
fmt.Sprintf("-ingester.client.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(stackName), stackNS)),
+ fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(opts.Name), opts.Namespace)),
// Enable GRPC over TLS for query frontend client
"-querier.frontend-client.tls-enabled=true",
+ fmt.Sprintf("-querier.frontend-client.tls-cipher-suites=%s", opts.TLSCipherSuites()),
+ fmt.Sprintf("-querier.frontend-client.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
fmt.Sprintf("-querier.frontend-client.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-querier.frontend-client.tls-server-name=%s", fqdn(serviceNameQueryFrontendGRPC(stackName), stackNS)),
+ fmt.Sprintf("-querier.frontend-client.tls-server-name=%s", fqdn(serviceNameQueryFrontendGRPC(opts.Name), opts.Namespace)),
   // Enable GRPC over TLS for boltdb-shipper index-gateway client
"-boltdb.shipper.index-gateway-client.grpc.tls-enabled=true",
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-cipher-suites=%s", opts.TLSCipherSuites()),
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-server-name=%s", fqdn(serviceNameIndexGatewayGRPC(stackName), stackNS)),
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-server-name=%s", fqdn(serviceNameIndexGatewayGRPC(opts.Name), opts.Namespace)),
},
}
@@ -259,6 +272,6 @@ func configureQuerierGRPCServicePKI(deployment *appsv1.Deployment, stackName, st
return kverrors.Wrap(err, "failed to merge container")
}
- serviceName := serviceNameQuerierGRPC(stackName)
+ serviceName := serviceNameQuerierGRPC(opts.Name)
return configureGRPCServicePKI(&deployment.Spec.Template.Spec, serviceName)
}
diff --git a/operator/internal/manifests/query-frontend.go b/operator/internal/manifests/query-frontend.go
index 80cd9742896c3..4785db7b77f4f 100644
--- a/operator/internal/manifests/query-frontend.go
+++ b/operator/internal/manifests/query-frontend.go
@@ -4,8 +4,9 @@ import (
"fmt"
"path"
- "github.com/ViaQ/logerr/v2/kverrors"
"github.com/grafana/loki/operator/internal/manifests/internal/config"
+
+ "github.com/ViaQ/logerr/v2/kverrors"
"github.com/imdario/mergo"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -20,13 +21,13 @@ import (
func BuildQueryFrontend(opts Options) ([]client.Object, error) {
deployment := NewQueryFrontendDeployment(opts)
if opts.Gates.HTTPEncryption {
- if err := configureQueryFrontendHTTPServicePKI(deployment, opts.Name); err != nil {
+ if err := configureQueryFrontendHTTPServicePKI(deployment, opts); err != nil {
return nil, err
}
}
if opts.Gates.GRPCEncryption {
- if err := configureQueryFrontendGRPCServicePKI(deployment, opts.Name); err != nil {
+ if err := configureQueryFrontendGRPCServicePKI(deployment, opts); err != nil {
return nil, err
}
}
@@ -115,6 +116,13 @@ func NewQueryFrontendDeployment(opts Options) *appsv1.Deployment {
SecurityContext: podSecurityContext(opts.Gates.RuntimeSeccompProfile),
}
+ if opts.Gates.HTTPEncryption || opts.Gates.GRPCEncryption {
+ podSpec.Containers[0].Args = append(podSpec.Containers[0].Args,
+ fmt.Sprintf("-server.tls-cipher-suites=%s", opts.TLSCipherSuites()),
+ fmt.Sprintf("-server.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
+ )
+ }
+
if opts.Stack.Template != nil && opts.Stack.Template.QueryFrontend != nil {
podSpec.Tolerations = opts.Stack.Template.QueryFrontend.Tolerations
podSpec.NodeSelector = opts.Stack.Template.QueryFrontend.NodeSelector
@@ -211,24 +219,34 @@ func NewQueryFrontendHTTPService(opts Options) *corev1.Service {
}
}
-func configureQueryFrontendHTTPServicePKI(deployment *appsv1.Deployment, stackName string) error {
- serviceName := serviceNameQueryFrontendHTTP(stackName)
- caBundleName := signingCABundleName(stackName)
+func configureQueryFrontendHTTPServicePKI(deployment *appsv1.Deployment, opts Options) error {
+ serviceName := serviceNameQueryFrontendHTTP(opts.Name)
+ caBundleName := signingCABundleName(opts.Name)
- if err := configureTailCA(deployment, lokiFrontendContainerName, caBundleName, caBundleDir, caFile); err != nil {
+ err := configureTailCA(
+ deployment,
+ lokiFrontendContainerName,
+ caBundleName,
+ caBundleDir,
+ caFile,
+ opts.TLSProfile.MinTLSVersion,
+ opts.TLSCipherSuites(),
+ )
+ if err != nil {
return err
}
+
return configureHTTPServicePKI(&deployment.Spec.Template.Spec, serviceName)
}
-func configureQueryFrontendGRPCServicePKI(deployment *appsv1.Deployment, stackName string) error {
- serviceName := serviceNameQueryFrontendGRPC(stackName)
+func configureQueryFrontendGRPCServicePKI(deployment *appsv1.Deployment, opts Options) error {
+ serviceName := serviceNameQueryFrontendGRPC(opts.Name)
return configureGRPCServicePKI(&deployment.Spec.Template.Spec, serviceName)
}
// ConfigureQueryFrontendDeployment configures CA certificate when TLS is enabled.
func configureTailCA(d *appsv1.Deployment,
- qfContainerName, caBundleVolumeName, caDir, caFile string,
+ qfContainerName, caBundleVolumeName, caDir, caFile, minTLSVersion, cipherSuites string,
) error {
var qfIdx int
for i, c := range d.Spec.Template.Spec.Containers {
@@ -240,6 +258,8 @@ func configureTailCA(d *appsv1.Deployment,
containerSpec := corev1.Container{
Args: []string{
+ fmt.Sprintf("-frontend.tail-tls-config.tls-cipher-suites=%s", cipherSuites),
+ fmt.Sprintf("-frontend.tail-tls-config.tls-min-version=%s", minTLSVersion),
fmt.Sprintf("-frontend.tail-tls-config.tls-ca-path=%s/%s", caDir, caFile),
},
VolumeMounts: []corev1.VolumeMount{
diff --git a/operator/internal/manifests/query-frontend_test.go b/operator/internal/manifests/query-frontend_test.go
index 92ded54dd0037..fdb811e4e5b6d 100644
--- a/operator/internal/manifests/query-frontend_test.go
+++ b/operator/internal/manifests/query-frontend_test.go
@@ -7,6 +7,7 @@ import (
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/internal/config"
+
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -62,6 +63,10 @@ func TestConfigureQueryFrontendHTTPServicePKI(t *testing.T) {
},
},
},
+ TLSProfile: TLSProfileSpec{
+ MinTLSVersion: "TLSVersion1.2",
+ Ciphers: []string{"TLS_RSA_WITH_AES_128_CBC_SHA"},
+ },
}
d := appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
@@ -119,6 +124,8 @@ func TestConfigureQueryFrontendHTTPServicePKI(t *testing.T) {
Name: lokiFrontendContainerName,
Args: []string{
"-target=query-frontend",
+ "-frontend.tail-tls-config.tls-cipher-suites=TLS_RSA_WITH_AES_128_CBC_SHA",
+ "-frontend.tail-tls-config.tls-min-version=TLSVersion1.2",
fmt.Sprintf("-frontend.tail-tls-config.tls-ca-path=%s/%s", caBundleDir, caFile),
fmt.Sprintf("-server.http-tls-cert-path=%s", path.Join(httpTLSDir, tlsCertFile)),
fmt.Sprintf("-server.http-tls-key-path=%s", path.Join(httpTLSDir, tlsKeyFile)),
@@ -193,7 +200,7 @@ func TestConfigureQueryFrontendHTTPServicePKI(t *testing.T) {
},
}
- err := configureQueryFrontendHTTPServicePKI(&d, opts.Name)
+ err := configureQueryFrontendHTTPServicePKI(&d, opts)
require.Nil(t, err)
require.Equal(t, expected, d)
}
diff --git a/operator/internal/manifests/ruler.go b/operator/internal/manifests/ruler.go
index bdf7be06df140..e8537279f54ee 100644
--- a/operator/internal/manifests/ruler.go
+++ b/operator/internal/manifests/ruler.go
@@ -4,10 +4,11 @@ import (
"fmt"
"path"
- "github.com/ViaQ/logerr/v2/kverrors"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/internal/config"
"github.com/grafana/loki/operator/internal/manifests/openshift"
+
+ "github.com/ViaQ/logerr/v2/kverrors"
"github.com/imdario/mergo"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -23,13 +24,13 @@ import (
func BuildRuler(opts Options) ([]client.Object, error) {
statefulSet := NewRulerStatefulSet(opts)
if opts.Gates.HTTPEncryption {
- if err := configureRulerHTTPServicePKI(statefulSet, opts.Name); err != nil {
+ if err := configureRulerHTTPServicePKI(statefulSet, opts); err != nil {
return nil, err
}
}
if opts.Gates.GRPCEncryption {
- if err := configureRulerGRPCServicePKI(statefulSet, opts.Name, opts.Namespace); err != nil {
+ if err := configureRulerGRPCServicePKI(statefulSet, opts); err != nil {
return nil, err
}
}
@@ -143,6 +144,13 @@ func NewRulerStatefulSet(opts Options) *appsv1.StatefulSet {
SecurityContext: podSecurityContext(opts.Gates.RuntimeSeccompProfile),
}
+ if opts.Gates.HTTPEncryption || opts.Gates.GRPCEncryption {
+ podSpec.Containers[0].Args = append(podSpec.Containers[0].Args,
+ fmt.Sprintf("-server.tls-cipher-suites=%s", opts.TLSCipherSuites()),
+ fmt.Sprintf("-server.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
+ )
+ }
+
if opts.Stack.Template != nil && opts.Stack.Template.Ruler != nil {
podSpec.Tolerations = opts.Stack.Template.Ruler.Tolerations
podSpec.NodeSelector = opts.Stack.Template.Ruler.NodeSelector
@@ -318,13 +326,13 @@ func NewRulerHTTPService(opts Options) *corev1.Service {
}
}
-func configureRulerHTTPServicePKI(statefulSet *appsv1.StatefulSet, stackName string) error {
- serviceName := serviceNameRulerHTTP(stackName)
+func configureRulerHTTPServicePKI(statefulSet *appsv1.StatefulSet, opts Options) error {
+ serviceName := serviceNameRulerHTTP(opts.Name)
return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName)
}
-func configureRulerGRPCServicePKI(sts *appsv1.StatefulSet, stackName, stackNs string) error {
- caBundleName := signingCABundleName(stackName)
+func configureRulerGRPCServicePKI(sts *appsv1.StatefulSet, opts Options) error {
+ caBundleName := signingCABundleName(opts.Name)
secretVolumeSpec := corev1.PodSpec{
Volumes: []corev1.Volume{
{
@@ -351,16 +359,22 @@ func configureRulerGRPCServicePKI(sts *appsv1.StatefulSet, stackName, stackNs st
Args: []string{
// Enable GRPC over TLS for ruler client
"-ruler.client.tls-enabled=true",
+ fmt.Sprintf("-ruler.client.tls-cipher-suites=%s", opts.TLSCipherSuites()),
+ fmt.Sprintf("-ruler.client.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
fmt.Sprintf("-ruler.client.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-ruler.client.tls-server-name=%s", fqdn(serviceNameRulerGRPC(stackName), stackNs)),
+ fmt.Sprintf("-ruler.client.tls-server-name=%s", fqdn(serviceNameRulerGRPC(opts.Name), opts.Namespace)),
// Enable GRPC over TLS for ingester client
"-ingester.client.tls-enabled=true",
+ fmt.Sprintf("-ingester.client.tls-cipher-suites=%s", opts.TLSCipherSuites()),
+ fmt.Sprintf("-ingester.client.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
fmt.Sprintf("-ingester.client.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(stackName), stackNs)),
+ fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(opts.Name), opts.Namespace)),
   // Enable GRPC over TLS for boltdb-shipper index-gateway client
"-boltdb.shipper.index-gateway-client.grpc.tls-enabled=true",
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-cipher-suites=%s", opts.TLSCipherSuites()),
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-server-name=%s", fqdn(serviceNameIndexGatewayGRPC(stackName), stackNs)),
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-server-name=%s", fqdn(serviceNameIndexGatewayGRPC(opts.Name), opts.Namespace)),
},
}
@@ -372,7 +386,7 @@ func configureRulerGRPCServicePKI(sts *appsv1.StatefulSet, stackName, stackNs st
return kverrors.Wrap(err, "failed to merge container")
}
- serviceName := serviceNameRulerGRPC(stackName)
+ serviceName := serviceNameRulerGRPC(opts.Name)
return configureGRPCServicePKI(&sts.Spec.Template.Spec, serviceName)
}
|
operator
|
Add TLS profile support for Loki server and client HTTP and GRPC TLS options (#7322)
|
f42942768e5356b3fe960315d04b87e7eab12235
|
2023-04-15 00:44:52
|
Kaviraj Kanagaraj
|
makefile: Support debug build for `logcli`. (#9093)
| false
|
diff --git a/Makefile b/Makefile
index 0667ad5003c30..800346c9e7dd5 100644
--- a/Makefile
+++ b/Makefile
@@ -131,6 +131,7 @@ check-generated-files: yacc ragel fmt-proto protos clients/pkg/promtail/server/u
##########
.PHONY: cmd/logcli/logcli
logcli: cmd/logcli/logcli
+logcli-debug: cmd/logcli/logcli-debug
logcli-image:
$(SUDO) docker build -t $(IMAGE_PREFIX)/logcli:$(IMAGE_TAG) -f cmd/logcli/Dockerfile .
@@ -138,6 +139,8 @@ logcli-image:
cmd/logcli/logcli:
CGO_ENABLED=0 go build $(GO_FLAGS) -o $@ ./cmd/logcli
+cmd/logcli/logcli-debug:
+ CGO_ENABLED=0 go build $(DEBUG_GO_FLAGS) -o ./cmd/logcli/logcli-debug ./cmd/logcli
########
# Loki #
########
|
makefile
|
Support debug build for `logcli`. (#9093)
|
8b8d3a4eec256a26fa7472744d0fdd0fee64a3b8
|
2020-02-24 19:04:39
|
Irake
|
docs: fix outdated link of fluentd (#1728)
| false
|
diff --git a/docs/README.md b/docs/README.md
index 9b799b0c96d8c..790e2b3ad5c81 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -36,7 +36,7 @@ simplifies the operation and significantly lowers the cost of Loki.
2. [Docker Driver](clients/docker-driver/README.md)
1. [Configuration](clients/docker-driver/configuration.md)
4. [Fluent Bit](../cmd/fluent-bit/README.md)
- 3. [Fluentd](clients/fluentd.md)
+ 3. [Fluentd](clients/fluentd/README.md)
6. [LogQL](logql.md)
7. [Operations](operations/README.md)
1. [Authentication](operations/authentication.md)
|
docs
|
fix outdated link of fluentd (#1728)
|
022847b60731e08f255da65c65c238218bfa8201
|
2024-11-04 17:20:03
|
Christian Haudum
|
chore: Update targets list in documentation (#14567)
| false
|
diff --git a/docs/sources/get-started/deployment-modes.md b/docs/sources/get-started/deployment-modes.md
index b13687d2cf563..60d3ccc74fbeb 100644
--- a/docs/sources/get-started/deployment-modes.md
+++ b/docs/sources/get-started/deployment-modes.md
@@ -45,33 +45,37 @@ The simple scalable deployment mode can scale up to a few TBs of logs per day, h
The three execution paths in simple scalable mode are each activated by appending the following arguments to Loki on startup:
- `-target=write` - The write target is stateful and is controlled by a Kubernetes StatefulSet. It contains the following components:
--- Distributor
--- Ingester
+ * Distributor
+ * Ingester
- `-target=read` - The read target is stateless and can be run as a Kubernetes Deployment that can be scaled automatically (Note that in the official helm chart it is currently deployed as a stateful set). It contains the following components:
--- Query front end
--- Queriers
+ * Query Frontend
+ * Querier
- `-target=backend` - The backend target is stateful, and is controlled by a Kubernetes StatefulSet. Contains the following components:
--- Compactor
--- Index gateways
--- Query scheduler
--- Ruler
+ - Compactor
+ - Index Gateway
+ - Query Scheduler
+ - Ruler
+ - Bloom Planner (experimental)
+ - Bloom Builder (experimental)
+ - Bloom Gateway (experimental)
The simple scalable deployment mode requires a reverse proxy to be deployed in front of Loki, to direct client API requests to either the read or write nodes. The Loki Helm chart includes a default reverse proxy configuration, using Nginx.
## Microservices mode
-The microservices deployment mode runs components of Loki as distinct processes. Each process is invoked specifying its `target`:
-For release 2.9 the components are:
+The microservices deployment mode runs components of Loki as distinct processes. Each process is invoked specifying its `target`.
+For release 3.2 the components are:
-- Cache Generation Loader
+- Bloom Builder (experimental)
+- Bloom Gateway (experimental)
+- Bloom Planner (experimental)
- Compactor
- Distributor
-- Index-gateway
+- Index Gateway
- Ingester
-- Ingester-Querier
- Overrides Exporter
- Querier
-- Query-frontend
-- Query-scheduler
+- Query Frontend
+- Query Scheduler
- Ruler
- Table Manager (deprecated)
@@ -79,7 +83,7 @@ For release 2.9 the components are:
You can see the complete list of targets for your version of Loki by running Loki with the flag `-list-targets`, for example:
```bash
-docker run docker.io/grafana/loki:2.9.2 -config.file=/etc/loki/local-config.yaml -list-targets
+docker run docker.io/grafana/loki:3.2.1 -config.file=/etc/loki/local-config.yaml -list-targets
```
{{% /admonition %}}
diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
index 5f90cecdcc97e..52af53b1b0558 100644
--- a/pkg/loki/loki.go
+++ b/pkg/loki/loki.go
@@ -685,7 +685,7 @@ func (t *Loki) setupModuleManager() error {
mm.RegisterModule(Store, t.initStore, modules.UserInvisibleModule)
mm.RegisterModule(Querier, t.initQuerier)
mm.RegisterModule(Ingester, t.initIngester)
- mm.RegisterModule(IngesterQuerier, t.initIngesterQuerier)
+ mm.RegisterModule(IngesterQuerier, t.initIngesterQuerier, modules.UserInvisibleModule)
mm.RegisterModule(IngesterGRPCInterceptors, t.initIngesterGRPCInterceptors, modules.UserInvisibleModule)
mm.RegisterModule(QueryFrontendTripperware, t.initQueryFrontendMiddleware, modules.UserInvisibleModule)
mm.RegisterModule(QueryFrontend, t.initQueryFrontend)
@@ -703,8 +703,8 @@ func (t *Loki) setupModuleManager() error {
mm.RegisterModule(BloomGateway, t.initBloomGateway)
mm.RegisterModule(QueryScheduler, t.initQueryScheduler)
mm.RegisterModule(QuerySchedulerRing, t.initQuerySchedulerRing, modules.UserInvisibleModule)
- mm.RegisterModule(Analytics, t.initAnalytics)
- mm.RegisterModule(CacheGenerationLoader, t.initCacheGenerationLoader)
+ mm.RegisterModule(Analytics, t.initAnalytics, modules.UserInvisibleModule)
+ mm.RegisterModule(CacheGenerationLoader, t.initCacheGenerationLoader, modules.UserInvisibleModule)
mm.RegisterModule(PatternRingClient, t.initPatternRingClient, modules.UserInvisibleModule)
mm.RegisterModule(PatternIngesterTee, t.initPatternIngesterTee, modules.UserInvisibleModule)
mm.RegisterModule(PatternIngester, t.initPatternIngester)
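
The modules.UserInvisibleModule option comes from grafana/dskit: registering a module with it keeps the module out of the user-facing target list (and hence out of -list-targets output) while other modules can still depend on it. A minimal sketch, assuming dskit's modules API as used above; initAnalytics and initQuerier are hypothetical init functions:

	mm := modules.NewManager(logger)
	mm.RegisterModule("analytics", initAnalytics, modules.UserInvisibleModule) // hidden from -list-targets
	mm.RegisterModule("querier", initQuerier)                                  // visible, user-selectable target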
|
chore
|
Update targets list in documentation (#14567)
|
b89829421ee3a4589efe34a4b1332fe659c9d8e7
|
2024-10-31 19:07:16
|
renovate[bot]
|
fix(deps): update module github.com/baidubce/bce-sdk-go to v0.9.197 (#14682)
| false
|
diff --git a/go.mod b/go.mod
index aa5f6994a3b18..c60b93722b961 100644
--- a/go.mod
+++ b/go.mod
@@ -19,7 +19,7 @@ require (
github.com/alicebob/miniredis/v2 v2.30.4
github.com/aliyun/aliyun-oss-go-sdk v2.2.10+incompatible
github.com/aws/aws-sdk-go v1.54.19
- github.com/baidubce/bce-sdk-go v0.9.196
+ github.com/baidubce/bce-sdk-go v0.9.197
github.com/bmatcuk/doublestar v1.3.4
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500
github.com/cespare/xxhash v1.1.0
diff --git a/go.sum b/go.sum
index 07c43b3199206..2f4493d18daec 100644
--- a/go.sum
+++ b/go.sum
@@ -395,8 +395,8 @@ github.com/aws/smithy-go v1.11.1 h1:IQ+lPZVkSM3FRtyaDox41R8YS6iwPMYIreejOgPW49g=
github.com/aws/smithy-go v1.11.1/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM=
github.com/axiomhq/hyperloglog v0.0.0-20240507144631-af9851f82b27 h1:60m4tnanN1ctzIu4V3bfCNJ39BiOPSm1gHFlFjTkRE0=
github.com/axiomhq/hyperloglog v0.0.0-20240507144631-af9851f82b27/go.mod h1:k08r+Yj1PRAmuayFiRK6MYuR5Ve4IuZtTfxErMIh0+c=
-github.com/baidubce/bce-sdk-go v0.9.196 h1:FywykM9euTmPNZknBn6WtNUEYTkccwQLCcqrxHvaFCQ=
-github.com/baidubce/bce-sdk-go v0.9.196/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
+github.com/baidubce/bce-sdk-go v0.9.197 h1:TQqa4J+FTagrywhaTQ707ffE1eG3ix1s06eSZ/K+Wk0=
+github.com/baidubce/bce-sdk-go v0.9.197/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
diff --git a/vendor/github.com/baidubce/bce-sdk-go/bce/config.go b/vendor/github.com/baidubce/bce-sdk-go/bce/config.go
index 2e2f0389b5068..b0c7617569cbb 100644
--- a/vendor/github.com/baidubce/bce-sdk-go/bce/config.go
+++ b/vendor/github.com/baidubce/bce-sdk-go/bce/config.go
@@ -26,7 +26,7 @@ import (
// Constants and default values for the package bce
const (
- SDK_VERSION = "0.9.196"
+ SDK_VERSION = "0.9.197"
URI_PREFIX = "/" // now support uri without prefix "v1" so just set root path
DEFAULT_DOMAIN = "baidubce.com"
DEFAULT_PROTOCOL = "http"
diff --git a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/util.go b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/util.go
index 13a0399503cd4..b2fa1e2e88394 100644
--- a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/util.go
+++ b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/util.go
@@ -395,12 +395,12 @@ func replaceEndpointByBucket(bucket, endpoint string) string {
func setUriAndEndpoint(cli bce.Client, req *bce.BceRequest, ctx *BosContext, endpoint string) {
origin_uri := req.Uri()
bucket := ctx.Bucket
+ protocol := bce.DEFAULT_PROTOCOL
 	// deal with protocol
if strings.HasPrefix(endpoint, "https://") {
- req.SetProtocol(bce.HTTPS_PROTOCAL)
+ protocol = bce.HTTPS_PROTOCAL
endpoint = strings.TrimPrefix(endpoint, "https://")
} else if strings.HasPrefix(endpoint, "http://") {
- req.SetProtocol(bce.DEFAULT_PROTOCOL)
endpoint = strings.TrimPrefix(endpoint, "http://")
}
// set uri, endpoint for cname, cdn, virtual host
@@ -423,6 +423,7 @@ func setUriAndEndpoint(cli bce.Client, req *bce.BceRequest, ctx *BosContext, end
req.SetEndpoint(endpoint)
}
}
+ req.SetProtocol(protocol)
}
func getDefaultContentType(object string) string {
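
The upstream fix above follows a compute-then-set-once shape: the protocol is decided up front (defaulting to bce.DEFAULT_PROTOCOL) and applied exactly once after all endpoint rewriting, so an endpoint without a scheme prefix no longer inherits whatever protocol a previous call left on the request. A rough sketch of the pattern, not the full function:

	protocol := bce.DEFAULT_PROTOCOL
	if strings.HasPrefix(endpoint, "https://") {
		protocol = bce.HTTPS_PROTOCAL // upstream constant name, spelled as-is
		endpoint = strings.TrimPrefix(endpoint, "https://")
	} else if strings.HasPrefix(endpoint, "http://") {
		endpoint = strings.TrimPrefix(endpoint, "http://")
	}
	// ... endpoint/uri rewriting for cname, cdn, virtual host ...
	req.SetProtocol(protocol)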
diff --git a/vendor/modules.txt b/vendor/modules.txt
index f0bb8450cf01e..e546d6d13361f 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -486,7 +486,7 @@ github.com/aws/smithy-go/transport/http/internal/io
# github.com/axiomhq/hyperloglog v0.0.0-20240507144631-af9851f82b27
## explicit; go 1.12
github.com/axiomhq/hyperloglog
-# github.com/baidubce/bce-sdk-go v0.9.196
+# github.com/baidubce/bce-sdk-go v0.9.197
## explicit; go 1.11
github.com/baidubce/bce-sdk-go/auth
github.com/baidubce/bce-sdk-go/bce
|
fix
|
update module github.com/baidubce/bce-sdk-go to v0.9.197 (#14682)
|
5846ea2ea044ed7168313363f70fb55b2f8e2ff0
|
2024-12-20 01:18:33
|
Christian Haudum
|
fix(blooms): Match series to newest block only (#15481)
| false
|
diff --git a/pkg/bloombuild/builder/builder.go b/pkg/bloombuild/builder/builder.go
index b5bb682e8efcd..4e45467eb52f7 100644
--- a/pkg/bloombuild/builder/builder.go
+++ b/pkg/bloombuild/builder/builder.go
@@ -31,7 +31,6 @@ import (
"github.com/grafana/loki/v3/pkg/storage/config"
"github.com/grafana/loki/v3/pkg/storage/stores"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
- "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb"
utillog "github.com/grafana/loki/v3/pkg/util/log"
"github.com/grafana/loki/v3/pkg/util/ring"
)
@@ -415,7 +414,6 @@ func (b *Builder) processTask(
Bounds: gap.Bounds,
},
},
- Sources: []tsdb.SingleTenantTSDBIdentifier{task.TSDB},
}
// Fetch blocks that aren't up to date but are in the desired fingerprint range
@@ -492,6 +490,7 @@ func (b *Builder) processTask(
level.Debug(logger).Log("msg", "uploaded block", "progress_pct", fmt.Sprintf("%.2f", pct))
meta.Blocks = append(meta.Blocks, built.BlockRef)
+ meta.Sources = append(meta.Sources, task.TSDB)
}
if err := newBlocks.Err(); err != nil {
diff --git a/pkg/bloomgateway/resolver.go b/pkg/bloomgateway/resolver.go
index 0f6fe27626958..71f410ad8f3d4 100644
--- a/pkg/bloomgateway/resolver.go
+++ b/pkg/bloomgateway/resolver.go
@@ -2,6 +2,7 @@ package bloomgateway
import (
"context"
+ "slices"
"sort"
"time"
@@ -61,36 +62,55 @@ func (r *defaultBlockResolver) Resolve(ctx context.Context, tenant string, inter
}
func blocksMatchingSeries(metas []bloomshipper.Meta, interval bloomshipper.Interval, series []*logproto.GroupedChunkRefs) []blockWithSeries {
- result := make([]blockWithSeries, 0, len(metas))
-
- for _, meta := range metas {
- for _, block := range meta.Blocks {
+ slices.SortFunc(series, func(a, b *logproto.GroupedChunkRefs) int { return int(a.Fingerprint - b.Fingerprint) })
- // skip blocks that are not within time interval
- if !interval.Overlaps(block.Interval()) {
- continue
+ result := make([]blockWithSeries, 0, len(metas))
+ cache := make(map[bloomshipper.BlockRef]int)
+
+ // find the newest block for each series
+ for _, s := range series {
+ var b *bloomshipper.BlockRef
+ var newestTs time.Time
+
+ for i := range metas {
+ for j := range metas[i].Blocks {
+ block := metas[i].Blocks[j]
+ // To keep backwards compatibility, we can only look at the source at index 0
+ // because in the past the slice had always length 1, see
+ // https://github.com/grafana/loki/blob/b4060154d198e17bef8ba0fbb1c99bb5c93a412d/pkg/bloombuild/builder/builder.go#L418
+ sourceTs := metas[i].Sources[0].TS
+ // Newer metas have len(Sources) == len(Blocks)
+ if len(metas[i].Sources) > j {
+ sourceTs = metas[i].Sources[j].TS
+ }
+ // skip blocks that are not within time interval
+ if !interval.Overlaps(block.Interval()) {
+ continue
+ }
+ // skip blocks that do not contain the series
+ if block.Cmp(s.Fingerprint) != v1.Overlap {
+ continue
+ }
+ // only use the block if it is newer than the previous
+ if sourceTs.After(newestTs) {
+ b = &block
+ newestTs = sourceTs
+ }
}
+ }
- min := sort.Search(len(series), func(i int) bool {
- return block.Cmp(series[i].Fingerprint) > v1.Before
- })
-
- max := sort.Search(len(series), func(i int) bool {
- return block.Cmp(series[i].Fingerprint) == v1.After
- })
-
- // All fingerprints fall outside of the consumer's range
- if min == len(series) || max == 0 || min == max {
- continue
- }
+ if b == nil {
+ continue
+ }
- // At least one fingerprint is within bounds of the blocks
- // so append to results
- dst := make([]*logproto.GroupedChunkRefs, max-min)
- _ = copy(dst, series[min:max])
+ idx, ok := cache[*b]
+ if ok {
+ result[idx].series = append(result[idx].series, s)
+ } else {
+ cache[*b] = len(result)
result = append(result, blockWithSeries{
- block: block,
- series: dst,
+ block: *b,
+ series: []*logproto.GroupedChunkRefs{s},
})
}
}
diff --git a/pkg/bloomgateway/resolver_test.go b/pkg/bloomgateway/resolver_test.go
index e6369cbeff9ea..217f07324da3b 100644
--- a/pkg/bloomgateway/resolver_test.go
+++ b/pkg/bloomgateway/resolver_test.go
@@ -9,6 +9,7 @@ import (
"github.com/grafana/loki/v3/pkg/logproto"
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb"
)
func makeBlockRef(minFp, maxFp model.Fingerprint, from, through model.Time) bloomshipper.BlockRef {
@@ -28,6 +29,9 @@ func makeMeta(minFp, maxFp model.Fingerprint, from, through model.Time) bloomshi
Blocks: []bloomshipper.BlockRef{
makeBlockRef(minFp, maxFp, from, through),
},
+ Sources: []tsdb.SingleTenantTSDBIdentifier{
+ {TS: through.Time()},
+ },
}
}
@@ -100,14 +104,21 @@ func TestBlockResolver_BlocksMatchingSeries(t *testing.T) {
t.Run("multiple overlapping blocks within time range covering full keyspace", func(t *testing.T) {
metas := []bloomshipper.Meta{
- makeMeta(0x00, 0xdf, 1000, 1999),
- makeMeta(0xc0, 0xff, 1000, 1999),
+ // 2 series overlap
+ makeMeta(0x00, 0xdf, 1000, 1499), // "old" meta covers first 4 series
+ makeMeta(0xc0, 0xff, 1500, 1999), // "new" meta covers last 4 series
}
res := blocksMatchingSeries(metas, interval, series)
+ for i := range res {
+ t.Logf("%s", res[i].block)
+ for j := range res[i].series {
+ t.Logf(" %016x", res[i].series[j].Fingerprint)
+ }
+ }
expected := []blockWithSeries{
{
block: metas[0].Blocks[0],
- series: series[0:4],
+ series: series[0:2], // series 0x00c0 and 0x00d0 are covered in the newer block
},
{
block: metas[1].Blocks[0],
|
fix
|
Match series to newest block only (#15481)
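The resolver rewrite above implements a newest-wins rule: when several blocks cover the same series fingerprint, only the block built from the most recent TSDB source should answer for that series. A minimal sketch of the rule, assuming simplified stand-ins (blockRef and newestBlockFor are illustrative names, not the Loki types):

package main

import (
	"fmt"
	"time"
)

// blockRef is a simplified stand-in for bloomshipper.BlockRef: a fingerprint
// range plus the timestamp of the TSDB source the block was built from.
type blockRef struct {
	name         string
	minFp, maxFp uint64
	source       time.Time
}

// newestBlockFor picks, among the blocks whose fingerprint range contains fp,
// the one built from the newest source; nil means no block covers the series.
func newestBlockFor(fp uint64, blocks []blockRef) *blockRef {
	var best *blockRef
	for i := range blocks {
		b := &blocks[i]
		if fp < b.minFp || fp > b.maxFp {
			continue // block does not cover this series
		}
		if best == nil || b.source.After(best.source) {
			best = b // newest source wins
		}
	}
	return best
}

func main() {
	t0 := time.Unix(1000, 0)
	blocks := []blockRef{
		{name: "old", minFp: 0x00, maxFp: 0xdf, source: t0},
		{name: "new", minFp: 0xc0, maxFp: 0xff, source: t0.Add(time.Hour)},
	}
	fmt.Println(newestBlockFor(0xd0, blocks).name) // "new": both cover 0xd0, the newer wins
	fmt.Println(newestBlockFor(0x10, blocks).name) // "old": only the old block covers it
}

Grouping the winners back into blockWithSeries then only needs the block-to-index cache shown in the diff, so each chosen block appears exactly once in the result.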
|
46b7c2cfb6d178dd0a79f093c00efc2f5c0b9490
|
2023-03-27 19:25:59
|
Periklis Tsirakidis
|
operator: Prepare community release v0.2.0 (#8651)
| false
|
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index 37622ecf5dcc0..9eab3b3c92eb7 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,5 +1,8 @@
## Main
+## 0.2.0 (2023-03-27)
+
+- [8651](https://github.com/grafana/loki/pull/8651) **periklis**: Prepare Community Loki Operator release v0.2.0
- [8881](https://github.com/grafana/loki/pull/8881) **periklis**: Provide community bundle for openshift community hub
- [8863](https://github.com/grafana/loki/pull/8863) **periklis**: Break the API types out into their own module
- [8878](https://github.com/grafana/loki/pull/8878) **periklis**: Refactor all type validations into own package
diff --git a/operator/Makefile b/operator/Makefile
index d0d29f7e83bd5..ff1594d2a7bff 100644
--- a/operator/Makefile
+++ b/operator/Makefile
@@ -21,7 +21,7 @@ LOKI_OPERATOR_NS ?= kubernetes-operators
# To re-generate a bundle for another specific version without changing the standard setup, you can:
# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2)
# - use environment variables to overwrite this value (e.g export VERSION=0.0.2)
-VERSION ?= v0.1.0
+VERSION ?= v0.2.0
CHANNELS ?= "alpha"
DEFAULT_CHANNEL ?= "alpha"
SUPPORTED_OCP_VERSIONS="v4.10"
@@ -33,14 +33,15 @@ REGISTRY_BASE_OPENSHIFT = quay.io/openshift-logging
REGISTRY_BASE ?= $(REGISTRY_BASE_COMMUNITY)
# TODO(@periklis): Replace this image tag with VERSION once we have GH tags
-MAIN_IMAGE_TAG = main-39f2856
+MAIN_IMAGE_TAG = main-99acb9b
-# Customize for variants: community or openshift
+# Customize for variants: community, community-openshift or openshift
VARIANT ?= community
ifeq ($(VARIANT), openshift)
ifeq ($(REGISTRY_BASE), $(REGISTRY_BASE_COMMUNITY))
REGISTRY_BASE = $(REGISTRY_BASE_OPENSHIFT)
endif
+ VERSION = v0.1.0-placeholder
CHANNELS = stable
DEFAULT_CHANNEL = stable
LOKI_OPERATOR_NS = openshift-operators-redhat
@@ -164,8 +165,9 @@ test-unit-prometheus: $(PROMTOOL) ## Run prometheus unit tests
@$(PROMTOOL) test rules ./internal/manifests/internal/alerts/testdata/test.yaml
.PHONY: scorecard
-scorecard: generate go-generate bundle-all ## Run scorecard tests for all bundles (community, openshift)
+scorecard: generate go-generate bundle-all ## Run scorecard tests for all bundles (community, community-openshift, openshift)
$(OPERATOR_SDK) scorecard -c ./bundle/community/tests/scorecard/config.yaml bundle/community
+ $(OPERATOR_SDK) scorecard -c ./bundle/community-openshift/tests/scorecard/config.yaml bundle/community-openshift
$(OPERATOR_SDK) scorecard -c ./bundle/openshift/tests/scorecard/config.yaml bundle/openshift
.PHONY: lint
diff --git a/operator/bundle/community-openshift/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml b/operator/bundle/community-openshift/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml
index 9d20b3f97159d..285b5a175784f 100644
--- a/operator/bundle/community-openshift/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml
+++ b/operator/bundle/community-openshift/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml
@@ -5,11 +5,11 @@ metadata:
service.beta.openshift.io/serving-cert-secret-name: loki-operator-metrics
creationTimestamp: null
labels:
- app.kubernetes.io/instance: loki-operator-v0.1.0
+ app.kubernetes.io/instance: loki-operator-v0.2.0
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/version: 0.1.0
+ app.kubernetes.io/version: 0.2.0
name: loki-operator-controller-manager-metrics-service
spec:
ports:
diff --git a/operator/bundle/community-openshift/manifests/loki-operator-manager-config_v1_configmap.yaml b/operator/bundle/community-openshift/manifests/loki-operator-manager-config_v1_configmap.yaml
index 882def702b040..483334b4e9a6b 100644
--- a/operator/bundle/community-openshift/manifests/loki-operator-manager-config_v1_configmap.yaml
+++ b/operator/bundle/community-openshift/manifests/loki-operator-manager-config_v1_configmap.yaml
@@ -59,9 +59,9 @@ data:
kind: ConfigMap
metadata:
labels:
- app.kubernetes.io/instance: loki-operator-v0.1.0
+ app.kubernetes.io/instance: loki-operator-v0.2.0
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/version: 0.1.0
+ app.kubernetes.io/version: 0.2.0
name: loki-operator-manager-config
diff --git a/operator/bundle/community-openshift/manifests/loki-operator-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml b/operator/bundle/community-openshift/manifests/loki-operator-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml
index 8d605f131b313..c672b77af660f 100644
--- a/operator/bundle/community-openshift/manifests/loki-operator-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml
+++ b/operator/bundle/community-openshift/manifests/loki-operator-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml
@@ -2,11 +2,11 @@ apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
- app.kubernetes.io/instance: loki-operator-v0.1.0
+ app.kubernetes.io/instance: loki-operator-v0.2.0
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/version: 0.1.0
+ app.kubernetes.io/version: 0.2.0
name: loki-operator
name: loki-operator-metrics-monitor
spec:
diff --git a/operator/bundle/community-openshift/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml b/operator/bundle/community-openshift/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml
index e616349aee5a0..2a1bd651e22f6 100644
--- a/operator/bundle/community-openshift/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml
+++ b/operator/bundle/community-openshift/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml
@@ -3,11 +3,11 @@ kind: ClusterRole
metadata:
creationTimestamp: null
labels:
- app.kubernetes.io/instance: loki-operator-v0.1.0
+ app.kubernetes.io/instance: loki-operator-v0.2.0
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/version: 0.1.0
+ app.kubernetes.io/version: 0.2.0
name: loki-operator-metrics-reader
rules:
- nonResourceURLs:
diff --git a/operator/bundle/community-openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml b/operator/bundle/community-openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml
index 82424b1a36e13..cf3bfee930179 100644
--- a/operator/bundle/community-openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml
+++ b/operator/bundle/community-openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml
@@ -6,11 +6,11 @@ metadata:
include.release.openshift.io/single-node-developer: "true"
creationTimestamp: null
labels:
- app.kubernetes.io/instance: loki-operator-v0.1.0
+ app.kubernetes.io/instance: loki-operator-v0.2.0
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/version: 0.1.0
+ app.kubernetes.io/version: 0.2.0
name: loki-operator-prometheus
rules:
- apiGroups:
diff --git a/operator/bundle/community-openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml b/operator/bundle/community-openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml
index fa95c2ba3f2a5..4a4f1d27f8145 100644
--- a/operator/bundle/community-openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml
+++ b/operator/bundle/community-openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml
@@ -6,11 +6,11 @@ metadata:
include.release.openshift.io/single-node-developer: "true"
creationTimestamp: null
labels:
- app.kubernetes.io/instance: loki-operator-v0.1.0
+ app.kubernetes.io/instance: loki-operator-v0.2.0
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/version: 0.1.0
+ app.kubernetes.io/version: 0.2.0
name: loki-operator-prometheus
roleRef:
apiGroup: rbac.authorization.k8s.io
diff --git a/operator/bundle/community-openshift/manifests/loki-operator-webhook-service_v1_service.yaml b/operator/bundle/community-openshift/manifests/loki-operator-webhook-service_v1_service.yaml
index 1dcaabf33cdc9..a7fdda6b0780f 100644
--- a/operator/bundle/community-openshift/manifests/loki-operator-webhook-service_v1_service.yaml
+++ b/operator/bundle/community-openshift/manifests/loki-operator-webhook-service_v1_service.yaml
@@ -3,11 +3,11 @@ kind: Service
metadata:
creationTimestamp: null
labels:
- app.kubernetes.io/instance: loki-operator-v0.1.0
+ app.kubernetes.io/instance: loki-operator-v0.2.0
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/version: 0.1.0
+ app.kubernetes.io/version: 0.2.0
name: loki-operator-webhook-service
spec:
ports:
diff --git a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml
index 8c3528f178351..e9f84d496966c 100644
--- a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml
@@ -149,8 +149,8 @@ metadata:
capabilities: Full Lifecycle
categories: OpenShift Optional, Logging & Tracing
certified: "false"
- containerImage: docker.io/grafana/loki-operator:main-39f2856
- createdAt: "2023-03-23T18:39:45Z"
+ containerImage: docker.io/grafana/loki-operator:main-99acb9b
+ createdAt: "2023-03-27T11:29:48Z"
description: The Community Loki Operator provides Kubernetes native deployment
and management of Loki and related logging components.
operators.operatorframework.io/builder: operator-sdk-unknown
@@ -160,7 +160,7 @@ metadata:
labels:
operatorframework.io/arch.amd64: supported
operatorframework.io/arch.arm64: supported
- name: loki-operator.v0.1.0
+ name: loki-operator.v0.2.0
namespace: placeholder
spec:
apiservicedefinitions: {}
@@ -1464,11 +1464,11 @@ spec:
serviceAccountName: default
deployments:
- label:
- app.kubernetes.io/instance: loki-operator-v0.1.0
+ app.kubernetes.io/instance: loki-operator-v0.2.0
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/version: 0.1.0
+ app.kubernetes.io/version: 0.2.0
control-plane: controller-manager
name: loki-operator-controller-manager
spec:
@@ -1502,7 +1502,7 @@ spec:
value: quay.io/observatorium/api:latest
- name: RELATED_IMAGE_OPA
value: quay.io/observatorium/opa-openshift:latest
- image: docker.io/grafana/loki-operator:main-39f2856
+ image: docker.io/grafana/loki-operator:main-99acb9b
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
@@ -1624,7 +1624,7 @@ spec:
name: gateway
- image: quay.io/observatorium/opa-openshift:latest
name: opa
- version: 0.1.0
+ version: 0.2.0
webhookdefinitions:
- admissionReviewVersions:
- v1
diff --git a/operator/bundle/community-openshift/manifests/loki.grafana.com_alertingrules.yaml b/operator/bundle/community-openshift/manifests/loki.grafana.com_alertingrules.yaml
index 303e985bfed1a..c27eb1b72c6dd 100644
--- a/operator/bundle/community-openshift/manifests/loki.grafana.com_alertingrules.yaml
+++ b/operator/bundle/community-openshift/manifests/loki.grafana.com_alertingrules.yaml
@@ -5,11 +5,11 @@ metadata:
controller-gen.kubebuilder.io/version: v0.11.3
creationTimestamp: null
labels:
- app.kubernetes.io/instance: loki-operator-v0.1.0
+ app.kubernetes.io/instance: loki-operator-v0.2.0
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/version: 0.1.0
+ app.kubernetes.io/version: 0.2.0
name: alertingrules.loki.grafana.com
spec:
conversion:
diff --git a/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml
index 6b4d61b05818e..63ac02b90e5fc 100644
--- a/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml
+++ b/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml
@@ -5,11 +5,11 @@ metadata:
controller-gen.kubebuilder.io/version: v0.11.3
creationTimestamp: null
labels:
- app.kubernetes.io/instance: loki-operator-v0.1.0
+ app.kubernetes.io/instance: loki-operator-v0.2.0
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/version: 0.1.0
+ app.kubernetes.io/version: 0.2.0
name: lokistacks.loki.grafana.com
spec:
conversion:
diff --git a/operator/bundle/community-openshift/manifests/loki.grafana.com_recordingrules.yaml b/operator/bundle/community-openshift/manifests/loki.grafana.com_recordingrules.yaml
index 615a4aca88c20..4ed6808cfc3c2 100644
--- a/operator/bundle/community-openshift/manifests/loki.grafana.com_recordingrules.yaml
+++ b/operator/bundle/community-openshift/manifests/loki.grafana.com_recordingrules.yaml
@@ -5,11 +5,11 @@ metadata:
controller-gen.kubebuilder.io/version: v0.11.3
creationTimestamp: null
labels:
- app.kubernetes.io/instance: loki-operator-v0.1.0
+ app.kubernetes.io/instance: loki-operator-v0.2.0
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/version: 0.1.0
+ app.kubernetes.io/version: 0.2.0
name: recordingrules.loki.grafana.com
spec:
conversion:
diff --git a/operator/bundle/community-openshift/manifests/loki.grafana.com_rulerconfigs.yaml b/operator/bundle/community-openshift/manifests/loki.grafana.com_rulerconfigs.yaml
index 4d025b43f284d..e53d020bedcee 100644
--- a/operator/bundle/community-openshift/manifests/loki.grafana.com_rulerconfigs.yaml
+++ b/operator/bundle/community-openshift/manifests/loki.grafana.com_rulerconfigs.yaml
@@ -5,11 +5,11 @@ metadata:
controller-gen.kubebuilder.io/version: v0.11.3
creationTimestamp: null
labels:
- app.kubernetes.io/instance: loki-operator-v0.1.0
+ app.kubernetes.io/instance: loki-operator-v0.2.0
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/version: 0.1.0
+ app.kubernetes.io/version: 0.2.0
name: rulerconfigs.loki.grafana.com
spec:
conversion:
diff --git a/operator/bundle/community/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml b/operator/bundle/community/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml
index 513f11f753a08..24aad2fc2be23 100644
--- a/operator/bundle/community/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml
+++ b/operator/bundle/community/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml
@@ -3,11 +3,11 @@ kind: Service
metadata:
creationTimestamp: null
labels:
- app.kubernetes.io/instance: loki-operator-v0.1.0
+ app.kubernetes.io/instance: loki-operator-v0.2.0
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/version: 0.1.0
+ app.kubernetes.io/version: 0.2.0
name: loki-operator-controller-manager-metrics-service
spec:
ports:
diff --git a/operator/bundle/community/manifests/loki-operator-manager-config_v1_configmap.yaml b/operator/bundle/community/manifests/loki-operator-manager-config_v1_configmap.yaml
index 15db38e84baed..c6fd4c6f57a2b 100644
--- a/operator/bundle/community/manifests/loki-operator-manager-config_v1_configmap.yaml
+++ b/operator/bundle/community/manifests/loki-operator-manager-config_v1_configmap.yaml
@@ -24,9 +24,9 @@ data:
kind: ConfigMap
metadata:
labels:
- app.kubernetes.io/instance: loki-operator-v0.1.0
+ app.kubernetes.io/instance: loki-operator-v0.2.0
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/version: 0.1.0
+ app.kubernetes.io/version: 0.2.0
name: loki-operator-manager-config
diff --git a/operator/bundle/community/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml b/operator/bundle/community/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml
index e616349aee5a0..2a1bd651e22f6 100644
--- a/operator/bundle/community/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml
+++ b/operator/bundle/community/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml
@@ -3,11 +3,11 @@ kind: ClusterRole
metadata:
creationTimestamp: null
labels:
- app.kubernetes.io/instance: loki-operator-v0.1.0
+ app.kubernetes.io/instance: loki-operator-v0.2.0
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/version: 0.1.0
+ app.kubernetes.io/version: 0.2.0
name: loki-operator-metrics-reader
rules:
- nonResourceURLs:
diff --git a/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml b/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml
index 82424b1a36e13..cf3bfee930179 100644
--- a/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml
+++ b/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml
@@ -6,11 +6,11 @@ metadata:
include.release.openshift.io/single-node-developer: "true"
creationTimestamp: null
labels:
- app.kubernetes.io/instance: loki-operator-v0.1.0
+ app.kubernetes.io/instance: loki-operator-v0.2.0
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/version: 0.1.0
+ app.kubernetes.io/version: 0.2.0
name: loki-operator-prometheus
rules:
- apiGroups:
diff --git a/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml b/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml
index fa95c2ba3f2a5..4a4f1d27f8145 100644
--- a/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml
+++ b/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml
@@ -6,11 +6,11 @@ metadata:
include.release.openshift.io/single-node-developer: "true"
creationTimestamp: null
labels:
- app.kubernetes.io/instance: loki-operator-v0.1.0
+ app.kubernetes.io/instance: loki-operator-v0.2.0
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/version: 0.1.0
+ app.kubernetes.io/version: 0.2.0
name: loki-operator-prometheus
roleRef:
apiGroup: rbac.authorization.k8s.io
diff --git a/operator/bundle/community/manifests/loki-operator-webhook-service_v1_service.yaml b/operator/bundle/community/manifests/loki-operator-webhook-service_v1_service.yaml
index 1dcaabf33cdc9..a7fdda6b0780f 100644
--- a/operator/bundle/community/manifests/loki-operator-webhook-service_v1_service.yaml
+++ b/operator/bundle/community/manifests/loki-operator-webhook-service_v1_service.yaml
@@ -3,11 +3,11 @@ kind: Service
metadata:
creationTimestamp: null
labels:
- app.kubernetes.io/instance: loki-operator-v0.1.0
+ app.kubernetes.io/instance: loki-operator-v0.2.0
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/version: 0.1.0
+ app.kubernetes.io/version: 0.2.0
name: loki-operator-webhook-service
spec:
ports:
diff --git a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml
index 90e739a7ba0ae..f3bc9f230d30b 100644
--- a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml
@@ -149,8 +149,8 @@ metadata:
capabilities: Full Lifecycle
categories: OpenShift Optional, Logging & Tracing
certified: "false"
- containerImage: docker.io/grafana/loki-operator:main-39f2856
- createdAt: "2023-03-23T18:39:42Z"
+ containerImage: docker.io/grafana/loki-operator:main-99acb9b
+ createdAt: "2023-03-27T11:29:45Z"
description: The Community Loki Operator provides Kubernetes native deployment
and management of Loki and related logging components.
operators.operatorframework.io/builder: operator-sdk-unknown
@@ -160,7 +160,7 @@ metadata:
labels:
operatorframework.io/arch.amd64: supported
operatorframework.io/arch.arm64: supported
- name: loki-operator.v0.1.0
+ name: loki-operator.v0.2.0
namespace: placeholder
spec:
apiservicedefinitions: {}
@@ -1450,11 +1450,11 @@ spec:
serviceAccountName: default
deployments:
- label:
- app.kubernetes.io/instance: loki-operator-v0.1.0
+ app.kubernetes.io/instance: loki-operator-v0.2.0
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/version: 0.1.0
+ app.kubernetes.io/version: 0.2.0
control-plane: controller-manager
name: loki-operator-controller-manager
spec:
@@ -1486,7 +1486,9 @@ spec:
value: docker.io/grafana/loki:2.7.4
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest
- image: docker.io/grafana/loki-operator:main-39f2856
+ - name: RELATED_IMAGE_OPA
+ value: quay.io/observatorium/opa-openshift:latest
+ image: docker.io/grafana/loki-operator:main-99acb9b
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
@@ -1601,7 +1603,9 @@ spec:
name: loki
- image: quay.io/observatorium/api:latest
name: gateway
- version: 0.1.0
+ - image: quay.io/observatorium/opa-openshift:latest
+ name: opa
+ version: 0.2.0
webhookdefinitions:
- admissionReviewVersions:
- v1
diff --git a/operator/bundle/community/manifests/loki.grafana.com_alertingrules.yaml b/operator/bundle/community/manifests/loki.grafana.com_alertingrules.yaml
index c5381356a6bba..e980d74c4d261 100644
--- a/operator/bundle/community/manifests/loki.grafana.com_alertingrules.yaml
+++ b/operator/bundle/community/manifests/loki.grafana.com_alertingrules.yaml
@@ -5,11 +5,11 @@ metadata:
controller-gen.kubebuilder.io/version: v0.11.3
creationTimestamp: null
labels:
- app.kubernetes.io/instance: loki-operator-v0.1.0
+ app.kubernetes.io/instance: loki-operator-v0.2.0
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/version: 0.1.0
+ app.kubernetes.io/version: 0.2.0
name: alertingrules.loki.grafana.com
spec:
conversion:
diff --git a/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml
index aec068b659e09..aa670f77090a9 100644
--- a/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml
+++ b/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml
@@ -5,11 +5,11 @@ metadata:
controller-gen.kubebuilder.io/version: v0.11.3
creationTimestamp: null
labels:
- app.kubernetes.io/instance: loki-operator-v0.1.0
+ app.kubernetes.io/instance: loki-operator-v0.2.0
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/version: 0.1.0
+ app.kubernetes.io/version: 0.2.0
name: lokistacks.loki.grafana.com
spec:
conversion:
diff --git a/operator/bundle/community/manifests/loki.grafana.com_recordingrules.yaml b/operator/bundle/community/manifests/loki.grafana.com_recordingrules.yaml
index e6bd7aa9c5f23..6c670abd12579 100644
--- a/operator/bundle/community/manifests/loki.grafana.com_recordingrules.yaml
+++ b/operator/bundle/community/manifests/loki.grafana.com_recordingrules.yaml
@@ -5,11 +5,11 @@ metadata:
controller-gen.kubebuilder.io/version: v0.11.3
creationTimestamp: null
labels:
- app.kubernetes.io/instance: loki-operator-v0.1.0
+ app.kubernetes.io/instance: loki-operator-v0.2.0
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/version: 0.1.0
+ app.kubernetes.io/version: 0.2.0
name: recordingrules.loki.grafana.com
spec:
conversion:
diff --git a/operator/bundle/community/manifests/loki.grafana.com_rulerconfigs.yaml b/operator/bundle/community/manifests/loki.grafana.com_rulerconfigs.yaml
index 7fb0aafbbd965..5d436b10011e2 100644
--- a/operator/bundle/community/manifests/loki.grafana.com_rulerconfigs.yaml
+++ b/operator/bundle/community/manifests/loki.grafana.com_rulerconfigs.yaml
@@ -5,11 +5,11 @@ metadata:
controller-gen.kubebuilder.io/version: v0.11.3
creationTimestamp: null
labels:
- app.kubernetes.io/instance: loki-operator-v0.1.0
+ app.kubernetes.io/instance: loki-operator-v0.2.0
app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/version: 0.1.0
+ app.kubernetes.io/version: 0.2.0
name: rulerconfigs.loki.grafana.com
spec:
conversion:
diff --git a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
index 70d1ce5cfe2c4..bd7e2b30d50ad 100644
--- a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
@@ -150,7 +150,7 @@ metadata:
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: quay.io/openshift-logging/loki-operator:v0.1.0
- createdAt: "2023-03-23T18:39:47Z"
+ createdAt: "2023-03-27T11:29:50Z"
description: |
The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging.
## Prerequisites and Requirements
@@ -173,7 +173,7 @@ metadata:
operatorframework.io/arch.arm64: supported
operatorframework.io/arch.ppc64le: supported
operatorframework.io/arch.s390x: supported
- name: loki-operator.v0.1.0
+ name: loki-operator.v0.1.0-placeholder
namespace: placeholder
spec:
apiservicedefinitions: {}
@@ -1486,7 +1486,7 @@ spec:
value: quay.io/observatorium/api:latest
- name: RELATED_IMAGE_OPA
value: quay.io/observatorium/opa-openshift:latest
- image: quay.io/openshift-logging/loki-operator:v0.1.0
+ image: quay.io/openshift-logging/loki-operator:v0.1.0-placeholder
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
@@ -1608,7 +1608,7 @@ spec:
name: gateway
- image: quay.io/observatorium/opa-openshift:latest
name: opa
- version: 0.1.0
+ version: 0.1.0-placeholder
webhookdefinitions:
- admissionReviewVersions:
- v1
diff --git a/operator/config/manager/kustomization.yaml b/operator/config/manager/kustomization.yaml
index 86cb69733899b..44ceff8c779d1 100644
--- a/operator/config/manager/kustomization.yaml
+++ b/operator/config/manager/kustomization.yaml
@@ -6,4 +6,4 @@ kind: Kustomization
images:
- name: controller
newName: quay.io/openshift-logging/loki-operator
- newTag: v0.1.0
+ newTag: v0.1.0-placeholder
diff --git a/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml
index 7ab700c601c2b..c28c0414c7897 100644
--- a/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml
+++ b/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml
@@ -6,7 +6,7 @@ metadata:
capabilities: Full Lifecycle
categories: OpenShift Optional, Logging & Tracing
certified: "false"
- containerImage: docker.io/grafana/loki-operator:main-39f2856
+ containerImage: docker.io/grafana/loki-operator:main-99acb9b
createdAt: "2022-12-22T13:28:40+00:00"
description: The Community Loki Operator provides Kubernetes native deployment
and management of Loki and related logging components.
diff --git a/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml
index b98d5a0ac6848..468e74c071dde 100644
--- a/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml
+++ b/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml
@@ -6,7 +6,7 @@ metadata:
capabilities: Full Lifecycle
categories: OpenShift Optional, Logging & Tracing
certified: "false"
- containerImage: docker.io/grafana/loki-operator:main-39f2856
+ containerImage: docker.io/grafana/loki-operator:main-99acb9b
createdAt: "2022-12-22T13:28:40+00:00"
description: The Community Loki Operator provides Kubernetes native deployment
and management of Loki and related logging components.
diff --git a/operator/config/overlays/community-openshift/kustomization.yaml b/operator/config/overlays/community-openshift/kustomization.yaml
index 00e7651a2ae93..13bcdeae8dee6 100644
--- a/operator/config/overlays/community-openshift/kustomization.yaml
+++ b/operator/config/overlays/community-openshift/kustomization.yaml
@@ -11,8 +11,8 @@ labels:
app.kubernetes.io/managed-by: operator-lifecycle-manager
includeSelectors: true
- pairs:
- app.kubernetes.io/instance: loki-operator-v0.1.0
- app.kubernetes.io/version: "0.1.0"
+ app.kubernetes.io/instance: loki-operator-v0.2.0
+ app.kubernetes.io/version: "0.2.0"
configMapGenerator:
- files:
@@ -27,4 +27,4 @@ patchesStrategicMerge:
images:
- name: controller
newName: docker.io/grafana/loki-operator
- newTag: main-39f2856
+ newTag: main-99acb9b
diff --git a/operator/config/overlays/community/kustomization.yaml b/operator/config/overlays/community/kustomization.yaml
index 213f650e0948b..71fe3e417457b 100644
--- a/operator/config/overlays/community/kustomization.yaml
+++ b/operator/config/overlays/community/kustomization.yaml
@@ -22,8 +22,8 @@ labels:
app.kubernetes.io/managed-by: operator-lifecycle-manager
includeSelectors: true
- pairs:
- app.kubernetes.io/instance: loki-operator-v0.1.0
- app.kubernetes.io/version: "0.1.0"
+ app.kubernetes.io/instance: loki-operator-v0.2.0
+ app.kubernetes.io/version: "0.2.0"
generatorOptions:
disableNameSuffixHash: true
@@ -43,7 +43,7 @@ patchesStrategicMerge:
images:
- name: controller
newName: docker.io/grafana/loki-operator
- newTag: main-39f2856
+ newTag: main-99acb9b
# the following config is for teaching kustomize how to do var substitution
vars:
diff --git a/operator/config/overlays/community/manager_related_image_patch.yaml b/operator/config/overlays/community/manager_related_image_patch.yaml
index cbb5b3b919a37..44da69e7bfdfe 100644
--- a/operator/config/overlays/community/manager_related_image_patch.yaml
+++ b/operator/config/overlays/community/manager_related_image_patch.yaml
@@ -12,3 +12,5 @@ spec:
value: docker.io/grafana/loki:2.7.4
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest
+ - name: RELATED_IMAGE_OPA
+ value: quay.io/observatorium/opa-openshift:latest
|
operator
|
Prepare community release v0.2.0 (#8651)
|
738c274a5828aab4d88079c38400ddc705c0cb5d
|
2024-05-06 19:56:16
|
Christian Haudum
|
fix(blooms): Fix `partitionSeriesByDay` function (#12900)
| false
|
diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go
index ab2637b91d8e7..ee0e6f9940fd2 100644
--- a/pkg/bloomgateway/bloomgateway.go
+++ b/pkg/bloomgateway/bloomgateway.go
@@ -274,7 +274,7 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
"series_requested", len(req.Refs),
)
- if len(seriesByDay) != 1 {
+ if len(seriesByDay) > 1 {
stats.Status = labelFailure
return nil, errors.New("request time range must span exactly one day")
}
diff --git a/pkg/bloomgateway/util.go b/pkg/bloomgateway/util.go
index bef6fcc5d4da1..9617202b948c3 100644
--- a/pkg/bloomgateway/util.go
+++ b/pkg/bloomgateway/util.go
@@ -2,7 +2,6 @@ package bloomgateway
import (
"sort"
- "time"
"github.com/prometheus/common/model"
"golang.org/x/exp/slices"
@@ -13,13 +12,8 @@ import (
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
)
-func getDayTime(ts model.Time) time.Time {
- return ts.Time().UTC().Truncate(Day)
-}
-
func truncateDay(ts model.Time) model.Time {
- // model.minimumTick is time.Millisecond
- return ts - (ts % model.Time(24*time.Hour/time.Millisecond))
+ return model.TimeFromUnix(ts.Time().Truncate(Day).Unix())
}
// getFromThrough assumes a list of ShortRefs sorted by From time
@@ -125,7 +119,7 @@ func partitionSeriesByDay(from, through model.Time, seriesWithChunks []*logproto
})
// All chunks fall outside of the range
- if min == len(chunks) || max == 0 {
+ if min == len(chunks) || max == 0 || min == max {
continue
}
@@ -135,7 +129,6 @@ func partitionSeriesByDay(from, through model.Time, seriesWithChunks []*logproto
if chunks[max-1].Through > maxTs {
maxTs = chunks[max-1].Through
}
- // fmt.Println("day", day, "series", series.Fingerprint, "minTs", minTs, "maxTs", maxTs)
res = append(res, &logproto.GroupedChunkRefs{
Fingerprint: series.Fingerprint,
diff --git a/pkg/bloomgateway/util_test.go b/pkg/bloomgateway/util_test.go
index 9475853cffd34..a3f219c326efd 100644
--- a/pkg/bloomgateway/util_test.go
+++ b/pkg/bloomgateway/util_test.go
@@ -166,10 +166,46 @@ func TestPartitionRequest(t *testing.T) {
exp []seriesWithInterval
}{
- "empty": {
+ "no series": {
inp: &logproto.FilterChunkRefRequest{
- From: ts.Add(-24 * time.Hour),
- Through: ts,
+ From: ts.Add(-12 * time.Hour),
+ Through: ts.Add(12 * time.Hour),
+ Refs: []*logproto.GroupedChunkRefs{},
+ },
+ exp: []seriesWithInterval{},
+ },
+
+ "no chunks for series": {
+ inp: &logproto.FilterChunkRefRequest{
+ From: ts.Add(-12 * time.Hour),
+ Through: ts.Add(12 * time.Hour),
+ Refs: []*logproto.GroupedChunkRefs{
+ {
+ Fingerprint: 0x00,
+ Refs: []*logproto.ShortRef{},
+ },
+ {
+ Fingerprint: 0x10,
+ Refs: []*logproto.ShortRef{},
+ },
+ },
+ },
+ exp: []seriesWithInterval{},
+ },
+
+ "chunks before and after requested day": {
+ inp: &logproto.FilterChunkRefRequest{
+ From: ts.Add(-2 * time.Hour),
+ Through: ts.Add(2 * time.Hour),
+ Refs: []*logproto.GroupedChunkRefs{
+ {
+ Fingerprint: 0x00,
+ Refs: []*logproto.ShortRef{
+ {From: ts.Add(-13 * time.Hour), Through: ts.Add(-12 * time.Hour)},
+ {From: ts.Add(13 * time.Hour), Through: ts.Add(14 * time.Hour)},
+ },
+ },
+ },
},
exp: []seriesWithInterval{},
},
|
fix
|
Fix `partitionSeriesByDay` function (#12900)
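The decisive change in partitionSeriesByDay is the added min == max guard: sort.Search can return indices that pass the old checks even though no chunk overlaps the requested day, namely when one chunk ends before the day and the next starts after it. A self-contained sketch of the pattern, with chunkRef and plain int64 bounds as simplified stand-ins for Loki's types:

package main

import (
	"fmt"
	"sort"
)

type chunkRef struct{ from, through int64 }

// overlappingRange finds the half-open index range [min, max) of chunks that
// overlap [dayFrom, dayThrough), assuming chunks are sorted by from time.
// min == max means every chunk lies strictly before or after the day, so the
// series contributes nothing for that day.
func overlappingRange(chunks []chunkRef, dayFrom, dayThrough int64) (int, int, bool) {
	min := sort.Search(len(chunks), func(i int) bool {
		return chunks[i].through > dayFrom
	})
	max := sort.Search(len(chunks), func(i int) bool {
		return chunks[i].from >= dayThrough
	})
	if min == len(chunks) || max == 0 || min == max {
		return 0, 0, false
	}
	return min, max, true
}

func main() {
	// One chunk well before the day, one well after: previously this slipped
	// through, because min < len(chunks) and max > 0 even though min == max.
	chunks := []chunkRef{{from: 0, through: 10}, {from: 100, through: 110}}
	_, _, ok := overlappingRange(chunks, 40, 60)
	fmt.Println(ok) // false
}

This is exactly the scenario the new "chunks before and after requested day" test case exercises.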
|
5d61885f85b77392e7860a2a3f07aaf4bdb44301
|
2024-09-04 18:55:14
|
renovate[bot]
|
chore(deps): update terraform aws to ~> 5.65.0 (#14046)
| false
|
diff --git a/production/terraform/modules/s3/versions.tf b/production/terraform/modules/s3/versions.tf
index 1a8a148737367..ffbae7a8f9edf 100644
--- a/production/terraform/modules/s3/versions.tf
+++ b/production/terraform/modules/s3/versions.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = "~> 5.64.0"
+ version = "~> 5.65.0"
}
random = {
|
chore
|
update terraform aws to ~> 5.65.0 (#14046)
|
8470426605df1982c9aa2131ad4924145370ab18
|
2025-03-08 06:23:59
|
renovate[bot]
|
chore(deps): update dependency eslint to v9.22.0 (main) (#16649)
| false
|
diff --git a/pkg/ui/frontend/package-lock.json b/pkg/ui/frontend/package-lock.json
index e4657a8067b1a..75c66d2c57c55 100644
--- a/pkg/ui/frontend/package-lock.json
+++ b/pkg/ui/frontend/package-lock.json
@@ -869,6 +869,16 @@
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
}
},
+ "node_modules/@eslint/config-helpers": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.1.0.tgz",
+ "integrity": "sha512-kLrdPDJE1ckPo94kmPPf9Hfd0DU0Jw6oKYrhe+pwSC0iTUInmTa+w6fw8sGgcfkFJGNdWOUeOaDM4quW4a7OkA==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ }
+ },
"node_modules/@eslint/core": {
"version": "0.12.0",
"resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.12.0.tgz",
@@ -920,9 +930,9 @@
}
},
"node_modules/@eslint/js": {
- "version": "9.21.0",
- "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.21.0.tgz",
- "integrity": "sha512-BqStZ3HX8Yz6LvsF5ByXYrtigrV5AXADWLAGc7PH/1SxOb7/FIYYMszZZWiUou/GB9P2lXWk2SV4d+Z8h0nknw==",
+ "version": "9.22.0",
+ "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.22.0.tgz",
+ "integrity": "sha512-vLFajx9o8d1/oL2ZkpMYbkLv8nDB6yaIwFNt7nI4+I80U/z03SxmfOMsLbvWr3p7C+Wnoh//aOu2pQW8cS0HCQ==",
"dev": true,
"license": "MIT",
"engines": {
@@ -4062,18 +4072,19 @@
}
},
"node_modules/eslint": {
- "version": "9.21.0",
- "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.21.0.tgz",
- "integrity": "sha512-KjeihdFqTPhOMXTt7StsDxriV4n66ueuF/jfPNC3j/lduHwr/ijDwJMsF+wyMJethgiKi5wniIE243vi07d3pg==",
+ "version": "9.22.0",
+ "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.22.0.tgz",
+ "integrity": "sha512-9V/QURhsRN40xuHXWjV64yvrzMjcz7ZyNoF2jJFmy9j/SLk0u1OLSZgXi28MrXjymnjEGSR80WCdab3RGMDveQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@eslint-community/eslint-utils": "^4.2.0",
"@eslint-community/regexpp": "^4.12.1",
"@eslint/config-array": "^0.19.2",
+ "@eslint/config-helpers": "^0.1.0",
"@eslint/core": "^0.12.0",
"@eslint/eslintrc": "^3.3.0",
- "@eslint/js": "9.21.0",
+ "@eslint/js": "9.22.0",
"@eslint/plugin-kit": "^0.2.7",
"@humanfs/node": "^0.16.6",
"@humanwhocodes/module-importer": "^1.0.1",
@@ -4085,7 +4096,7 @@
"cross-spawn": "^7.0.6",
"debug": "^4.3.2",
"escape-string-regexp": "^4.0.0",
- "eslint-scope": "^8.2.0",
+ "eslint-scope": "^8.3.0",
"eslint-visitor-keys": "^4.2.0",
"espree": "^10.3.0",
"esquery": "^1.5.0",
@@ -4145,9 +4156,9 @@
}
},
"node_modules/eslint-scope": {
- "version": "8.2.0",
- "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.2.0.tgz",
- "integrity": "sha512-PHlWUfG6lvPc3yvP5A4PNyBL1W8fkDUccmI21JUu/+GKZBoH/W5u6usENXUrWFRsyoW5ACUjFGgAFQp5gUlb/A==",
+ "version": "8.3.0",
+ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.3.0.tgz",
+ "integrity": "sha512-pUNxi75F8MJ/GdeKtVLSbYg4ZI34J6C0C7sbL4YOp2exGwen7ZsuBqKzUhXd0qMQ362yET3z+uPwKeg/0C2XCQ==",
"dev": true,
"license": "BSD-2-Clause",
"dependencies": {
|
chore
|
update dependency eslint to v9.22.0 (main) (#16649)
|
ec888ec8a564c7a93937c785c0540e7d2bcde20e
|
2024-04-09 17:20:08
|
Sandeep Sukhani
|
fix: make the tsdb filenames correctly reproducible from the identifier (#12536)
| false
|
diff --git a/pkg/storage/stores/shipper/indexshipper/tsdb/identifier.go b/pkg/storage/stores/shipper/indexshipper/tsdb/identifier.go
index 2b87fb7ffb8c0..149d41bfa9441 100644
--- a/pkg/storage/stores/shipper/indexshipper/tsdb/identifier.go
+++ b/pkg/storage/stores/shipper/indexshipper/tsdb/identifier.go
@@ -64,9 +64,14 @@ func (p prefixedIdentifier) Name() string {
// Identifier has all the information needed to resolve a TSDB index
// Notably this abstracts away OS path separators, etc.
type SingleTenantTSDBIdentifier struct {
- TS time.Time
- From, Through model.Time
- Checksum uint32
+	// exportTSInSecs tells whether the creation timestamp should be exported in unix seconds instead of nanoseconds.
+	// The timestamp in the filename could be in unix seconds or unix nanoseconds, so this flag
+	// lets us reproduce the exact filename from the parsed identifier.
+	// It should ideally be true for older files whose creation timestamp is in seconds.
+ exportTSInSecs bool
+ TS time.Time
+ From, Through model.Time
+ Checksum uint32
}
// implement Hash
@@ -77,9 +82,15 @@ func (i SingleTenantTSDBIdentifier) Hash(h hash.Hash32) (err error) {
// str builds filename with format <file-creation-ts> + `-` + `compactor` + `-` + <oldest-chunk-start-ts> + `-` + <latest-chunk-end-ts> `-` + <index-checksum>
func (i SingleTenantTSDBIdentifier) str() string {
+ ts := int64(0)
+ if i.exportTSInSecs {
+ ts = i.TS.Unix()
+ } else {
+ ts = i.TS.UnixNano()
+ }
return fmt.Sprintf(
"%d-%s-%d-%d-%x.tsdb",
- i.TS.UnixNano(),
+ ts,
compactedFileUploader,
i.From,
i.Through,
@@ -140,10 +151,11 @@ func ParseSingleTenantTSDBPath(p string) (id SingleTenantTSDBIdentifier, ok bool
parsedTS = time.Unix(0, ts)
}
return SingleTenantTSDBIdentifier{
- TS: parsedTS,
- From: model.Time(from),
- Through: model.Time(through),
- Checksum: uint32(checksum),
+ exportTSInSecs: len(elems[0]) <= 10,
+ TS: parsedTS,
+ From: model.Time(from),
+ Through: model.Time(through),
+ Checksum: uint32(checksum),
}, true
}
diff --git a/pkg/storage/stores/shipper/indexshipper/tsdb/identifier_test.go b/pkg/storage/stores/shipper/indexshipper/tsdb/identifier_test.go
index 78452c98e076e..41e202be5e467 100644
--- a/pkg/storage/stores/shipper/indexshipper/tsdb/identifier_test.go
+++ b/pkg/storage/stores/shipper/indexshipper/tsdb/identifier_test.go
@@ -20,10 +20,11 @@ func TestParseSingleTenantTSDBPath(t *testing.T) {
desc: "simple_works",
input: "1-compactor-1-10-ff.tsdb",
id: SingleTenantTSDBIdentifier{
- TS: time.Unix(1, 0),
- From: 1,
- Through: 10,
- Checksum: 255,
+ exportTSInSecs: true,
+ TS: time.Unix(1, 0),
+ From: 1,
+ Through: 10,
+ Checksum: 255,
},
ok: true,
},
@@ -31,10 +32,11 @@ func TestParseSingleTenantTSDBPath(t *testing.T) {
desc: "simple_works_with_nanosecond",
input: "1712534400000000000-compactor-1-10-ff.tsdb",
id: SingleTenantTSDBIdentifier{
- TS: time.Unix(0, 1712534400000000000),
- From: 1,
- Through: 10,
- Checksum: 255,
+ exportTSInSecs: false,
+ TS: time.Unix(0, 1712534400000000000),
+ From: 1,
+ Through: 10,
+ Checksum: 255,
},
ok: true,
},
@@ -42,10 +44,11 @@ func TestParseSingleTenantTSDBPath(t *testing.T) {
desc: "uint32_max_checksum_works",
input: fmt.Sprintf("1-compactor-1-10-%x.tsdb", math.MaxUint32),
id: SingleTenantTSDBIdentifier{
- TS: time.Unix(1, 0),
- From: 1,
- Through: 10,
- Checksum: math.MaxUint32,
+ exportTSInSecs: true,
+ TS: time.Unix(1, 0),
+ From: 1,
+ Through: 10,
+ Checksum: math.MaxUint32,
},
ok: true,
},
@@ -69,6 +72,9 @@ func TestParseSingleTenantTSDBPath(t *testing.T) {
id, ok := ParseSingleTenantTSDBPath(tc.input)
require.Equal(t, tc.ok, ok)
require.Equal(t, tc.id, id)
+ if ok {
+ require.Equal(t, tc.input, id.Name())
+ }
})
}
}
|
fix
|
make the tsdb filenames correctly reproducible from the identifier (#12536)
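The round trip works because the parser now remembers which encoding the filename used: a timestamp field of up to 10 digits is read as unix seconds, anything longer as nanoseconds, and the same flag drives formatting. A sketch of just that heuristic (parsedTS and parseTSField are illustrative names, not the Loki types):

package main

import (
	"fmt"
	"strconv"
	"time"
)

// parsedTS remembers which encoding the filename used, so that formatting
// reproduces the original field byte for byte.
type parsedTS struct {
	ts     time.Time
	inSecs bool
}

// parseTSField mimics the commit's heuristic: up to 10 digits is unix
// seconds, anything longer is unix nanoseconds.
func parseTSField(field string) (parsedTS, error) {
	n, err := strconv.ParseInt(field, 10, 64)
	if err != nil {
		return parsedTS{}, err
	}
	if len(field) <= 10 {
		return parsedTS{ts: time.Unix(n, 0), inSecs: true}, nil
	}
	return parsedTS{ts: time.Unix(0, n), inSecs: false}, nil
}

// format is the inverse of parseTSField.
func (p parsedTS) format() string {
	if p.inSecs {
		return strconv.FormatInt(p.ts.Unix(), 10)
	}
	return strconv.FormatInt(p.ts.UnixNano(), 10)
}

func main() {
	for _, in := range []string{"1", "1712534400000000000"} {
		p, _ := parseTSField(in)
		fmt.Printf("%s -> %s (round-trips: %v)\n", in, p.format(), in == p.format())
	}
}

The added require.Equal(t, tc.input, id.Name()) assertion in the test is the same round-trip check: parsing a filename and re-rendering it must yield the original string.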
|
41c6b6c2c2f5f56ca76cf75ed05689564b9e9dcd
|
2024-10-15 21:45:24
|
Trevor Whitney
|
fix: always write detected_level when enabled, even if unknown (#14464)
| false
|
diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
index 6f69f0e02a84e..892384bef60bf 100644
--- a/pkg/distributor/distributor.go
+++ b/pkg/distributor/distributor.go
@@ -502,7 +502,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
} else {
logLevel = detectLogLevelFromLogEntry(entry, structuredMetadata)
}
- if logLevel != constants.LogLevelUnknown && logLevel != "" {
+ if logLevel != "" {
entry.StructuredMetadata = append(entry.StructuredMetadata, logproto.LabelAdapter{
Name: constants.LevelLabel,
Value: logLevel,
diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go
index 785d6ce03d0c3..ea06eecd45154 100644
--- a/pkg/distributor/distributor_test.go
+++ b/pkg/distributor/distributor_test.go
@@ -1640,7 +1640,7 @@ func Test_DetectLogLevels(t *testing.T) {
require.NoError(t, err)
topVal := ingester.Peek()
require.Equal(t, `{foo="bar"}`, topVal.Streams[0].Labels)
- require.Len(t, topVal.Streams[0].Entries[0].StructuredMetadata, 0)
+ require.Len(t, topVal.Streams[0].Entries[0].StructuredMetadata, 1)
})
t.Run("log level detection enabled and warn logs", func(t *testing.T) {
|
fix
|
always write detected_level when enabled, even if unknown (#14464)
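The one-line fix changes the policy from "attach the level only when it is known" to "attach whatever was detected, skipping only empty results", so consumers can distinguish a run that detected nothing from detection being disabled. A minimal sketch, assuming labelAdapter as a stand-in for logproto.LabelAdapter and "detected_level" as the literal behind constants.LevelLabel:

package main

import "fmt"

// labelAdapter is a simplified stand-in for logproto.LabelAdapter.
type labelAdapter struct{ Name, Value string }

// attachDetectedLevel mirrors the fixed condition: only an empty detection
// result is skipped; "unknown" is attached like any other level.
func attachDetectedLevel(md []labelAdapter, level string) []labelAdapter {
	if level == "" {
		return md
	}
	return append(md, labelAdapter{Name: "detected_level", Value: level})
}

func main() {
	fmt.Println(attachDetectedLevel(nil, "unknown")) // [{detected_level unknown}]
	fmt.Println(attachDetectedLevel(nil, ""))        // []
}

This is why the updated test now expects one structured-metadata entry instead of zero for a log line whose level could not be classified.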
|
2b6bdc2a29de8d94d78d1c735104f124436a92e5
|
2023-09-12 15:30:15
|
Christian Haudum
|
chore(storage): Simplify store interfaces and abstractions (pt 2) (#10454)
| false
|
diff --git a/clients/pkg/promtail/client/client_test.go b/clients/pkg/promtail/client/client_test.go
index 472b43cd0b241..01cbb87cc1116 100644
--- a/clients/pkg/promtail/client/client_test.go
+++ b/clients/pkg/promtail/client/client_test.go
@@ -21,6 +21,7 @@ import (
"github.com/grafana/loki/clients/pkg/promtail/api"
"github.com/grafana/loki/clients/pkg/promtail/utils"
+
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/push"
lokiflag "github.com/grafana/loki/pkg/util/flagext"
diff --git a/cmd/migrate/main.go b/cmd/migrate/main.go
index 2057cf4d06950..e70f0359a3dbd 100644
--- a/cmd/migrate/main.go
+++ b/cmd/migrate/main.go
@@ -310,7 +310,7 @@ func (m *chunkMover) moveChunks(ctx context.Context, threadID int, syncRangeCh <
var totalBytes uint64
var totalChunks uint64
//log.Printf("%d processing sync range %d - Start: %v, End: %v\n", threadID, sr.number, time.Unix(0, sr.from).UTC(), time.Unix(0, sr.to).UTC())
- schemaGroups, fetchers, err := m.source.GetChunkRefs(m.ctx, m.sourceUser, model.TimeFromUnixNano(sr.from), model.TimeFromUnixNano(sr.to), m.matchers...)
+ schemaGroups, fetchers, err := m.source.GetChunks(m.ctx, m.sourceUser, model.TimeFromUnixNano(sr.from), model.TimeFromUnixNano(sr.to), m.matchers...)
if err != nil {
log.Println(threadID, "Error querying index for chunk refs:", err)
errCh <- err
diff --git a/integration/loki_micro_services_delete_test.go b/integration/loki_micro_services_delete_test.go
index 7744ad6fb4540..4643d3054e86b 100644
--- a/integration/loki_micro_services_delete_test.go
+++ b/integration/loki_micro_services_delete_test.go
@@ -13,6 +13,7 @@ import (
"github.com/grafana/loki/integration/client"
"github.com/grafana/loki/integration/cluster"
+
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/push"
diff --git a/pkg/ingester/flush_test.go b/pkg/ingester/flush_test.go
index 904b4d5824966..f7e61238036e1 100644
--- a/pkg/ingester/flush_test.go
+++ b/pkg/ingester/flush_test.go
@@ -339,6 +339,10 @@ func (s *testStore) Put(ctx context.Context, chunks []chunk.Chunk) error {
return nil
}
+func (s *testStore) PutOne(_ context.Context, _, _ model.Time, _ chunk.Chunk) error {
+ return nil
+}
+
func (s *testStore) IsLocal() bool {
return false
}
@@ -351,7 +355,11 @@ func (s *testStore) SelectSamples(_ context.Context, _ logql.SelectSampleParams)
return nil, nil
}
-func (s *testStore) GetChunkRefs(_ context.Context, _ string, _, _ model.Time, _ ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error) {
+func (s *testStore) SelectSeries(_ context.Context, _ logql.SelectLogParams) ([]logproto.SeriesIdentifier, error) {
+ return nil, nil
+}
+
+func (s *testStore) GetChunks(_ context.Context, _ string, _, _ model.Time, _ ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error) {
return nil, nil, nil
}
diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index dd89ece9527d7..1f0e870352c6b 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -40,8 +40,9 @@ import (
"github.com/grafana/loki/pkg/runtime"
"github.com/grafana/loki/pkg/storage"
"github.com/grafana/loki/pkg/storage/chunk"
- "github.com/grafana/loki/pkg/storage/chunk/fetcher"
"github.com/grafana/loki/pkg/storage/config"
+ "github.com/grafana/loki/pkg/storage/stores"
+ indexstore "github.com/grafana/loki/pkg/storage/stores/index"
"github.com/grafana/loki/pkg/storage/stores/index/seriesvolume"
index_stats "github.com/grafana/loki/pkg/storage/stores/index/stats"
"github.com/grafana/loki/pkg/util"
@@ -166,15 +167,13 @@ type Wrapper interface {
Wrap(wrapped Interface) Interface
}
-// ChunkStore is the interface we need to store chunks.
-type ChunkStore interface {
- Put(ctx context.Context, chunks []chunk.Chunk) error
- SelectLogs(ctx context.Context, req logql.SelectLogParams) (iter.EntryIterator, error)
- SelectSamples(ctx context.Context, req logql.SelectSampleParams) (iter.SampleIterator, error)
- GetChunkRefs(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error)
- GetSchemaConfigs() []config.PeriodConfig
- Stats(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) (*index_stats.Stats, error)
- Volume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, aggregateBy string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error)
+// Store is the store interface we need on the ingester.
+type Store interface {
+ stores.ChunkWriter
+ stores.ChunkFetcher
+ storage.SelectStore
+ storage.SchemaConfigProvider
+ indexstore.StatsReader
}
// Interface is an interface for the Ingester
@@ -211,7 +210,7 @@ type Ingester struct {
lifecycler *ring.Lifecycler
lifecyclerWatcher *services.FailureWatcher
- store ChunkStore
+ store Store
periodicConfigs []config.PeriodConfig
loopDone sync.WaitGroup
@@ -248,7 +247,7 @@ type Ingester struct {
}
// New makes a new Ingester.
-func New(cfg Config, clientConfig client.Config, store ChunkStore, limits Limits, configs *runtime.TenantConfigs, registerer prometheus.Registerer, writeFailuresCfg writefailures.Cfg) (*Ingester, error) {
+func New(cfg Config, clientConfig client.Config, store Store, limits Limits, configs *runtime.TenantConfigs, registerer prometheus.Registerer, writeFailuresCfg writefailures.Cfg) (*Ingester, error) {
if cfg.ingesterClientFactory == nil {
cfg.ingesterClientFactory = client.New
}
@@ -1006,7 +1005,7 @@ func (i *Ingester) GetChunkIDs(ctx context.Context, req *logproto.GetChunkIDsReq
}
// get chunk references
- chunksGroups, _, err := i.store.GetChunkRefs(ctx, orgID, start, end, matchers...)
+ chunksGroups, _, err := i.store.GetChunks(ctx, orgID, start, end, matchers...)
if err != nil {
return nil, err
}
diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go
index b453e5a9ea0ab..5f2f788e6feb3 100644
--- a/pkg/ingester/ingester_test.go
+++ b/pkg/ingester/ingester_test.go
@@ -433,7 +433,7 @@ func (s *mockStore) SelectSamples(_ context.Context, _ logql.SelectSampleParams)
return nil, nil
}
-func (s *mockStore) GetSeries(_ context.Context, _ logql.SelectLogParams) ([]logproto.SeriesIdentifier, error) {
+func (s *mockStore) SelectSeries(_ context.Context, _ logql.SelectLogParams) ([]logproto.SeriesIdentifier, error) {
return nil, nil
}
@@ -449,7 +449,7 @@ func (s *mockStore) PutOne(_ context.Context, _, _ model.Time, _ chunk.Chunk) er
return nil
}
-func (s *mockStore) GetChunkRefs(_ context.Context, _ string, _, _ model.Time, _ ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error) {
+func (s *mockStore) GetChunks(_ context.Context, _ string, _, _ model.Time, _ ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error) {
return nil, nil, nil
}
diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go
index 0688b140261ee..df2664db5d484 100644
--- a/pkg/querier/querier.go
+++ b/pkg/querier/querier.go
@@ -8,6 +8,7 @@ import (
"github.com/opentracing/opentracing-go"
+ "github.com/grafana/loki/pkg/storage/stores/index"
"github.com/grafana/loki/pkg/storage/stores/index/seriesvolume"
"github.com/go-kit/log/level"
@@ -100,10 +101,17 @@ type Limits interface {
MaxEntriesLimitPerQuery(context.Context, string) int
}
+// Store is the store interface we need on the querier.
+type Store interface {
+ storage.SelectStore
+ index.BaseReader
+ index.StatsReader
+}
+
// SingleTenantQuerier handles single tenant queries.
type SingleTenantQuerier struct {
cfg Config
- store storage.Store
+ store Store
limits Limits
ingesterQuerier *IngesterQuerier
deleteGetter deleteGetter
@@ -115,7 +123,7 @@ type deleteGetter interface {
}
// New makes a new Querier.
-func New(cfg Config, store storage.Store, ingesterQuerier *IngesterQuerier, limits Limits, d deleteGetter, r prometheus.Registerer) (*SingleTenantQuerier, error) {
+func New(cfg Config, store Store, ingesterQuerier *IngesterQuerier, limits Limits, d deleteGetter, r prometheus.Registerer) (*SingleTenantQuerier, error) {
return &SingleTenantQuerier{
cfg: cfg,
store: store,
diff --git a/pkg/querier/querier_mock_test.go b/pkg/querier/querier_mock_test.go
index c623e3391d0ce..0255ac90e7737 100644
--- a/pkg/querier/querier_mock_test.go
+++ b/pkg/querier/querier_mock_test.go
@@ -319,7 +319,7 @@ func (s *storeMock) SelectSamples(ctx context.Context, req logql.SelectSamplePar
return res.(iter.SampleIterator), args.Error(1)
}
-func (s *storeMock) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error) {
+func (s *storeMock) GetChunks(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error) {
args := s.Called(ctx, userID, from, through, matchers)
return args.Get(0).([][]chunk.Chunk), args.Get(0).([]*fetcher.Fetcher), args.Error(2)
}
diff --git a/pkg/storage/async_store.go b/pkg/storage/async_store.go
index 6f02c41cae44b..f41cc1b4e729a 100644
--- a/pkg/storage/async_store.go
+++ b/pkg/storage/async_store.go
@@ -63,7 +63,7 @@ func (a *AsyncStore) shouldQueryIngesters(through, now model.Time) bool {
return a.queryIngestersWithin == 0 || through.After(now.Add(-a.queryIngestersWithin))
}
-func (a *AsyncStore) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error) {
+func (a *AsyncStore) GetChunks(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error) {
spanLogger := spanlogger.FromContext(ctx)
errs := make(chan error)
@@ -72,7 +72,7 @@ func (a *AsyncStore) GetChunkRefs(ctx context.Context, userID string, from, thro
var fetchers []*fetcher.Fetcher
go func() {
var err error
- storeChunks, fetchers, err = a.Store.GetChunkRefs(ctx, userID, from, through, matchers...)
+ storeChunks, fetchers, err = a.Store.GetChunks(ctx, userID, from, through, matchers...)
errs <- err
}()
diff --git a/pkg/storage/async_store_test.go b/pkg/storage/async_store_test.go
index 18e14164be432..83aab239ea10d 100644
--- a/pkg/storage/async_store_test.go
+++ b/pkg/storage/async_store_test.go
@@ -29,7 +29,7 @@ func newStoreMock() *storeMock {
return &storeMock{}
}
-func (s *storeMock) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error) {
+func (s *storeMock) GetChunks(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error) {
args := s.Called(ctx, userID, from, through, matchers)
return args.Get(0).([][]chunk.Chunk), args.Get(1).([]*fetcher.Fetcher), args.Error(2)
}
@@ -233,7 +233,7 @@ func TestAsyncStore_mergeIngesterAndStoreChunks(t *testing.T) {
} {
t.Run(tc.name, func(t *testing.T) {
store := newStoreMock()
- store.On("GetChunkRefs", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(tc.storeChunks, tc.storeFetcher, nil)
+ store.On("GetChunks", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(tc.storeChunks, tc.storeFetcher, nil)
store.On("GetChunkFetcher", mock.Anything).Return(tc.ingesterFetcher)
ingesterQuerier := newIngesterQuerierMock()
@@ -242,7 +242,7 @@ func TestAsyncStore_mergeIngesterAndStoreChunks(t *testing.T) {
asyncStoreCfg := AsyncStoreCfg{IngesterQuerier: ingesterQuerier}
asyncStore := NewAsyncStore(asyncStoreCfg, store, config.SchemaConfig{})
- chunks, fetchers, err := asyncStore.GetChunkRefs(context.Background(), "fake", model.Now(), model.Now(), nil)
+ chunks, fetchers, err := asyncStore.GetChunks(context.Background(), "fake", model.Now(), model.Now(), nil)
require.NoError(t, err)
require.Equal(t, tc.expectedChunks, chunks)
@@ -293,7 +293,7 @@ func TestAsyncStore_QueryIngestersWithin(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
store := newStoreMock()
- store.On("GetChunkRefs", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([][]chunk.Chunk{}, []*fetcher.Fetcher{}, nil)
+ store.On("GetChunks", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([][]chunk.Chunk{}, []*fetcher.Fetcher{}, nil)
ingesterQuerier := newIngesterQuerierMock()
ingesterQuerier.On("GetChunkIDs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]string{}, nil)
@@ -304,7 +304,7 @@ func TestAsyncStore_QueryIngestersWithin(t *testing.T) {
}
asyncStore := NewAsyncStore(asyncStoreCfg, store, config.SchemaConfig{})
- _, _, err := asyncStore.GetChunkRefs(context.Background(), "fake", tc.queryFrom, tc.queryThrough, nil)
+ _, _, err := asyncStore.GetChunks(context.Background(), "fake", tc.queryFrom, tc.queryThrough, nil)
require.NoError(t, err)
expectedNumCalls := 0
diff --git a/pkg/storage/store.go b/pkg/storage/store.go
index d07c3ffe4ef0d..5cca9deb62048 100644
--- a/pkg/storage/store.go
+++ b/pkg/storage/store.go
@@ -52,10 +52,14 @@ type SelectStore interface {
SelectSeries(ctx context.Context, req logql.SelectLogParams) ([]logproto.SeriesIdentifier, error)
}
+type SchemaConfigProvider interface {
+ GetSchemaConfigs() []config.PeriodConfig
+}
+
type Store interface {
stores.Store
SelectStore
- GetSchemaConfigs() []config.PeriodConfig
+ SchemaConfigProvider
}
type LokiStore struct {
@@ -306,11 +310,11 @@ func (s *LokiStore) storeForPeriod(p config.PeriodConfig, tableRange config.Tabl
}
indexReaderWriter := series.NewIndexReaderWriter(s.schemaCfg, schema, idx, f, s.cfg.MaxChunkBatchSize, s.writeDedupeCache)
- indexReaderWriter = index.NewMonitoredReaderWriter(indexReaderWriter, indexClientReg)
- chunkWriter := stores.NewChunkWriter(f, s.schemaCfg, indexReaderWriter, s.storeCfg.DisableIndexDeduplication)
+ monitoredReaderWriter := index.NewMonitoredReaderWriter(indexReaderWriter, indexClientReg)
+ chunkWriter := stores.NewChunkWriter(f, s.schemaCfg, monitoredReaderWriter, s.storeCfg.DisableIndexDeduplication)
return chunkWriter,
- indexReaderWriter,
+ monitoredReaderWriter,
func() {
chunkClient.Stop()
f.Stop()
@@ -382,7 +386,7 @@ func (s *LokiStore) lazyChunks(ctx context.Context, matchers []*labels.Matcher,
stats := stats.FromContext(ctx)
start := time.Now()
- chks, fetchers, err := s.GetChunkRefs(ctx, userID, from, through, matchers...)
+ chks, fetchers, err := s.GetChunks(ctx, userID, from, through, matchers...)
stats.AddChunkRefsFetchTime(time.Since(start))
if err != nil {
diff --git a/pkg/storage/store_test.go b/pkg/storage/store_test.go
index 13efd8ffa538f..eb9ea6306cc8f 100644
--- a/pkg/storage/store_test.go
+++ b/pkg/storage/store_test.go
@@ -1080,7 +1080,7 @@ func TestStore_indexPrefixChange(t *testing.T) {
}
// get all the chunks from the first period
- chunks, _, err := store.GetChunkRefs(ctx, "fake", timeToModelTime(firstPeriodDate), timeToModelTime(secondPeriodDate), newMatchers(fooLabelsWithName.String())...)
+ chunks, _, err := store.GetChunks(ctx, "fake", timeToModelTime(firstPeriodDate), timeToModelTime(secondPeriodDate), newMatchers(fooLabelsWithName.String())...)
require.NoError(t, err)
var totalChunks int
for _, chks := range chunks {
@@ -1148,7 +1148,7 @@ func TestStore_indexPrefixChange(t *testing.T) {
}
// get all the chunks from both the stores
- chunks, _, err = store.GetChunkRefs(ctx, "fake", timeToModelTime(firstPeriodDate), timeToModelTime(secondPeriodDate.Add(24*time.Hour)), newMatchers(fooLabelsWithName.String())...)
+ chunks, _, err = store.GetChunks(ctx, "fake", timeToModelTime(firstPeriodDate), timeToModelTime(secondPeriodDate.Add(24*time.Hour)), newMatchers(fooLabelsWithName.String())...)
require.NoError(t, err)
totalChunks = 0
@@ -1281,7 +1281,7 @@ func TestStore_MultiPeriod(t *testing.T) {
defer store.Stop()
// get all the chunks from both the stores
- chunks, _, err := store.GetChunkRefs(ctx, "fake", timeToModelTime(firstStoreDate), timeToModelTime(secondStoreDate.Add(24*time.Hour)), newMatchers(fooLabelsWithName.String())...)
+ chunks, _, err := store.GetChunks(ctx, "fake", timeToModelTime(firstStoreDate), timeToModelTime(secondStoreDate.Add(24*time.Hour)), newMatchers(fooLabelsWithName.String())...)
require.NoError(t, err)
var totalChunks int
for _, chks := range chunks {
@@ -1627,7 +1627,7 @@ func TestStore_BoltdbTsdbSameIndexPrefix(t *testing.T) {
defer store.Stop()
// get all the chunks from both the stores
- chunks, _, err := store.GetChunkRefs(ctx, "fake", timeToModelTime(boltdbShipperStartDate), timeToModelTime(tsdbStartDate.Add(24*time.Hour)), newMatchers(fooLabelsWithName.String())...)
+ chunks, _, err := store.GetChunks(ctx, "fake", timeToModelTime(boltdbShipperStartDate), timeToModelTime(tsdbStartDate.Add(24*time.Hour)), newMatchers(fooLabelsWithName.String())...)
require.NoError(t, err)
var totalChunks int
for _, chks := range chunks {
diff --git a/pkg/storage/stores/composite_store.go b/pkg/storage/stores/composite_store.go
index 81fb4eb58a4aa..f5bf6328de561 100644
--- a/pkg/storage/stores/composite_store.go
+++ b/pkg/storage/stores/composite_store.go
@@ -22,16 +22,21 @@ type ChunkWriter interface {
PutOne(ctx context.Context, from, through model.Time, chunk chunk.Chunk) error
}
-type ChunkFetcher interface {
+type ChunkFetcherProvider interface {
GetChunkFetcher(tm model.Time) *fetcher.Fetcher
- GetChunkRefs(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error)
+}
+
+type ChunkFetcher interface {
+ GetChunks(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error)
}
type Store interface {
index.BaseReader
+ index.StatsReader
index.Filterable
ChunkWriter
ChunkFetcher
+ ChunkFetcherProvider
Stop()
}
@@ -149,11 +154,11 @@ func (c CompositeStore) LabelNamesForMetricName(ctx context.Context, userID stri
return result.Strings(), err
}
-func (c CompositeStore) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error) {
+func (c CompositeStore) GetChunks(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error) {
chunkIDs := [][]chunk.Chunk{}
fetchers := []*fetcher.Fetcher{}
err := c.forStores(ctx, from, through, func(innerCtx context.Context, from, through model.Time, store Store) error {
- ids, fetcher, err := store.GetChunkRefs(innerCtx, userID, from, through, matchers...)
+ ids, fetcher, err := store.GetChunks(innerCtx, userID, from, through, matchers...)
if err != nil {
return err
}
diff --git a/pkg/storage/stores/composite_store_entry.go b/pkg/storage/stores/composite_store_entry.go
index 60a9eef38d01f..1bdc5e9013c5e 100644
--- a/pkg/storage/stores/composite_store_entry.go
+++ b/pkg/storage/stores/composite_store_entry.go
@@ -42,11 +42,11 @@ type storeEntry struct {
ChunkWriter
}
-func (c *storeEntry) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, allMatchers ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error) {
+func (c *storeEntry) GetChunks(ctx context.Context, userID string, from, through model.Time, allMatchers ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error) {
if ctx.Err() != nil {
return nil, nil, ctx.Err()
}
- sp, ctx := opentracing.StartSpanFromContext(ctx, "GetChunkRefs")
+ sp, ctx := opentracing.StartSpanFromContext(ctx, "GetChunks")
defer sp.Finish()
log := spanlogger.FromContext(ctx)
defer log.Span.Finish()
diff --git a/pkg/storage/stores/composite_store_test.go b/pkg/storage/stores/composite_store_test.go
index 3bb1c09e46b6f..903a3e54de6b4 100644
--- a/pkg/storage/stores/composite_store_test.go
+++ b/pkg/storage/stores/composite_store_test.go
@@ -36,7 +36,7 @@ func (m mockStore) LabelValuesForMetricName(_ context.Context, _ string, _, _ mo
func (m mockStore) SetChunkFilterer(_ chunk.RequestChunkFilterer) {}
-func (m mockStore) GetChunkRefs(_ context.Context, _ string, _, _ model.Time, _ ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error) {
+func (m mockStore) GetChunks(_ context.Context, _ string, _, _ model.Time, _ ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error) {
return nil, nil, nil
}
diff --git a/pkg/storage/stores/index/index.go b/pkg/storage/stores/index/index.go
index 1bdce80b3cc9d..6883a841f0e84 100644
--- a/pkg/storage/stores/index/index.go
+++ b/pkg/storage/stores/index/index.go
@@ -23,12 +23,16 @@ type BaseReader interface {
GetSeries(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]labels.Labels, error)
LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, labelName string, matchers ...*labels.Matcher) ([]string, error)
LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error)
+}
+
+type StatsReader interface {
Stats(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) (*stats.Stats, error)
Volume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, aggregateBy string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error)
}
type Reader interface {
BaseReader
+ StatsReader
GetChunkRefs(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]logproto.ChunkRef, error)
Filterable
}
@@ -42,19 +46,19 @@ type ReaderWriter interface {
Writer
}
-type monitoredReaderWriter struct {
+type MonitoredReaderWriter struct {
rw ReaderWriter
metrics *metrics
}
-func NewMonitoredReaderWriter(rw ReaderWriter, reg prometheus.Registerer) ReaderWriter {
- return &monitoredReaderWriter{
+func NewMonitoredReaderWriter(rw ReaderWriter, reg prometheus.Registerer) *MonitoredReaderWriter {
+ return &MonitoredReaderWriter{
rw: rw,
metrics: newMetrics(reg),
}
}
-func (m monitoredReaderWriter) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]logproto.ChunkRef, error) {
+func (m MonitoredReaderWriter) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]logproto.ChunkRef, error) {
var chunks []logproto.ChunkRef
if err := loki_instrument.TimeRequest(ctx, "chunk_refs", instrument.NewHistogramCollector(m.metrics.indexQueryLatency), instrument.ErrorCode, func(ctx context.Context) error {
@@ -68,7 +72,7 @@ func (m monitoredReaderWriter) GetChunkRefs(ctx context.Context, userID string,
return chunks, nil
}
-func (m monitoredReaderWriter) GetSeries(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]labels.Labels, error) {
+func (m MonitoredReaderWriter) GetSeries(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]labels.Labels, error) {
var lbls []labels.Labels
if err := loki_instrument.TimeRequest(ctx, "series", instrument.NewHistogramCollector(m.metrics.indexQueryLatency), instrument.ErrorCode, func(ctx context.Context) error {
var err error
@@ -81,7 +85,7 @@ func (m monitoredReaderWriter) GetSeries(ctx context.Context, userID string, fro
return lbls, nil
}
-func (m monitoredReaderWriter) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, labelName string, matchers ...*labels.Matcher) ([]string, error) {
+func (m MonitoredReaderWriter) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, labelName string, matchers ...*labels.Matcher) ([]string, error) {
var values []string
if err := loki_instrument.TimeRequest(ctx, "label_values", instrument.NewHistogramCollector(m.metrics.indexQueryLatency), instrument.ErrorCode, func(ctx context.Context) error {
var err error
@@ -94,7 +98,7 @@ func (m monitoredReaderWriter) LabelValuesForMetricName(ctx context.Context, use
return values, nil
}
-func (m monitoredReaderWriter) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error) {
+func (m MonitoredReaderWriter) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error) {
var values []string
if err := loki_instrument.TimeRequest(ctx, "label_names", instrument.NewHistogramCollector(m.metrics.indexQueryLatency), instrument.ErrorCode, func(ctx context.Context) error {
var err error
@@ -107,7 +111,7 @@ func (m monitoredReaderWriter) LabelNamesForMetricName(ctx context.Context, user
return values, nil
}
-func (m monitoredReaderWriter) Stats(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) (*stats.Stats, error) {
+func (m MonitoredReaderWriter) Stats(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) (*stats.Stats, error) {
var sts *stats.Stats
if err := loki_instrument.TimeRequest(ctx, "stats", instrument.NewHistogramCollector(m.metrics.indexQueryLatency), instrument.ErrorCode, func(ctx context.Context) error {
var err error
@@ -120,7 +124,7 @@ func (m monitoredReaderWriter) Stats(ctx context.Context, userID string, from, t
return sts, nil
}
-func (m monitoredReaderWriter) Volume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, aggregateBy string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
+func (m MonitoredReaderWriter) Volume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, aggregateBy string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
var vol *logproto.VolumeResponse
if err := loki_instrument.TimeRequest(ctx, "volume", instrument.NewHistogramCollector(m.metrics.indexQueryLatency), instrument.ErrorCode, func(ctx context.Context) error {
var err error
@@ -133,11 +137,11 @@ func (m monitoredReaderWriter) Volume(ctx context.Context, userID string, from,
return vol, nil
}
-func (m monitoredReaderWriter) SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer) {
+func (m MonitoredReaderWriter) SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer) {
m.rw.SetChunkFilterer(chunkFilter)
}
-func (m monitoredReaderWriter) IndexChunk(ctx context.Context, from, through model.Time, chk chunk.Chunk) error {
+func (m MonitoredReaderWriter) IndexChunk(ctx context.Context, from, through model.Time, chk chunk.Chunk) error {
return loki_instrument.TimeRequest(ctx, "index_chunk", instrument.NewHistogramCollector(m.metrics.indexQueryLatency), instrument.ErrorCode, func(ctx context.Context) error {
return m.rw.IndexChunk(ctx, from, through, chk)
})
diff --git a/pkg/storage/stores/series/series_index_gateway_store.go b/pkg/storage/stores/series/series_index_gateway_store.go
index 0ab67464f624a..55639cdf67e39 100644
--- a/pkg/storage/stores/series/series_index_gateway_store.go
+++ b/pkg/storage/stores/series/series_index_gateway_store.go
@@ -12,7 +12,6 @@ import (
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/storage/chunk"
- "github.com/grafana/loki/pkg/storage/stores/index"
"github.com/grafana/loki/pkg/storage/stores/index/stats"
)
@@ -22,7 +21,7 @@ type IndexGatewayClientStore struct {
logger log.Logger
}
-func NewIndexGatewayClientStore(client logproto.IndexGatewayClient, logger log.Logger) index.ReaderWriter {
+func NewIndexGatewayClientStore(client logproto.IndexGatewayClient, logger log.Logger) *IndexGatewayClientStore {
return &IndexGatewayClientStore{
client: client,
logger: logger,
diff --git a/pkg/storage/stores/series/series_index_store.go b/pkg/storage/stores/series/series_index_store.go
index 1b445572bd99c..9498f3a16be42 100644
--- a/pkg/storage/stores/series/series_index_store.go
+++ b/pkg/storage/stores/series/series_index_store.go
@@ -24,7 +24,6 @@ import (
"github.com/grafana/loki/pkg/storage/config"
storageerrors "github.com/grafana/loki/pkg/storage/errors"
"github.com/grafana/loki/pkg/storage/stores"
- "github.com/grafana/loki/pkg/storage/stores/index"
"github.com/grafana/loki/pkg/storage/stores/index/stats"
series_index "github.com/grafana/loki/pkg/storage/stores/series/index"
"github.com/grafana/loki/pkg/util"
@@ -63,7 +62,8 @@ var (
})
)
-type indexReaderWriter struct {
+// IndexReaderWriter implements pkg/storage/stores/index.ReaderWriter
+type IndexReaderWriter struct {
schema series_index.SeriesStoreSchema
index series_index.Client
schemaCfg config.SchemaConfig
@@ -74,8 +74,8 @@ type indexReaderWriter struct {
}
func NewIndexReaderWriter(schemaCfg config.SchemaConfig, schema series_index.SeriesStoreSchema, index series_index.Client,
- fetcher *fetcher.Fetcher, chunkBatchSize int, writeDedupeCache cache.Cache) index.ReaderWriter {
- return &indexReaderWriter{
+ fetcher *fetcher.Fetcher, chunkBatchSize int, writeDedupeCache cache.Cache) *IndexReaderWriter {
+ return &IndexReaderWriter{
schema: schema,
index: index,
schemaCfg: schemaCfg,
@@ -85,7 +85,7 @@ func NewIndexReaderWriter(schemaCfg config.SchemaConfig, schema series_index.Ser
}
}
-func (c *indexReaderWriter) IndexChunk(ctx context.Context, from, through model.Time, chk chunk.Chunk) error {
+func (c *IndexReaderWriter) IndexChunk(ctx context.Context, from, through model.Time, chk chunk.Chunk) error {
writeReqs, keysToCache, err := c.calculateIndexEntries(ctx, from, through, chk)
if err != nil {
return err
@@ -104,7 +104,7 @@ func (c *indexReaderWriter) IndexChunk(ctx context.Context, from, through model.
}
// calculateIndexEntries creates a set of batched WriteRequests for all the chunks it is given.
-func (c *indexReaderWriter) calculateIndexEntries(ctx context.Context, from, through model.Time, chunk chunk.Chunk) (series_index.WriteBatch, []string, error) {
+func (c *IndexReaderWriter) calculateIndexEntries(ctx context.Context, from, through model.Time, chunk chunk.Chunk) (series_index.WriteBatch, []string, error) {
seenIndexEntries := map[string]struct{}{}
entries := []series_index.Entry{}
@@ -149,7 +149,7 @@ func (c *indexReaderWriter) calculateIndexEntries(ctx context.Context, from, thr
return result, missing, nil
}
-func (c *indexReaderWriter) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, allMatchers ...*labels.Matcher) ([]logproto.ChunkRef, error) {
+func (c *IndexReaderWriter) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, allMatchers ...*labels.Matcher) ([]logproto.ChunkRef, error) {
log := util_log.WithContext(ctx, util_log.Logger)
// Check there is a metric name matcher of type equal,
metricNameMatcher, matchers, ok := extract.MetricNameMatcherFromMatchers(allMatchers)
@@ -192,7 +192,7 @@ func (c *indexReaderWriter) GetChunkRefs(ctx context.Context, userID string, fro
return chunks, nil
}
-func (c *indexReaderWriter) SetChunkFilterer(f chunk.RequestChunkFilterer) {
+func (c *IndexReaderWriter) SetChunkFilterer(f chunk.RequestChunkFilterer) {
c.chunkFilterer = f
}
@@ -209,7 +209,7 @@ func (c chunkGroup) Less(i, j int) bool {
return c.schema.ExternalKey(c.chunks[i].ChunkRef) < c.schema.ExternalKey(c.chunks[j].ChunkRef)
}
-func (c *indexReaderWriter) GetSeries(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]labels.Labels, error) {
+func (c *IndexReaderWriter) GetSeries(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]labels.Labels, error) {
chks, err := c.GetChunkRefs(ctx, userID, from, through, matchers...)
if err != nil {
return nil, err
@@ -218,7 +218,7 @@ func (c *indexReaderWriter) GetSeries(ctx context.Context, userID string, from,
return c.chunksToSeries(ctx, chks, matchers)
}
-func (c *indexReaderWriter) chunksToSeries(ctx context.Context, in []logproto.ChunkRef, matchers []*labels.Matcher) ([]labels.Labels, error) {
+func (c *IndexReaderWriter) chunksToSeries(ctx context.Context, in []logproto.ChunkRef, matchers []*labels.Matcher) ([]labels.Labels, error) {
// download one per series and merge
// group chunks by series
chunksBySeries := filterChunkRefsByUniqueFingerprint(in)
@@ -313,7 +313,7 @@ func (c *indexReaderWriter) chunksToSeries(ctx context.Context, in []logproto.Ch
}
// LabelNamesForMetricName retrieves all label names for a metric name.
-func (c *indexReaderWriter) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error) {
+func (c *IndexReaderWriter) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error) {
sp, ctx := opentracing.StartSpanFromContext(ctx, "SeriesStore.LabelNamesForMetricName")
defer sp.Finish()
log := spanlogger.FromContext(ctx)
@@ -341,7 +341,7 @@ func (c *indexReaderWriter) LabelNamesForMetricName(ctx context.Context, userID
return labelNames, nil
}
-func (c *indexReaderWriter) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, labelName string, matchers ...*labels.Matcher) ([]string, error) {
+func (c *IndexReaderWriter) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, labelName string, matchers ...*labels.Matcher) ([]string, error) {
sp, ctx := opentracing.StartSpanFromContext(ctx, "SeriesStore.LabelValuesForMetricName")
defer sp.Finish()
log := spanlogger.FromContext(ctx)
@@ -377,7 +377,7 @@ func (c *indexReaderWriter) LabelValuesForMetricName(ctx context.Context, userID
}
// LabelValuesForMetricName retrieves all label values for a single label name and metric name.
-func (c *indexReaderWriter) labelValuesForMetricNameWithMatchers(ctx context.Context, userID string, from, through model.Time, metricName, labelName string, matchers ...*labels.Matcher) ([]string, error) {
+func (c *IndexReaderWriter) labelValuesForMetricNameWithMatchers(ctx context.Context, userID string, from, through model.Time, metricName, labelName string, matchers ...*labels.Matcher) ([]string, error) {
// Otherwise get series which include other matchers
seriesIDs, err := c.lookupSeriesByMetricNameMatchers(ctx, from, through, userID, metricName, matchers)
if err != nil {
@@ -419,7 +419,7 @@ func (c *indexReaderWriter) labelValuesForMetricNameWithMatchers(ctx context.Con
return result.Strings(), nil
}
-func (c *indexReaderWriter) lookupSeriesByMetricNameMatchers(ctx context.Context, from, through model.Time, userID, metricName string, matchers []*labels.Matcher) ([]string, error) {
+func (c *IndexReaderWriter) lookupSeriesByMetricNameMatchers(ctx context.Context, from, through model.Time, userID, metricName string, matchers []*labels.Matcher) ([]string, error) {
// Check if one of the labels is a shard annotation, pass that information to lookupSeriesByMetricNameMatcher,
// and remove the label.
shard, shardLabelIndex, err := astmapper.ShardFromMatchers(matchers)
@@ -502,13 +502,13 @@ func (c *indexReaderWriter) lookupSeriesByMetricNameMatchers(ctx context.Context
return ids, nil
}
-func (c *indexReaderWriter) lookupSeriesByMetricNameMatcher(ctx context.Context, from, through model.Time, userID, metricName string, matcher *labels.Matcher, shard *astmapper.ShardAnnotation) ([]string, error) {
+func (c *IndexReaderWriter) lookupSeriesByMetricNameMatcher(ctx context.Context, from, through model.Time, userID, metricName string, matcher *labels.Matcher, shard *astmapper.ShardAnnotation) ([]string, error) {
return c.lookupIdsByMetricNameMatcher(ctx, from, through, userID, metricName, matcher, func(queries []series_index.Query) []series_index.Query {
return c.schema.FilterReadQueries(queries, shard)
})
}
-func (c *indexReaderWriter) lookupIdsByMetricNameMatcher(ctx context.Context, from, through model.Time, userID, metricName string, matcher *labels.Matcher, filter func([]series_index.Query) []series_index.Query) ([]string, error) {
+func (c *IndexReaderWriter) lookupIdsByMetricNameMatcher(ctx context.Context, from, through model.Time, userID, metricName string, matcher *labels.Matcher, filter func([]series_index.Query) []series_index.Query) ([]string, error) {
var err error
var queries []series_index.Query
var labelName string
@@ -600,7 +600,7 @@ var entriesPool = sync.Pool{
},
}
-func (c *indexReaderWriter) lookupEntriesByQueries(ctx context.Context, queries []series_index.Query, entries *[]series_index.Entry) error {
+func (c *IndexReaderWriter) lookupEntriesByQueries(ctx context.Context, queries []series_index.Query, entries *[]series_index.Entry) error {
*entries = (*entries)[:0]
// Nothing to do if there are no queries.
if len(queries) == 0 {
@@ -628,7 +628,7 @@ func (c *indexReaderWriter) lookupEntriesByQueries(ctx context.Context, queries
return err
}
-func (c *indexReaderWriter) lookupLabelNamesBySeries(ctx context.Context, from, through model.Time, userID string, seriesIDs []string) ([]string, error) {
+func (c *IndexReaderWriter) lookupLabelNamesBySeries(ctx context.Context, from, through model.Time, userID string, seriesIDs []string) ([]string, error) {
sp, ctx := opentracing.StartSpanFromContext(ctx, "SeriesStore.lookupLabelNamesBySeries")
defer sp.Finish()
log := spanlogger.FromContext(ctx)
@@ -665,7 +665,7 @@ func (c *indexReaderWriter) lookupLabelNamesBySeries(ctx context.Context, from,
return result.Strings(), nil
}
-func (c *indexReaderWriter) lookupLabelNamesByChunks(ctx context.Context, from, through model.Time, userID string, seriesIDs []string) ([]string, error) {
+func (c *IndexReaderWriter) lookupLabelNamesByChunks(ctx context.Context, from, through model.Time, userID string, seriesIDs []string) ([]string, error) {
sp, ctx := opentracing.StartSpanFromContext(ctx, "SeriesStore.lookupLabelNamesByChunks")
defer sp.Finish()
log := spanlogger.FromContext(ctx)
@@ -701,7 +701,7 @@ func (c *indexReaderWriter) lookupLabelNamesByChunks(ctx context.Context, from,
return labelNamesFromChunks(allChunks), nil
}
-func (c *indexReaderWriter) lookupChunksBySeries(ctx context.Context, from, through model.Time, userID string, seriesIDs []string) ([]string, error) {
+func (c *IndexReaderWriter) lookupChunksBySeries(ctx context.Context, from, through model.Time, userID string, seriesIDs []string) ([]string, error) {
queries := make([]series_index.Query, 0, len(seriesIDs))
for _, seriesID := range seriesIDs {
qs, err := c.schema.GetChunksForSeries(from, through, userID, []byte(seriesID))
@@ -722,7 +722,7 @@ func (c *indexReaderWriter) lookupChunksBySeries(ctx context.Context, from, thro
return result, err
}
-func (c *indexReaderWriter) convertChunkIDsToChunks(_ context.Context, userID string, chunkIDs []string) ([]chunk.Chunk, error) {
+func (c *IndexReaderWriter) convertChunkIDsToChunks(_ context.Context, userID string, chunkIDs []string) ([]chunk.Chunk, error) {
chunkSet := make([]chunk.Chunk, 0, len(chunkIDs))
for _, chunkID := range chunkIDs {
chunk, err := chunk.ParseExternalKey(userID, chunkID)
@@ -735,7 +735,7 @@ func (c *indexReaderWriter) convertChunkIDsToChunks(_ context.Context, userID st
return chunkSet, nil
}
-func (c *indexReaderWriter) convertChunkIDsToChunkRefs(_ context.Context, userID string, chunkIDs []string) ([]logproto.ChunkRef, error) {
+func (c *IndexReaderWriter) convertChunkIDsToChunkRefs(_ context.Context, userID string, chunkIDs []string) ([]logproto.ChunkRef, error) {
chunkSet := make([]logproto.ChunkRef, 0, len(chunkIDs))
for _, chunkID := range chunkIDs {
chunk, err := chunk.ParseExternalKey(userID, chunkID)
@@ -749,11 +749,11 @@ func (c *indexReaderWriter) convertChunkIDsToChunkRefs(_ context.Context, userID
}
// old index stores do not implement stats -- skip
-func (c *indexReaderWriter) Stats(_ context.Context, _ string, _, _ model.Time, _ ...*labels.Matcher) (*stats.Stats, error) {
+func (c *IndexReaderWriter) Stats(_ context.Context, _ string, _, _ model.Time, _ ...*labels.Matcher) (*stats.Stats, error) {
return nil, nil
}
// old index stores do not implement label volume -- skip
-func (c *indexReaderWriter) Volume(_ context.Context, _ string, _, _ model.Time, _ int32, _ []string, _ string, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) {
+func (c *IndexReaderWriter) Volume(_ context.Context, _ string, _, _ model.Time, _ int32, _ []string, _ string, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) {
return nil, nil
}
diff --git a/pkg/storage/stores/series/series_store_test.go b/pkg/storage/stores/series/series_store_test.go
index ed7e45e9ccd67..fd68299a86223 100644
--- a/pkg/storage/stores/series/series_store_test.go
+++ b/pkg/storage/stores/series/series_store_test.go
@@ -395,7 +395,7 @@ func TestChunkStore_getMetricNameChunks(t *testing.T) {
t.Fatal(err)
}
- chunks, fetchers, err := store.GetChunkRefs(ctx, userID, now.Add(-time.Hour), now, matchers...)
+ chunks, fetchers, err := store.GetChunks(ctx, userID, now.Add(-time.Hour), now, matchers...)
require.NoError(t, err)
fetchedChunk := []chunk.Chunk{}
for _, f := range fetchers {
@@ -655,7 +655,7 @@ func TestChunkStoreError(t *testing.T) {
require.NoError(t, err)
// Query with ordinary time-range
- _, _, err = store.GetChunkRefs(ctx, userID, tc.from, tc.through, matchers...)
+ _, _, err = store.GetChunks(ctx, userID, tc.from, tc.through, matchers...)
require.EqualError(t, err, tc.err)
})
}
diff --git a/pkg/storage/stores/shipper/index/compactor/util_test.go b/pkg/storage/stores/shipper/index/compactor/util_test.go
index b6e2299ff76ed..f8e83595e61c8 100644
--- a/pkg/storage/stores/shipper/index/compactor/util_test.go
+++ b/pkg/storage/stores/shipper/index/compactor/util_test.go
@@ -168,7 +168,7 @@ func (t *testStore) GetChunks(userID string, from, through model.Time, metric la
matchers = append(matchers, labels.MustNewMatcher(labels.MatchEqual, l.Name, l.Value))
}
ctx := user.InjectOrgID(context.Background(), userID)
- chunks, fetchers, err := t.Store.GetChunkRefs(ctx, userID, from, through, matchers...)
+ chunks, fetchers, err := t.Store.GetChunks(ctx, userID, from, through, matchers...)
require.NoError(t.t, err)
fetchedChunk := []chunk.Chunk{}
for _, f := range fetchers {
diff --git a/pkg/storage/stores/shipper/indexgateway/gateway.go b/pkg/storage/stores/shipper/indexgateway/gateway.go
index 223aca888c7d2..565712c59ca0f 100644
--- a/pkg/storage/stores/shipper/indexgateway/gateway.go
+++ b/pkg/storage/stores/shipper/indexgateway/gateway.go
@@ -33,6 +33,7 @@ const (
type IndexQuerier interface {
stores.ChunkFetcher
index.BaseReader
+ index.StatsReader
Stop()
}
@@ -195,7 +196,7 @@ func (g *Gateway) GetChunkRef(ctx context.Context, req *logproto.GetChunkRefRequ
if err != nil {
return nil, err
}
- chunks, _, err := g.indexQuerier.GetChunkRefs(ctx, instanceID, req.From, req.Through, matchers...)
+ chunks, _, err := g.indexQuerier.GetChunks(ctx, instanceID, req.From, req.Through, matchers...)
if err != nil {
return nil, err
}
diff --git a/pkg/storage/util_test.go b/pkg/storage/util_test.go
index 915509a1cfa35..72ae05d4e99ba 100644
--- a/pkg/storage/util_test.go
+++ b/pkg/storage/util_test.go
@@ -232,7 +232,7 @@ func (m *mockChunkStore) GetChunkFetcher(_ model.Time) *fetcher.Fetcher {
return nil
}
-func (m *mockChunkStore) GetChunkRefs(_ context.Context, _ string, _, _ model.Time, _ ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error) {
+func (m *mockChunkStore) GetChunks(_ context.Context, _ string, _, _ model.Time, _ ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error) {
refs := make([]chunk.Chunk, 0, len(m.chunks))
// transform real chunks into ref chunks.
for _, c := range m.chunks {
|
chore
|
Simplify store interfaces and abstractions (pt 2) (#10454)
|
197265b41d0c11ff2ed8ec1b4849e2b575ecddd0
|
2025-02-25 20:03:52
|
Cyril Tovena
|
feat(helm): Optionally add the operational UI (#16317)
| false
|
diff --git a/docs/sources/operations/loki-ui/_index.md b/docs/sources/operations/loki-ui/_index.md
new file mode 100644
index 0000000000000..0efc5d26633ac
--- /dev/null
+++ b/docs/sources/operations/loki-ui/_index.md
@@ -0,0 +1,202 @@
+---
+title: Loki Deployment UI
+menuTitle: Loki UI
+description: Describes how to setup and use the Loki Deployment UI
+weight: 100
+---
+# Loki Deployment UI
+
+{{< admonition type="warning" >}}
+The Loki Deployment UI is experimental and subject to change. Before attempting to deploy the Loki UI, make sure to consult the [Considerations](#considerations) section.
+{{< /admonition >}}
+
+The Loki Deployment UI is a web-based user interface that allows you to manage and monitor a Loki deployment in [microservice mode](https://grafana.com/docs/loki/<LOKI_VERSION>/get-started/deployment-modes/#microservices-mode) or [single binary mode](https://grafana.com/docs/loki/<LOKI_VERSION>/get-started/deployment-modes/#monolithic-mode).
+
+{{< figure max-width="100%" src="/media/docs/loki/loki-ui.png" caption="Loki UI Home Page" >}}
+
+## Accessing the Loki Deployment UI
+
+The Loki Deployment UI is available at the following URL:
+
+```
+http://<LOKI_HOST>:<LOKI_PORT>/ui
+```
+
+Where `<LOKI_HOST>` is the hostname or IP address of the Loki server and `<LOKI_PORT>` is the port number that Loki is running on. By default, Loki runs on port `3100`. If you are running Loki in Kubernetes, the UI is made accessible via the `loki-gateway` service, either by port-forwarding or by exposing the service via a LoadBalancer.
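+
+For example, a minimal sketch of reaching the UI through a port-forward, assuming the chart's default `loki` namespace and `loki-gateway` service name (adjust both to your deployment):
+
+```bash
+# Forward the gateway's HTTP port to localhost; the names and ports here are assumptions
+kubectl port-forward --namespace loki svc/loki-gateway 3100:80
+# The UI is then available at http://localhost:3100/ui
+```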
+
+{{< admonition type="caution" >}}
+The `/ui` path currently cannot be changed; making it configurable would require further contribution.
+{{< /admonition >}}
+
+## Deployment
+
+Each Loki component now includes the UI service. Discoverability of the UI service needs to be enabled in the Loki configuration. To enable UI discoverability, add the following parameter to the Loki configuration file:
+
+```yaml
+loki:
+ ui:
+ enabled: true
+```
+
+## Features
+
+The Loki Deployment UI provides the following features to manage and monitor a Loki deployment:
+
+| Feature | Description | Readiness |
+|---------|-------------|-----------|
+| [Nodes](#nodes) | Displays the status of each Loki component deployed | **Ready** |
+| [Rollouts & Versions](#rollouts--versions) | A historic view of previous deployments | Coming Soon |
+| [Rings](#rings) | Displays the status of the ring members | **Ready** |
+| [Object Storage](#object-storage) | Displays the status of the object storage | Coming Soon |
+| [Data Objects](#data-objects) | The Data Objects Explorer allows you to explore the data objects in the cluster.| Coming Soon |
+| [Analyze Labels](#analyze-labels) | Analyze label distribution across your log streams | **Ready** |
+| [Deletes](#deletes) | View and manage delete requests | **Ready** |
+| [Limits](#limits) | View and manage tenant limits | Coming Soon |
+| [Labels](#labels) | View Label stats | Coming Soon |
+| [Rules](#rules) | View and manage rules | Coming Soon |
+
+
+### Nodes
+
+The Nodes page displays the status of each Loki component deployed. Depending on the deployment mode, each component will either be considered a node or a service. Below is a screenshot of the Nodes table for a microservices deployment:
+
+{{< figure max-width="100%" src="/media/docs/loki/loki-ui-nodes.png" caption="Loki UI Nodes Table" >}}
+
+The Node table provides the following columns:
+
+- **Node Name**: The name of the component. This will also include any unique identifiers such as pod hashes or zone information.
+- **Target**: Classifies the type of component the node is. This can be a distributor, ingester, querier, etc.
+- **Version**: The version of the component. This is based upon the container image tag.
+- **Build Date**: How old the container image is.
+- **Status**: This provides the current status of each service within a component. Each component can contain many services, depending on its type.
+- **Ready**: This provides the readiness of the component. This is based on the status of the services within the component. Services should be in a `ready` state for the component to be considered `ready`; otherwise the component will be considered `not ready`. A `not ready` status will usually display a red icon and an error message.
+- **Actions**: Takes you to the node-specific page, which allows you to view more detailed information about the component.
+
+#### Node Page
+
+The Node page provides detailed information about the component. The original information from the Nodes table is displayed at the top of the page, along with extended fields useful for debugging and monitoring, such as Version, Created, Edition, Architecture, and OS.
+
+{{< figure max-width="100%" src="/media/docs/loki/loki-ui-node-metadata.png" caption="Loki UI Node Page" >}}
+
+Below the metadata section, there are two tools for monitoring the specific component:
+1. **Log Level**: This will allow you to change the log level of the component.
+2. **Profiling Tools**: This provides the profile data for the component. Profiles are either displayed in a new tab or downloaded as a file.
+
+{{< figure max-width="100%" src="/media/docs/loki/loki-ui-monitoring-tools.png" caption="Loki UI Node Tools" >}}
+
+Finally, three tabs complete the Node page:
+
+1. **Configuration**: This provides the Loki configuration of the component. This is useful for understanding if a specific configuration has been applied to a component.
+2. **Analytic**: This provides the summarized metrics for the component. This is useful for understanding the performance and status of the component. For example, ingesters will display stream count, flushed chunk rate and total, etc.
+3. **Raw Metrics**: This provides the raw metrics for the component.
+
+### Rollouts & Versions
+
+{{< admonition type="note" >}}
+
+This feature is coming soon.
+
+{{< /admonition >}}
+
+
+### Rings
+
+The Rings section of the Loki UI provides a view of the status of the ring members. There can be a total of 5 rings in a Loki deployment, each responsible for a specific task. Note that not all rings will be present in a deployment; this depends on how you have configured Loki. For example, the Index Gateway and Query Scheduler both require explicit configuration to be enabled.
+
+{{< figure max-width="100%" src="/media/docs/loki/loki-ui-ring.png" caption="Loki UI Rings Table" >}}
+
+The Ring table provides the following columns:
+* **ID**: The unique identifier for the ring member.
+* **Status**: The current status of the ring member. For instance, `ACTIVE`, `UNHEALTHY`, `LEAVING`, etc.
+* **Address**: The IP address of the ring member.
+* **Ownership**: The percentage of the ring member's ownership of tokens.
+* **Zone**: The zone the ring member is in.
+* **Last Heartbeat**: The last time the ring member sent a heartbeat.
+
+### Object Storage
+
+{{< admonition type="note" >}}
+
+This feature is coming soon.
+
+{{< /admonition >}}
+
+### Data Objects
+
+{{< admonition type="note" >}}
+
+This feature is coming soon.
+
+{{< /admonition >}}
+
+### Analyze Labels
+
+The Analyze Labels page allows you to analyze label distribution across your log streams within a given tenant and time range. This feature is useful for understanding label cardinality. The tool can be broken down into three sections:
+
+1. **Analyze Labels**: This provides a form for entering your `Tenant ID`, `Time Range`, and any `Matchers` you would like to apply. In the example below, the Tenant ID is set to `meta` and the Time Range is set to `1h`. The `Matchers` field is empty.
+
+ {{< figure max-width="100%" src="/media/docs/loki/loki-ui-analyze-labels.png" caption="Loki UI Analyze Labels" >}}
+
+   Note that if you are using Loki in single binary mode, or have disabled the `auth_enabled` parameter in the Loki config, then the Tenant ID will be `fake`.
+
+2. **Label Distribution**: This provides a visual representation of the label distribution. The labels are displayed in a bar chart:
+
+   {{< figure max-width="100%" src="/media/docs/loki/loki-ui-analyze-labels-bar.png" caption="Loki UI Analyze Labels Bar Chart" >}}
+
+   In this example there are a total of 11 unique labels, with `instance` containing the most unique values at 22. This is a label to monitor closely, as it could be a high-cardinality label.
+
+3. **Label Details**: This provides a table view of the labels and a sample of their unique values. Each label can be expanded to view the unique values.
+
+ {{< figure max-width="100%" src="/media/docs/loki/loki-ui-analyze-labels-table.png" caption="Loki UI Analyze Labels Table" >}}
+
+   In the Label Distribution example above, the `instance` label has 22 unique values. By expanding the `instance` label in the Label Details table, a sample of the unique values can be viewed. There is a considerable number of unique tokens within an instance value, such as `frontend-59b8c46fcb-ft29z`. Based on the [labels best practices](https://grafana.com/docs/loki/<LOKI_VERSION>/get-started/labels/bp-labels/), this should be moved into [Structured metadata](https://grafana.com/docs/loki/<LOKI_VERSION>/get-started/labels/structured-metadata/).
+
+### Deletes
+
+The Deletes page allows you to view scheduled compactor deletes as well as manually delete data.
+
+{{< figure max-width="100%" src="/media/docs/loki/loki-ui-deletes.png" caption="Loki UI Deletes" >}}
+
+The Deletes table provides the following columns:
+* **Status**: The status of the delete request.
+* **User**: In this case, this will be filled with the Tenant ID for the delete request.
+* **Created At**: The time the delete request was created.
+* **Range**: The time range of the delete request.
+* **Deleted Lines**: The number of lines deleted.
+* **Query**: The query used to delete the data.
+
+#### New Delete Request
+
+To create a new delete request, click the `New Delete Request` button. This will open a form where you can enter the `Tenant ID`, `Time Range`, and `Query` for the delete request.
+
+{{< figure max-width="100%" src="/media/docs/loki/loki-ui-new-delete-request.png" caption="Loki UI New Delete Request" >}}
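+
+Under the hood, the UI drives Loki's compactor delete API, which can also be called directly, provided the compactor's delete functionality is enabled. A sketch, assuming the gateway is reachable on `localhost:3100` and the tenant is `fake` (the selector and epoch timestamps are illustrative):
+
+```bash
+# -g disables curl globbing so the {} in the LogQL selector is passed through as-is
+curl -g -X POST -H 'X-Scope-OrgID: fake' \
+  'http://localhost:3100/loki/api/v1/delete?query={app="frontend"}&start=1700000000&end=1700003600'
+```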
+
+### Limits
+
+{{< admonition type="note" >}}
+
+This feature is coming soon.
+
+{{< /admonition >}}
+
+### Labels
+
+{{< admonition type="note" >}}
+
+This feature is coming soon.
+
+{{< /admonition >}}
+
+### Rules
+
+{{< admonition type="note" >}}
+
+This feature is coming soon.
+
+{{< /admonition >}}
+
+## Considerations
+
+The Loki Deployment UI is experimental and subject to change. Before attempting to deploy the Loki UI, make sure to consult the following considerations:
+1. Not all features are available in the Loki Deployment UI. Some features are still in development.
+2. There is no authentication or authorization in the Loki Deployment UI. This means that anyone with access to the UI can make delete requests, change log levels, etc.
+3. This UI is not intended to replace meta monitoring for Loki.
\ No newline at end of file
diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md
index 7faa744d011d9..7c165cae811b3 100644
--- a/docs/sources/setup/install/helm/reference.md
+++ b/docs/sources/setup/install/helm/reference.md
@@ -6516,6 +6516,20 @@ null
"enabled": false
}
</pre>
+</td>
+ </tr>
+ <tr>
+ <td>loki.ui</td>
+ <td>object</td>
+			<td>Optional Loki UI: Provides access to an operators UI for Loki distributed. When enabled, the UI will be available at /ui/ of loki-gateway</td>
+ <td><pre lang="json">
+{
+ "enabled": false,
+ "gateway": {
+ "enabled": true
+ }
+}
+</pre>
</td>
</tr>
<tr>
diff --git a/production/helm/loki/Makefile b/production/helm/loki/Makefile
index 4b56414df7ed1..373086a32e3ad 100644
--- a/production/helm/loki/Makefile
+++ b/production/helm/loki/Makefile
@@ -1,7 +1,84 @@
.DEFAULT_GOAL := all
-.PHONY: lint lint-yaml
+.PHONY: lint lint-yaml install-distributed install-single-binary uninstall update-chart update
+
+# Optional image override, example: make install-distributed IMAGE=grafana/loki:2.9.0
+IMAGE ?=
+
+# Optional helm arguments, example: make install-distributed ARGS="--set loki.auth.enabled=true"
+ARGS ?=
+
+# Default arguments to disable affinity for testing
+DEFAULT_ARGS = --set gateway.affinity=null \
+ --set ingester.affinity=null \
+ --set distributor.affinity=null \
+ --set querier.affinity=null \
+ --set queryFrontend.affinity=null \
+ --set queryScheduler.affinity=null \
+ --set indexGateway.affinity=null \
+ --set compactor.affinity=null \
+ --set ruler.affinity=null \
+ --set backend.affinity=null \
+ --set read.affinity=null \
+ --set write.affinity=null \
+ --set singleBinary.affinity=null \
+ --set memcachedChunks.affinity=null \
+ --set memcachedFrontend.affinity=null \
+ --set memcachedIndexQueries.affinity=null \
+ --set memcachedMetadata.affinity=null \
+ --set memcachedResults.affinity=null \
+ --set global.podAntiAffinity=null \
+ --set global.podAntiAffinityTopologyKey=null
+
+# Generate image override flag if IMAGE is provided
+IMAGE_FLAG = $(if $(IMAGE),\
+ $(eval PARTS=$(subst :, ,$(IMAGE)))\
+ $(eval REPO_PARTS=$(subst /, ,$(word 1,$(PARTS))))\
+ $(eval TAG=$(word 2,$(PARTS)))\
+ $(eval REPO_COUNT=$(words $(REPO_PARTS)))\
+ $(if $(filter 3,$(REPO_COUNT)),\
+ --set loki.image.registry=$(word 1,$(REPO_PARTS))/$(word 2,$(REPO_PARTS)) --set loki.image.repository=$(word 3,$(REPO_PARTS)),\
+ --set loki.image.registry=$(word 1,$(REPO_PARTS)) --set loki.image.repository=$(word 2,$(REPO_PARTS))\
+ ) --set loki.image.tag=$(TAG),)
lint: lint-yaml
lint-yaml:
yamllint -c $(CURDIR)/src/.yamllint.yaml $(CURDIR)/src
+
+# Helm chart installation targets
+install-distributed:
+ helm upgrade --install loki . \
+ -f distributed-values.yaml \
+ --create-namespace \
+ --namespace loki \
+ $(DEFAULT_ARGS) \
+ $(IMAGE_FLAG) \
+ $(ARGS)
+
+install-single-binary:
+ helm upgrade --install loki . \
+ -f single-binary-values.yaml \
+ --create-namespace \
+ --namespace loki \
+ $(DEFAULT_ARGS) \
+ $(IMAGE_FLAG) \
+ $(ARGS)
+
+# Uninstall Loki helm release and optionally delete the namespace
+uninstall:
+ helm uninstall loki --namespace loki
+ kubectl delete namespace loki --ignore-not-found
+
+# Update Helm chart dependencies
+update-chart:
+ helm dependency update .
+
+# Update existing installation with latest changes
+update:
+ @if [ "$$(helm get values loki -n loki -o yaml | grep "deploymentMode: Distributed")" ]; then \
+ echo "Updating distributed deployment..."; \
+ helm upgrade loki . -f distributed-values.yaml --namespace loki $(DEFAULT_ARGS) $(IMAGE_FLAG) $(ARGS); \
+ else \
+ echo "Updating single binary deployment..."; \
+ helm upgrade loki . -f single-binary-values.yaml --namespace loki $(DEFAULT_ARGS) $(IMAGE_FLAG) $(ARGS); \
+ fi
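+
+# Example workflow using the targets above (the image tag and extra ARGS are illustrative):
+#   make update-chart
+#   make install-distributed IMAGE=grafana/loki:3.0.0 ARGS="--set loki.ui.enabled=true"
+#   make uninstall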
diff --git a/production/helm/loki/templates/_helpers.tpl b/production/helm/loki/templates/_helpers.tpl
index 7434350eb840d..327c2bd09b309 100644
--- a/production/helm/loki/templates/_helpers.tpl
+++ b/production/helm/loki/templates/_helpers.tpl
@@ -877,6 +877,12 @@ http {
{{- $schedulerUrl = $backendUrl }}
{{- end -}}
+ {{- if .Values.loki.ui.gateway.enabled }}
+ location ^~ /ui {
+ proxy_pass {{ $distributorUrl }}$request_uri;
+ }
+ {{- end }}
+
# Distributor
location = /api/prom/push {
proxy_pass {{ $distributorUrl }}$request_uri;
diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml
index 3c2c85bc5fc48..5853809c4d2b6 100644
--- a/production/helm/loki/values.yaml
+++ b/production/helm/loki/values.yaml
@@ -282,6 +282,12 @@ loki:
{{- tpl (. | toYaml) $ | nindent 4 }}
{{- end }}
+ {{- if .Values.loki.ui.enabled }}
+ ui:
+ discovery:
+ join_peers:
+ - '{{ include "loki.distributorFullname" . }}.{{ $.Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}'
+ {{- end }}
{{- with .Values.loki.querier }}
querier:
{{- tpl (. | toYaml) $ | nindent 4 }}
@@ -417,27 +423,38 @@ loki:
object_store:
# Type of object store. Valid options are: s3, gcs, azure
type: s3
- storage_prefix: null # Optional prefix for storage keys
-
+ # Optional prefix for storage keys
+ storage_prefix: null
# S3 configuration (when type is "s3")
s3:
- endpoint: null # S3 endpoint URL
- region: null # Optional region
- access_key_id: null # Optional access key
- secret_access_key: null # Optional secret key
- insecure: false # Optional. Enable if using self-signed TLS
- sse: {} # Optional server-side encryption configuration
- http: {} # Optional HTTP client configuration
+ # S3 endpoint URL
+ endpoint: null
+ # Optional region
+ region: null
+ # Optional access key
+ access_key_id: null
+ # Optional secret key
+ secret_access_key: null
+ # Optional. Enable if using self-signed TLS
+ insecure: false
+ # Optional server-side encryption configuration
+ sse: {}
+ # Optional HTTP client configuration
+ http: {}
# GCS configuration (when type is "gcs")
gcs:
- bucket_name: null # Name of the bucket
- service_account: null # Optional service account JSON
+ # Name of the bucket
+ bucket_name: null
+ # Optional service account JSON
+ service_account: null
# Azure configuration (when type is "azure")
azure:
- account_name: null # Storage account name
- account_key: null # Optional storage account key
+ # Storage account name
+ account_name: null
+ # Optional storage account key
+ account_key: null
# -- Configure memcached as an external cache for chunk and results cache. Disabled by default
# must enable and specify a host for each cache you would like to use.
@@ -497,6 +514,13 @@ loki:
enabled: false
# -- Optional analytics configuration
analytics: {}
+  # -- Optional Loki UI: Provides access to an operators UI for Loki distributed. When enabled, the UI will be available at /ui/ of loki-gateway
+ ui:
+ # Disabled by default for backwards compatibility. Enable to use the Loki UI.
+ enabled: false
+ gateway:
+ # enable gateway proxying to UI under /ui
+ enabled: true
# -- Optional querier configuration
query_range: {}
# -- Optional querier configuration
|
feat
|
Optionally add the operational UI (#16317)
|
c62dc2eaa2c2471a8b468a01559a2fe614062706
|
2025-01-22 02:19:28
|
renovate[bot]
|
chore(deps): update terraform google to v6.17.0 (#15873)
| false
|
diff --git a/tools/gcplog/main.tf b/tools/gcplog/main.tf
index fe6b2d84d8c31..4860c44fc52e4 100644
--- a/tools/gcplog/main.tf
+++ b/tools/gcplog/main.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
google = {
source = "hashicorp/google"
- version = "6.16.0"
+ version = "6.17.0"
}
}
}
|
chore
|
update terraform google to v6.17.0 (#15873)
|
8d43969d6b75a9038fb9fd70727553efff108506
|
2023-02-01 13:18:16
|
Karsten Jeschkies
|
helm: Align ingress timeout and size limits. (#8352)
| false
|
diff --git a/production/helm/loki/templates/_helpers.tpl b/production/helm/loki/templates/_helpers.tpl
index 4959c5f359a2d..3441ec86039a5 100644
--- a/production/helm/loki/templates/_helpers.tpl
+++ b/production/helm/loki/templates/_helpers.tpl
@@ -530,6 +530,12 @@ http {
uwsgi_temp_path /tmp/uwsgi_temp;
scgi_temp_path /tmp/scgi_temp;
+ client_max_body_size 4M;
+
+  proxy_read_timeout 600; ## 10 minutes
+ proxy_send_timeout 600;
+ proxy_connect_timeout 600;
+
proxy_http_version 1.1;
default_type application/octet-stream;
|
helm
|
Align ingress timeout and size limits. (#8352)
|
bfc2312427722ef3bed6daff4256a12ba1406920
|
2023-12-13 00:55:43
|
Periklis Tsirakidis
|
operator: Update Loki operand to v2.9.3 (#11448)
| false
|
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index 61cb05abb8f10..cfe393c0f4314 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,5 +1,6 @@
## Main
+- [11448](https://github.com/grafana/loki/pull/11448) **periklis**: Update Loki operand to v2.9.3
- [11357](https://github.com/grafana/loki/pull/11357) **periklis**: Fix storing authentication credentials in the Loki ConfigMap
- [11393](https://github.com/grafana/loki/pull/11393) **periklis**: Add infra annotations for OpenShift based deployments
- [11094](https://github.com/grafana/loki/pull/11094) **periklis**: Add support for blocking queries per tenant
diff --git a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml
index a315fd044f750..36151790a2099 100644
--- a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml
@@ -150,7 +150,7 @@ metadata:
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: docker.io/grafana/loki-operator:0.5.0
- createdAt: "2023-12-06T06:30:12Z"
+ createdAt: "2023-12-12T09:22:19Z"
description: The Community Loki Operator provides Kubernetes native deployment
and management of Loki and related logging components.
features.operators.openshift.io/disconnected: "true"
@@ -1697,7 +1697,7 @@ spec:
- /manager
env:
- name: RELATED_IMAGE_LOKI
- value: docker.io/grafana/loki:2.9.2
+ value: docker.io/grafana/loki:2.9.3
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest
- name: RELATED_IMAGE_OPA
@@ -1820,7 +1820,7 @@ spec:
provider:
name: Grafana Loki SIG Operator
relatedImages:
- - image: docker.io/grafana/loki:2.9.2
+ - image: docker.io/grafana/loki:2.9.3
name: loki
- image: quay.io/observatorium/api:latest
name: gateway
diff --git a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml
index dc3ab04f245b0..322bc606611f3 100644
--- a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml
@@ -150,7 +150,7 @@ metadata:
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: docker.io/grafana/loki-operator:0.5.0
- createdAt: "2023-12-06T06:30:10Z"
+ createdAt: "2023-12-12T09:22:17Z"
description: The Community Loki Operator provides Kubernetes native deployment
and management of Loki and related logging components.
operators.operatorframework.io/builder: operator-sdk-unknown
@@ -1677,7 +1677,7 @@ spec:
- /manager
env:
- name: RELATED_IMAGE_LOKI
- value: docker.io/grafana/loki:2.9.2
+ value: docker.io/grafana/loki:2.9.3
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest
- name: RELATED_IMAGE_OPA
@@ -1788,7 +1788,7 @@ spec:
provider:
name: Grafana Loki SIG Operator
relatedImages:
- - image: docker.io/grafana/loki:2.9.2
+ - image: docker.io/grafana/loki:2.9.3
name: loki
- image: quay.io/observatorium/api:latest
name: gateway
diff --git a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
index 71f096c4fd269..f4a951400e946 100644
--- a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
@@ -150,7 +150,7 @@ metadata:
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: quay.io/openshift-logging/loki-operator:0.1.0
- createdAt: "2023-12-06T06:30:15Z"
+ createdAt: "2023-12-12T09:22:21Z"
description: |
The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging.
## Prerequisites and Requirements
@@ -1682,7 +1682,7 @@ spec:
- /manager
env:
- name: RELATED_IMAGE_LOKI
- value: quay.io/openshift-logging/loki:v2.9.2
+ value: quay.io/openshift-logging/loki:v2.9.3
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest
- name: RELATED_IMAGE_OPA
@@ -1805,7 +1805,7 @@ spec:
provider:
name: Red Hat
relatedImages:
- - image: quay.io/openshift-logging/loki:v2.9.2
+ - image: quay.io/openshift-logging/loki:v2.9.3
name: loki
- image: quay.io/observatorium/api:latest
name: gateway
diff --git a/operator/config/overlays/community-openshift/manager_related_image_patch.yaml b/operator/config/overlays/community-openshift/manager_related_image_patch.yaml
index 9e3748688d14b..e9c5435287a80 100644
--- a/operator/config/overlays/community-openshift/manager_related_image_patch.yaml
+++ b/operator/config/overlays/community-openshift/manager_related_image_patch.yaml
@@ -9,7 +9,7 @@ spec:
- name: manager
env:
- name: RELATED_IMAGE_LOKI
- value: docker.io/grafana/loki:2.9.2
+ value: docker.io/grafana/loki:2.9.3
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest
- name: RELATED_IMAGE_OPA
diff --git a/operator/config/overlays/community/manager_related_image_patch.yaml b/operator/config/overlays/community/manager_related_image_patch.yaml
index 9e3748688d14b..e9c5435287a80 100644
--- a/operator/config/overlays/community/manager_related_image_patch.yaml
+++ b/operator/config/overlays/community/manager_related_image_patch.yaml
@@ -9,7 +9,7 @@ spec:
- name: manager
env:
- name: RELATED_IMAGE_LOKI
- value: docker.io/grafana/loki:2.9.2
+ value: docker.io/grafana/loki:2.9.3
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest
- name: RELATED_IMAGE_OPA
diff --git a/operator/config/overlays/development/manager_related_image_patch.yaml b/operator/config/overlays/development/manager_related_image_patch.yaml
index 7e8f357a89bd7..f9a2449bceed8 100644
--- a/operator/config/overlays/development/manager_related_image_patch.yaml
+++ b/operator/config/overlays/development/manager_related_image_patch.yaml
@@ -9,6 +9,6 @@ spec:
- name: manager
env:
- name: RELATED_IMAGE_LOKI
- value: docker.io/grafana/loki:2.9.2
+ value: docker.io/grafana/loki:2.9.3
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest
diff --git a/operator/config/overlays/openshift/manager_related_image_patch.yaml b/operator/config/overlays/openshift/manager_related_image_patch.yaml
index c2fab2ad3f9b6..5e64be8752f20 100644
--- a/operator/config/overlays/openshift/manager_related_image_patch.yaml
+++ b/operator/config/overlays/openshift/manager_related_image_patch.yaml
@@ -9,7 +9,7 @@ spec:
- name: manager
env:
- name: RELATED_IMAGE_LOKI
- value: quay.io/openshift-logging/loki:v2.9.2
+ value: quay.io/openshift-logging/loki:v2.9.3
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest
- name: RELATED_IMAGE_OPA
diff --git a/operator/docs/operator/compatibility.md b/operator/docs/operator/compatibility.md
index ee09abb6b71ae..36550f06a7062 100644
--- a/operator/docs/operator/compatibility.md
+++ b/operator/docs/operator/compatibility.md
@@ -36,3 +36,4 @@ The versions of Loki compatible to be run with the Loki Operator are:
* v2.9.0
* v2.9.1
* v2.9.2
+* v2.9.3
diff --git a/operator/hack/addons_dev.yaml b/operator/hack/addons_dev.yaml
index 5ecad9181c301..adf6aa053add4 100644
--- a/operator/hack/addons_dev.yaml
+++ b/operator/hack/addons_dev.yaml
@@ -29,7 +29,7 @@ spec:
spec:
containers:
- name: logcli
- image: docker.io/grafana/logcli:2.9.2-amd64
+ image: docker.io/grafana/logcli:2.9.3-amd64
imagePullPolicy: IfNotPresent
command:
- /bin/sh
@@ -73,7 +73,7 @@ spec:
spec:
containers:
- name: promtail
- image: docker.io/grafana/promtail:2.9.2
+ image: docker.io/grafana/promtail:2.9.3
args:
- -config.file=/etc/promtail/promtail.yaml
- -log.level=info
diff --git a/operator/hack/addons_ocp.yaml b/operator/hack/addons_ocp.yaml
index da62de2936409..1a0ff7325a62a 100644
--- a/operator/hack/addons_ocp.yaml
+++ b/operator/hack/addons_ocp.yaml
@@ -29,7 +29,7 @@ spec:
spec:
containers:
- name: logcli
- image: docker.io/grafana/logcli:2.9.2-amd64
+ image: docker.io/grafana/logcli:2.9.3-amd64
imagePullPolicy: IfNotPresent
command:
- /bin/sh
@@ -70,7 +70,7 @@ spec:
spec:
containers:
- name: promtail
- image: docker.io/grafana/promtail:2.9.2
+ image: docker.io/grafana/promtail:2.9.3
args:
- -config.file=/etc/promtail/promtail.yaml
- -log.level=info
diff --git a/operator/internal/manifests/var.go b/operator/internal/manifests/var.go
index e4d7c0d1a9b4f..6468e4426bf0e 100644
--- a/operator/internal/manifests/var.go
+++ b/operator/internal/manifests/var.go
@@ -59,7 +59,7 @@ const (
EnvRelatedImageGateway = "RELATED_IMAGE_GATEWAY"
// DefaultContainerImage declares the default fallback for loki image.
- DefaultContainerImage = "docker.io/grafana/loki:2.9.2"
+ DefaultContainerImage = "docker.io/grafana/loki:2.9.3"
// DefaultLokiStackGatewayImage declares the default image for lokiStack-gateway.
DefaultLokiStackGatewayImage = "quay.io/observatorium/api:latest"
diff --git a/operator/jsonnet/jsonnetfile.json b/operator/jsonnet/jsonnetfile.json
index 139ecf1db8a86..ea10dcdf83cab 100644
--- a/operator/jsonnet/jsonnetfile.json
+++ b/operator/jsonnet/jsonnetfile.json
@@ -8,7 +8,7 @@
"subdir": "production/loki-mixin"
}
},
- "version": "v2.9.2"
+ "version": "v2.9.3"
}
],
"legacyImports": true
diff --git a/operator/jsonnet/jsonnetfile.lock.json b/operator/jsonnet/jsonnetfile.lock.json
index 7e26d6b3d0384..ffd87e9ff9537 100644
--- a/operator/jsonnet/jsonnetfile.lock.json
+++ b/operator/jsonnet/jsonnetfile.lock.json
@@ -38,7 +38,7 @@
"subdir": "production/loki-mixin"
}
},
- "version": "cbad5587450a93af43394e5675c4056235df5df3",
+ "version": "567b0fb44b750ead8a22fbd0078940c14f559b79",
"sum": "a/71V1QzEB46ewPIE2nyNp2HlYFwmDqmSddNulZPP40="
},
{
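The operand bump above touches every place the image reference is pinned: the CSV manifests, the overlay patches, and the DefaultContainerImage constant in var.go. At runtime the operator reads the image from the RELATED_IMAGE_LOKI environment variable injected by those manifests and only falls back to the constant. A minimal sketch of that fallback pattern (not the operator's actual code; the lokiImage helper is hypothetical):

package main

import (
	"fmt"
	"os"
)

// lokiImage illustrates the RELATED_IMAGE_* convention used in the
// manifests above: the env var set by the CSV/overlays wins, and the
// constant bumped in this commit is the fallback.
func lokiImage() string {
	if img := os.Getenv("RELATED_IMAGE_LOKI"); img != "" {
		return img
	}
	return "docker.io/grafana/loki:2.9.3" // DefaultContainerImage after this bump
}

func main() {
	fmt.Println(lokiImage())
}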
|
operator
|
Update Loki operand to v2.9.3 (#11448)
|
3f1666c53df08b63de6e1fb4b299b6b1e1a94c72
|
2024-11-25 20:08:02
|
renovate[bot]
|
fix(deps): update module github.com/stretchr/testify to v1.10.0 (#15090)
| false
|
diff --git a/go.mod b/go.mod
index ff5549e24abc8..2391249a1289c 100644
--- a/go.mod
+++ b/go.mod
@@ -92,7 +92,7 @@ require (
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546
github.com/sony/gobreaker/v2 v2.0.0
github.com/spf13/afero v1.11.0
- github.com/stretchr/testify v1.9.0
+ github.com/stretchr/testify v1.10.0
github.com/uber/jaeger-client-go v2.30.0+incompatible
github.com/xdg-go/scram v1.1.2
go.etcd.io/bbolt v1.3.11
diff --git a/go.sum b/go.sum
index 470d67ed0f786..ab6894ccbbd88 100644
--- a/go.sum
+++ b/go.sum
@@ -2594,8 +2594,9 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw=
github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0=
github.com/tencentcloud/tencentcloud-sdk-go v1.0.162/go.mod h1:asUz5BPXxgoPGaRgZaVm1iGcUAuHyYUo1nXqKa83cvI=
diff --git a/pkg/logproto/timeseries_test.go b/pkg/logproto/timeseries_test.go
index 94a8c89e1c4d5..0d3e980a18e05 100644
--- a/pkg/logproto/timeseries_test.go
+++ b/pkg/logproto/timeseries_test.go
@@ -32,7 +32,7 @@ func TestPreallocTimeseriesSliceFromPool(t *testing.T) {
first := PreallocTimeseriesSliceFromPool()
second := PreallocTimeseriesSliceFromPool()
- assert.NotSame(t, first, second)
+ assert.NotSame(t, &first, &second)
})
t.Run("instance is cleaned before reusing", func(t *testing.T) {
@@ -50,11 +50,12 @@ func TestTimeseriesFromPool(t *testing.T) {
first := TimeseriesFromPool()
second := TimeseriesFromPool()
- assert.NotSame(t, first, second)
+ assert.NotSame(t, &first, &second)
})
t.Run("instance is cleaned before reusing", func(t *testing.T) {
ts := TimeseriesFromPool()
+
ts.Labels = []LabelAdapter{{Name: "foo", Value: "bar"}}
ts.Samples = []LegacySample{{Value: 1, TimestampMs: 2}}
ReuseTimeseries(ts)
diff --git a/pkg/push/go.mod b/pkg/push/go.mod
index f413854df6b42..eafc950f99330 100644
--- a/pkg/push/go.mod
+++ b/pkg/push/go.mod
@@ -6,7 +6,7 @@ toolchain go1.23.3
require (
github.com/gogo/protobuf v1.3.2
- github.com/stretchr/testify v1.9.0
+ github.com/stretchr/testify v1.10.0
golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8
google.golang.org/grpc v1.68.0
)
diff --git a/pkg/push/go.sum b/pkg/push/go.sum
index 6df4afefcd84d..ddaa328926d2a 100644
--- a/pkg/push/go.sum
+++ b/pkg/push/go.sum
@@ -17,8 +17,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
diff --git a/pkg/util/mapmerge_test.go b/pkg/util/mapmerge_test.go
index f671a4caa380c..2d5d8266eaa6e 100644
--- a/pkg/util/mapmerge_test.go
+++ b/pkg/util/mapmerge_test.go
@@ -37,7 +37,7 @@ func TestCopy(t *testing.T) {
cp := CopyMap(base)
require.EqualValues(t, base, cp)
- require.NotSame(t, base, cp)
+ require.NotSame(t, &base, &cp)
}
func TestNilCopy(t *testing.T) {
@@ -45,7 +45,7 @@ func TestNilCopy(t *testing.T) {
cp := CopyMap(base)
require.EqualValues(t, base, cp)
- require.NotSame(t, base, cp)
+ require.NotSame(t, &base, &cp)
}
func TestNilBase(t *testing.T) {
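The & additions in the two test files above are required by testify v1.10.0: Same and NotSame now fail outright with "Both arguments must be pointers" when handed non-pointer values (see the samePointers change in the vendored diff below), whereas v1.9.0 let NotSame pass vacuously. A minimal sketch of the adjustment, assuming a plain slice comparison:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNotSameNeedsPointers(t *testing.T) {
	a := []int{1, 2}
	b := []int{1, 2}

	// Passed (vacuously) under v1.9.0, but fails under v1.10.0 because
	// neither argument is a pointer:
	// assert.NotSame(t, a, b)

	// Compare the addresses of the local variables instead, matching the
	// fix applied to the Loki tests above.
	assert.NotSame(t, &a, &b)
}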
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
index 4d4b4aad6fe88..7e19eba0904d6 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
@@ -7,10 +7,13 @@ import (
"time"
)
-type CompareType int
+// Deprecated: CompareType has only ever been for internal use and has accidentally been published since v1.6.0. Do not use it.
+type CompareType = compareResult
+
+type compareResult int
const (
- compareLess CompareType = iota - 1
+ compareLess compareResult = iota - 1
compareEqual
compareGreater
)
@@ -39,7 +42,7 @@ var (
bytesType = reflect.TypeOf([]byte{})
)
-func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
+func compare(obj1, obj2 interface{}, kind reflect.Kind) (compareResult, bool) {
obj1Value := reflect.ValueOf(obj1)
obj2Value := reflect.ValueOf(obj2)
@@ -325,7 +328,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time)
}
- return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64)
+ if timeObj1.Before(timeObj2) {
+ return compareLess, true
+ }
+ if timeObj1.Equal(timeObj2) {
+ return compareEqual, true
+ }
+ return compareGreater, true
}
case reflect.Slice:
{
@@ -345,7 +354,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte)
}
- return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true
+ return compareResult(bytes.Compare(bytesObj1, bytesObj2)), true
}
case reflect.Uintptr:
{
@@ -381,7 +390,7 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface
if h, ok := t.(tHelper); ok {
h.Helper()
}
- return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
+ return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
}
// GreaterOrEqual asserts that the first element is greater than or equal to the second
@@ -394,7 +403,7 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in
if h, ok := t.(tHelper); ok {
h.Helper()
}
- return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
+ return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
}
// Less asserts that the first element is less than the second
@@ -406,7 +415,7 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{})
if h, ok := t.(tHelper); ok {
h.Helper()
}
- return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
+ return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
}
// LessOrEqual asserts that the first element is less than or equal to the second
@@ -419,7 +428,7 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter
if h, ok := t.(tHelper); ok {
h.Helper()
}
- return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
+ return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
}
// Positive asserts that the specified element is positive
@@ -431,7 +440,7 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
h.Helper()
}
zero := reflect.Zero(reflect.TypeOf(e))
- return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
+ return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
}
// Negative asserts that the specified element is negative
@@ -443,10 +452,10 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
h.Helper()
}
zero := reflect.Zero(reflect.TypeOf(e))
- return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...)
+ return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...)
}
-func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
+func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
@@ -469,7 +478,7 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare
return true
}
-func containsValue(values []CompareType, value CompareType) bool {
+func containsValue(values []compareResult, value compareResult) bool {
for _, v := range values {
if v == value {
return true
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
index 3ddab109ad9ec..1906341657745 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -104,8 +104,8 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{},
return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...)
}
-// EqualValuesf asserts that two objects are equal or convertible to the same types
-// and equal.
+// EqualValuesf asserts that two objects are equal or convertible to the larger
+// type and equal.
//
// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
@@ -186,7 +186,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick
// assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") {
// // add assertions as needed; any assertion failure will fail the current tick
// assert.True(c, externalValue, "expected 'externalValue' to be true")
-// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
+// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -568,6 +568,23 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a
return NotContains(t, s, contains, append([]interface{}{msg}, args...)...)
}
+// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
+//
+// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false
+//
+// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true
+//
+// assert.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true
+func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
+}
+
// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
@@ -604,7 +621,16 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s
return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
}
-// NotErrorIsf asserts that at none of the errors in err's chain matches target.
+// NotErrorAsf asserts that none of the errors in err's chain matches target,
+// but if so, sets target to that error value.
+func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
+}
+
+// NotErrorIsf asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
index a84e09bd40908..21629087baf76 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -186,8 +186,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface
return EqualExportedValuesf(a.t, expected, actual, msg, args...)
}
-// EqualValues asserts that two objects are equal or convertible to the same types
-// and equal.
+// EqualValues asserts that two objects are equal or convertible to the larger
+// type and equal.
//
// a.EqualValues(uint32(123), int32(123))
func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
@@ -197,8 +197,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
return EqualValues(a.t, expected, actual, msgAndArgs...)
}
-// EqualValuesf asserts that two objects are equal or convertible to the same types
-// and equal.
+// EqualValuesf asserts that two objects are equal or convertible to the larger
+// type and equal.
//
// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
@@ -336,7 +336,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti
// a.EventuallyWithT(func(c *assert.CollectT) {
// // add assertions as needed; any assertion failure will fail the current tick
// assert.True(c, externalValue, "expected 'externalValue' to be true")
-// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
+// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -361,7 +361,7 @@ func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor
// a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") {
// // add assertions as needed; any assertion failure will fail the current tick
// assert.True(c, externalValue, "expected 'externalValue' to be true")
-// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
+// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
func (a *Assertions) EventuallyWithTf(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1128,6 +1128,40 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin
return NotContainsf(a.t, s, contains, msg, args...)
}
+// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
+//
+// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false
+//
+// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true
+//
+// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true
+func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotElementsMatch(a.t, listA, listB, msgAndArgs...)
+}
+
+// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
+//
+// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false
+//
+// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true
+//
+// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true
+func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotElementsMatchf(a.t, listA, listB, msg, args...)
+}
+
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
@@ -1200,7 +1234,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str
return NotEqualf(a.t, expected, actual, msg, args...)
}
-// NotErrorIs asserts that at none of the errors in err's chain matches target.
+// NotErrorAs asserts that none of the errors in err's chain matches target,
+// but if so, sets target to that error value.
+func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotErrorAs(a.t, err, target, msgAndArgs...)
+}
+
+// NotErrorAsf asserts that none of the errors in err's chain matches target,
+// but if so, sets target to that error value.
+func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotErrorAsf(a.t, err, target, msg, args...)
+}
+
+// NotErrorIs asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
@@ -1209,7 +1261,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface
return NotErrorIs(a.t, err, target, msgAndArgs...)
}
-// NotErrorIsf asserts that at none of the errors in err's chain matches target.
+// NotErrorIsf asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go
index 00df62a05992d..1d2f71824aa93 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_order.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go
@@ -6,7 +6,7 @@ import (
)
// isOrdered checks that collection contains orderable elements.
-func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
+func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool {
objKind := reflect.TypeOf(object).Kind()
if objKind != reflect.Slice && objKind != reflect.Array {
return false
@@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT
// assert.IsIncreasing(t, []float{1, 2})
// assert.IsIncreasing(t, []string{"a", "b"})
func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
+ return isOrdered(t, object, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
}
// IsNonIncreasing asserts that the collection is not increasing
@@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo
// assert.IsNonIncreasing(t, []float{2, 1})
// assert.IsNonIncreasing(t, []string{"b", "a"})
func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
+ return isOrdered(t, object, []compareResult{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
}
// IsDecreasing asserts that the collection is decreasing
@@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{})
// assert.IsDecreasing(t, []float{2, 1})
// assert.IsDecreasing(t, []string{"b", "a"})
func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
+ return isOrdered(t, object, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
}
// IsNonDecreasing asserts that the collection is not decreasing
@@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo
// assert.IsNonDecreasing(t, []float{1, 2})
// assert.IsNonDecreasing(t, []string{"a", "b"})
func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
+ return isOrdered(t, object, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
}
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
index 0b7570f21c631..4e91332bb51c3 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -19,7 +19,9 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/pmezard/go-difflib/difflib"
- "gopkg.in/yaml.v3"
+
+ // Wrapper around gopkg.in/yaml.v3
+ "github.com/stretchr/testify/assert/yaml"
)
//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl"
@@ -45,6 +47,10 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool
// for table driven tests.
type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool
+// PanicAssertionFunc is a common function prototype when validating a panic value. Can be useful
+// for table driven tests.
+type PanicAssertionFunc = func(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool
+
// Comparison is a custom function that returns true on success and false on failure
type Comparison func() (success bool)
@@ -496,7 +502,13 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b
h.Helper()
}
- if !samePointers(expected, actual) {
+ same, ok := samePointers(expected, actual)
+ if !ok {
+ return Fail(t, "Both arguments must be pointers", msgAndArgs...)
+ }
+
+ if !same {
+ // both are pointers but not the same type & pointing to the same address
return Fail(t, fmt.Sprintf("Not same: \n"+
"expected: %p %#v\n"+
"actual : %p %#v", expected, expected, actual, actual), msgAndArgs...)
@@ -516,7 +528,13 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
h.Helper()
}
- if samePointers(expected, actual) {
+ same, ok := samePointers(expected, actual)
+ if !ok {
+ //fails when the arguments are not pointers
+ return !(Fail(t, "Both arguments must be pointers", msgAndArgs...))
+ }
+
+ if same {
return Fail(t, fmt.Sprintf(
"Expected and actual point to the same object: %p %#v",
expected, expected), msgAndArgs...)
@@ -524,21 +542,23 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
return true
}
-// samePointers compares two generic interface objects and returns whether
-// they point to the same object
-func samePointers(first, second interface{}) bool {
+// samePointers checks if two generic interface objects are pointers of the same
+// type pointing to the same object. It returns two values: same indicating if
+// they are the same type and point to the same object, and ok indicating that
+// both inputs are pointers.
+func samePointers(first, second interface{}) (same bool, ok bool) {
firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second)
if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr {
- return false
+ return false, false //not both are pointers
}
firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second)
if firstType != secondType {
- return false
+ return false, true // both are pointers, but of different types
}
// compare pointer addresses
- return first == second
+ return first == second, true
}
// formatUnequalValues takes two values of arbitrary types and returns string
@@ -572,8 +592,8 @@ func truncatingFormat(data interface{}) string {
return value
}
-// EqualValues asserts that two objects are equal or convertible to the same types
-// and equal.
+// EqualValues asserts that two objects are equal or convertible to the larger
+// type and equal.
//
// assert.EqualValues(t, uint32(123), int32(123))
func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
@@ -615,21 +635,6 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs ..
return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...)
}
- if aType.Kind() == reflect.Ptr {
- aType = aType.Elem()
- }
- if bType.Kind() == reflect.Ptr {
- bType = bType.Elem()
- }
-
- if aType.Kind() != reflect.Struct {
- return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...)
- }
-
- if bType.Kind() != reflect.Struct {
- return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...)
- }
-
expected = copyExportedFields(expected)
actual = copyExportedFields(actual)
@@ -1170,6 +1175,39 @@ func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) stri
return msg.String()
}
+// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
+//
+// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false
+//
+// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true
+//
+// assert.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true
+func NotElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if isEmpty(listA) && isEmpty(listB) {
+ return Fail(t, "listA and listB contain the same elements", msgAndArgs)
+ }
+
+ if !isList(t, listA, msgAndArgs...) {
+ return Fail(t, "listA is not a list type", msgAndArgs...)
+ }
+ if !isList(t, listB, msgAndArgs...) {
+ return Fail(t, "listB is not a list type", msgAndArgs...)
+ }
+
+ extraA, extraB := diffLists(listA, listB)
+ if len(extraA) == 0 && len(extraB) == 0 {
+ return Fail(t, "listA and listB contain the same elements", msgAndArgs)
+ }
+
+ return true
+}
+
// Condition uses a Comparison to assert a complex condition.
func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
@@ -1488,6 +1526,9 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd
if err != nil {
return Fail(t, err.Error(), msgAndArgs...)
}
+ if math.IsNaN(actualEpsilon) {
+ return Fail(t, "relative error is NaN", msgAndArgs...)
+ }
if actualEpsilon > epsilon {
return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+
" < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...)
@@ -1611,7 +1652,6 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in
// matchRegexp return true if a specified regexp matches a string.
func matchRegexp(rx interface{}, str interface{}) bool {
-
var r *regexp.Regexp
if rr, ok := rx.(*regexp.Regexp); ok {
r = rr
@@ -1619,7 +1659,14 @@ func matchRegexp(rx interface{}, str interface{}) bool {
r = regexp.MustCompile(fmt.Sprint(rx))
}
- return (r.FindStringIndex(fmt.Sprint(str)) != nil)
+ switch v := str.(type) {
+ case []byte:
+ return r.Match(v)
+ case string:
+ return r.MatchString(v)
+ default:
+ return r.MatchString(fmt.Sprint(v))
+ }
}
@@ -1872,7 +1919,7 @@ var spewConfigStringerEnabled = spew.ConfigState{
MaxDepth: 10,
}
-type tHelper interface {
+type tHelper = interface {
Helper()
}
@@ -1911,6 +1958,9 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t
// CollectT implements the TestingT interface and collects all errors.
type CollectT struct {
+ // A slice of errors. Non-nil slice denotes a failure.
+ // If it's non-nil but len(c.errors) == 0, this is also a failure
+ // obtained by direct c.FailNow() call.
errors []error
}
@@ -1919,9 +1969,10 @@ func (c *CollectT) Errorf(format string, args ...interface{}) {
c.errors = append(c.errors, fmt.Errorf(format, args...))
}
-// FailNow panics.
-func (*CollectT) FailNow() {
- panic("Assertion failed")
+// FailNow stops execution by calling runtime.Goexit.
+func (c *CollectT) FailNow() {
+ c.fail()
+ runtime.Goexit()
}
// Deprecated: That was a method for internal usage that should not have been published. Now just panics.
@@ -1934,6 +1985,16 @@ func (*CollectT) Copy(TestingT) {
panic("Copy() is deprecated")
}
+func (c *CollectT) fail() {
+ if !c.failed() {
+ c.errors = []error{} // Make it non-nil to mark a failure.
+ }
+}
+
+func (c *CollectT) failed() bool {
+ return c.errors != nil
+}
+
// EventuallyWithT asserts that given condition will be met in waitFor time,
// periodically checking target function each tick. In contrast to Eventually,
// it supplies a CollectT to the condition function, so that the condition
@@ -1951,14 +2012,14 @@ func (*CollectT) Copy(TestingT) {
// assert.EventuallyWithT(t, func(c *assert.CollectT) {
// // add assertions as needed; any assertion failure will fail the current tick
// assert.True(c, externalValue, "expected 'externalValue' to be true")
-// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
+// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
var lastFinishedTickErrs []error
- ch := make(chan []error, 1)
+ ch := make(chan *CollectT, 1)
timer := time.NewTimer(waitFor)
defer timer.Stop()
@@ -1978,16 +2039,16 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time
go func() {
collect := new(CollectT)
defer func() {
- ch <- collect.errors
+ ch <- collect
}()
condition(collect)
}()
- case errs := <-ch:
- if len(errs) == 0 {
+ case collect := <-ch:
+ if !collect.failed() {
return true
}
// Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached.
- lastFinishedTickErrs = errs
+ lastFinishedTickErrs = collect.errors
tick = ticker.C
}
}
@@ -2049,7 +2110,7 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
), msgAndArgs...)
}
-// NotErrorIs asserts that at none of the errors in err's chain matches target.
+// NotErrorIs asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
@@ -2090,6 +2151,24 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{
), msgAndArgs...)
}
+// NotErrorAs asserts that none of the errors in err's chain matches target,
+// but if so, sets target to that error value.
+func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if !errors.As(err, target) {
+ return true
+ }
+
+ chain := buildErrorChainString(err)
+
+ return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
+ "found: %q\n"+
+ "in chain: %s", target, chain,
+ ), msgAndArgs...)
+}
+
func buildErrorChainString(err error) string {
if err == nil {
return ""
diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go
new file mode 100644
index 0000000000000..baa0cc7d7fca7
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go
@@ -0,0 +1,25 @@
+//go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default
+// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default
+
+// Package yaml is an implementation of YAML functions that calls a pluggable implementation.
+//
+// This implementation is selected with the testify_yaml_custom build tag.
+//
+// go test -tags testify_yaml_custom
+//
+// This implementation can be used at build time to replace the default implementation
+// to avoid linking with [gopkg.in/yaml.v3].
+//
+// In your test package:
+//
+// import assertYaml "github.com/stretchr/testify/assert/yaml"
+//
+// func init() {
+// assertYaml.Unmarshal = func (in []byte, out interface{}) error {
+// // ...
+// return nil
+// }
+// }
+package yaml
+
+var Unmarshal func(in []byte, out interface{}) error
diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go
new file mode 100644
index 0000000000000..b83c6cf64c2a1
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go
@@ -0,0 +1,37 @@
+//go:build !testify_yaml_fail && !testify_yaml_custom
+// +build !testify_yaml_fail,!testify_yaml_custom
+
+// Package yaml is just an indirection to handle YAML deserialization.
+//
+// This package is just an indirection that allows the builder to override the
+// indirection with an alternative implementation of this package that uses
+// another implementation of YAML deserialization. This allows either not
+// using YAML deserialization at all, or using an implementation other than
+// [gopkg.in/yaml.v3] (for example for license compatibility reasons, see [PR #1120]).
+//
+// Alternative implementations are selected using build tags:
+//
+// - testify_yaml_fail: [Unmarshal] always fails with an error
+// - testify_yaml_custom: [Unmarshal] is a variable. Caller must initialize it
+// before calling any of [github.com/stretchr/testify/assert.YAMLEq] or
+// [github.com/stretchr/testify/assert.YAMLEqf].
+//
+// Usage:
+//
+// go test -tags testify_yaml_fail
+//
+// You can check with "go list" which implementation is linked:
+//
+// go list -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml
+// go list -tags testify_yaml_fail -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml
+// go list -tags testify_yaml_custom -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml
+//
+// [PR #1120]: https://github.com/stretchr/testify/pull/1120
+package yaml
+
+import goyaml "gopkg.in/yaml.v3"
+
+// Unmarshal is just a wrapper of [gopkg.in/yaml.v3.Unmarshal].
+func Unmarshal(in []byte, out interface{}) error {
+ return goyaml.Unmarshal(in, out)
+}
diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go
new file mode 100644
index 0000000000000..e78f7dfe69a16
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go
@@ -0,0 +1,18 @@
+//go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default
+// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default
+
+// Package yaml is an implementation of YAML functions that always fail.
+//
+// This implementation can be used at build time to replace the default implementation
+// to avoid linking with [gopkg.in/yaml.v3]:
+//
+// go test -tags testify_yaml_fail
+package yaml
+
+import "errors"
+
+var errNotImplemented = errors.New("YAML functions are not available (see https://pkg.go.dev/github.com/stretchr/testify/assert/yaml)")
+
+func Unmarshal([]byte, interface{}) error {
+ return errNotImplemented
+}
diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go
index 213bde2ea6366..eb5682df97891 100644
--- a/vendor/github.com/stretchr/testify/mock/mock.go
+++ b/vendor/github.com/stretchr/testify/mock/mock.go
@@ -80,12 +80,12 @@ type Call struct {
requires []*Call
}
-func newCall(parent *Mock, methodName string, callerInfo []string, methodArguments ...interface{}) *Call {
+func newCall(parent *Mock, methodName string, callerInfo []string, methodArguments Arguments, returnArguments Arguments) *Call {
return &Call{
Parent: parent,
Method: methodName,
Arguments: methodArguments,
- ReturnArguments: make([]interface{}, 0),
+ ReturnArguments: returnArguments,
callerInfo: callerInfo,
Repeatability: 0,
WaitFor: nil,
@@ -256,7 +256,7 @@ func (c *Call) Unset() *Call {
// calls have been called as expected. The referenced calls may be from the
// same mock instance and/or other mock instances.
//
-// Mock.On("Do").Return(nil).Notbefore(
+// Mock.On("Do").Return(nil).NotBefore(
// Mock.On("Init").Return(nil)
// )
func (c *Call) NotBefore(calls ...*Call) *Call {
@@ -273,6 +273,20 @@ func (c *Call) NotBefore(calls ...*Call) *Call {
return c
}
+// InOrder defines the order in which the calls should be made
+//
+// For example:
+//
+// InOrder(
+// Mock.On("init").Return(nil),
+// Mock.On("Do").Return(nil),
+// )
+func InOrder(calls ...*Call) {
+ for i := 1; i < len(calls); i++ {
+ calls[i].NotBefore(calls[i-1])
+ }
+}
+
// Mock is the workhorse used to track activity on another object.
// For an example of its usage, refer to the "Example Usage" section at the top
// of this document.
@@ -351,7 +365,8 @@ func (m *Mock) On(methodName string, arguments ...interface{}) *Call {
m.mutex.Lock()
defer m.mutex.Unlock()
- c := newCall(m, methodName, assert.CallerInfo(), arguments...)
+
+ c := newCall(m, methodName, assert.CallerInfo(), arguments, make([]interface{}, 0))
m.ExpectedCalls = append(m.ExpectedCalls, c)
return c
}
@@ -491,11 +506,12 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen
m.mutex.Unlock()
if closestCall != nil {
- m.fail("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\nDiff: %s",
+ m.fail("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\nDiff: %s\nat: %s\n",
callString(methodName, arguments, true),
callString(methodName, closestCall.Arguments, true),
diffArguments(closestCall.Arguments, arguments),
strings.TrimSpace(mismatch),
+ assert.CallerInfo(),
)
} else {
m.fail("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", methodName, methodName, callString(methodName, arguments, true), assert.CallerInfo())
@@ -529,7 +545,7 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen
call.totalCalls++
// add the call
- m.Calls = append(m.Calls, *newCall(m, methodName, assert.CallerInfo(), arguments...))
+ m.Calls = append(m.Calls, *newCall(m, methodName, assert.CallerInfo(), arguments, call.ReturnArguments))
m.mutex.Unlock()
// block if specified
@@ -764,9 +780,17 @@ const (
)
// AnythingOfTypeArgument contains the type of an argument
-// for use when type checking. Used in Diff and Assert.
+// for use when type checking. Used in [Arguments.Diff] and [Arguments.Assert].
//
-// Deprecated: this is an implementation detail that must not be used. Use [AnythingOfType] instead.
+// Deprecated: this is an implementation detail that must not be used. Use the [AnythingOfType] constructor instead, example:
+//
+// m.On("Do", mock.AnythingOfType("string"))
+//
+// All explicit type declarations can be replaced with interface{} as is expected by [Mock.On], example:
+//
+// func anyString() interface{} {
+// return mock.AnythingOfType("string")
+// }
type AnythingOfTypeArgument = anythingOfTypeArgument
// anythingOfTypeArgument is a string that contains the type of an argument
@@ -780,53 +804,54 @@ type anythingOfTypeArgument string
//
// For example:
//
-// Assert(t, AnythingOfType("string"), AnythingOfType("int"))
+// args.Assert(t, AnythingOfType("string"), AnythingOfType("int"))
func AnythingOfType(t string) AnythingOfTypeArgument {
return anythingOfTypeArgument(t)
}
// IsTypeArgument is a struct that contains the type of an argument
-// for use when type checking. This is an alternative to AnythingOfType.
-// Used in Diff and Assert.
+// for use when type checking. This is an alternative to [AnythingOfType].
+// Used in [Arguments.Diff] and [Arguments.Assert].
type IsTypeArgument struct {
t reflect.Type
}
// IsType returns an IsTypeArgument object containing the type to check for.
// You can provide a zero-value of the type to check. This is an
-// alternative to AnythingOfType. Used in Diff and Assert.
+// alternative to [AnythingOfType]. Used in [Arguments.Diff] and [Arguments.Assert].
//
// For example:
-// Assert(t, IsType(""), IsType(0))
+//
+// args.Assert(t, IsType(""), IsType(0))
func IsType(t interface{}) *IsTypeArgument {
return &IsTypeArgument{t: reflect.TypeOf(t)}
}
-// FunctionalOptionsArgument is a struct that contains the type and value of an functional option argument
-// for use when type checking.
+// FunctionalOptionsArgument contains a list of functional options arguments
+// expected for use when matching a list of arguments.
type FunctionalOptionsArgument struct {
- value interface{}
+ values []interface{}
}
// String returns the string representation of FunctionalOptionsArgument
func (f *FunctionalOptionsArgument) String() string {
var name string
- tValue := reflect.ValueOf(f.value)
- if tValue.Len() > 0 {
- name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String()
+ if len(f.values) > 0 {
+ name = "[]" + reflect.TypeOf(f.values[0]).String()
}
- return strings.Replace(fmt.Sprintf("%#v", f.value), "[]interface {}", name, 1)
+ return strings.Replace(fmt.Sprintf("%#v", f.values), "[]interface {}", name, 1)
}
-// FunctionalOptions returns an FunctionalOptionsArgument object containing the functional option type
-// and the values to check of
+// FunctionalOptions returns an [FunctionalOptionsArgument] object containing
+// the expected functional-options to check for.
//
// For example:
-// Assert(t, FunctionalOptions("[]foo.FunctionalOption", foo.Opt1(), foo.Opt2()))
-func FunctionalOptions(value ...interface{}) *FunctionalOptionsArgument {
+//
+// args.Assert(t, FunctionalOptions(foo.Opt1("strValue"), foo.Opt2(613)))
+func FunctionalOptions(values ...interface{}) *FunctionalOptionsArgument {
return &FunctionalOptionsArgument{
- value: value,
+ values: values,
}
}
@@ -873,10 +898,11 @@ func (f argumentMatcher) String() string {
// and false otherwise.
//
// Example:
-// m.On("Do", MatchedBy(func(req *http.Request) bool { return req.Host == "example.com" }))
//
-// |fn|, must be a function accepting a single argument (of the expected type)
-// which returns a bool. If |fn| doesn't match the required signature,
+// m.On("Do", MatchedBy(func(req *http.Request) bool { return req.Host == "example.com" }))
+//
+// fn must be a function accepting a single argument (of the expected type)
+// which returns a bool. If fn doesn't match the required signature,
// MatchedBy() panics.
func MatchedBy(fn interface{}) argumentMatcher {
fnType := reflect.TypeOf(fn)
@@ -979,20 +1005,17 @@ func (args Arguments) Diff(objects []interface{}) (string, int) {
output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected.t.Name(), actualT.Name(), actualFmt)
}
case *FunctionalOptionsArgument:
- t := expected.value
-
var name string
- tValue := reflect.ValueOf(t)
- if tValue.Len() > 0 {
- name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String()
+ if len(expected.values) > 0 {
+ name = "[]" + reflect.TypeOf(expected.values[0]).String()
}
- tName := reflect.TypeOf(t).Name()
- if name != reflect.TypeOf(actual).String() && tValue.Len() != 0 {
+ const tName = "[]interface{}"
+ if name != reflect.TypeOf(actual).String() && len(expected.values) != 0 {
differences++
output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, tName, reflect.TypeOf(actual).Name(), actualFmt)
} else {
- if ef, af := assertOpts(t, actual); ef == "" && af == "" {
+ if ef, af := assertOpts(expected.values, actual); ef == "" && af == "" {
// match
output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, tName, tName)
} else {
@@ -1092,7 +1115,7 @@ func (args Arguments) Error(index int) error {
return nil
}
if s, ok = obj.(error); !ok {
- panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, args.Get(index)))
+ panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, obj))
}
return s
}
@@ -1181,32 +1204,38 @@ type tHelper interface {
func assertOpts(expected, actual interface{}) (expectedFmt, actualFmt string) {
expectedOpts := reflect.ValueOf(expected)
actualOpts := reflect.ValueOf(actual)
+
+ var expectedFuncs []*runtime.Func
var expectedNames []string
for i := 0; i < expectedOpts.Len(); i++ {
- expectedNames = append(expectedNames, funcName(expectedOpts.Index(i).Interface()))
+ f := runtimeFunc(expectedOpts.Index(i).Interface())
+ expectedFuncs = append(expectedFuncs, f)
+ expectedNames = append(expectedNames, funcName(f))
}
+ var actualFuncs []*runtime.Func
var actualNames []string
for i := 0; i < actualOpts.Len(); i++ {
- actualNames = append(actualNames, funcName(actualOpts.Index(i).Interface()))
+ f := runtimeFunc(actualOpts.Index(i).Interface())
+ actualFuncs = append(actualFuncs, f)
+ actualNames = append(actualNames, funcName(f))
}
- if !assert.ObjectsAreEqual(expectedNames, actualNames) {
+
+ if expectedOpts.Len() != actualOpts.Len() {
expectedFmt = fmt.Sprintf("%v", expectedNames)
actualFmt = fmt.Sprintf("%v", actualNames)
return
}
for i := 0; i < expectedOpts.Len(); i++ {
- expectedOpt := expectedOpts.Index(i).Interface()
- actualOpt := actualOpts.Index(i).Interface()
-
- expectedFunc := expectedNames[i]
- actualFunc := actualNames[i]
- if expectedFunc != actualFunc {
- expectedFmt = expectedFunc
- actualFmt = actualFunc
+ if !isFuncSame(expectedFuncs[i], actualFuncs[i]) {
+ expectedFmt = expectedNames[i]
+ actualFmt = actualNames[i]
return
}
+ expectedOpt := expectedOpts.Index(i).Interface()
+ actualOpt := actualOpts.Index(i).Interface()
+
ot := reflect.TypeOf(expectedOpt)
var expectedValues []reflect.Value
var actualValues []reflect.Value
@@ -1224,9 +1253,9 @@ func assertOpts(expected, actual interface{}) (expectedFmt, actualFmt string) {
reflect.ValueOf(actualOpt).Call(actualValues)
for i := 0; i < ot.NumIn(); i++ {
- if !assert.ObjectsAreEqual(expectedValues[i].Interface(), actualValues[i].Interface()) {
- expectedFmt = fmt.Sprintf("%s %+v", expectedNames[i], expectedValues[i].Interface())
- actualFmt = fmt.Sprintf("%s %+v", expectedNames[i], actualValues[i].Interface())
+ if expectedArg, actualArg := expectedValues[i].Interface(), actualValues[i].Interface(); !assert.ObjectsAreEqual(expectedArg, actualArg) {
+ expectedFmt = fmt.Sprintf("%s(%T) -> %#v", expectedNames[i], expectedArg, expectedArg)
+ actualFmt = fmt.Sprintf("%s(%T) -> %#v", expectedNames[i], actualArg, actualArg)
return
}
}
@@ -1235,7 +1264,25 @@ func assertOpts(expected, actual interface{}) (expectedFmt, actualFmt string) {
return "", ""
}
-func funcName(opt interface{}) string {
- n := runtime.FuncForPC(reflect.ValueOf(opt).Pointer()).Name()
- return strings.TrimSuffix(path.Base(n), path.Ext(n))
+func runtimeFunc(opt interface{}) *runtime.Func {
+ return runtime.FuncForPC(reflect.ValueOf(opt).Pointer())
+}
+
+func funcName(f *runtime.Func) string {
+ name := f.Name()
+ trimmed := strings.TrimSuffix(path.Base(name), path.Ext(name))
+ splitted := strings.Split(trimmed, ".")
+
+ if len(splitted) == 0 {
+ return trimmed
+ }
+
+ return splitted[len(splitted)-1]
+}
+
+func isFuncSame(f1, f2 *runtime.Func) bool {
+ f1File, f1Loc := f1.FileLine(f1.Entry())
+ f2File, f2Loc := f2.FileLine(f2.Entry())
+
+ return f1File == f2File && f1Loc == f2Loc
}
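
The rewritten comparison above keys functional options on their entry point rather than on a trimmed name. A minimal sketch of that technique using the same runtime/reflect calls (the Option type and WithA constructor are hypothetical):

package main

import (
	"fmt"
	"reflect"
	"runtime"
)

type Option func(*int)

func WithA(n int) Option { return func(v *int) { *v = n } }

// sameFunc mirrors isFuncSame: two funcs are "the same" when their entry
// points resolve to the same file and line.
func sameFunc(a, b interface{}) bool {
	fa := runtime.FuncForPC(reflect.ValueOf(a).Pointer())
	fb := runtime.FuncForPC(reflect.ValueOf(b).Pointer())
	faFile, faLine := fa.FileLine(fa.Entry())
	fbFile, fbLine := fb.FileLine(fb.Entry())
	return faFile == fbFile && faLine == fbLine
}

func main() {
	// Closures returned by the same constructor share an entry point, so
	// they compare equal even though their captured arguments differ.
	fmt.Println(sameFunc(WithA(1), WithA(2))) // true
}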
diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go
index 506a82f807775..d8921950d7bef 100644
--- a/vendor/github.com/stretchr/testify/require/require.go
+++ b/vendor/github.com/stretchr/testify/require/require.go
@@ -34,9 +34,9 @@ func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interfac
// Contains asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element.
//
-// assert.Contains(t, "Hello World", "World")
-// assert.Contains(t, ["Hello", "World"], "World")
-// assert.Contains(t, {"Hello": "World"}, "Hello")
+// require.Contains(t, "Hello World", "World")
+// require.Contains(t, ["Hello", "World"], "World")
+// require.Contains(t, {"Hello": "World"}, "Hello")
func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -50,9 +50,9 @@ func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...int
// Containsf asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element.
//
-// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
-// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
-// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
+// require.Containsf(t, "Hello World", "World", "error message %s", "formatted")
+// require.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
+// require.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -91,7 +91,7 @@ func DirExistsf(t TestingT, path string, msg string, args ...interface{}) {
// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
// the number of appearances of each of them in both lists should match.
//
-// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2])
+// require.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2])
func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -106,7 +106,7 @@ func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs
// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
// the number of appearances of each of them in both lists should match.
//
-// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
+// require.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -120,7 +120,7 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
-// assert.Empty(t, obj)
+// require.Empty(t, obj)
func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -134,7 +134,7 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
-// assert.Emptyf(t, obj, "error message %s", "formatted")
+// require.Emptyf(t, obj, "error message %s", "formatted")
func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -147,7 +147,7 @@ func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) {
// Equal asserts that two objects are equal.
//
-// assert.Equal(t, 123, 123)
+// require.Equal(t, 123, 123)
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses). Function equality
@@ -166,7 +166,7 @@ func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...i
// and that it is equal to the provided error.
//
// actualObj, err := SomeFunction()
-// assert.EqualError(t, err, expectedErrorString)
+// require.EqualError(t, err, expectedErrorString)
func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -181,7 +181,7 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte
// and that it is equal to the provided error.
//
// actualObj, err := SomeFunction()
-// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted")
+// require.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted")
func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -200,8 +200,8 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args
// Exported int
// notExported int
// }
-// assert.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true
-// assert.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false
+// require.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true
+// require.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false
func EqualExportedValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -220,8 +220,8 @@ func EqualExportedValues(t TestingT, expected interface{}, actual interface{}, m
// Exported int
// notExported int
// }
-// assert.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true
-// assert.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false
+// require.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true
+// require.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false
func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -232,10 +232,10 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{},
t.FailNow()
}
-// EqualValues asserts that two objects are equal or convertible to the same types
-// and equal.
+// EqualValues asserts that two objects are equal or convertible to the larger
+// type and equal.
//
-// assert.EqualValues(t, uint32(123), int32(123))
+// require.EqualValues(t, uint32(123), int32(123))
func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -246,10 +246,10 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg
t.FailNow()
}
-// EqualValuesf asserts that two objects are equal or convertible to the same types
-// and equal.
+// EqualValuesf asserts that two objects are equal or convertible to the larger
+// type and equal.
//
-// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
+// require.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -262,7 +262,7 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri
// Equalf asserts that two objects are equal.
//
-// assert.Equalf(t, 123, 123, "error message %s", "formatted")
+// require.Equalf(t, 123, 123, "error message %s", "formatted")
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses). Function equality
@@ -280,8 +280,8 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar
// Error asserts that a function returned an error (i.e. not `nil`).
//
// actualObj, err := SomeFunction()
-// if assert.Error(t, err) {
-// assert.Equal(t, expectedError, err)
+// if require.Error(t, err) {
+// require.Equal(t, expectedError, err)
// }
func Error(t TestingT, err error, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
@@ -321,7 +321,7 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int
// and that the error contains the specified substring.
//
// actualObj, err := SomeFunction()
-// assert.ErrorContains(t, err, expectedErrorSubString)
+// require.ErrorContains(t, err, expectedErrorSubString)
func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -336,7 +336,7 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in
// and that the error contains the specified substring.
//
// actualObj, err := SomeFunction()
-// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted")
+// require.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted")
func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -374,8 +374,8 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface
// Errorf asserts that a function returned an error (i.e. not `nil`).
//
// actualObj, err := SomeFunction()
-// if assert.Errorf(t, err, "error message %s", "formatted") {
-// assert.Equal(t, expectedErrorf, err)
+// if require.Errorf(t, err, "error message %s", "formatted") {
+// require.Equal(t, expectedErrorf, err)
// }
func Errorf(t TestingT, err error, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
@@ -390,7 +390,7 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) {
// Eventually asserts that given condition will be met in waitFor time,
// periodically checking target function each tick.
//
-// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond)
+// require.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond)
func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -415,10 +415,10 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t
// time.Sleep(8*time.Second)
// externalValue = true
// }()
-// assert.EventuallyWithT(t, func(c *assert.CollectT) {
+// require.EventuallyWithT(t, func(c *require.CollectT) {
// // add assertions as needed; any assertion failure will fail the current tick
-// assert.True(c, externalValue, "expected 'externalValue' to be true")
-// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
+// require.True(c, externalValue, "expected 'externalValue' to be true")
+// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
func EventuallyWithT(t TestingT, condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -443,10 +443,10 @@ func EventuallyWithT(t TestingT, condition func(collect *assert.CollectT), waitF
// time.Sleep(8*time.Second)
// externalValue = true
// }()
-// assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") {
+// require.EventuallyWithTf(t, func(c *require.CollectT, "error message %s", "formatted") {
// // add assertions as needed; any assertion failure will fail the current tick
-// assert.True(c, externalValue, "expected 'externalValue' to be true")
-// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
+// require.True(c, externalValue, "expected 'externalValue' to be true")
+// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
func EventuallyWithTf(t TestingT, condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -460,7 +460,7 @@ func EventuallyWithTf(t TestingT, condition func(collect *assert.CollectT), wait
// Eventuallyf asserts that given condition will be met in waitFor time,
// periodically checking target function each tick.
//
-// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+// require.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -473,7 +473,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick
// Exactly asserts that two objects are equal in value and type.
//
-// assert.Exactly(t, int32(123), int64(123))
+// require.Exactly(t, int32(123), int64(123))
func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -486,7 +486,7 @@ func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ..
// Exactlyf asserts that two objects are equal in value and type.
//
-// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
+// require.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -543,7 +543,7 @@ func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) {
// False asserts that the specified value is false.
//
-// assert.False(t, myBool)
+// require.False(t, myBool)
func False(t TestingT, value bool, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -556,7 +556,7 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) {
// Falsef asserts that the specified value is false.
//
-// assert.Falsef(t, myBool, "error message %s", "formatted")
+// require.Falsef(t, myBool, "error message %s", "formatted")
func Falsef(t TestingT, value bool, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -593,9 +593,9 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) {
// Greater asserts that the first element is greater than the second
//
-// assert.Greater(t, 2, 1)
-// assert.Greater(t, float64(2), float64(1))
-// assert.Greater(t, "b", "a")
+// require.Greater(t, 2, 1)
+// require.Greater(t, float64(2), float64(1))
+// require.Greater(t, "b", "a")
func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -608,10 +608,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface
// GreaterOrEqual asserts that the first element is greater than or equal to the second
//
-// assert.GreaterOrEqual(t, 2, 1)
-// assert.GreaterOrEqual(t, 2, 2)
-// assert.GreaterOrEqual(t, "b", "a")
-// assert.GreaterOrEqual(t, "b", "b")
+// require.GreaterOrEqual(t, 2, 1)
+// require.GreaterOrEqual(t, 2, 2)
+// require.GreaterOrEqual(t, "b", "a")
+// require.GreaterOrEqual(t, "b", "b")
func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -624,10 +624,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in
// GreaterOrEqualf asserts that the first element is greater than or equal to the second
//
-// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted")
-// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted")
-// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted")
-// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted")
+// require.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted")
+// require.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted")
+// require.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted")
+// require.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted")
func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -640,9 +640,9 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg
// Greaterf asserts that the first element is greater than the second
//
-// assert.Greaterf(t, 2, 1, "error message %s", "formatted")
-// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
-// assert.Greaterf(t, "b", "a", "error message %s", "formatted")
+// require.Greaterf(t, 2, 1, "error message %s", "formatted")
+// require.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
+// require.Greaterf(t, "b", "a", "error message %s", "formatted")
func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -656,7 +656,7 @@ func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...in
// HTTPBodyContains asserts that a specified handler returns a
// body that contains a string.
//
-// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+// require.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
@@ -672,7 +672,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url s
// HTTPBodyContainsf asserts that a specified handler returns a
// body that contains a string.
//
-// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+// require.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
@@ -688,7 +688,7 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url
// HTTPBodyNotContains asserts that a specified handler returns a
// body that does not contain a string.
//
-// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+// require.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
@@ -704,7 +704,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, ur
// HTTPBodyNotContainsf asserts that a specified handler returns a
// body that does not contain a string.
//
-// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+// require.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
@@ -719,7 +719,7 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u
// HTTPError asserts that a specified handler returns an error status code.
//
-// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+// require.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
@@ -734,7 +734,7 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string,
// HTTPErrorf asserts that a specified handler returns an error status code.
//
-// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+// require.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
@@ -749,7 +749,7 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string,
// HTTPRedirect asserts that a specified handler returns a redirect status code.
//
-// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+// require.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
@@ -764,7 +764,7 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url strin
// HTTPRedirectf asserts that a specified handler returns a redirect status code.
//
-// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+// require.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
@@ -779,7 +779,7 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri
// HTTPStatusCode asserts that a specified handler returns a specified status code.
//
-// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
+// require.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) {
@@ -794,7 +794,7 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url str
// HTTPStatusCodef asserts that a specified handler returns a specified status code.
//
-// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
+// require.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) {
@@ -809,7 +809,7 @@ func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url st
// HTTPSuccess asserts that a specified handler returns a success status code.
//
-// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
+// require.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
@@ -824,7 +824,7 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string
// HTTPSuccessf asserts that a specified handler returns a success status code.
//
-// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+// require.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
@@ -839,7 +839,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin
// Implements asserts that an object is implemented by the specified interface.
//
-// assert.Implements(t, (*MyInterface)(nil), new(MyObject))
+// require.Implements(t, (*MyInterface)(nil), new(MyObject))
func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -852,7 +852,7 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg
// Implementsf asserts that an object is implemented by the specified interface.
//
-// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+// require.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -865,7 +865,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms
// InDelta asserts that the two numerals are within delta of each other.
//
-// assert.InDelta(t, math.Pi, 22/7.0, 0.01)
+// require.InDelta(t, math.Pi, 22/7.0, 0.01)
func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -922,7 +922,7 @@ func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta f
// InDeltaf asserts that the two numerals are within delta of each other.
//
-// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
+// require.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -979,9 +979,9 @@ func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon fl
// IsDecreasing asserts that the collection is decreasing
//
-// assert.IsDecreasing(t, []int{2, 1, 0})
-// assert.IsDecreasing(t, []float{2, 1})
-// assert.IsDecreasing(t, []string{"b", "a"})
+// require.IsDecreasing(t, []int{2, 1, 0})
+// require.IsDecreasing(t, []float{2, 1})
+// require.IsDecreasing(t, []string{"b", "a"})
func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -994,9 +994,9 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
// IsDecreasingf asserts that the collection is decreasing
//
-// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
-// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted")
-// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
+// require.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
+// require.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted")
+// require.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1009,9 +1009,9 @@ func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface
// IsIncreasing asserts that the collection is increasing
//
-// assert.IsIncreasing(t, []int{1, 2, 3})
-// assert.IsIncreasing(t, []float{1, 2})
-// assert.IsIncreasing(t, []string{"a", "b"})
+// require.IsIncreasing(t, []int{1, 2, 3})
+// require.IsIncreasing(t, []float{1, 2})
+// require.IsIncreasing(t, []string{"a", "b"})
func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1024,9 +1024,9 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
// IsIncreasingf asserts that the collection is increasing
//
-// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
-// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted")
-// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
+// require.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
+// require.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted")
+// require.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1039,9 +1039,9 @@ func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface
// IsNonDecreasing asserts that the collection is not decreasing
//
-// assert.IsNonDecreasing(t, []int{1, 1, 2})
-// assert.IsNonDecreasing(t, []float{1, 2})
-// assert.IsNonDecreasing(t, []string{"a", "b"})
+// require.IsNonDecreasing(t, []int{1, 1, 2})
+// require.IsNonDecreasing(t, []float{1, 2})
+// require.IsNonDecreasing(t, []string{"a", "b"})
func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1054,9 +1054,9 @@ func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{})
// IsNonDecreasingf asserts that the collection is not decreasing
//
-// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
-// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted")
-// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
+// require.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
+// require.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted")
+// require.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1069,9 +1069,9 @@ func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interf
// IsNonIncreasing asserts that the collection is not increasing
//
-// assert.IsNonIncreasing(t, []int{2, 1, 1})
-// assert.IsNonIncreasing(t, []float{2, 1})
-// assert.IsNonIncreasing(t, []string{"b", "a"})
+// require.IsNonIncreasing(t, []int{2, 1, 1})
+// require.IsNonIncreasing(t, []float{2, 1})
+// require.IsNonIncreasing(t, []string{"b", "a"})
func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1084,9 +1084,9 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{})
// IsNonIncreasingf asserts that the collection is not increasing
//
-// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
-// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted")
-// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
+// require.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
+// require.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted")
+// require.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1121,7 +1121,7 @@ func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg strin
// JSONEq asserts that two JSON strings are equivalent.
//
-// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+// require.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1134,7 +1134,7 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{
// JSONEqf asserts that two JSON strings are equivalent.
//
-// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+// require.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1148,7 +1148,7 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int
// Len asserts that the specified object has specific length.
 // Len also fails if the object has a type that len() does not accept.
//
-// assert.Len(t, mySlice, 3)
+// require.Len(t, mySlice, 3)
func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1162,7 +1162,7 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{})
// Lenf asserts that the specified object has specific length.
 // Lenf also fails if the object has a type that len() does not accept.
//
-// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
+// require.Lenf(t, mySlice, 3, "error message %s", "formatted")
func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1175,9 +1175,9 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf
// Less asserts that the first element is less than the second
//
-// assert.Less(t, 1, 2)
-// assert.Less(t, float64(1), float64(2))
-// assert.Less(t, "a", "b")
+// require.Less(t, 1, 2)
+// require.Less(t, float64(1), float64(2))
+// require.Less(t, "a", "b")
func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1190,10 +1190,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{})
// LessOrEqual asserts that the first element is less than or equal to the second
//
-// assert.LessOrEqual(t, 1, 2)
-// assert.LessOrEqual(t, 2, 2)
-// assert.LessOrEqual(t, "a", "b")
-// assert.LessOrEqual(t, "b", "b")
+// require.LessOrEqual(t, 1, 2)
+// require.LessOrEqual(t, 2, 2)
+// require.LessOrEqual(t, "a", "b")
+// require.LessOrEqual(t, "b", "b")
func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1206,10 +1206,10 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter
// LessOrEqualf asserts that the first element is less than or equal to the second
//
-// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted")
-// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted")
-// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted")
-// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted")
+// require.LessOrEqualf(t, 1, 2, "error message %s", "formatted")
+// require.LessOrEqualf(t, 2, 2, "error message %s", "formatted")
+// require.LessOrEqualf(t, "a", "b", "error message %s", "formatted")
+// require.LessOrEqualf(t, "b", "b", "error message %s", "formatted")
func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1222,9 +1222,9 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args .
// Lessf asserts that the first element is less than the second
//
-// assert.Lessf(t, 1, 2, "error message %s", "formatted")
-// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
-// assert.Lessf(t, "a", "b", "error message %s", "formatted")
+// require.Lessf(t, 1, 2, "error message %s", "formatted")
+// require.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
+// require.Lessf(t, "a", "b", "error message %s", "formatted")
func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1237,8 +1237,8 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter
// Negative asserts that the specified element is negative
//
-// assert.Negative(t, -1)
-// assert.Negative(t, -1.23)
+// require.Negative(t, -1)
+// require.Negative(t, -1.23)
func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1251,8 +1251,8 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) {
// Negativef asserts that the specified element is negative
//
-// assert.Negativef(t, -1, "error message %s", "formatted")
-// assert.Negativef(t, -1.23, "error message %s", "formatted")
+// require.Negativef(t, -1, "error message %s", "formatted")
+// require.Negativef(t, -1.23, "error message %s", "formatted")
func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1266,7 +1266,7 @@ func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) {
// Never asserts that the given condition doesn't satisfy in waitFor time,
// periodically checking the target function each tick.
//
-// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
+// require.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1280,7 +1280,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D
// Neverf asserts that the given condition doesn't satisfy in waitFor time,
// periodically checking the target function each tick.
//
-// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+// require.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1293,7 +1293,7 @@ func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.
// Nil asserts that the specified object is nil.
//
-// assert.Nil(t, err)
+// require.Nil(t, err)
func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1306,7 +1306,7 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
// Nilf asserts that the specified object is nil.
//
-// assert.Nilf(t, err, "error message %s", "formatted")
+// require.Nilf(t, err, "error message %s", "formatted")
func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1344,8 +1344,8 @@ func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) {
// NoError asserts that a function returned no error (i.e. `nil`).
//
// actualObj, err := SomeFunction()
-// if assert.NoError(t, err) {
-// assert.Equal(t, expectedObj, actualObj)
+// if require.NoError(t, err) {
+// require.Equal(t, expectedObj, actualObj)
// }
func NoError(t TestingT, err error, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
@@ -1360,8 +1360,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) {
// NoErrorf asserts that a function returned no error (i.e. `nil`).
//
// actualObj, err := SomeFunction()
-// if assert.NoErrorf(t, err, "error message %s", "formatted") {
-// assert.Equal(t, expectedObj, actualObj)
+// if require.NoErrorf(t, err, "error message %s", "formatted") {
+// require.Equal(t, expectedObj, actualObj)
// }
func NoErrorf(t TestingT, err error, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
@@ -1400,9 +1400,9 @@ func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) {
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
-// assert.NotContains(t, "Hello World", "Earth")
-// assert.NotContains(t, ["Hello", "World"], "Earth")
-// assert.NotContains(t, {"Hello": "World"}, "Earth")
+// require.NotContains(t, "Hello World", "Earth")
+// require.NotContains(t, ["Hello", "World"], "Earth")
+// require.NotContains(t, {"Hello": "World"}, "Earth")
func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1416,9 +1416,9 @@ func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...
// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
-// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
-// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
-// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
+// require.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
+// require.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
+// require.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1429,11 +1429,51 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a
t.FailNow()
}
+// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
+//
+// require.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false
+//
+// require.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true
+//
+// require.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true
+func NotElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NotElementsMatch(t, listA, listB, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
+//
+// require.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false
+//
+// require.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true
+//
+// require.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true
+func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NotElementsMatchf(t, listA, listB, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
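A short usage sketch for the newly added NotElementsMatch wrapper:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestNotElementsMatch(t *testing.T) {
	// Same elements but different multiplicities, so the lists do not
	// match and the inverse assertion passes.
	require.NotElementsMatch(t, []int{1, 1, 2, 3}, []int{1, 2, 3})
}
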
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
-// if assert.NotEmpty(t, obj) {
-// assert.Equal(t, "two", obj[1])
+// if require.NotEmpty(t, obj) {
+// require.Equal(t, "two", obj[1])
// }
func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
@@ -1448,8 +1488,8 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
-// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
-// assert.Equal(t, "two", obj[1])
+// if require.NotEmptyf(t, obj, "error message %s", "formatted") {
+// require.Equal(t, "two", obj[1])
// }
func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
@@ -1463,7 +1503,7 @@ func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{})
// NotEqual asserts that the specified values are NOT equal.
//
-// assert.NotEqual(t, obj1, obj2)
+// require.NotEqual(t, obj1, obj2)
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses).
@@ -1479,7 +1519,7 @@ func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs .
// NotEqualValues asserts that two objects are not equal even when converted to the same type
//
-// assert.NotEqualValues(t, obj1, obj2)
+// require.NotEqualValues(t, obj1, obj2)
func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1492,7 +1532,7 @@ func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAnd
// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
//
-// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
+// require.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1505,7 +1545,7 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s
// NotEqualf asserts that the specified values are NOT equal.
//
-// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
+// require.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses).
@@ -1519,7 +1559,31 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string,
t.FailNow()
}
-// NotErrorIs asserts that at none of the errors in err's chain matches target.
+// NotErrorAs asserts that none of the errors in err's chain matches target.
+// If one does, the assertion fails after errors.As has set target to that error value.
+func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NotErrorAs(t, err, target, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// NotErrorAsf asserts that none of the errors in err's chain matches target.
+// If one does, the assertion fails after errors.As has set target to that error value.
+func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NotErrorAsf(t, err, target, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
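And a short sketch of the new NotErrorAs wrapper (MyErr is a hypothetical error type):

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

type MyErr struct{ msg string }

func (e *MyErr) Error() string { return e.msg }

func TestNotErrorAs(t *testing.T) {
	err := errors.New("plain error")
	var target *MyErr
	// Passes: nothing in err's chain is a *MyErr, so target stays nil.
	require.NotErrorAs(t, err, &target)
}
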
+// NotErrorIs asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
@@ -1531,7 +1595,7 @@ func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{})
t.FailNow()
}
-// NotErrorIsf asserts that at none of the errors in err's chain matches target.
+// NotErrorIsf asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
@@ -1545,7 +1609,7 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf
// NotImplements asserts that an object does not implement the specified interface.
//
-// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject))
+// require.NotImplements(t, (*MyInterface)(nil), new(MyObject))
func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1558,7 +1622,7 @@ func NotImplements(t TestingT, interfaceObject interface{}, object interface{},
// NotImplementsf asserts that an object does not implement the specified interface.
//
-// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+// require.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1571,7 +1635,7 @@ func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{},
// NotNil asserts that the specified object is not nil.
//
-// assert.NotNil(t, err)
+// require.NotNil(t, err)
func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1584,7 +1648,7 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
// NotNilf asserts that the specified object is not nil.
//
-// assert.NotNilf(t, err, "error message %s", "formatted")
+// require.NotNilf(t, err, "error message %s", "formatted")
func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1597,7 +1661,7 @@ func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) {
// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
//
-// assert.NotPanics(t, func(){ RemainCalm() })
+// require.NotPanics(t, func(){ RemainCalm() })
func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1610,7 +1674,7 @@ func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
//
-// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
+// require.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1623,8 +1687,8 @@ func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interfac
// NotRegexp asserts that a specified regexp does not match a string.
//
-// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
-// assert.NotRegexp(t, "^start", "it's not starting")
+// require.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
+// require.NotRegexp(t, "^start", "it's not starting")
func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1637,8 +1701,8 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf
// NotRegexpf asserts that a specified regexp does not match a string.
//
-// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
-// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
+// require.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+// require.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1651,7 +1715,7 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ..
// NotSame asserts that two pointers do not reference the same object.
//
-// assert.NotSame(t, ptr1, ptr2)
+// require.NotSame(t, ptr1, ptr2)
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
@@ -1667,7 +1731,7 @@ func NotSame(t TestingT, expected interface{}, actual interface{}, msgAndArgs ..
// NotSamef asserts that two pointers do not reference the same object.
//
-// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
+// require.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
@@ -1685,8 +1749,8 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string,
// contain all elements given in the specified subset list(array, slice...) or
// map.
//
-// assert.NotSubset(t, [1, 3, 4], [1, 2])
-// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3})
+// require.NotSubset(t, [1, 3, 4], [1, 2])
+// require.NotSubset(t, {"x": 1, "y": 2}, {"z": 3})
func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1701,8 +1765,8 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i
// contain all elements given in the specified subset list(array, slice...) or
// map.
//
-// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
-// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
+// require.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
+// require.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1737,7 +1801,7 @@ func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) {
// Panics asserts that the code inside the specified PanicTestFunc panics.
//
-// assert.Panics(t, func(){ GoCrazy() })
+// require.Panics(t, func(){ GoCrazy() })
func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1752,7 +1816,7 @@ func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
// panics, and that the recovered panic value is an error that satisfies the
// EqualError comparison.
//
-// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() })
+// require.PanicsWithError(t, "crazy error", func(){ GoCrazy() })
func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1767,7 +1831,7 @@ func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAn
// panics, and that the recovered panic value is an error that satisfies the
// EqualError comparison.
//
-// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+// require.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1781,7 +1845,7 @@ func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg
// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value.
//
-// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() })
+// require.PanicsWithValue(t, "crazy error", func(){ GoCrazy() })
func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1795,7 +1859,7 @@ func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, m
// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value.
//
-// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+// require.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1808,7 +1872,7 @@ func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc,
// Panicsf asserts that the code inside the specified PanicTestFunc panics.
//
-// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
+// require.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1821,8 +1885,8 @@ func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}
// Positive asserts that the specified element is positive
//
-// assert.Positive(t, 1)
-// assert.Positive(t, 1.23)
+// require.Positive(t, 1)
+// require.Positive(t, 1.23)
func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1835,8 +1899,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) {
// Positivef asserts that the specified element is positive
//
-// assert.Positivef(t, 1, "error message %s", "formatted")
-// assert.Positivef(t, 1.23, "error message %s", "formatted")
+// require.Positivef(t, 1, "error message %s", "formatted")
+// require.Positivef(t, 1.23, "error message %s", "formatted")
func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1849,8 +1913,8 @@ func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) {
// Regexp asserts that a specified regexp matches a string.
//
-// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
-// assert.Regexp(t, "start...$", "it's not starting")
+// require.Regexp(t, regexp.MustCompile("start"), "it's starting")
+// require.Regexp(t, "start...$", "it's not starting")
func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1863,8 +1927,8 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface
// Regexpf asserts that a specified regexp matches a string.
//
-// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
-// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
+// require.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+// require.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1877,7 +1941,7 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in
// Same asserts that two pointers reference the same object.
//
-// assert.Same(t, ptr1, ptr2)
+// require.Same(t, ptr1, ptr2)
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
@@ -1893,7 +1957,7 @@ func Same(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...in
// Samef asserts that two pointers reference the same object.
//
-// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted")
+// require.Samef(t, ptr1, ptr2, "error message %s", "formatted")
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
@@ -1910,8 +1974,8 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg
// Subset asserts that the specified list(array, slice...) or map contains all
// elements given in the specified subset list(array, slice...) or map.
//
-// assert.Subset(t, [1, 2, 3], [1, 2])
-// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1})
+// require.Subset(t, [1, 2, 3], [1, 2])
+// require.Subset(t, {"x": 1, "y": 2}, {"x": 1})
func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1925,8 +1989,8 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte
// Subsetf asserts that the specified list(array, slice...) or map contains all
// elements given in the specified subset list(array, slice...) or map.
//
-// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
-// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
+// require.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
+// require.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1939,7 +2003,7 @@ func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args
// True asserts that the specified value is true.
//
-// assert.True(t, myBool)
+// require.True(t, myBool)
func True(t TestingT, value bool, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1952,7 +2016,7 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) {
// Truef asserts that the specified value is true.
//
-// assert.Truef(t, myBool, "error message %s", "formatted")
+// require.Truef(t, myBool, "error message %s", "formatted")
func Truef(t TestingT, value bool, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1965,7 +2029,7 @@ func Truef(t TestingT, value bool, msg string, args ...interface{}) {
// WithinDuration asserts that the two times are within duration delta of each other.
//
-// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second)
+// require.WithinDuration(t, time.Now(), time.Now(), 10*time.Second)
func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1978,7 +2042,7 @@ func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time
// WithinDurationf asserts that the two times are within duration delta of each other.
//
-// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+// require.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1991,7 +2055,7 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim
// WithinRange asserts that a time is within a time range (inclusive).
//
-// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
+// require.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -2004,7 +2068,7 @@ func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, m
// WithinRangef asserts that a time is within a time range (inclusive).
//
-// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
+// require.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
diff --git a/vendor/github.com/stretchr/testify/require/require.go.tmpl b/vendor/github.com/stretchr/testify/require/require.go.tmpl
index 55e42ddebdc45..8b32836850936 100644
--- a/vendor/github.com/stretchr/testify/require/require.go.tmpl
+++ b/vendor/github.com/stretchr/testify/require/require.go.tmpl
@@ -1,4 +1,4 @@
-{{.Comment}}
+{{ replace .Comment "assert." "require."}}
func {{.DocInfo.Name}}(t TestingT, {{.Params}}) {
if h, ok := t.(tHelper); ok { h.Helper() }
if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return }
diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go
index eee8310a5fa91..1bd87304f4315 100644
--- a/vendor/github.com/stretchr/testify/require/require_forward.go
+++ b/vendor/github.com/stretchr/testify/require/require_forward.go
@@ -187,8 +187,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface
EqualExportedValuesf(a.t, expected, actual, msg, args...)
}
-// EqualValues asserts that two objects are equal or convertible to the same types
-// and equal.
+// EqualValues asserts that two objects are equal or convertible to the larger
+// type and equal.
//
// a.EqualValues(uint32(123), int32(123))
func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
@@ -198,8 +198,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
EqualValues(a.t, expected, actual, msgAndArgs...)
}
-// EqualValuesf asserts that two objects are equal or convertible to the same types
-// and equal.
+// EqualValuesf asserts that two objects are equal or convertible to the larger
+// type and equal.
//
// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
@@ -337,7 +337,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti
// a.EventuallyWithT(func(c *assert.CollectT) {
// // add assertions as needed; any assertion failure will fail the current tick
// assert.True(c, externalValue, "expected 'externalValue' to be true")
-// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
+// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
func (a *Assertions) EventuallyWithT(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -362,7 +362,7 @@ func (a *Assertions) EventuallyWithT(condition func(collect *assert.CollectT), w
// a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") {
// // add assertions as needed; any assertion failure will fail the current tick
// assert.True(c, externalValue, "expected 'externalValue' to be true")
-// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
+// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
func (a *Assertions) EventuallyWithTf(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
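The doc fix above swaps the example's arguments into the actual signature order: waitFor (the overall deadline) comes before tick (the polling interval). A minimal hedged sketch of that order in use; the test name, timings, and readiness flag are illustrative, not from the source.

```go
package example

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestEventuallyOrder(t *testing.T) {
	var ready atomic.Bool
	time.AfterFunc(100*time.Millisecond, func() { ready.Store(true) })

	require.EventuallyWithT(t, func(c *assert.CollectT) {
		// Any assertion failure here fails the current tick, not the test.
		assert.True(c, ready.Load(), "expected readiness flag to flip")
	}, 10*time.Second, 1*time.Second) // waitFor=10s, tick=1s — the order the fixed docs show
}
```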
@@ -1129,6 +1129,40 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin
NotContainsf(a.t, s, contains, msg, args...)
}
+// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
+//
+// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false
+//
+// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true
+//
+// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true
+func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ NotElementsMatch(a.t, listA, listB, msgAndArgs...)
+}
+
+// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
+//
+// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false
+//
+// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true
+//
+// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true
+func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ NotElementsMatchf(a.t, listA, listB, msg, args...)
+}
+
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
@@ -1201,7 +1235,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str
NotEqualf(a.t, expected, actual, msg, args...)
}
-// NotErrorIs asserts that at none of the errors in err's chain matches target.
+// NotErrorAs asserts that none of the errors in err's chain matches target,
+// but if so, sets target to that error value.
+func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ NotErrorAs(a.t, err, target, msgAndArgs...)
+}
+
+// NotErrorAsf asserts that none of the errors in err's chain matches target,
+// but if so, sets target to that error value.
+func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ NotErrorAsf(a.t, err, target, msg, args...)
+}
+
+// NotErrorIs asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
@@ -1210,7 +1262,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface
NotErrorIs(a.t, err, target, msgAndArgs...)
}
-// NotErrorIsf asserts that at none of the errors in err's chain matches target.
+// NotErrorIsf asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
diff --git a/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/testify/require/requirements.go
index 91772dfeb9192..6b7ce929eb1c7 100644
--- a/vendor/github.com/stretchr/testify/require/requirements.go
+++ b/vendor/github.com/stretchr/testify/require/requirements.go
@@ -6,7 +6,7 @@ type TestingT interface {
FailNow()
}
-type tHelper interface {
+type tHelper = interface {
Helper()
}
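The added `=` turns tHelper from a defined interface type into a type alias, so it is the identical type as the interface literal rather than a distinct named type with the same method set. A small hedged sketch of that distinction; the package and names here are illustrative:

```go
package main

import (
	"fmt"
	"reflect"
)

type defined interface{ Helper() }   // a new named type
type aliased = interface{ Helper() } // an alias: the same type as the literal

func main() {
	lit := reflect.TypeOf((*interface{ Helper() })(nil)).Elem()
	def := reflect.TypeOf((*defined)(nil)).Elem()
	ali := reflect.TypeOf((*aliased)(nil)).Elem()

	fmt.Println(def == lit) // false: a defined type has its own identity
	fmt.Println(ali == lit) // true: an alias is the literal type itself
}
```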
diff --git a/vendor/github.com/stretchr/testify/suite/doc.go b/vendor/github.com/stretchr/testify/suite/doc.go
index 8d55a3aa8923d..05a562f721964 100644
--- a/vendor/github.com/stretchr/testify/suite/doc.go
+++ b/vendor/github.com/stretchr/testify/suite/doc.go
@@ -5,6 +5,8 @@
// or individual tests (depending on which interface(s) you
// implement).
//
+// The suite package does not support parallel tests. See [issue 934].
+//
// A testing suite is usually built by first extending the built-in
// suite functionality from suite.Suite in testify. Alternatively,
// you could reproduce that logic on your own if you wanted (you
@@ -63,4 +65,6 @@
// func TestExampleTestSuite(t *testing.T) {
// suite.Run(t, new(ExampleTestSuite))
// }
+//
+// [issue 934]: https://github.com/stretchr/testify/issues/934
package suite
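For context, a minimal suite skeleton consistent with the documented pattern excerpted above; per the new doc note, none of its methods should call t.Parallel(). Names and the stored value are illustrative.

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

type ExampleTestSuite struct {
	suite.Suite
	value int
}

// SetupTest runs before each Test* method on the suite.
func (s *ExampleTestSuite) SetupTest() { s.value = 5 }

func (s *ExampleTestSuite) TestValue() {
	s.Equal(5, s.value) // assertions are available directly on the suite
}

// The go test entry point that runs all of the suite's Test* methods.
func TestExampleTestSuite(t *testing.T) {
	suite.Run(t, new(ExampleTestSuite))
}
```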
diff --git a/vendor/modules.txt b/vendor/modules.txt
index f5ef74d30e20c..5ae1cad037db6 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1549,9 +1549,10 @@ github.com/spf13/pflag
# github.com/stretchr/objx v0.5.2
## explicit; go 1.20
github.com/stretchr/objx
-# github.com/stretchr/testify v1.9.0
+# github.com/stretchr/testify v1.10.0
## explicit; go 1.17
github.com/stretchr/testify/assert
+github.com/stretchr/testify/assert/yaml
github.com/stretchr/testify/mock
github.com/stretchr/testify/require
github.com/stretchr/testify/suite
|
fix
|
update module github.com/stretchr/testify to v1.10.0 (#15090)
|
e382cfe95ddd8cb84b9d554d86799f9d14182f72
|
2024-12-10 03:04:01
|
renovate[bot]
|
fix(deps): update module github.com/axiomhq/hyperloglog to v0.2.1 (#15322)
| false
|
diff --git a/go.mod b/go.mod
index 4e010d25c205b..bac9587705b5e 100644
--- a/go.mod
+++ b/go.mod
@@ -116,7 +116,7 @@ require (
github.com/DmitriyVTitov/size v1.5.0
github.com/IBM/go-sdk-core/v5 v5.18.1
github.com/IBM/ibm-cos-sdk-go v1.12.0
- github.com/axiomhq/hyperloglog v0.2.0
+ github.com/axiomhq/hyperloglog v0.2.1
github.com/buger/jsonparser v1.1.1
github.com/d4l3k/messagediff v1.2.1
github.com/dolthub/swiss v0.2.1
@@ -171,6 +171,7 @@ require (
github.com/gorilla/handlers v1.5.2 // indirect
github.com/hashicorp/golang-lru v0.6.0 // indirect
github.com/imdario/mergo v0.3.16 // indirect
+ github.com/kamstrup/intmap v0.5.0 // indirect
github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
diff --git a/go.sum b/go.sum
index 6cb4957429f32..f366e1c40596b 100644
--- a/go.sum
+++ b/go.sum
@@ -1006,8 +1006,8 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.16.1 h1:xsOtPAvHqhvQvBza5ohaUcfq1Lce
github.com/aws/aws-sdk-go-v2/service/sts v1.16.1/go.mod h1:Aq2/Qggh2oemSfyHH+EO4UBbgWG6zFCXLHYI4ILTY7w=
github.com/aws/smithy-go v1.11.1 h1:IQ+lPZVkSM3FRtyaDox41R8YS6iwPMYIreejOgPW49g=
github.com/aws/smithy-go v1.11.1/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM=
-github.com/axiomhq/hyperloglog v0.2.0 h1:u1XT3yyY1rjzlWuP6NQIrV4bRYHOaqZaovqjcBEvZJo=
-github.com/axiomhq/hyperloglog v0.2.0/go.mod h1:GcgMjz9gaDKZ3G0UMS6Fq/VkZ4l7uGgcJyxA7M+omIM=
+github.com/axiomhq/hyperloglog v0.2.1 h1:z+rouIlYdpZ+DVfnQigBimhQL6OKHIL3e8+hMiud5/c=
+github.com/axiomhq/hyperloglog v0.2.1/go.mod h1:WCdOZ8PNJKNcBw3xFZ7iHlnUn1nDVHK/XToLjjmySh4=
github.com/baidubce/bce-sdk-go v0.9.205 h1:9cx93gC4FSu3W3G4NkDfFl0XMUycCpvQN+nB3doNmvg=
github.com/baidubce/bce-sdk-go v0.9.205/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
@@ -2030,6 +2030,8 @@ github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4d
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/kamstrup/intmap v0.5.0 h1:WY7OJQeG7Ujc9zpPTO6PraDGSveG9js9wCPoI2q8wJQ=
+github.com/kamstrup/intmap v0.5.0/go.mod h1:gWUVWHKzWj8xpJVFf5GC0O26bWmv3GqdnIX/LMT6Aq4=
github.com/kardianos/service v1.0.0/go.mod h1:8CzDhVuCuugtsHyZoTvsOBuvonN/UDBvl0kH+BUxvbo=
github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
diff --git a/vendor/github.com/axiomhq/hyperloglog/hyperloglog.go b/vendor/github.com/axiomhq/hyperloglog/hyperloglog.go
index 638b291cd23a9..24b39e43562aa 100644
--- a/vendor/github.com/axiomhq/hyperloglog/hyperloglog.go
+++ b/vendor/github.com/axiomhq/hyperloglog/hyperloglog.go
@@ -18,7 +18,7 @@ type Sketch struct {
p uint8
m uint32
alpha float64
- tmpSet set
+ tmpSet *set
sparseList *compressedList
regs []uint8
}
@@ -45,7 +45,7 @@ func NewSketch(precision uint8, sparse bool) (*Sketch, error) {
alpha: alpha(float64(m)),
}
if sparse {
- s.tmpSet = set{}
+ s.tmpSet = newSet(0)
s.sparseList = newCompressedList(0)
} else {
s.regs = make([]uint8, m)
@@ -65,7 +65,7 @@ func (sk *Sketch) Clone() *Sketch {
}
func (sk *Sketch) maybeToNormal() {
- if uint32(len(sk.tmpSet))*100 > sk.m {
+ if uint32(sk.tmpSet.Len())*100 > sk.m {
sk.mergeSparse()
if uint32(sk.sparseList.Len()) > sk.m {
sk.toNormal()
@@ -90,9 +90,7 @@ func (sk *Sketch) Merge(other *Sketch) error {
}
func (sk *Sketch) mergeSparseSketch(other *Sketch) {
- for k := range other.tmpSet {
- sk.tmpSet.add(k)
- }
+ sk.tmpSet.Merge(other.tmpSet)
for iter := other.sparseList.Iter(); iter.HasNext(); {
sk.tmpSet.add(iter.Next())
}
@@ -105,10 +103,10 @@ func (sk *Sketch) mergeDenseSketch(other *Sketch) {
}
if other.sparse() {
- for k := range other.tmpSet {
+ other.tmpSet.ForEach(func(k uint32) {
i, r := decodeHash(k, other.p, pp)
sk.insert(i, r)
- }
+ })
for iter := other.sparseList.Iter(); iter.HasNext(); {
i, r := decodeHash(iter.Next(), other.p, pp)
sk.insert(i, r)
@@ -123,7 +121,7 @@ func (sk *Sketch) mergeDenseSketch(other *Sketch) {
}
func (sk *Sketch) toNormal() {
- if len(sk.tmpSet) > 0 {
+ if sk.tmpSet.Len() > 0 {
sk.mergeSparse()
}
@@ -165,17 +163,17 @@ func (sk *Sketch) Estimate() uint64 {
}
func (sk *Sketch) mergeSparse() {
- if len(sk.tmpSet) == 0 {
+ if sk.tmpSet.Len() == 0 {
return
}
- keys := make(uint64Slice, 0, len(sk.tmpSet))
- for k := range sk.tmpSet {
+ keys := make(uint64Slice, 0, sk.tmpSet.Len())
+ sk.tmpSet.ForEach(func(k uint32) {
keys = append(keys, k)
- }
+ })
sort.Sort(keys)
- newList := newCompressedList(4*len(sk.tmpSet) + len(sk.sparseList.b))
+ newList := newCompressedList(4*sk.tmpSet.Len() + sk.sparseList.Len())
for iter, i := sk.sparseList.Iter(), 0; iter.HasNext() || i < len(keys); {
if !iter.HasNext() {
newList.Append(keys[i])
@@ -201,7 +199,7 @@ func (sk *Sketch) mergeSparse() {
}
sk.sparseList = newList
- sk.tmpSet = set{}
+ sk.tmpSet = newSet(0)
}
// MarshalBinary implements the encoding.BinaryMarshaler interface.
@@ -277,7 +275,7 @@ func (sk *Sketch) UnmarshalBinary(data []byte) error {
sparse := data[3] == byte(1)
// Make a newSketch Sketch if the precision doesn't match or if the Sketch was used
- if sk.p != p || sk.regs != nil || len(sk.tmpSet) > 0 || (sk.sparseList != nil && sk.sparseList.Len() > 0) {
+ if sk.p != p || sk.regs != nil || sk.tmpSet.Len() > 0 || (sk.sparseList != nil && sk.sparseList.Len() > 0) {
newh, err := NewSketch(p, sparse)
if err != nil {
return err
@@ -292,14 +290,14 @@ func (sk *Sketch) UnmarshalBinary(data []byte) error {
// Unmarshal the tmp_set.
tssz := binary.BigEndian.Uint32(data[4:8])
- sk.tmpSet = make(map[uint32]struct{}, tssz)
+ sk.tmpSet = newSet(int(tssz))
// We need to unmarshal tssz values in total, and each value requires us
// to read 4 bytes.
tsLastByte := int((tssz * 4) + 8)
for i := 8; i < tsLastByte; i += 4 {
k := binary.BigEndian.Uint32(data[i : i+4])
- sk.tmpSet[k] = struct{}{}
+ sk.tmpSet.add(k)
}
// Unmarshal the sparse Sketch.
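The tmpSet change above is internal to the Sketch, so the public round-trip is unchanged. A hedged usage sketch — NewSketch appears in this diff, while Insert and Estimate are the package's documented API and are assumed stable here:

```go
package main

import (
	"fmt"

	"github.com/axiomhq/hyperloglog"
)

func main() {
	sk, err := hyperloglog.NewSketch(14, true) // precision 14, sparse mode on
	if err != nil {
		panic(err)
	}
	for i := 0; i < 1000; i++ {
		sk.Insert([]byte(fmt.Sprintf("item-%d", i)))
	}
	fmt.Println(sk.Estimate()) // approximately 1000

	// Serialization exercises the tmpSet marshal/unmarshal paths changed above.
	data, _ := sk.MarshalBinary()
	back, _ := hyperloglog.NewSketch(14, true)
	_ = back.UnmarshalBinary(data)
	fmt.Println(back.Estimate())
}
```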
diff --git a/vendor/github.com/axiomhq/hyperloglog/sparse.go b/vendor/github.com/axiomhq/hyperloglog/sparse.go
index 8c457d3278224..0151740df9859 100644
--- a/vendor/github.com/axiomhq/hyperloglog/sparse.go
+++ b/vendor/github.com/axiomhq/hyperloglog/sparse.go
@@ -2,6 +2,8 @@ package hyperloglog
import (
"math/bits"
+
+ "github.com/kamstrup/intmap"
)
func getIndex(k uint32, p, pp uint8) uint32 {
@@ -34,37 +36,61 @@ func decodeHash(k uint32, p, pp uint8) (uint32, uint8) {
return getIndex(k, p, pp), r
}
-type set map[uint32]struct{}
+type set struct {
+ m *intmap.Set[uint32]
+}
+
+func newSet(size int) *set {
+ return &set{m: intmap.NewSet[uint32](size)}
+}
+
+func (s *set) ForEach(fn func(v uint32)) {
+ s.m.ForEach(func(v uint32) bool {
+ fn(v)
+ return true
+ })
+}
+
+func (s *set) Merge(other *set) {
+ other.m.ForEach(func(v uint32) bool {
+ s.m.Add(v)
+ return true
+ })
+}
+
+func (s *set) Len() int {
+ return s.m.Len()
+}
-func (s set) add(v uint32) bool {
- _, ok := s[v]
- if ok {
+func (s *set) add(v uint32) bool {
+ if s.m.Has(v) {
return false
}
- s[v] = struct{}{}
+ s.m.Add(v)
return true
}
-func (s set) Clone() set {
+func (s *set) Clone() *set {
if s == nil {
return nil
}
- newS := make(map[uint32]struct{}, len(s))
- for k, v := range s {
- newS[k] = v
- }
- return newS
+ newS := intmap.NewSet[uint32](s.m.Len())
+ s.m.ForEach(func(v uint32) bool {
+ newS.Add(v)
+ return true
+ })
+ return &set{m: newS}
}
-func (s set) MarshalBinary() (data []byte, err error) {
+func (s *set) MarshalBinary() (data []byte, err error) {
	// 4 bytes for the size of the set, and 4 bytes for each key in the list.
- data = make([]byte, 0, 4+(4*len(s)))
+ data = make([]byte, 0, 4+(4*s.m.Len()))
// Length of the set. We only need 32 bits because the size of the set
// couldn't exceed that on 32 bit architectures.
- sl := len(s)
+ sl := s.m.Len()
data = append(data, []byte{
byte(sl >> 24),
byte(sl >> 16),
@@ -73,14 +99,15 @@ func (s set) MarshalBinary() (data []byte, err error) {
}...)
// Marshal each element in the set.
- for k := range s {
+ s.m.ForEach(func(k uint32) bool {
data = append(data, []byte{
byte(k >> 24),
byte(k >> 16),
byte(k >> 8),
byte(k),
}...)
- }
+ return true
+ })
return data, nil
}
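A small sketch of the wire layout MarshalBinary writes by hand above: a 4-byte big-endian length prefix followed by each uint32 key, also big-endian — the same layout UnmarshalBinary reads back earlier in this diff. This version uses encoding/binary for clarity; the vendored code shifts bytes manually.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func marshalSet(keys []uint32) []byte {
	data := make([]byte, 4+4*len(keys))
	binary.BigEndian.PutUint32(data[0:4], uint32(len(keys))) // length prefix
	for i, k := range keys {
		binary.BigEndian.PutUint32(data[4+4*i:], k) // each key, big-endian
	}
	return data
}

func main() {
	// len=2, then 1 and 256: 00 00 00 02 00 00 00 01 00 00 01 00
	fmt.Printf("% x\n", marshalSet([]uint32{1, 256}))
}
```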
diff --git a/vendor/github.com/kamstrup/intmap/.gitignore b/vendor/github.com/kamstrup/intmap/.gitignore
new file mode 100644
index 0000000000000..1377554ebea6f
--- /dev/null
+++ b/vendor/github.com/kamstrup/intmap/.gitignore
@@ -0,0 +1 @@
+*.swp
diff --git a/vendor/github.com/kamstrup/intmap/LICENSE b/vendor/github.com/kamstrup/intmap/LICENSE
new file mode 100644
index 0000000000000..1eac633b0cd30
--- /dev/null
+++ b/vendor/github.com/kamstrup/intmap/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) 2016, Brent Pedersen - Bioinformatics
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/kamstrup/intmap/README.md b/vendor/github.com/kamstrup/intmap/README.md
new file mode 100644
index 0000000000000..e1a1e7003aff8
--- /dev/null
+++ b/vendor/github.com/kamstrup/intmap/README.md
@@ -0,0 +1,52 @@
+Fast hashmap with integer keys for Golang
+
+[](https://godoc.org/github.com/kamstrup/intmap)
+[](https://goreportcard.com/report/github.com/kamstrup/intmap)
+
+# intmap
+
+ import "github.com/kamstrup/intmap"
+
+Package intmap is a fast hashmap implementation for Golang, specialized for maps with integer type keys.
+The values can be of any type.
+
+It is a full port of https://github.com/brentp/intintmap to use type parameters (aka generics).
+
+It interleaves keys and values in the same underlying array to improve locality.
+This is also known as open addressing with linear probing.
+
+It is up to 3X faster than the builtin map:
+```
+name time/op
+Map64Fill-8 201ms ± 5%
+IntIntMapFill-8 207ms ±31%
+StdMapFill-8 371ms ±11%
+Map64Get10PercentHitRate-8 148µs ±40%
+IntIntMapGet10PercentHitRate-8 171µs ±50%
+StdMapGet10PercentHitRate-8 171µs ±33%
+Map64Get100PercentHitRate-8 4.50ms ± 5%
+IntIntMapGet100PercentHitRate-8 4.82ms ± 6%
+StdMapGet100PercentHitRate-8 15.5ms ±32%
+```
+
+## Usage
+
+```go
+m := intmap.New[int64,int64](32768)
+m.Put(int64(1234), int64(-222))
+m.Put(int64(123), int64(33))
+
+v, ok := m.Get(int64(222))
+v, ok = m.Get(int64(333))
+
+m.Del(int64(222))
+m.Del(int64(333))
+
+fmt.Println(m.Len())
+
+m.ForEach(func(k int64, v int64) bool {
+ fmt.Printf("key: %d, value: %d\n", k, v)
+ return true // keep iterating
+})
+
+m.Clear() // all gone, but buffers kept
+```
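Beyond the README's examples, the vendored map64.go below also exposes All, Keys, and Values as iter.Seq values, usable with Go 1.23 range-over-func (the modules.txt entry in this diff pins go 1.23). A hedged sketch:

```go
package main

import (
	"fmt"

	"github.com/kamstrup/intmap"
)

func main() {
	m := intmap.New[int64, string](8)
	m.Put(1, "one")
	m.Put(2, "two")

	for k, v := range m.All() { // iteration order is not defined
		fmt.Println(k, v)
	}
	for k := range m.Keys() { // keys only
		fmt.Println(k)
	}
}
```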
diff --git a/vendor/github.com/kamstrup/intmap/map64.go b/vendor/github.com/kamstrup/intmap/map64.go
new file mode 100644
index 0000000000000..ec8084db9f776
--- /dev/null
+++ b/vendor/github.com/kamstrup/intmap/map64.go
@@ -0,0 +1,442 @@
+// Package intmap contains a fast hashmap implementation for maps with keys of any integer type
+package intmap
+
+import (
+ "iter"
+ "math"
+)
+
+// IntKey is a type constraint for values that can be used as keys in Map
+type IntKey interface {
+ ~int | ~uint | ~int64 | ~uint64 | ~int32 | ~uint32 | ~int16 | ~uint16 | ~int8 | ~uint8 | ~uintptr
+}
+
+type pair[K IntKey, V any] struct {
+ K K
+ V V
+}
+
+const fillFactor64 = 0.7
+
+func phiMix64(x int) int {
+ h := x * 0x9E3779B9
+ return h ^ (h >> 16)
+}
+
+// Map is a hashmap where the keys are of any integer type.
+// It is valid to call methods that read a nil map, similar to a standard Go map.
+// Methods valid on a nil map are Has, Get, Len, and ForEach.
+type Map[K IntKey, V any] struct {
+ data []pair[K, V] // key-value pairs
+ size int
+
+ zeroVal V // value of 'zero' key
+ hasZeroKey bool // do we have 'zero' key in the map?
+}
+
+// New creates a new map with keys being any integer subtype.
+// The map can store up to the given capacity before reallocation and rehashing occurs.
+func New[K IntKey, V any](capacity int) *Map[K, V] {
+ return &Map[K, V]{
+ data: make([]pair[K, V], arraySize(capacity, fillFactor64)),
+ }
+}
+
+// Has checks if the given key exists in the map.
+// Calling this method on a nil map will return false.
+func (m *Map[K, V]) Has(key K) bool {
+ if m == nil {
+ return false
+ }
+
+ if key == K(0) {
+ return m.hasZeroKey
+ }
+
+ idx := m.startIndex(key)
+ p := m.data[idx]
+
+ if p.K == K(0) { // end of chain already
+ return false
+ }
+ if p.K == key { // we check zero prior to this call
+ return true
+ }
+
+ // hash collision, seek next hash match, bailing on first empty
+ for {
+ idx = m.nextIndex(idx)
+ p = m.data[idx]
+ if p.K == K(0) {
+ return false
+ }
+ if p.K == key {
+ return true
+ }
+ }
+}
+
+// Get returns the value if the key is found.
+// If you just need to check for existence it is easier to use Has.
+// Calling this method on a nil map will return the zero value for V and false.
+func (m *Map[K, V]) Get(key K) (V, bool) {
+ if m == nil {
+ var zero V
+ return zero, false
+ }
+
+ if key == K(0) {
+ if m.hasZeroKey {
+ return m.zeroVal, true
+ }
+ var zero V
+ return zero, false
+ }
+
+ idx := m.startIndex(key)
+ p := m.data[idx]
+
+ if p.K == K(0) { // end of chain already
+ var zero V
+ return zero, false
+ }
+ if p.K == key { // we check zero prior to this call
+ return p.V, true
+ }
+
+ // hash collision, seek next hash match, bailing on first empty
+ for {
+ idx = m.nextIndex(idx)
+ p = m.data[idx]
+ if p.K == K(0) {
+ var zero V
+ return zero, false
+ }
+ if p.K == key {
+ return p.V, true
+ }
+ }
+}
+
+// Put adds or updates key with value val.
+func (m *Map[K, V]) Put(key K, val V) {
+ if key == K(0) {
+ if !m.hasZeroKey {
+ m.size++
+ }
+ m.zeroVal = val
+ m.hasZeroKey = true
+ return
+ }
+
+ idx := m.startIndex(key)
+ p := &m.data[idx]
+
+ if p.K == K(0) { // end of chain already
+ p.K = key
+ p.V = val
+ if m.size >= m.sizeThreshold() {
+ m.rehash()
+ } else {
+ m.size++
+ }
+ return
+ } else if p.K == key { // overwrite existing value
+ p.V = val
+ return
+ }
+
+ // hash collision, seek next empty or key match
+ for {
+ idx = m.nextIndex(idx)
+ p = &m.data[idx]
+
+ if p.K == K(0) {
+ p.K = key
+ p.V = val
+ if m.size >= m.sizeThreshold() {
+ m.rehash()
+ } else {
+ m.size++
+ }
+ return
+ } else if p.K == key {
+ p.V = val
+ return
+ }
+ }
+}
+
+// PutIfNotExists adds the key-value pair only if the key does not already exist
+// in the map, and returns the current value associated with the key and a boolean
+// indicating whether the value was newly added or not.
+func (m *Map[K, V]) PutIfNotExists(key K, val V) (V, bool) {
+ if key == K(0) {
+ if m.hasZeroKey {
+ return m.zeroVal, false
+ }
+ m.zeroVal = val
+ m.hasZeroKey = true
+ m.size++
+ return val, true
+ }
+
+ idx := m.startIndex(key)
+ p := &m.data[idx]
+
+ if p.K == K(0) { // end of chain already
+ p.K = key
+ p.V = val
+ m.size++
+ if m.size >= m.sizeThreshold() {
+ m.rehash()
+ }
+ return val, true
+ } else if p.K == key {
+ return p.V, false
+ }
+
+ // hash collision, seek next hash match, bailing on first empty
+ for {
+ idx = m.nextIndex(idx)
+ p = &m.data[idx]
+
+ if p.K == K(0) {
+ p.K = key
+ p.V = val
+ m.size++
+ if m.size >= m.sizeThreshold() {
+ m.rehash()
+ }
+ return val, true
+ } else if p.K == key {
+ return p.V, false
+ }
+ }
+}
+
+// ForEach iterates through key-value pairs in the map while the function f returns true.
+// This method returns immediately if invoked on a nil map.
+//
+// The iteration order of a Map is not defined, so please avoid relying on it.
+func (m *Map[K, V]) ForEach(f func(K, V) bool) {
+ if m == nil {
+ return
+ }
+
+ if m.hasZeroKey && !f(K(0), m.zeroVal) {
+ return
+ }
+ forEach64(m.data, f)
+}
+
+// All returns an iterator over key-value pairs from m.
+// The iterator returns immediately if invoked on a nil map.
+//
+// The iteration order of a Map is not defined, so please avoid relying on it.
+func (m *Map[K, V]) All() iter.Seq2[K, V] {
+ return m.ForEach
+}
+
+// Keys returns an iterator over keys in m.
+// The iterator returns immediately if invoked on a nil map.
+//
+// The iteration order of a Map is not defined, so please avoid relying on it.
+func (m *Map[K, V]) Keys() iter.Seq[K] {
+ return func(yield func(k K) bool) {
+ if m == nil {
+ return
+ }
+
+ if m.hasZeroKey && !yield(K(0)) {
+ return
+ }
+
+ for _, p := range m.data {
+ if p.K != K(0) && !yield(p.K) {
+ return
+ }
+ }
+ }
+}
+
+// Values returns an iterator over values in m.
+// The iterator returns immediately if invoked on a nil map.
+//
+// The iteration order of a Map is not defined, so please avoid relying on it.
+func (m *Map[K, V]) Values() iter.Seq[V] {
+ return func(yield func(v V) bool) {
+ if m == nil {
+ return
+ }
+
+ if m.hasZeroKey && !yield(m.zeroVal) {
+ return
+ }
+
+ for _, p := range m.data {
+ if p.K != K(0) && !yield(p.V) {
+ return
+ }
+ }
+ }
+}
+
+// Clear removes all items from the map, but keeps the internal buffers for reuse.
+func (m *Map[K, V]) Clear() {
+ var zero V
+ m.hasZeroKey = false
+ m.zeroVal = zero
+
+ // compiles down to runtime.memclr()
+ for i := range m.data {
+ m.data[i] = pair[K, V]{}
+ }
+
+ m.size = 0
+}
+
+func (m *Map[K, V]) rehash() {
+ oldData := m.data
+ m.data = make([]pair[K, V], 2*len(m.data))
+
+ // reset size
+ if m.hasZeroKey {
+ m.size = 1
+ } else {
+ m.size = 0
+ }
+
+ forEach64(oldData, func(k K, v V) bool {
+ m.Put(k, v)
+ return true
+ })
+}
+
+// Len returns the number of elements in the map.
+// The length of a nil map is defined to be zero.
+func (m *Map[K, V]) Len() int {
+ if m == nil {
+ return 0
+ }
+
+ return m.size
+}
+
+func (m *Map[K, V]) sizeThreshold() int {
+ return int(math.Floor(float64(len(m.data)) * fillFactor64))
+}
+
+func (m *Map[K, V]) startIndex(key K) int {
+ return phiMix64(int(key)) & (len(m.data) - 1)
+}
+
+func (m *Map[K, V]) nextIndex(idx int) int {
+ return (idx + 1) & (len(m.data) - 1)
+}
+
+func forEach64[K IntKey, V any](pairs []pair[K, V], f func(k K, v V) bool) {
+ for _, p := range pairs {
+ if p.K != K(0) && !f(p.K, p.V) {
+ return
+ }
+ }
+}
+
+// Del deletes a key and its value, returning true iff the key was found
+func (m *Map[K, V]) Del(key K) bool {
+ if key == K(0) {
+ if m.hasZeroKey {
+ m.hasZeroKey = false
+ m.size--
+ return true
+ }
+ return false
+ }
+
+ idx := m.startIndex(key)
+ p := m.data[idx]
+
+ if p.K == key {
+ // any keys that were pushed back need to be shifted back into the empty slot
+ // to avoid breaking the chain
+ m.shiftKeys(idx)
+ m.size--
+ return true
+ } else if p.K == K(0) { // end of chain already
+ return false
+ }
+
+ for {
+ idx = m.nextIndex(idx)
+ p = m.data[idx]
+
+ if p.K == key {
+ // any keys that were pushed back need to be shifted back into the empty slot
+ // to avoid breaking the chain
+ m.shiftKeys(idx)
+ m.size--
+ return true
+ } else if p.K == K(0) {
+ return false
+ }
+
+ }
+}
+
+func (m *Map[K, V]) shiftKeys(idx int) int {
+ // Shift entries with the same hash.
+ // We need to do this on deletion to ensure we don't have zeroes in the hash chain
+ for {
+ var p pair[K, V]
+ lastIdx := idx
+ idx = m.nextIndex(idx)
+ for {
+ p = m.data[idx]
+ if p.K == K(0) {
+ m.data[lastIdx] = pair[K, V]{}
+ return lastIdx
+ }
+
+ slot := m.startIndex(p.K)
+ if lastIdx <= idx {
+ if lastIdx >= slot || slot > idx {
+ break
+ }
+ } else {
+ if lastIdx >= slot && slot > idx {
+ break
+ }
+ }
+ idx = m.nextIndex(idx)
+ }
+ m.data[lastIdx] = p
+ }
+}
+
+func nextPowerOf2(x uint32) uint32 {
+ if x == math.MaxUint32 {
+ return x
+ }
+
+ if x == 0 {
+ return 1
+ }
+
+ x--
+ x |= x >> 1
+ x |= x >> 2
+ x |= x >> 4
+ x |= x >> 8
+ x |= x >> 16
+
+ return x + 1
+}
+
+func arraySize(exp int, fill float64) int {
+ s := nextPowerOf2(uint32(math.Ceil(float64(exp) / fill)))
+ if s < 2 {
+ s = 2
+ }
+ return int(s)
+}
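A worked example of the sizing math in nextPowerOf2 and arraySize above, under the stated fill factor of 0.7: the backing array is the next power of two at or above capacity/0.7. The bits-based rewrite here is a sketch equivalent to the shift cascade above.

```go
package main

import (
	"fmt"
	"math"
	"math/bits"
)

func arraySize(capacity int, fill float64) int {
	n := uint32(math.Ceil(float64(capacity) / fill))
	if n < 2 {
		return 2
	}
	return 1 << bits.Len32(n-1) // next power of two >= n
}

func main() {
	fmt.Println(arraySize(1000, 0.7)) // ceil(1000/0.7)=1429 -> 2048
	fmt.Println(arraySize(100, 0.7))  // ceil(100/0.7)=143  -> 256
}
```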
diff --git a/vendor/github.com/kamstrup/intmap/set.go b/vendor/github.com/kamstrup/intmap/set.go
new file mode 100644
index 0000000000000..b81ce224b6036
--- /dev/null
+++ b/vendor/github.com/kamstrup/intmap/set.go
@@ -0,0 +1,59 @@
+package intmap
+
+import "iter"
+
+// Set is a specialization of Map modelling a set of integers.
+// Like Map, methods that read from the set are valid on the nil Set.
+// This includes Has, Len, and ForEach.
+type Set[K IntKey] Map[K, struct{}]
+
+// NewSet creates a new Set with a given initial capacity.
+func NewSet[K IntKey](capacity int) *Set[K] {
+ return (*Set[K])(New[K, struct{}](capacity))
+}
+
+// Add an element to the set. Returns true if the element was not already present.
+func (s *Set[K]) Add(k K) bool {
+ _, found := (*Map[K, struct{}])(s).PutIfNotExists(k, struct{}{})
+ return found
+}
+
+// Del deletes a key, returning true iff the key was found
+func (s *Set[K]) Del(k K) bool {
+ return (*Map[K, struct{}])(s).Del(k)
+}
+
+// Clear removes all items from the Set, but keeps the internal buffers for reuse.
+func (s *Set[K]) Clear() {
+ (*Map[K, struct{}])(s).Clear()
+}
+
+// Has returns true if the key is in the set.
+// If the set is nil this method always returns false.
+func (s *Set[K]) Has(k K) bool {
+ return (*Map[K, struct{}])(s).Has(k)
+}
+
+// Len returns the number of elements in the set.
+// If the set is nil this method returns 0.
+func (s *Set[K]) Len() int {
+ return (*Map[K, struct{}])(s).Len()
+}
+
+// ForEach iterates over the elements in the set while the visit function returns true.
+// This method returns immediately if the set is nil.
+//
+// The iteration order of a Set is not defined, so please avoid relying on it.
+func (s *Set[K]) ForEach(visit func(k K) bool) {
+ (*Map[K, struct{}])(s).ForEach(func(k K, _ struct{}) bool {
+ return visit(k)
+ })
+}
+
+// All returns an iterator over keys from the set.
+// The iterator returns immediately if the set is nil.
+//
+// The iteration order of a Set is not defined, so please avoid relying on it.
+func (s *Set[K]) All() iter.Seq[K] {
+ return s.ForEach
+}
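A hedged usage sketch of the Set API that hyperloglog's new set wrapper builds on; every call here (NewSet, Add, Has, Len, ForEach, Del) is defined in the file above.

```go
package main

import (
	"fmt"

	"github.com/kamstrup/intmap"
)

func main() {
	s := intmap.NewSet[uint32](16)
	fmt.Println(s.Add(7)) // true: newly added
	fmt.Println(s.Add(7)) // false: already present
	fmt.Println(s.Has(7), s.Len())

	s.ForEach(func(k uint32) bool {
		fmt.Println("member:", k)
		return true // keep iterating
	})
	s.Del(7)
}
```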
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 3168c6735b76c..7810d1ce504bf 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -462,8 +462,8 @@ github.com/aws/smithy-go/rand
github.com/aws/smithy-go/time
github.com/aws/smithy-go/transport/http
github.com/aws/smithy-go/transport/http/internal/io
-# github.com/axiomhq/hyperloglog v0.2.0
-## explicit; go 1.21
+# github.com/axiomhq/hyperloglog v0.2.1
+## explicit; go 1.23
github.com/axiomhq/hyperloglog
# github.com/baidubce/bce-sdk-go v0.9.205
## explicit; go 1.11
@@ -1169,6 +1169,9 @@ github.com/json-iterator/go
# github.com/julienschmidt/httprouter v1.3.0
## explicit; go 1.7
github.com/julienschmidt/httprouter
+# github.com/kamstrup/intmap v0.5.0
+## explicit; go 1.23
+github.com/kamstrup/intmap
# github.com/klauspost/compress v1.17.11
## explicit; go 1.21
github.com/klauspost/compress
|
fix
|
update module github.com/axiomhq/hyperloglog to v0.2.1 (#15322)
|
f73e9b299374934b7bb8716e3c9a460668092137
|
2023-04-19 21:21:34
|
Mohamed-Amine Bouqsimi
|
operator: Add PodDisruptionBudgets to the query path (#9188)
| false
|
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index 0bd456ebee5ce..c07317e3be5de 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,4 +1,5 @@
## Main
+- [9188](https://github.com/grafana/loki/pull/9188) **aminesnow**: Add PodDisruptionBudgets to the query path
- [9162](https://github.com/grafana/loki/pull/9162) **aminesnow**: Add a PodDisruptionBudget to lokistack-gateway
- [9049](https://github.com/grafana/loki/pull/9049) **alanconway**: Revert 1x.extra-small changes, add 1x.demo
- [8661](https://github.com/grafana/loki/pull/8661) **xuanyunhui**: Add a new Object Storage Type for AlibabaCloud OSS
diff --git a/operator/internal/manifests/build_test.go b/operator/internal/manifests/build_test.go
index f9789518a2687..bad81afc675ff 100644
--- a/operator/internal/manifests/build_test.go
+++ b/operator/internal/manifests/build_test.go
@@ -8,6 +8,7 @@ import (
openshiftconfigv1 "github.com/openshift/api/config/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
+ policyv1 "k8s.io/api/policy/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
configv1 "github.com/grafana/loki/operator/apis/config/v1"
@@ -989,7 +990,7 @@ func extractAffinity(raw client.Object) (*corev1.Affinity, bool, error) {
return obj.Spec.Template.Spec.Affinity, false, nil
case *appsv1.StatefulSet:
return obj.Spec.Template.Spec.Affinity, false, nil
- case *corev1.ConfigMap, *corev1.Service:
+ case *corev1.ConfigMap, *corev1.Service, *policyv1.PodDisruptionBudget:
return nil, true, nil
default:
}
diff --git a/operator/internal/manifests/indexgateway.go b/operator/internal/manifests/indexgateway.go
index 0c31ab4528f7a..a75492197db95 100644
--- a/operator/internal/manifests/indexgateway.go
+++ b/operator/internal/manifests/indexgateway.go
@@ -9,6 +9,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
+ policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -55,6 +56,7 @@ func BuildIndexGateway(opts Options) ([]client.Object, error) {
statefulSet,
NewIndexGatewayGRPCService(opts),
NewIndexGatewayHTTPService(opts),
+ NewIndexGatewayPodDisruptionBudget(opts),
}, nil
}
@@ -238,6 +240,30 @@ func NewIndexGatewayHTTPService(opts Options) *corev1.Service {
}
}
+// NewIndexGatewayPodDisruptionBudget returns a PodDisruptionBudget for the LokiStack
+// index-gateway pods.
+func NewIndexGatewayPodDisruptionBudget(opts Options) *policyv1.PodDisruptionBudget {
+ l := ComponentLabels(LabelIndexGatewayComponent, opts.Name)
+ ma := intstr.FromInt(1)
+ return &policyv1.PodDisruptionBudget{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "PodDisruptionBudget",
+ APIVersion: policyv1.SchemeGroupVersion.String(),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: l,
+ Name: IndexGatewayName(opts.Name),
+ Namespace: opts.Namespace,
+ },
+ Spec: policyv1.PodDisruptionBudgetSpec{
+ Selector: &metav1.LabelSelector{
+ MatchLabels: l,
+ },
+ MinAvailable: &ma,
+ },
+ }
+}
+
func configureIndexGatewayHTTPServicePKI(statefulSet *appsv1.StatefulSet, opts Options) error {
serviceName := serviceNameIndexGatewayHTTP(opts.Name)
return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName)
diff --git a/operator/internal/manifests/indexgateway_test.go b/operator/internal/manifests/indexgateway_test.go
index 48499e4b196a6..e69e7b64d54d9 100644
--- a/operator/internal/manifests/indexgateway_test.go
+++ b/operator/internal/manifests/indexgateway_test.go
@@ -6,6 +6,7 @@ import (
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests"
"github.com/stretchr/testify/require"
+ policyv1 "k8s.io/api/policy/v1"
)
func TestNewIndexGatewayStatefulSet_HasTemplateConfigHashAnnotation(t *testing.T) {
@@ -75,3 +76,30 @@ func TestNewIndexGatewayStatefulSet_SelectorMatchesLabels(t *testing.T) {
require.Equal(t, l[key], value)
}
}
+
+func TestBuildIndexGateway_PodDisruptionBudget(t *testing.T) {
+ opts := manifests.Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ IndexGateway: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ },
+ },
+ }
+ objs, err := manifests.BuildIndexGateway(opts)
+
+ require.NoError(t, err)
+ require.Len(t, objs, 4)
+
+ pdb := objs[3].(*policyv1.PodDisruptionBudget)
+ require.NotNil(t, pdb)
+ require.Equal(t, "abcd-index-gateway", pdb.Name)
+ require.Equal(t, "efgh", pdb.Namespace)
+ require.NotNil(t, pdb.Spec.MinAvailable.IntVal)
+ require.Equal(t, int32(1), pdb.Spec.MinAvailable.IntVal)
+ require.EqualValues(t, manifests.ComponentLabels(manifests.LabelIndexGatewayComponent, opts.Name),
+ pdb.Spec.Selector.MatchLabels)
+}
diff --git a/operator/internal/manifests/querier.go b/operator/internal/manifests/querier.go
index 4d0bc3a7e79ba..b0f8d20cb5fa4 100644
--- a/operator/internal/manifests/querier.go
+++ b/operator/internal/manifests/querier.go
@@ -2,6 +2,7 @@ package manifests
import (
"fmt"
+ "math"
"path"
"github.com/grafana/loki/operator/internal/manifests/internal/config"
@@ -9,6 +10,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
+ policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
@@ -54,6 +56,7 @@ func BuildQuerier(opts Options) ([]client.Object, error) {
deployment,
NewQuerierGRPCService(opts),
NewQuerierHTTPService(opts),
+ NewQuerierPodDisruptionBudget(opts),
}, nil
}
@@ -217,6 +220,33 @@ func NewQuerierHTTPService(opts Options) *corev1.Service {
}
}
+// NewQuerierPodDisruptionBudget returns a PodDisruptionBudget for the LokiStack querier pods.
+func NewQuerierPodDisruptionBudget(opts Options) *policyv1.PodDisruptionBudget {
+ l := ComponentLabels(LabelQuerierComponent, opts.Name)
+
+ // Have at least N-1 replicas available, unless N==1 in which case the minimum available is 1.
+ replicas := opts.Stack.Template.Querier.Replicas
+ ma := intstr.FromInt(int(math.Max(1, float64(replicas-1))))
+
+ return &policyv1.PodDisruptionBudget{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "PodDisruptionBudget",
+ APIVersion: policyv1.SchemeGroupVersion.String(),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: l,
+ Name: QuerierName(opts.Name),
+ Namespace: opts.Namespace,
+ },
+ Spec: policyv1.PodDisruptionBudgetSpec{
+ Selector: &metav1.LabelSelector{
+ MatchLabels: l,
+ },
+ MinAvailable: &ma,
+ },
+ }
+}
+
func configureQuerierHTTPServicePKI(deployment *appsv1.Deployment, opts Options) error {
serviceName := serviceNameQuerierHTTP(opts.Name)
return configureHTTPServicePKI(&deployment.Spec.Template.Spec, serviceName)
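A worked sketch of the "at least N-1, floor of 1" rule used in NewQuerierPodDisruptionBudget above; the values mirror the test expectations that follow (1 replica -> minAvailable 1, 2 -> 1, 3 -> 2).

```go
package main

import (
	"fmt"
	"math"
)

// minAvailable reproduces the expression used for the querier PDB above.
func minAvailable(replicas int32) int {
	return int(math.Max(1, float64(replicas-1)))
}

func main() {
	for _, r := range []int32{1, 2, 3} {
		fmt.Printf("replicas=%d minAvailable=%d\n", r, minAvailable(r))
	}
}
```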
diff --git a/operator/internal/manifests/querier_test.go b/operator/internal/manifests/querier_test.go
index fe81d32a52f71..8342b79b02c0c 100644
--- a/operator/internal/manifests/querier_test.go
+++ b/operator/internal/manifests/querier_test.go
@@ -6,6 +6,9 @@ import (
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests"
"github.com/stretchr/testify/require"
+ policyv1 "k8s.io/api/policy/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
)
func TestNewQuerierDeployment_HasTemplateConfigHashAnnotation(t *testing.T) {
@@ -75,3 +78,108 @@ func TestNewQuerierDeployment_SelectorMatchesLabels(t *testing.T) {
require.Equal(t, l[key], value)
}
}
+
+func TestBuildQuerier_PodDisruptionBudget(t *testing.T) {
+ tt := []struct {
+ name string
+ opts manifests.Options
+ want policyv1.PodDisruptionBudget
+ }{
+ {
+ name: "Querier with 1 replica",
+ opts: manifests.Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Querier: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ },
+ },
+ },
+ want: policyv1.PodDisruptionBudget{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "abcd-querier",
+ Namespace: "efgh",
+ Labels: manifests.ComponentLabels(manifests.LabelQuerierComponent, "abcd"),
+ },
+ Spec: policyv1.PodDisruptionBudgetSpec{
+ MinAvailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1},
+ Selector: &metav1.LabelSelector{
+ MatchLabels: manifests.ComponentLabels(manifests.LabelQuerierComponent, "abcd"),
+ },
+ },
+ },
+ },
+ {
+ name: "Querier with 2 replicas",
+ opts: manifests.Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Querier: &lokiv1.LokiComponentSpec{
+ Replicas: 2,
+ },
+ },
+ },
+ },
+ want: policyv1.PodDisruptionBudget{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "abcd-querier",
+ Namespace: "efgh",
+ Labels: manifests.ComponentLabels(manifests.LabelQuerierComponent, "abcd"),
+ },
+ Spec: policyv1.PodDisruptionBudgetSpec{
+ MinAvailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1},
+ Selector: &metav1.LabelSelector{
+ MatchLabels: manifests.ComponentLabels(manifests.LabelQuerierComponent, "abcd"),
+ },
+ },
+ },
+ },
+ {
+ name: "Querier with 3 replicas",
+ opts: manifests.Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Querier: &lokiv1.LokiComponentSpec{
+ Replicas: 3,
+ },
+ },
+ },
+ },
+ want: policyv1.PodDisruptionBudget{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "abcd-querier",
+ Namespace: "efgh",
+ Labels: manifests.ComponentLabels(manifests.LabelQuerierComponent, "abcd"),
+ },
+ Spec: policyv1.PodDisruptionBudgetSpec{
+ MinAvailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 2},
+ Selector: &metav1.LabelSelector{
+ MatchLabels: manifests.ComponentLabels(manifests.LabelQuerierComponent, "abcd"),
+ },
+ },
+ },
+ },
+ }
+
+ for _, tc := range tt {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ objs, err := manifests.BuildQuerier(tc.opts)
+ require.NoError(t, err)
+ require.Len(t, objs, 4)
+
+ pdb := objs[3].(*policyv1.PodDisruptionBudget)
+ require.NotNil(t, pdb)
+ require.Equal(t, tc.want.ObjectMeta, pdb.ObjectMeta)
+ require.Equal(t, tc.want.Spec, pdb.Spec)
+ })
+ }
+}
diff --git a/operator/internal/manifests/query-frontend.go b/operator/internal/manifests/query-frontend.go
index 405da3daa6a32..9a67cba83066f 100644
--- a/operator/internal/manifests/query-frontend.go
+++ b/operator/internal/manifests/query-frontend.go
@@ -8,6 +8,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
+ policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
@@ -49,6 +50,7 @@ func BuildQueryFrontend(opts Options) ([]client.Object, error) {
deployment,
NewQueryFrontendGRPCService(opts),
NewQueryFrontendHTTPService(opts),
+ NewQueryFrontendPodDisruptionBudget(opts),
}, nil
}
@@ -224,6 +226,30 @@ func NewQueryFrontendHTTPService(opts Options) *corev1.Service {
}
}
+// NewQueryFrontendPodDisruptionBudget returns a PodDisruptionBudget for the LokiStack
+// query-frontend pods.
+func NewQueryFrontendPodDisruptionBudget(opts Options) *policyv1.PodDisruptionBudget {
+ l := ComponentLabels(LabelQueryFrontendComponent, opts.Name)
+ ma := intstr.FromInt(1)
+ return &policyv1.PodDisruptionBudget{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "PodDisruptionBudget",
+ APIVersion: policyv1.SchemeGroupVersion.String(),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: l,
+ Name: QueryFrontendName(opts.Name),
+ Namespace: opts.Namespace,
+ },
+ Spec: policyv1.PodDisruptionBudgetSpec{
+ Selector: &metav1.LabelSelector{
+ MatchLabels: l,
+ },
+ MinAvailable: &ma,
+ },
+ }
+}
+
func configureQueryFrontendHTTPServicePKI(deployment *appsv1.Deployment, opts Options) error {
serviceName := serviceNameQueryFrontendHTTP(opts.Name)
return configureHTTPServicePKI(&deployment.Spec.Template.Spec, serviceName)
diff --git a/operator/internal/manifests/query-frontend_test.go b/operator/internal/manifests/query-frontend_test.go
index c31bc1004aef6..09d0fdb4d70ff 100644
--- a/operator/internal/manifests/query-frontend_test.go
+++ b/operator/internal/manifests/query-frontend_test.go
@@ -5,6 +5,7 @@ import (
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/stretchr/testify/require"
+ policyv1 "k8s.io/api/policy/v1"
)
func TestNewQueryFrontendDeployment_SelectorMatchesLabels(t *testing.T) {
@@ -64,3 +65,29 @@ func TestNewQueryFrontendDeployment_HasTemplateCertRotationRequiredAtAnnotation(
require.Contains(t, annotations, expected)
require.Equal(t, annotations[expected], "deadbeef")
}
+
+func TestBuildQueryFrontend_PodDisruptionBudget(t *testing.T) {
+ opts := Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ QueryFrontend: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ },
+ },
+ }
+ objs, err := BuildQueryFrontend(opts)
+
+ require.NoError(t, err)
+ require.Len(t, objs, 4)
+
+ pdb := objs[3].(*policyv1.PodDisruptionBudget)
+ require.NotNil(t, pdb)
+ require.Equal(t, "abcd-query-frontend", pdb.Name)
+ require.Equal(t, "efgh", pdb.Namespace)
+ require.NotNil(t, pdb.Spec.MinAvailable.IntVal)
+ require.Equal(t, int32(1), pdb.Spec.MinAvailable.IntVal)
+ require.EqualValues(t, ComponentLabels(LabelQueryFrontendComponent, opts.Name), pdb.Spec.Selector.MatchLabels)
+}
|
operator
|
Add PodDisruptionBudgets to the query path (#9188)
|
cf71ac7114a6fb3a2c4bf71c7fbcd9c4e91a05eb
|
2024-03-29 20:13:18
|
Salva Corts
|
fix(blooms): Remove blocks not matching any series in task (#12401)
| false
|
diff --git a/pkg/bloomgateway/processor.go b/pkg/bloomgateway/processor.go
index 5cf805b11a74d..5d43e79eece3e 100644
--- a/pkg/bloomgateway/processor.go
+++ b/pkg/bloomgateway/processor.go
@@ -175,7 +175,7 @@ func (p *processor) processBlock(_ context.Context, blockQuerier *v1.BlockQuerie
if sp := opentracing.SpanFromContext(task.ctx); sp != nil {
md, _ := blockQuerier.Metadata()
blk := bloomshipper.BlockRefFrom(task.Tenant, task.table.String(), md)
- sp.LogKV("process block", blk.String())
+ sp.LogKV("process block", blk.String(), "series", len(task.series))
}
it := v1.NewPeekingIter(task.RequestIter(tokenizer))
diff --git a/pkg/bloomgateway/util.go b/pkg/bloomgateway/util.go
index 5f2f2e31f79c2..e07c5740fdc6d 100644
--- a/pkg/bloomgateway/util.go
+++ b/pkg/bloomgateway/util.go
@@ -78,7 +78,7 @@ func partitionTasks(tasks []Task, blocks []bloomshipper.BlockRef) []blockWithTas
})
// All fingerprints fall outside of the consumer's range
- if min == len(refs) || max == 0 {
+ if min == len(refs) || max == 0 || min == max {
continue
}
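A hedged illustration of the added `min == max` guard: when the block's keyspace bounds fall between two task fingerprints, both binary searches return the same index and the slice refs[min:max] is empty — the block matches no series. The exact bound predicates in util.go may differ slightly; this mirrors the test case added below (block [100,200], fingerprints {50, 75, 250, 300}).

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	fps := []uint64{50, 75, 250, 300}              // task series fingerprints, sorted
	blockMin, blockMax := uint64(100), uint64(200) // block keyspace bounds

	min := sort.Search(len(fps), func(i int) bool { return fps[i] >= blockMin })
	max := sort.Search(len(fps), func(i int) bool { return fps[i] > blockMax })

	fmt.Println(min, max, min == max) // 2 2 true -> skip the block
}
```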
diff --git a/pkg/bloomgateway/util_test.go b/pkg/bloomgateway/util_test.go
index 9bd158219e134..f624d337092b0 100644
--- a/pkg/bloomgateway/util_test.go
+++ b/pkg/bloomgateway/util_test.go
@@ -136,6 +136,26 @@ func TestPartitionTasks(t *testing.T) {
require.Len(t, res.tasks[0].series, 90)
}
})
+
+ t.Run("block series before and after task series", func(t *testing.T) {
+ bounds := []bloomshipper.BlockRef{
+ mkBlockRef(100, 200),
+ }
+
+ tasks := []Task{
+ {
+ series: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 50},
+ {Fingerprint: 75},
+ {Fingerprint: 250},
+ {Fingerprint: 300},
+ },
+ },
+ }
+
+ results := partitionTasks(tasks, bounds)
+ require.Len(t, results, 0)
+ })
}
func TestPartitionRequest(t *testing.T) {
|
fix
|
Remove blocks not matching any series in task (#12401)
|
f8e582a71682b4333299f76a46b750004f1c995b
|
2023-01-11 19:29:56
|
Ed Welch
|
promtail: Remove noisy error message when cleaning up filesystem watches which are already removed. (#8086)
| false
|
diff --git a/clients/pkg/promtail/targets/file/filetarget.go b/clients/pkg/promtail/targets/file/filetarget.go
index e80623526a52b..e1ba15cce320c 100644
--- a/clients/pkg/promtail/targets/file/filetarget.go
+++ b/clients/pkg/promtail/targets/file/filetarget.go
@@ -7,12 +7,12 @@ import (
"time"
"github.com/bmatcuk/doublestar"
+ "github.com/fsnotify/fsnotify"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
- fsnotify "gopkg.in/fsnotify.v1"
"github.com/grafana/loki/clients/pkg/promtail/api"
"github.com/grafana/loki/clients/pkg/promtail/client"
diff --git a/clients/pkg/promtail/targets/file/filetarget_test.go b/clients/pkg/promtail/targets/file/filetarget_test.go
index 30e9272a5e753..43d23e5fa0b06 100644
--- a/clients/pkg/promtail/targets/file/filetarget_test.go
+++ b/clients/pkg/promtail/targets/file/filetarget_test.go
@@ -9,11 +9,11 @@ import (
"testing"
"time"
+ "github.com/fsnotify/fsnotify"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
- "gopkg.in/fsnotify.v1"
"github.com/go-kit/log"
diff --git a/clients/pkg/promtail/targets/file/filetargetmanager.go b/clients/pkg/promtail/targets/file/filetargetmanager.go
index 969192ef30a79..f72f316993b34 100644
--- a/clients/pkg/promtail/targets/file/filetargetmanager.go
+++ b/clients/pkg/promtail/targets/file/filetargetmanager.go
@@ -8,8 +8,10 @@ import (
"sync"
"github.com/bmatcuk/doublestar"
+ "github.com/fsnotify/fsnotify"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
+ "github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery"
@@ -17,7 +19,6 @@ import (
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/relabel"
- "gopkg.in/fsnotify.v1"
"github.com/grafana/loki/clients/pkg/logentry/stages"
"github.com/grafana/loki/clients/pkg/promtail/api"
@@ -155,7 +156,9 @@ func (tm *FileTargetManager) watchTargetEvents(ctx context.Context) {
}
case fileTargetEventWatchStop:
if err := tm.watcher.Remove(event.path); err != nil {
- level.Error(tm.log).Log("msg", " failed to remove directory from watcher", "error", err)
+ if !errors.Is(err, fsnotify.ErrNonExistentWatch) {
+ level.Error(tm.log).Log("msg", " failed to remove directory from watcher", "error", err)
+ }
}
}
case <-ctx.Done():
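This hunk is the commit's behavioral change: fsnotify v1.6 returns the sentinel `fsnotify.ErrNonExistentWatch` when `Remove` is called for a path that is no longer watched (for example because the directory was already deleted), and that case is now treated as benign instead of logged. A standalone sketch of the same pattern, with hypothetical logger wiring rather than Promtail's actual setup; `errors.Is` is used instead of `==` so wrapped errors match too:

```go
package main

import (
	"errors"
	"log"

	"github.com/fsnotify/fsnotify"
)

// removeWatch drops a path from the watcher, staying quiet when the
// watch is already gone and logging only unexpected failures.
func removeWatch(w *fsnotify.Watcher, path string) {
	if err := w.Remove(path); err != nil {
		if !errors.Is(err, fsnotify.ErrNonExistentWatch) {
			log.Printf("failed to remove directory from watcher: %v", err)
		}
	}
}

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	removeWatch(w, "/tmp/never-watched") // ErrNonExistentWatch, ignored
}
```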
diff --git a/clients/pkg/promtail/targets/file/filetargetmanager_test.go b/clients/pkg/promtail/targets/file/filetargetmanager_test.go
index 83c7adb1195f1..f71218be27476 100644
--- a/clients/pkg/promtail/targets/file/filetargetmanager_test.go
+++ b/clients/pkg/promtail/targets/file/filetargetmanager_test.go
@@ -7,6 +7,7 @@ import (
"testing"
"time"
+ "github.com/fsnotify/fsnotify"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -14,7 +15,6 @@ import (
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup"
- "gopkg.in/fsnotify.v1"
"github.com/grafana/loki/clients/pkg/promtail/api"
"github.com/grafana/loki/clients/pkg/promtail/client/fake"
diff --git a/go.mod b/go.mod
index 92a9091087663..8a3874beee2b5 100644
--- a/go.mod
+++ b/go.mod
@@ -106,7 +106,6 @@ require (
google.golang.org/api v0.102.0
google.golang.org/grpc v1.50.1
gopkg.in/alecthomas/kingpin.v2 v2.2.6
- gopkg.in/fsnotify.v1 v1.4.7
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6
@@ -114,6 +113,7 @@ require (
)
require (
+ github.com/fsnotify/fsnotify v1.6.0
github.com/heroku/x v0.0.50
github.com/prometheus/alertmanager v0.24.0
github.com/prometheus/common/sigv4 v0.1.0
@@ -182,7 +182,6 @@ require (
github.com/envoyproxy/go-control-plane v0.10.3 // indirect
github.com/envoyproxy/protoc-gen-validate v0.6.13 // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
- github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-kit/kit v0.12.0 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
diff --git a/go.sum b/go.sum
index a67f2e6007e3a..af13161474f23 100644
--- a/go.sum
+++ b/go.sum
@@ -2097,7 +2097,6 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY=
gopkg.in/fsnotify.v1 v1.2.1/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/fsnotify/fsnotify.v1 v1.4.7 h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBlQbo=
gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE=
diff --git a/vendor/gopkg.in/fsnotify.v1/.editorconfig b/vendor/gopkg.in/fsnotify.v1/.editorconfig
deleted file mode 100644
index ba49e3c234913..0000000000000
--- a/vendor/gopkg.in/fsnotify.v1/.editorconfig
+++ /dev/null
@@ -1,5 +0,0 @@
-root = true
-
-[*]
-indent_style = tab
-indent_size = 4
diff --git a/vendor/gopkg.in/fsnotify.v1/.gitignore b/vendor/gopkg.in/fsnotify.v1/.gitignore
deleted file mode 100644
index 4cd0cbaf432cc..0000000000000
--- a/vendor/gopkg.in/fsnotify.v1/.gitignore
+++ /dev/null
@@ -1,6 +0,0 @@
-# Setup a Global .gitignore for OS and editor generated files:
-# https://help.github.com/articles/ignoring-files
-# git config --global core.excludesfile ~/.gitignore_global
-
-.vagrant
-*.sublime-project
diff --git a/vendor/gopkg.in/fsnotify.v1/.travis.yml b/vendor/gopkg.in/fsnotify.v1/.travis.yml
deleted file mode 100644
index 981d1bb8132d1..0000000000000
--- a/vendor/gopkg.in/fsnotify.v1/.travis.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-sudo: false
-language: go
-
-go:
- - 1.8.x
- - 1.9.x
- - tip
-
-matrix:
- allow_failures:
- - go: tip
- fast_finish: true
-
-before_script:
- - go get -u github.com/golang/lint/golint
-
-script:
- - go test -v --race ./...
-
-after_script:
- - test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
- - test -z "$(golint ./... | tee /dev/stderr)"
- - go vet ./...
-
-os:
- - linux
- - osx
-
-notifications:
- email: false
diff --git a/vendor/gopkg.in/fsnotify.v1/AUTHORS b/vendor/gopkg.in/fsnotify.v1/AUTHORS
deleted file mode 100644
index 5ab5d41c54729..0000000000000
--- a/vendor/gopkg.in/fsnotify.v1/AUTHORS
+++ /dev/null
@@ -1,52 +0,0 @@
-# Names should be added to this file as
-# Name or Organization <email address>
-# The email address is not required for organizations.
-
-# You can update this list using the following command:
-#
-# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
-
-# Please keep the list sorted.
-
-Aaron L <[email protected]>
-Adrien Bustany <[email protected]>
-Amit Krishnan <[email protected]>
-Anmol Sethi <[email protected]>
-Bjørn Erik Pedersen <[email protected]>
-Bruno Bigras <[email protected]>
-Caleb Spare <[email protected]>
-Case Nelson <[email protected]>
-Chris Howey <[email protected]> <[email protected]>
-Christoffer Buchholz <[email protected]>
-Daniel Wagner-Hall <[email protected]>
-Dave Cheney <[email protected]>
-Evan Phoenix <[email protected]>
-Francisco Souza <[email protected]>
-Hari haran <[email protected]>
-John C Barstow
-Kelvin Fo <[email protected]>
-Ken-ichirou MATSUZAWA <[email protected]>
-Matt Layher <[email protected]>
-Nathan Youngman <[email protected]>
-Nickolai Zeldovich <[email protected]>
-Patrick <[email protected]>
-Paul Hammond <[email protected]>
-Pawel Knap <[email protected]>
-Pieter Droogendijk <[email protected]>
-Pursuit92 <[email protected]>
-Riku Voipio <[email protected]>
-Rob Figueiredo <[email protected]>
-Rodrigo Chiossi <[email protected]>
-Slawek Ligus <[email protected]>
-Soge Zhang <[email protected]>
-Tiffany Jernigan <[email protected]>
-Tilak Sharma <[email protected]>
-Tom Payne <[email protected]>
-Travis Cline <[email protected]>
-Tudor Golubenco <[email protected]>
-Vahe Khachikyan <[email protected]>
-Yukang <[email protected]>
-bronze1man <[email protected]>
-debrando <[email protected]>
-henrikedwards <[email protected]>
-铁哥 <[email protected]>
diff --git a/vendor/gopkg.in/fsnotify.v1/CHANGELOG.md b/vendor/gopkg.in/fsnotify.v1/CHANGELOG.md
deleted file mode 100644
index be4d7ea2c145e..0000000000000
--- a/vendor/gopkg.in/fsnotify.v1/CHANGELOG.md
+++ /dev/null
@@ -1,317 +0,0 @@
-# Changelog
-
-## v1.4.7 / 2018-01-09
-
-* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
-* Tests: Fix missing verb on format string (thanks @rchiossi)
-* Linux: Fix deadlock in Remove (thanks @aarondl)
-* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
-* Docs: Moved FAQ into the README (thanks @vahe)
-* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
-* Docs: replace references to OS X with macOS
-
-## v1.4.2 / 2016-10-10
-
-* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
-
-## v1.4.1 / 2016-10-04
-
-* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
-
-## v1.4.0 / 2016-10-01
-
-* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
-
-## v1.3.1 / 2016-06-28
-
-* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
-
-## v1.3.0 / 2016-04-19
-
-* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
-
-## v1.2.10 / 2016-03-02
-
-* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
-
-## v1.2.9 / 2016-01-13
-
-* kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
-
-## v1.2.8 / 2015-12-17
-
-* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
-* inotify: fix race in test
-* enable race detection for continuous integration (Linux, Mac, Windows)
-
-## v1.2.5 / 2015-10-17
-
-* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
-* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
-* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
-* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
-
-## v1.2.1 / 2015-10-14
-
-* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
-
-## v1.2.0 / 2015-02-08
-
-* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
-* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
-* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
-
-## v1.1.1 / 2015-02-05
-
-* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
-
-## v1.1.0 / 2014-12-12
-
-* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
- * add low-level functions
- * only need to store flags on directories
- * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
- * done can be an unbuffered channel
- * remove calls to os.NewSyscallError
-* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
-* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
-* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
-
-## v1.0.4 / 2014-09-07
-
-* kqueue: add dragonfly to the build tags.
-* Rename source code files, rearrange code so exported APIs are at the top.
-* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
-
-## v1.0.3 / 2014-08-19
-
-* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
-
-## v1.0.2 / 2014-08-17
-
-* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
-* [Fix] Make ./path and path equivalent. (thanks @zhsso)
-
-## v1.0.0 / 2014-08-15
-
-* [API] Remove AddWatch on Windows, use Add.
-* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
-* Minor updates based on feedback from golint.
-
-## dev / 2014-07-09
-
-* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
-* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
-
-## dev / 2014-07-04
-
-* kqueue: fix incorrect mutex used in Close()
-* Update example to demonstrate usage of Op.
-
-## dev / 2014-06-28
-
-* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
-* Fix for String() method on Event (thanks Alex Brainman)
-* Don't build on Plan 9 or Solaris (thanks @4ad)
-
-## dev / 2014-06-21
-
-* Events channel of type Event rather than *Event.
-* [internal] use syscall constants directly for inotify and kqueue.
-* [internal] kqueue: rename events to kevents and fileEvent to event.
-
-## dev / 2014-06-19
-
-* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
-* [internal] remove cookie from Event struct (unused).
-* [internal] Event struct has the same definition across every OS.
-* [internal] remove internal watch and removeWatch methods.
-
-## dev / 2014-06-12
-
-* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
-* [API] Pluralized channel names: Events and Errors.
-* [API] Renamed FileEvent struct to Event.
-* [API] Op constants replace methods like IsCreate().
-
-## dev / 2014-06-12
-
-* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
-
-## dev / 2014-05-23
-
-* [API] Remove current implementation of WatchFlags.
- * current implementation doesn't take advantage of OS for efficiency
- * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
- * no tests for the current implementation
- * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
-
-## v0.9.3 / 2014-12-31
-
-* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
-
-## v0.9.2 / 2014-08-17
-
-* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
-
-## v0.9.1 / 2014-06-12
-
-* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
-
-## v0.9.0 / 2014-01-17
-
-* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
-* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
-* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
-
-## v0.8.12 / 2013-11-13
-
-* [API] Remove FD_SET and friends from Linux adapter
-
-## v0.8.11 / 2013-11-02
-
-* [Doc] Add Changelog [#72][] (thanks @nathany)
-* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
-
-## v0.8.10 / 2013-10-19
-
-* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
-* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
-* [Doc] specify OS-specific limits in README (thanks @debrando)
-
-## v0.8.9 / 2013-09-08
-
-* [Doc] Contributing (thanks @nathany)
-* [Doc] update package path in example code [#63][] (thanks @paulhammond)
-* [Doc] GoCI badge in README (Linux only) [#60][]
-* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
-
-## v0.8.8 / 2013-06-17
-
-* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
-
-## v0.8.7 / 2013-06-03
-
-* [API] Make syscall flags internal
-* [Fix] inotify: ignore event changes
-* [Fix] race in symlink test [#45][] (reported by @srid)
-* [Fix] tests on Windows
-* lower case error messages
-
-## v0.8.6 / 2013-05-23
-
-* kqueue: Use EVT_ONLY flag on Darwin
-* [Doc] Update README with full example
-
-## v0.8.5 / 2013-05-09
-
-* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
-
-## v0.8.4 / 2013-04-07
-
-* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
-
-## v0.8.3 / 2013-03-13
-
-* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
-* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
-
-## v0.8.2 / 2013-02-07
-
-* [Doc] add Authors
-* [Fix] fix data races for map access [#29][] (thanks @fsouza)
-
-## v0.8.1 / 2013-01-09
-
-* [Fix] Windows path separators
-* [Doc] BSD License
-
-## v0.8.0 / 2012-11-09
-
-* kqueue: directory watching improvements (thanks @vmirage)
-* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
-* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
-
-## v0.7.4 / 2012-10-09
-
-* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
-* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
-* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
-* [Fix] kqueue: modify after recreation of file
-
-## v0.7.3 / 2012-09-27
-
-* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
-* [Fix] kqueue: no longer get duplicate CREATE events
-
-## v0.7.2 / 2012-09-01
-
-* kqueue: events for created directories
-
-## v0.7.1 / 2012-07-14
-
-* [Fix] for renaming files
-
-## v0.7.0 / 2012-07-02
-
-* [Feature] FSNotify flags
-* [Fix] inotify: Added file name back to event path
-
-## v0.6.0 / 2012-06-06
-
-* kqueue: watch files after directory created (thanks @tmc)
-
-## v0.5.1 / 2012-05-22
-
-* [Fix] inotify: remove all watches before Close()
-
-## v0.5.0 / 2012-05-03
-
-* [API] kqueue: return errors during watch instead of sending over channel
-* kqueue: match symlink behavior on Linux
-* inotify: add `DELETE_SELF` (requested by @taralx)
-* [Fix] kqueue: handle EINTR (reported by @robfig)
-* [Doc] Godoc example [#1][] (thanks @davecheney)
-
-## v0.4.0 / 2012-03-30
-
-* Go 1 released: build with go tool
-* [Feature] Windows support using winfsnotify
-* Windows does not have attribute change notifications
-* Roll attribute notifications into IsModify
-
-## v0.3.0 / 2012-02-19
-
-* kqueue: add files when watch directory
-
-## v0.2.0 / 2011-12-30
-
-* update to latest Go weekly code
-
-## v0.1.0 / 2011-10-19
-
-* kqueue: add watch on file creation to match inotify
-* kqueue: create file event
-* inotify: ignore `IN_IGNORED` events
-* event String()
-* linux: common FileEvent functions
-* initial commit
-
-[#79]: https://github.com/howeyc/fsnotify/pull/79
-[#77]: https://github.com/howeyc/fsnotify/pull/77
-[#72]: https://github.com/howeyc/fsnotify/issues/72
-[#71]: https://github.com/howeyc/fsnotify/issues/71
-[#70]: https://github.com/howeyc/fsnotify/issues/70
-[#63]: https://github.com/howeyc/fsnotify/issues/63
-[#62]: https://github.com/howeyc/fsnotify/issues/62
-[#60]: https://github.com/howeyc/fsnotify/issues/60
-[#59]: https://github.com/howeyc/fsnotify/issues/59
-[#49]: https://github.com/howeyc/fsnotify/issues/49
-[#45]: https://github.com/howeyc/fsnotify/issues/45
-[#40]: https://github.com/howeyc/fsnotify/issues/40
-[#36]: https://github.com/howeyc/fsnotify/issues/36
-[#33]: https://github.com/howeyc/fsnotify/issues/33
-[#29]: https://github.com/howeyc/fsnotify/issues/29
-[#25]: https://github.com/howeyc/fsnotify/issues/25
-[#24]: https://github.com/howeyc/fsnotify/issues/24
-[#21]: https://github.com/howeyc/fsnotify/issues/21
diff --git a/vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md b/vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md
deleted file mode 100644
index 828a60b24ba26..0000000000000
--- a/vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md
+++ /dev/null
@@ -1,77 +0,0 @@
-# Contributing
-
-## Issues
-
-* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
-* Please indicate the platform you are using fsnotify on.
-* A code example to reproduce the problem is appreciated.
-
-## Pull Requests
-
-### Contributor License Agreement
-
-fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
-
-Please indicate that you have signed the CLA in your pull request.
-
-### How fsnotify is Developed
-
-* Development is done on feature branches.
-* Tests are run on BSD, Linux, macOS and Windows.
-* Pull requests are reviewed and [applied to master][am] using [hub][].
- * Maintainers may modify or squash commits rather than asking contributors to.
-* To issue a new release, the maintainers will:
- * Update the CHANGELOG
- * Tag a version, which will become available through gopkg.in.
-
-### How to Fork
-
-For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
-
-1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
-2. Create your feature branch (`git checkout -b my-new-feature`)
-3. Ensure everything works and the tests pass (see below)
-4. Commit your changes (`git commit -am 'Add some feature'`)
-
-Contribute upstream:
-
-1. Fork fsnotify on GitHub
-2. Add your remote (`git remote add fork [email protected]:mycompany/repo.git`)
-3. Push to the branch (`git push fork my-new-feature`)
-4. Create a new Pull Request on GitHub
-
-This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
-
-### Testing
-
-fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
-
-Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
-
-To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
-
-* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
-* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
-* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
-* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
-* When you're done, you will want to halt or destroy the Vagrant boxes.
-
-Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
-
-Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
-
-### Maintainers
-
-Help maintaining fsnotify is welcome. To be a maintainer:
-
-* Submit a pull request and sign the CLA as above.
-* You must be able to run the test suite on Mac, Windows, Linux and BSD.
-
-To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
-
-All code changes should be internal pull requests.
-
-Releases are tagged using [Semantic Versioning](http://semver.org/).
-
-[hub]: https://github.com/github/hub
-[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs
diff --git a/vendor/gopkg.in/fsnotify.v1/LICENSE b/vendor/gopkg.in/fsnotify.v1/LICENSE
deleted file mode 100644
index f21e54080090f..0000000000000
--- a/vendor/gopkg.in/fsnotify.v1/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright (c) 2012 The Go Authors. All rights reserved.
-Copyright (c) 2012 fsnotify Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/fsnotify.v1/README.md b/vendor/gopkg.in/fsnotify.v1/README.md
deleted file mode 100644
index 3993207413a75..0000000000000
--- a/vendor/gopkg.in/fsnotify.v1/README.md
+++ /dev/null
@@ -1,79 +0,0 @@
-# File system notifications for Go
-
-[](https://godoc.org/github.com/fsnotify/fsnotify) [](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
-
-fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
-
-```console
-go get -u golang.org/x/sys/...
-```
-
-Cross platform: Windows, Linux, BSD and macOS.
-
-|Adapter |OS |Status |
-|----------|----------|----------|
-|inotify |Linux 2.6.27 or later, Android\*|Supported [](https://travis-ci.org/fsnotify/fsnotify)|
-|kqueue |BSD, macOS, iOS\*|Supported [](https://travis-ci.org/fsnotify/fsnotify)|
-|ReadDirectoryChangesW|Windows|Supported [](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
-|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
-|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)|
-|fanotify |Linux 2.6.37+ | |
-|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)|
-|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)|
-
-\* Android and iOS are untested.
-
-Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
-
-## API stability
-
-fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
-
-All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
-
-Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
-
-## Contributing
-
-Please refer to [CONTRIBUTING][] before opening an issue or pull request.
-
-## Example
-
-See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
-
-## FAQ
-
-**When a file is moved to another directory is it still being watched?**
-
-No (it shouldn't be, unless you are watching where it was moved to).
-
-**When I watch a directory, are all subdirectories watched as well?**
-
-No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
-
-**Do I have to watch the Error and Event channels in a separate goroutine?**
-
-As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7])
-
-**Why am I receiving multiple events for the same file on OS X?**
-
-Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
-
-**How many files can be watched at once?**
-
-There are OS-specific limits as to how many watches can be created:
-* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
-* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
-
-[#62]: https://github.com/howeyc/fsnotify/issues/62
-[#18]: https://github.com/fsnotify/fsnotify/issues/18
-[#11]: https://github.com/fsnotify/fsnotify/issues/11
-[#7]: https://github.com/howeyc/fsnotify/issues/7
-
-[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
-
-## Related Projects
-
-* [notify](https://github.com/rjeczalik/notify)
-* [fsevents](https://github.com/fsnotify/fsevents)
-
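With the vendored copy (and its README's pointer to `example_test.go`) removed, the basic consumption pattern against the `github.com/fsnotify/fsnotify` v1.6 API that this commit switches to looks roughly like the following — a sketch for orientation, not code from this repository:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	// Both channels must be drained, or the watcher blocks.
	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return // watcher was closed
			}
			log.Printf("event: %s on %q", ev.Op, ev.Name)
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Printf("watch error: %v", err)
		}
	}
}
```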
diff --git a/vendor/gopkg.in/fsnotify.v1/fen.go b/vendor/gopkg.in/fsnotify.v1/fen.go
deleted file mode 100644
index ced39cb881e68..0000000000000
--- a/vendor/gopkg.in/fsnotify.v1/fen.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build solaris
-
-package fsnotify
-
-import (
- "errors"
-)
-
-// Watcher watches a set of files, delivering events to a channel.
-type Watcher struct {
- Events chan Event
- Errors chan error
-}
-
-// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
-func NewWatcher() (*Watcher, error) {
- return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
-}
-
-// Close removes all watches and closes the events channel.
-func (w *Watcher) Close() error {
- return nil
-}
-
-// Add starts watching the named file or directory (non-recursively).
-func (w *Watcher) Add(name string) error {
- return nil
-}
-
-// Remove stops watching the named file or directory (non-recursively).
-func (w *Watcher) Remove(name string) error {
- return nil
-}
diff --git a/vendor/gopkg.in/fsnotify.v1/fsnotify.go b/vendor/gopkg.in/fsnotify.v1/fsnotify.go
deleted file mode 100644
index 190bf0de57562..0000000000000
--- a/vendor/gopkg.in/fsnotify.v1/fsnotify.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !plan9
-
-// Package fsnotify provides a platform-independent interface for file system notifications.
-package fsnotify
-
-import (
- "bytes"
- "errors"
- "fmt"
-)
-
-// Event represents a single file system notification.
-type Event struct {
- Name string // Relative path to the file or directory.
- Op Op // File operation that triggered the event.
-}
-
-// Op describes a set of file operations.
-type Op uint32
-
-// These are the generalized file operations that can trigger a notification.
-const (
- Create Op = 1 << iota
- Write
- Remove
- Rename
- Chmod
-)
-
-func (op Op) String() string {
- // Use a buffer for efficient string concatenation
- var buffer bytes.Buffer
-
- if op&Create == Create {
- buffer.WriteString("|CREATE")
- }
- if op&Remove == Remove {
- buffer.WriteString("|REMOVE")
- }
- if op&Write == Write {
- buffer.WriteString("|WRITE")
- }
- if op&Rename == Rename {
- buffer.WriteString("|RENAME")
- }
- if op&Chmod == Chmod {
- buffer.WriteString("|CHMOD")
- }
- if buffer.Len() == 0 {
- return ""
- }
- return buffer.String()[1:] // Strip leading pipe
-}
-
-// String returns a string representation of the event in the form
-// "file: REMOVE|WRITE|..."
-func (e Event) String() string {
- return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
-}
-
-// Common errors that can be reported by a watcher
-var ErrEventOverflow = errors.New("fsnotify queue overflow")
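The deleted `fsnotify.go` above defines `Op` as a bitmask, so a single event can carry several operations at once; the replacement module keeps that shape. A small illustration, assuming the v1.6 API (which adds an `Event.Has` helper on top of the plain bitwise test used in the old `String()` method):

```go
package main

import (
	"fmt"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// Ops are bit flags; combine them with bitwise OR.
	ev := fsnotify.Event{Name: "file.txt", Op: fsnotify.Write | fsnotify.Chmod}

	fmt.Println(ev.Has(fsnotify.Write))                 // true
	fmt.Println(ev.Has(fsnotify.Remove))                // false
	fmt.Println(ev.Op&fsnotify.Chmod == fsnotify.Chmod) // true (pre-1.6 style test)
}
```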
diff --git a/vendor/gopkg.in/fsnotify.v1/inotify.go b/vendor/gopkg.in/fsnotify.v1/inotify.go
deleted file mode 100644
index d9fd1b88a05f2..0000000000000
--- a/vendor/gopkg.in/fsnotify.v1/inotify.go
+++ /dev/null
@@ -1,337 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux
-
-package fsnotify
-
-import (
- "errors"
- "fmt"
- "io"
- "os"
- "path/filepath"
- "strings"
- "sync"
- "unsafe"
-
- "golang.org/x/sys/unix"
-)
-
-// Watcher watches a set of files, delivering events to a channel.
-type Watcher struct {
- Events chan Event
- Errors chan error
- mu sync.Mutex // Map access
- fd int
- poller *fdPoller
- watches map[string]*watch // Map of inotify watches (key: path)
- paths map[int]string // Map of watched paths (key: watch descriptor)
- done chan struct{} // Channel for sending a "quit message" to the reader goroutine
- doneResp chan struct{} // Channel to respond to Close
-}
-
-// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
-func NewWatcher() (*Watcher, error) {
- // Create inotify fd
- fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
- if fd == -1 {
- return nil, errno
- }
- // Create epoll
- poller, err := newFdPoller(fd)
- if err != nil {
- unix.Close(fd)
- return nil, err
- }
- w := &Watcher{
- fd: fd,
- poller: poller,
- watches: make(map[string]*watch),
- paths: make(map[int]string),
- Events: make(chan Event),
- Errors: make(chan error),
- done: make(chan struct{}),
- doneResp: make(chan struct{}),
- }
-
- go w.readEvents()
- return w, nil
-}
-
-func (w *Watcher) isClosed() bool {
- select {
- case <-w.done:
- return true
- default:
- return false
- }
-}
-
-// Close removes all watches and closes the events channel.
-func (w *Watcher) Close() error {
- if w.isClosed() {
- return nil
- }
-
- // Send 'close' signal to goroutine, and set the Watcher to closed.
- close(w.done)
-
- // Wake up goroutine
- w.poller.wake()
-
- // Wait for goroutine to close
- <-w.doneResp
-
- return nil
-}
-
-// Add starts watching the named file or directory (non-recursively).
-func (w *Watcher) Add(name string) error {
- name = filepath.Clean(name)
- if w.isClosed() {
- return errors.New("inotify instance already closed")
- }
-
- const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
- unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
- unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
-
- var flags uint32 = agnosticEvents
-
- w.mu.Lock()
- defer w.mu.Unlock()
- watchEntry := w.watches[name]
- if watchEntry != nil {
- flags |= watchEntry.flags | unix.IN_MASK_ADD
- }
- wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
- if wd == -1 {
- return errno
- }
-
- if watchEntry == nil {
- w.watches[name] = &watch{wd: uint32(wd), flags: flags}
- w.paths[wd] = name
- } else {
- watchEntry.wd = uint32(wd)
- watchEntry.flags = flags
- }
-
- return nil
-}
-
-// Remove stops watching the named file or directory (non-recursively).
-func (w *Watcher) Remove(name string) error {
- name = filepath.Clean(name)
-
- // Fetch the watch.
- w.mu.Lock()
- defer w.mu.Unlock()
- watch, ok := w.watches[name]
-
- // Remove it from inotify.
- if !ok {
- return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
- }
-
- // We successfully removed the watch if InotifyRmWatch doesn't return an
- // error; we need to clean up our internal state to ensure it matches
- // inotify's kernel state.
- delete(w.paths, int(watch.wd))
- delete(w.watches, name)
-
- // inotify_rm_watch will return EINVAL if the file has been deleted;
- // the inotify watch will already have been removed.
- // watches and paths are deleted in ignoreLinux() implicitly and asynchronously
- // by calling inotify_rm_watch() below: the readEvents() goroutine receives IN_IGNORE,
- // so EINVAL means that the wd is being rm_watch()ed or its file was removed
- // by another thread and we have not yet received the IN_IGNORE event.
- success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
- if success == -1 {
- // TODO: Perhaps it's not helpful to return an error here in every case.
- // the only two possible errors are:
- // EBADF, which happens when w.fd is not a valid file descriptor of any kind.
- // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
- // Watch descriptors are invalidated when they are removed explicitly or implicitly;
- // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
- return errno
- }
-
- return nil
-}
-
-type watch struct {
- wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
- flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
-}
-
-// readEvents reads from the inotify file descriptor, converts the
-// received events into Event objects and sends them via the Events channel
-func (w *Watcher) readEvents() {
- var (
- buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
- n int // Number of bytes read with read()
- errno error // Syscall errno
- ok bool // For poller.wait
- )
-
- defer close(w.doneResp)
- defer close(w.Errors)
- defer close(w.Events)
- defer unix.Close(w.fd)
- defer w.poller.close()
-
- for {
- // See if we have been closed.
- if w.isClosed() {
- return
- }
-
- ok, errno = w.poller.wait()
- if errno != nil {
- select {
- case w.Errors <- errno:
- case <-w.done:
- return
- }
- continue
- }
-
- if !ok {
- continue
- }
-
- n, errno = unix.Read(w.fd, buf[:])
- // If a signal interrupted execution, see if we've been asked to close, and try again.
- // http://man7.org/linux/man-pages/man7/signal.7.html :
- // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
- if errno == unix.EINTR {
- continue
- }
-
- // unix.Read might have been woken up by Close. If so, we're done.
- if w.isClosed() {
- return
- }
-
- if n < unix.SizeofInotifyEvent {
- var err error
- if n == 0 {
- // EOF was received; this should really never happen.
- err = io.EOF
- } else if n < 0 {
- // If an error occurred while reading.
- err = errno
- } else {
- // Read was too short.
- err = errors.New("notify: short read in readEvents()")
- }
- select {
- case w.Errors <- err:
- case <-w.done:
- return
- }
- continue
- }
-
- var offset uint32
- // We don't know how many events we just read into the buffer
- // While the offset points to at least one whole event...
- for offset <= uint32(n-unix.SizeofInotifyEvent) {
- // Point "raw" to the event in the buffer
- raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
-
- mask := uint32(raw.Mask)
- nameLen := uint32(raw.Len)
-
- if mask&unix.IN_Q_OVERFLOW != 0 {
- select {
- case w.Errors <- ErrEventOverflow:
- case <-w.done:
- return
- }
- }
-
- // If the event happened to the watched directory or the watched file, the kernel
- // doesn't append the filename to the event, but we would like to always fill the
- // the "Name" field with a valid filename. We retrieve the path of the watch from
- // "Name" field with a valid filename. We retrieve the path of the watch from
- // the "paths" map.
- w.mu.Lock()
- name, ok := w.paths[int(raw.Wd)]
- // IN_DELETE_SELF occurs when the file/directory being watched is removed.
- // This is a sign to clean up the maps, otherwise we are no longer in sync
- // with the inotify kernel state which has already deleted the watch
- // automatically.
- if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
- delete(w.paths, int(raw.Wd))
- delete(w.watches, name)
- }
- w.mu.Unlock()
-
- if nameLen > 0 {
- // Point "bytes" at the first byte of the filename
- bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
- // The filename is padded with NULL bytes. TrimRight() gets rid of those.
- name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
- }
-
- event := newEvent(name, mask)
-
- // Send the events that are not ignored on the events channel
- if !event.ignoreLinux(mask) {
- select {
- case w.Events <- event:
- case <-w.done:
- return
- }
- }
-
- // Move to the next event in the buffer
- offset += unix.SizeofInotifyEvent + nameLen
- }
- }
-}
-
-// Certain types of events can be "ignored" and not sent over the Events
-// channel, such as events marked ignore by the kernel, or MODIFY events
-// against files that do not exist.
-func (e *Event) ignoreLinux(mask uint32) bool {
- // Ignore anything the inotify API says to ignore
- if mask&unix.IN_IGNORED == unix.IN_IGNORED {
- return true
- }
-
- // If the event is not a DELETE or RENAME, the file must exist.
- // Otherwise the event is ignored.
- // *Note*: this was put in place because it was seen that a MODIFY
- // event was sent after the DELETE. This ignores that MODIFY and
- // assumes a DELETE will come or has come if the file doesn't exist.
- if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
- _, statErr := os.Lstat(e.Name)
- return os.IsNotExist(statErr)
- }
- return false
-}
-
-// newEvent returns a platform-independent Event based on an inotify mask.
-func newEvent(name string, mask uint32) Event {
- e := Event{Name: name}
- if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
- e.Op |= Create
- }
- if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
- e.Op |= Remove
- }
- if mask&unix.IN_MODIFY == unix.IN_MODIFY {
- e.Op |= Write
- }
- if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
- e.Op |= Rename
- }
- if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
- e.Op |= Chmod
- }
- return e
-}
diff --git a/vendor/gopkg.in/fsnotify.v1/inotify_poller.go b/vendor/gopkg.in/fsnotify.v1/inotify_poller.go
deleted file mode 100644
index cc7db4b22ef5b..0000000000000
--- a/vendor/gopkg.in/fsnotify.v1/inotify_poller.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux
-
-package fsnotify
-
-import (
- "errors"
-
- "golang.org/x/sys/unix"
-)
-
-type fdPoller struct {
- fd int // File descriptor (as returned by the inotify_init() syscall)
- epfd int // Epoll file descriptor
- pipe [2]int // Pipe for waking up
-}
-
-func emptyPoller(fd int) *fdPoller {
- poller := new(fdPoller)
- poller.fd = fd
- poller.epfd = -1
- poller.pipe[0] = -1
- poller.pipe[1] = -1
- return poller
-}
-
-// Create a new inotify poller.
-// This creates an inotify handler, and an epoll handler.
-func newFdPoller(fd int) (*fdPoller, error) {
- var errno error
- poller := emptyPoller(fd)
- defer func() {
- if errno != nil {
- poller.close()
- }
- }()
- poller.fd = fd
-
- // Create epoll fd
- poller.epfd, errno = unix.EpollCreate1(0)
- if poller.epfd == -1 {
- return nil, errno
- }
- // Create pipe; pipe[0] is the read end, pipe[1] the write end.
- errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK)
- if errno != nil {
- return nil, errno
- }
-
- // Register inotify fd with epoll
- event := unix.EpollEvent{
- Fd: int32(poller.fd),
- Events: unix.EPOLLIN,
- }
- errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
- if errno != nil {
- return nil, errno
- }
-
- // Register pipe fd with epoll
- event = unix.EpollEvent{
- Fd: int32(poller.pipe[0]),
- Events: unix.EPOLLIN,
- }
- errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
- if errno != nil {
- return nil, errno
- }
-
- return poller, nil
-}
-
-// Wait using epoll.
-// Returns true if something is ready to be read,
-// false if there is not.
-func (poller *fdPoller) wait() (bool, error) {
- // 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
- // I don't know whether epoll_wait returns the number of events returned,
- // or the total number of events ready.
- // I decided to catch both by making the buffer one larger than the maximum.
- events := make([]unix.EpollEvent, 7)
- for {
- n, errno := unix.EpollWait(poller.epfd, events, -1)
- if n == -1 {
- if errno == unix.EINTR {
- continue
- }
- return false, errno
- }
- if n == 0 {
- // If there are no events, try again.
- continue
- }
- if n > 6 {
- // This should never happen. More events were returned than should be possible.
- return false, errors.New("epoll_wait returned more events than I know what to do with")
- }
- ready := events[:n]
- epollhup := false
- epollerr := false
- epollin := false
- for _, event := range ready {
- if event.Fd == int32(poller.fd) {
- if event.Events&unix.EPOLLHUP != 0 {
- // This should not happen, but if it does, treat it as a wakeup.
- epollhup = true
- }
- if event.Events&unix.EPOLLERR != 0 {
- // If an error is waiting on the file descriptor, we should pretend
- // something is ready to read, and let unix.Read pick up the error.
- epollerr = true
- }
- if event.Events&unix.EPOLLIN != 0 {
- // There is data to read.
- epollin = true
- }
- }
- if event.Fd == int32(poller.pipe[0]) {
- if event.Events&unix.EPOLLHUP != 0 {
- // Write pipe descriptor was closed, by us. This means we're closing down the
- // watcher, and we should wake up.
- }
- if event.Events&unix.EPOLLERR != 0 {
- // If an error is waiting on the pipe file descriptor.
- // This is an absolute mystery, and should never ever happen.
- return false, errors.New("Error on the pipe descriptor.")
- }
- if event.Events&unix.EPOLLIN != 0 {
- // This is a regular wakeup, so we have to clear the buffer.
- err := poller.clearWake()
- if err != nil {
- return false, err
- }
- }
- }
- }
-
- if epollhup || epollerr || epollin {
- return true, nil
- }
- return false, nil
- }
-}
-
-// Close the write end of the poller.
-func (poller *fdPoller) wake() error {
- buf := make([]byte, 1)
- n, errno := unix.Write(poller.pipe[1], buf)
- if n == -1 {
- if errno == unix.EAGAIN {
- // Buffer is full, poller will wake.
- return nil
- }
- return errno
- }
- return nil
-}
-
-func (poller *fdPoller) clearWake() error {
- // You have to be woken up a LOT in order to get to 100!
- buf := make([]byte, 100)
- n, errno := unix.Read(poller.pipe[0], buf)
- if n == -1 {
- if errno == unix.EAGAIN {
- // Buffer is empty, someone else cleared our wake.
- return nil
- }
- return errno
- }
- return nil
-}
-
-// Close all poller file descriptors, but not the one passed to it.
-func (poller *fdPoller) close() {
- if poller.pipe[1] != -1 {
- unix.Close(poller.pipe[1])
- }
- if poller.pipe[0] != -1 {
- unix.Close(poller.pipe[0])
- }
- if poller.epfd != -1 {
- unix.Close(poller.epfd)
- }
-}
diff --git a/vendor/gopkg.in/fsnotify.v1/kqueue.go b/vendor/gopkg.in/fsnotify.v1/kqueue.go
deleted file mode 100644
index 86e76a3d67683..0000000000000
--- a/vendor/gopkg.in/fsnotify.v1/kqueue.go
+++ /dev/null
@@ -1,521 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build freebsd openbsd netbsd dragonfly darwin
-
-package fsnotify
-
-import (
- "errors"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "sync"
- "time"
-
- "golang.org/x/sys/unix"
-)
-
-// Watcher watches a set of files, delivering events to a channel.
-type Watcher struct {
- Events chan Event
- Errors chan error
- done chan struct{} // Channel for sending a "quit message" to the reader goroutine
-
- kq int // File descriptor (as returned by the kqueue() syscall).
-
- mu sync.Mutex // Protects access to watcher data
- watches map[string]int // Map of watched file descriptors (key: path).
- externalWatches map[string]bool // Map of watches added by user of the library.
- dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue.
- paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events.
- fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events).
- isClosed bool // Set to true when Close() is first called
-}
-
-type pathInfo struct {
- name string
- isDir bool
-}
-
-// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
-func NewWatcher() (*Watcher, error) {
- kq, err := kqueue()
- if err != nil {
- return nil, err
- }
-
- w := &Watcher{
- kq: kq,
- watches: make(map[string]int),
- dirFlags: make(map[string]uint32),
- paths: make(map[int]pathInfo),
- fileExists: make(map[string]bool),
- externalWatches: make(map[string]bool),
- Events: make(chan Event),
- Errors: make(chan error),
- done: make(chan struct{}),
- }
-
- go w.readEvents()
- return w, nil
-}
-
-// Close removes all watches and closes the events channel.
-func (w *Watcher) Close() error {
- w.mu.Lock()
- if w.isClosed {
- w.mu.Unlock()
- return nil
- }
- w.isClosed = true
-
- // copy paths to remove while locked
- var pathsToRemove = make([]string, 0, len(w.watches))
- for name := range w.watches {
- pathsToRemove = append(pathsToRemove, name)
- }
- w.mu.Unlock()
- // unlock before calling Remove, which also locks
-
- for _, name := range pathsToRemove {
- w.Remove(name)
- }
-
- // send a "quit" message to the reader goroutine
- close(w.done)
-
- return nil
-}
-
-// Add starts watching the named file or directory (non-recursively).
-func (w *Watcher) Add(name string) error {
- w.mu.Lock()
- w.externalWatches[name] = true
- w.mu.Unlock()
- _, err := w.addWatch(name, noteAllEvents)
- return err
-}
-
-// Remove stops watching the named file or directory (non-recursively).
-func (w *Watcher) Remove(name string) error {
- name = filepath.Clean(name)
- w.mu.Lock()
- watchfd, ok := w.watches[name]
- w.mu.Unlock()
- if !ok {
- return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
- }
-
- const registerRemove = unix.EV_DELETE
- if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
- return err
- }
-
- unix.Close(watchfd)
-
- w.mu.Lock()
- isDir := w.paths[watchfd].isDir
- delete(w.watches, name)
- delete(w.paths, watchfd)
- delete(w.dirFlags, name)
- w.mu.Unlock()
-
- // Find all watched paths that are in this directory that are not external.
- if isDir {
- var pathsToRemove []string
- w.mu.Lock()
- for _, path := range w.paths {
- wdir, _ := filepath.Split(path.name)
- if filepath.Clean(wdir) == name {
- if !w.externalWatches[path.name] {
- pathsToRemove = append(pathsToRemove, path.name)
- }
- }
- }
- w.mu.Unlock()
- for _, name := range pathsToRemove {
- // Since these are internal, not much sense in propagating error
- // to the user, as that will just confuse them with an error about
- // a path they did not explicitly watch themselves.
- w.Remove(name)
- }
- }
-
- return nil
-}
-
-// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
-const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
-
-// keventWaitTime to block on each read from kevent
-var keventWaitTime = durationToTimespec(100 * time.Millisecond)
-
-// addWatch adds name to the watched file set.
-// The flags are interpreted as described in kevent(2).
-// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
-func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
- var isDir bool
- // Make ./name and name equivalent
- name = filepath.Clean(name)
-
- w.mu.Lock()
- if w.isClosed {
- w.mu.Unlock()
- return "", errors.New("kevent instance already closed")
- }
- watchfd, alreadyWatching := w.watches[name]
- // We already have a watch, but we can still override flags.
- if alreadyWatching {
- isDir = w.paths[watchfd].isDir
- }
- w.mu.Unlock()
-
- if !alreadyWatching {
- fi, err := os.Lstat(name)
- if err != nil {
- return "", err
- }
-
- // Don't watch sockets.
- if fi.Mode()&os.ModeSocket == os.ModeSocket {
- return "", nil
- }
-
- // Don't watch named pipes.
- if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
- return "", nil
- }
-
- // Follow Symlinks
- // Unfortunately, Linux can add bogus symlinks to watch list without
- // issue, and Windows can't do symlinks period (AFAIK). To maintain
- // consistency, we will act like everything is fine. There will simply
- // be no file events for broken symlinks.
- // Hence the returns of nil on errors.
- if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
- name, err = filepath.EvalSymlinks(name)
- if err != nil {
- return "", nil
- }
-
- w.mu.Lock()
- _, alreadyWatching = w.watches[name]
- w.mu.Unlock()
-
- if alreadyWatching {
- return name, nil
- }
-
- fi, err = os.Lstat(name)
- if err != nil {
- return "", nil
- }
- }
-
- watchfd, err = unix.Open(name, openMode, 0700)
- if watchfd == -1 {
- return "", err
- }
-
- isDir = fi.IsDir()
- }
-
- const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
- if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
- unix.Close(watchfd)
- return "", err
- }
-
- if !alreadyWatching {
- w.mu.Lock()
- w.watches[name] = watchfd
- w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
- w.mu.Unlock()
- }
-
- if isDir {
- // Watch the directory if it has not been watched before,
- // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
- w.mu.Lock()
-
- watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
- (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
- // Store flags so this watch can be updated later
- w.dirFlags[name] = flags
- w.mu.Unlock()
-
- if watchDir {
- if err := w.watchDirectoryFiles(name); err != nil {
- return "", err
- }
- }
- }
- return name, nil
-}
-
-// readEvents reads from kqueue and converts the received kevents into
-// Event values that it sends down the Events channel.
-func (w *Watcher) readEvents() {
- eventBuffer := make([]unix.Kevent_t, 10)
-
-loop:
- for {
- // See if there is a message on the "done" channel
- select {
- case <-w.done:
- break loop
- default:
- }
-
- // Get new events
- kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
- // EINTR is okay, the syscall was interrupted before timeout expired.
- if err != nil && err != unix.EINTR {
- select {
- case w.Errors <- err:
- case <-w.done:
- break loop
- }
- continue
- }
-
- // Flush the events we received to the Events channel
- for len(kevents) > 0 {
- kevent := &kevents[0]
- watchfd := int(kevent.Ident)
- mask := uint32(kevent.Fflags)
- w.mu.Lock()
- path := w.paths[watchfd]
- w.mu.Unlock()
- event := newEvent(path.name, mask)
-
- if path.isDir && !(event.Op&Remove == Remove) {
- // Double check to make sure the directory exists. This can happen when
- // we do an rm -fr on recursively watched folders and receive a
- // modification event first, but the folder has been deleted and we
- // later receive the delete event.
- if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
- // mark it as a delete event
- event.Op |= Remove
- }
- }
-
- if event.Op&Rename == Rename || event.Op&Remove == Remove {
- w.Remove(event.Name)
- w.mu.Lock()
- delete(w.fileExists, event.Name)
- w.mu.Unlock()
- }
-
- if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
- w.sendDirectoryChangeEvents(event.Name)
- } else {
- // Send the event on the Events channel.
- select {
- case w.Events <- event:
- case <-w.done:
- break loop
- }
- }
-
- if event.Op&Remove == Remove {
- // Look for a file that may have overwritten this.
- // For example, mv f1 f2 will delete f2, then create f2.
- if path.isDir {
- fileDir := filepath.Clean(event.Name)
- w.mu.Lock()
- _, found := w.watches[fileDir]
- w.mu.Unlock()
- if found {
- // make sure the directory exists before we watch for changes. When we
- // do a recursive watch and perform rm -fr, the parent directory might
- // have gone missing; ignore the missing directory and let the
- // upcoming delete event remove the watch from the parent directory.
- if _, err := os.Lstat(fileDir); err == nil {
- w.sendDirectoryChangeEvents(fileDir)
- }
- }
- } else {
- filePath := filepath.Clean(event.Name)
- if fileInfo, err := os.Lstat(filePath); err == nil {
- w.sendFileCreatedEventIfNew(filePath, fileInfo)
- }
- }
- }
-
- // Move to next event
- kevents = kevents[1:]
- }
- }
-
- // cleanup
- err := unix.Close(w.kq)
- if err != nil {
- // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors.
- select {
- case w.Errors <- err:
- default:
- }
- }
- close(w.Events)
- close(w.Errors)
-}
-
-// newEvent returns a platform-independent Event based on kqueue Fflags.
-func newEvent(name string, mask uint32) Event {
- e := Event{Name: name}
- if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
- e.Op |= Remove
- }
- if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
- e.Op |= Write
- }
- if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
- e.Op |= Rename
- }
- if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
- e.Op |= Chmod
- }
- return e
-}
-
-func newCreateEvent(name string) Event {
- return Event{Name: name, Op: Create}
-}
-
-// watchDirectoryFiles to mimic inotify when adding a watch on a directory
-func (w *Watcher) watchDirectoryFiles(dirPath string) error {
- // Get all files
- files, err := ioutil.ReadDir(dirPath)
- if err != nil {
- return err
- }
-
- for _, fileInfo := range files {
- filePath := filepath.Join(dirPath, fileInfo.Name())
- filePath, err = w.internalWatch(filePath, fileInfo)
- if err != nil {
- return err
- }
-
- w.mu.Lock()
- w.fileExists[filePath] = true
- w.mu.Unlock()
- }
-
- return nil
-}
-
-// sendDirectoryEvents searches the directory for newly created files
-// and sends them over the event channel. This functionality is to have
-// the BSD version of fsnotify match Linux inotify which provides a
-// create event for files created in a watched directory.
-func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
- // Get all files
- files, err := ioutil.ReadDir(dirPath)
- if err != nil {
- select {
- case w.Errors <- err:
- case <-w.done:
- return
- }
- }
-
- // Search for new files
- for _, fileInfo := range files {
- filePath := filepath.Join(dirPath, fileInfo.Name())
- err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
-
- if err != nil {
- return
- }
- }
-}
-
-// sendFileCreatedEvent sends a create event if the file isn't already being tracked.
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
- w.mu.Lock()
- _, doesExist := w.fileExists[filePath]
- w.mu.Unlock()
- if !doesExist {
- // Send create event
- select {
- case w.Events <- newCreateEvent(filePath):
- case <-w.done:
- return
- }
- }
-
- // like watchDirectoryFiles (but without doing another ReadDir)
- filePath, err = w.internalWatch(filePath, fileInfo)
- if err != nil {
- return err
- }
-
- w.mu.Lock()
- w.fileExists[filePath] = true
- w.mu.Unlock()
-
- return nil
-}
-
-func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
- if fileInfo.IsDir() {
- // mimic Linux providing delete events for subdirectories
- // but preserve the flags used if currently watching subdirectory
- w.mu.Lock()
- flags := w.dirFlags[name]
- w.mu.Unlock()
-
- flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
- return w.addWatch(name, flags)
- }
-
- // watch file to mimic Linux inotify
- return w.addWatch(name, noteAllEvents)
-}
-
-// kqueue creates a new kernel event queue and returns a descriptor.
-func kqueue() (kq int, err error) {
- kq, err = unix.Kqueue()
- if kq == -1 {
- return kq, err
- }
- return kq, nil
-}
-
-// register events with the queue
-func register(kq int, fds []int, flags int, fflags uint32) error {
- changes := make([]unix.Kevent_t, len(fds))
-
- for i, fd := range fds {
- // SetKevent converts int to the platform-specific types:
- unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
- changes[i].Fflags = fflags
- }
-
- // register the events
- success, err := unix.Kevent(kq, changes, nil, nil)
- if success == -1 {
- return err
- }
- return nil
-}
-
-// read retrieves pending events, or waits until an event occurs.
-// A timeout of nil blocks indefinitely, while 0 polls the queue.
-func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
- n, err := unix.Kevent(kq, nil, events, timeout)
- if err != nil {
- return nil, err
- }
- return events[0:n], nil
-}
-
-// durationToTimespec prepares a timeout value
-func durationToTimespec(d time.Duration) unix.Timespec {
- return unix.NsecToTimespec(d.Nanoseconds())
-}
diff --git a/vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go b/vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go
deleted file mode 100644
index 7d8de14513ede..0000000000000
--- a/vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build freebsd openbsd netbsd dragonfly
-
-package fsnotify
-
-import "golang.org/x/sys/unix"
-
-const openMode = unix.O_NONBLOCK | unix.O_RDONLY
diff --git a/vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go b/vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go
deleted file mode 100644
index 9139e17161bfb..0000000000000
--- a/vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin
-
-package fsnotify
-
-import "golang.org/x/sys/unix"
-
-// note: this constant is not defined on BSD
-const openMode = unix.O_EVTONLY
diff --git a/vendor/gopkg.in/fsnotify.v1/windows.go b/vendor/gopkg.in/fsnotify.v1/windows.go
deleted file mode 100644
index 09436f31d8217..0000000000000
--- a/vendor/gopkg.in/fsnotify.v1/windows.go
+++ /dev/null
@@ -1,561 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows
-
-package fsnotify
-
-import (
- "errors"
- "fmt"
- "os"
- "path/filepath"
- "runtime"
- "sync"
- "syscall"
- "unsafe"
-)
-
-// Watcher watches a set of files, delivering events to a channel.
-type Watcher struct {
- Events chan Event
- Errors chan error
- isClosed bool // Set to true when Close() is first called
- mu sync.Mutex // Map access
- port syscall.Handle // Handle to completion port
- watches watchMap // Map of watches (key: i-number)
- input chan *input // Inputs to the reader are sent on this channel
- quit chan chan<- error
-}
-
-// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
-func NewWatcher() (*Watcher, error) {
- port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
- if e != nil {
- return nil, os.NewSyscallError("CreateIoCompletionPort", e)
- }
- w := &Watcher{
- port: port,
- watches: make(watchMap),
- input: make(chan *input, 1),
- Events: make(chan Event, 50),
- Errors: make(chan error),
- quit: make(chan chan<- error, 1),
- }
- go w.readEvents()
- return w, nil
-}
-
-// Close removes all watches and closes the events channel.
-func (w *Watcher) Close() error {
- if w.isClosed {
- return nil
- }
- w.isClosed = true
-
- // Send "quit" message to the reader goroutine
- ch := make(chan error)
- w.quit <- ch
- if err := w.wakeupReader(); err != nil {
- return err
- }
- return <-ch
-}
-
-// Add starts watching the named file or directory (non-recursively).
-func (w *Watcher) Add(name string) error {
- if w.isClosed {
- return errors.New("watcher already closed")
- }
- in := &input{
- op: opAddWatch,
- path: filepath.Clean(name),
- flags: sysFSALLEVENTS,
- reply: make(chan error),
- }
- w.input <- in
- if err := w.wakeupReader(); err != nil {
- return err
- }
- return <-in.reply
-}
-
-// Remove stops watching the the named file or directory (non-recursively).
-func (w *Watcher) Remove(name string) error {
- in := &input{
- op: opRemoveWatch,
- path: filepath.Clean(name),
- reply: make(chan error),
- }
- w.input <- in
- if err := w.wakeupReader(); err != nil {
- return err
- }
- return <-in.reply
-}
-
-const (
- // Options for AddWatch
- sysFSONESHOT = 0x80000000
- sysFSONLYDIR = 0x1000000
-
- // Events
- sysFSACCESS = 0x1
- sysFSALLEVENTS = 0xfff
- sysFSATTRIB = 0x4
- sysFSCLOSE = 0x18
- sysFSCREATE = 0x100
- sysFSDELETE = 0x200
- sysFSDELETESELF = 0x400
- sysFSMODIFY = 0x2
- sysFSMOVE = 0xc0
- sysFSMOVEDFROM = 0x40
- sysFSMOVEDTO = 0x80
- sysFSMOVESELF = 0x800
-
- // Special events
- sysFSIGNORED = 0x8000
- sysFSQOVERFLOW = 0x4000
-)
-
-func newEvent(name string, mask uint32) Event {
- e := Event{Name: name}
- if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
- e.Op |= Create
- }
- if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
- e.Op |= Remove
- }
- if mask&sysFSMODIFY == sysFSMODIFY {
- e.Op |= Write
- }
- if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
- e.Op |= Rename
- }
- if mask&sysFSATTRIB == sysFSATTRIB {
- e.Op |= Chmod
- }
- return e
-}
-
-const (
- opAddWatch = iota
- opRemoveWatch
-)
-
-const (
- provisional uint64 = 1 << (32 + iota)
-)
-
-type input struct {
- op int
- path string
- flags uint32
- reply chan error
-}
-
-type inode struct {
- handle syscall.Handle
- volume uint32
- index uint64
-}
-
-type watch struct {
- ov syscall.Overlapped
- ino *inode // i-number
- path string // Directory path
- mask uint64 // Directory itself is being watched with these notify flags
- names map[string]uint64 // Map of names being watched and their notify flags
- rename string // Remembers the old name while renaming a file
- buf [4096]byte
-}
-
-type indexMap map[uint64]*watch
-type watchMap map[uint32]indexMap
-
-func (w *Watcher) wakeupReader() error {
- e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
- if e != nil {
- return os.NewSyscallError("PostQueuedCompletionStatus", e)
- }
- return nil
-}
-
-func getDir(pathname string) (dir string, err error) {
- attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
- if e != nil {
- return "", os.NewSyscallError("GetFileAttributes", e)
- }
- if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
- dir = pathname
- } else {
- dir, _ = filepath.Split(pathname)
- dir = filepath.Clean(dir)
- }
- return
-}
-
-func getIno(path string) (ino *inode, err error) {
- h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
- syscall.FILE_LIST_DIRECTORY,
- syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
- nil, syscall.OPEN_EXISTING,
- syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
- if e != nil {
- return nil, os.NewSyscallError("CreateFile", e)
- }
- var fi syscall.ByHandleFileInformation
- if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
- syscall.CloseHandle(h)
- return nil, os.NewSyscallError("GetFileInformationByHandle", e)
- }
- ino = &inode{
- handle: h,
- volume: fi.VolumeSerialNumber,
- index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
- }
- return ino, nil
-}
-
-// Must run within the I/O thread.
-func (m watchMap) get(ino *inode) *watch {
- if i := m[ino.volume]; i != nil {
- return i[ino.index]
- }
- return nil
-}
-
-// Must run within the I/O thread.
-func (m watchMap) set(ino *inode, watch *watch) {
- i := m[ino.volume]
- if i == nil {
- i = make(indexMap)
- m[ino.volume] = i
- }
- i[ino.index] = watch
-}
-
-// Must run within the I/O thread.
-func (w *Watcher) addWatch(pathname string, flags uint64) error {
- dir, err := getDir(pathname)
- if err != nil {
- return err
- }
- if flags&sysFSONLYDIR != 0 && pathname != dir {
- return nil
- }
- ino, err := getIno(dir)
- if err != nil {
- return err
- }
- w.mu.Lock()
- watchEntry := w.watches.get(ino)
- w.mu.Unlock()
- if watchEntry == nil {
- if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
- syscall.CloseHandle(ino.handle)
- return os.NewSyscallError("CreateIoCompletionPort", e)
- }
- watchEntry = &watch{
- ino: ino,
- path: dir,
- names: make(map[string]uint64),
- }
- w.mu.Lock()
- w.watches.set(ino, watchEntry)
- w.mu.Unlock()
- flags |= provisional
- } else {
- syscall.CloseHandle(ino.handle)
- }
- if pathname == dir {
- watchEntry.mask |= flags
- } else {
- watchEntry.names[filepath.Base(pathname)] |= flags
- }
- if err = w.startRead(watchEntry); err != nil {
- return err
- }
- if pathname == dir {
- watchEntry.mask &= ^provisional
- } else {
- watchEntry.names[filepath.Base(pathname)] &= ^provisional
- }
- return nil
-}
-
-// Must run within the I/O thread.
-func (w *Watcher) remWatch(pathname string) error {
- dir, err := getDir(pathname)
- if err != nil {
- return err
- }
- ino, err := getIno(dir)
- if err != nil {
- return err
- }
- w.mu.Lock()
- watch := w.watches.get(ino)
- w.mu.Unlock()
- if watch == nil {
- return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
- }
- if pathname == dir {
- w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
- watch.mask = 0
- } else {
- name := filepath.Base(pathname)
- w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
- delete(watch.names, name)
- }
- return w.startRead(watch)
-}
-
-// Must run within the I/O thread.
-func (w *Watcher) deleteWatch(watch *watch) {
- for name, mask := range watch.names {
- if mask&provisional == 0 {
- w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
- }
- delete(watch.names, name)
- }
- if watch.mask != 0 {
- if watch.mask&provisional == 0 {
- w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
- }
- watch.mask = 0
- }
-}
-
-// Must run within the I/O thread.
-func (w *Watcher) startRead(watch *watch) error {
- if e := syscall.CancelIo(watch.ino.handle); e != nil {
- w.Errors <- os.NewSyscallError("CancelIo", e)
- w.deleteWatch(watch)
- }
- mask := toWindowsFlags(watch.mask)
- for _, m := range watch.names {
- mask |= toWindowsFlags(m)
- }
- if mask == 0 {
- if e := syscall.CloseHandle(watch.ino.handle); e != nil {
- w.Errors <- os.NewSyscallError("CloseHandle", e)
- }
- w.mu.Lock()
- delete(w.watches[watch.ino.volume], watch.ino.index)
- w.mu.Unlock()
- return nil
- }
- e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
- uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
- if e != nil {
- err := os.NewSyscallError("ReadDirectoryChanges", e)
- if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
- // Watched directory was probably removed
- if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
- if watch.mask&sysFSONESHOT != 0 {
- watch.mask = 0
- }
- }
- err = nil
- }
- w.deleteWatch(watch)
- w.startRead(watch)
- return err
- }
- return nil
-}
-
-// readEvents reads from the I/O completion port, converts the
-// received events into Event objects and sends them via the Events channel.
-// Entry point to the I/O thread.
-func (w *Watcher) readEvents() {
- var (
- n, key uint32
- ov *syscall.Overlapped
- )
- runtime.LockOSThread()
-
- for {
- e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
- watch := (*watch)(unsafe.Pointer(ov))
-
- if watch == nil {
- select {
- case ch := <-w.quit:
- w.mu.Lock()
- var indexes []indexMap
- for _, index := range w.watches {
- indexes = append(indexes, index)
- }
- w.mu.Unlock()
- for _, index := range indexes {
- for _, watch := range index {
- w.deleteWatch(watch)
- w.startRead(watch)
- }
- }
- var err error
- if e := syscall.CloseHandle(w.port); e != nil {
- err = os.NewSyscallError("CloseHandle", e)
- }
- close(w.Events)
- close(w.Errors)
- ch <- err
- return
- case in := <-w.input:
- switch in.op {
- case opAddWatch:
- in.reply <- w.addWatch(in.path, uint64(in.flags))
- case opRemoveWatch:
- in.reply <- w.remWatch(in.path)
- }
- default:
- }
- continue
- }
-
- switch e {
- case syscall.ERROR_MORE_DATA:
- if watch == nil {
- w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
- } else {
- // The i/o succeeded but the buffer is full.
- // In theory we should be building up a full packet.
- // In practice we can get away with just carrying on.
- n = uint32(unsafe.Sizeof(watch.buf))
- }
- case syscall.ERROR_ACCESS_DENIED:
- // Watched directory was probably removed
- w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
- w.deleteWatch(watch)
- w.startRead(watch)
- continue
- case syscall.ERROR_OPERATION_ABORTED:
- // CancelIo was called on this handle
- continue
- default:
- w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
- continue
- case nil:
- }
-
- var offset uint32
- for {
- if n == 0 {
- w.Events <- newEvent("", sysFSQOVERFLOW)
- w.Errors <- errors.New("short read in readEvents()")
- break
- }
-
- // Point "raw" to the event in the buffer
- raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
- buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
- name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
- fullname := filepath.Join(watch.path, name)
-
- var mask uint64
- switch raw.Action {
- case syscall.FILE_ACTION_REMOVED:
- mask = sysFSDELETESELF
- case syscall.FILE_ACTION_MODIFIED:
- mask = sysFSMODIFY
- case syscall.FILE_ACTION_RENAMED_OLD_NAME:
- watch.rename = name
- case syscall.FILE_ACTION_RENAMED_NEW_NAME:
- if watch.names[watch.rename] != 0 {
- watch.names[name] |= watch.names[watch.rename]
- delete(watch.names, watch.rename)
- mask = sysFSMOVESELF
- }
- }
-
- sendNameEvent := func() {
- if w.sendEvent(fullname, watch.names[name]&mask) {
- if watch.names[name]&sysFSONESHOT != 0 {
- delete(watch.names, name)
- }
- }
- }
- if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
- sendNameEvent()
- }
- if raw.Action == syscall.FILE_ACTION_REMOVED {
- w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
- delete(watch.names, name)
- }
- if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
- if watch.mask&sysFSONESHOT != 0 {
- watch.mask = 0
- }
- }
- if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
- fullname = filepath.Join(watch.path, watch.rename)
- sendNameEvent()
- }
-
- // Move to the next event in the buffer
- if raw.NextEntryOffset == 0 {
- break
- }
- offset += raw.NextEntryOffset
-
- // Error!
- if offset >= n {
- w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
- break
- }
- }
-
- if err := w.startRead(watch); err != nil {
- w.Errors <- err
- }
- }
-}
-
-func (w *Watcher) sendEvent(name string, mask uint64) bool {
- if mask == 0 {
- return false
- }
- event := newEvent(name, uint32(mask))
- select {
- case ch := <-w.quit:
- w.quit <- ch
- case w.Events <- event:
- }
- return true
-}
-
-func toWindowsFlags(mask uint64) uint32 {
- var m uint32
- if mask&sysFSACCESS != 0 {
- m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
- }
- if mask&sysFSMODIFY != 0 {
- m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
- }
- if mask&sysFSATTRIB != 0 {
- m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
- }
- if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
- m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
- }
- return m
-}
-
-func toFSnotifyFlags(action uint32) uint64 {
- switch action {
- case syscall.FILE_ACTION_ADDED:
- return sysFSCREATE
- case syscall.FILE_ACTION_REMOVED:
- return sysFSDELETE
- case syscall.FILE_ACTION_MODIFIED:
- return sysFSMODIFY
- case syscall.FILE_ACTION_RENAMED_OLD_NAME:
- return sysFSMOVEDFROM
- case syscall.FILE_ACTION_RENAMED_NEW_NAME:
- return sysFSMOVEDTO
- }
- return 0
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 980d4d2e19d48..aa4e232495280 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1661,9 +1661,6 @@ google.golang.org/protobuf/types/known/wrapperspb
# gopkg.in/alecthomas/kingpin.v2 v2.2.6
## explicit
gopkg.in/alecthomas/kingpin.v2
-# gopkg.in/fsnotify.v1 v1.4.7
-## explicit
-gopkg.in/fsnotify.v1
# gopkg.in/fsnotify/fsnotify.v1 v1.4.7
## explicit
gopkg.in/fsnotify/fsnotify.v1
| promtail | Remove noisy error message when cleaning up filesystem watches which are already removed. (#8086) |
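
The commit above drops the stale gopkg.in/fsnotify.v1 vendor copy; the noisy-error fix it refers to amounts to ignoring "watch does not exist" failures during cleanup. A minimal sketch of that pattern against the current github.com/fsnotify/fsnotify module (the ErrNonExistentWatch sentinel is assumed from fsnotify v1.6+, and the paths here are purely illustrative):

package main

import (
	"errors"
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/var/log"); err != nil {
		log.Fatal(err)
	}

	// A rename/delete event can already have dropped the kernel watch by
	// the time cleanup runs, so Remove on that path fails. Treating the
	// "watch does not exist" case as a no-op silences the noisy error.
	if err := w.Remove("/var/log/rotated.log"); err != nil &&
		!errors.Is(err, fsnotify.ErrNonExistentWatch) {
		log.Printf("remove watch: %v", err)
	}
}
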
| afd9e363065ee2bb07844f8dbb30cc8dfb310c9d | 2024-05-08 17:25:39 | Dylan Guedes | chore: Call `shardstreams.Config` by value instead of by reference (#12915) | false |
diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md
index df40267e9ae84..9e933b87d4de7 100644
--- a/docs/sources/shared/configuration.md
+++ b/docs/sources/shared/configuration.md
@@ -3305,12 +3305,22 @@ ruler_remote_write_sigv4_config:
# Deprecated: Use deletion_mode per tenant configuration instead.
[allow_deletes: <boolean>]
+# Define streams sharding behavior.
shard_streams:
- [enabled: <boolean>]
+ # Automatically shard streams to keep them under the per-stream rate limit.
+ # Sharding is dictated by the desired rate.
+ # CLI flag: -shard-streams.enabled
+ [enabled: <boolean> | default = true]
- [logging_enabled: <boolean>]
+ # Whether to log sharding streams behavior or not. Not recommended for
+ # production environments.
+ # CLI flag: -shard-streams.logging-enabled
+ [logging_enabled: <boolean> | default = false]
- [desired_rate: <int>]
+ # Threshold used to cut a new shard. Default (1536KB) means if a rate is above
+ # 1536KB/s, it will be sharded into two streams.
+ # CLI flag: -shard-streams.desired-rate
+ [desired_rate: <int> | default = 1536KB]
[blocked_queries: <blocked_query...>]
diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
index 268db96e897ac..87036b5e23c37 100644
--- a/pkg/distributor/distributor.go
+++ b/pkg/distributor/distributor.go
@@ -589,7 +589,7 @@ func (d *Distributor) shardStream(stream logproto.Stream, pushSize int, tenantID
return d.divideEntriesBetweenShards(tenantID, shardCount, shardStreamsCfg, stream)
}
-func (d *Distributor) divideEntriesBetweenShards(tenantID string, totalShards int, shardStreamsCfg *shardstreams.Config, stream logproto.Stream) []KeyedStream {
+func (d *Distributor) divideEntriesBetweenShards(tenantID string, totalShards int, shardStreamsCfg shardstreams.Config, stream logproto.Stream) []KeyedStream {
derivedStreams := d.createShards(stream, totalShards, tenantID, shardStreamsCfg)
for i := 0; i < len(stream.Entries); i++ {
@@ -601,7 +601,7 @@ func (d *Distributor) divideEntriesBetweenShards(tenantID string, totalShards in
return derivedStreams
}
-func (d *Distributor) createShards(stream logproto.Stream, totalShards int, tenantID string, shardStreamsCfg *shardstreams.Config) []KeyedStream {
+func (d *Distributor) createShards(stream logproto.Stream, totalShards int, tenantID string, shardStreamsCfg shardstreams.Config) []KeyedStream {
var (
streamLabels = labelTemplate(stream.Labels, d.logger)
streamPattern = streamLabels.String()
@@ -809,7 +809,7 @@ func (d *Distributor) parseStreamLabels(vContext validationContext, key string,
// based on the rate stored in the rate store and will store the new evaluated number of shards.
//
// desiredRate is expected to be given in bytes.
-func (d *Distributor) shardCountFor(logger log.Logger, stream *logproto.Stream, pushSize int, tenantID string, streamShardcfg *shardstreams.Config) int {
+func (d *Distributor) shardCountFor(logger log.Logger, stream *logproto.Stream, pushSize int, tenantID string, streamShardcfg shardstreams.Config) int {
if streamShardcfg.DesiredRate.Val() <= 0 {
if streamShardcfg.LoggingEnabled {
level.Error(logger).Log("msg", "invalid desired rate", "desired_rate", streamShardcfg.DesiredRate.String())
diff --git a/pkg/distributor/limits.go b/pkg/distributor/limits.go
index 05734db4184f0..a207570c25d51 100644
--- a/pkg/distributor/limits.go
+++ b/pkg/distributor/limits.go
@@ -25,7 +25,7 @@ type Limits interface {
DiscoverServiceName(userID string) []string
DiscoverLogLevels(userID string) bool
- ShardStreams(userID string) *shardstreams.Config
+ ShardStreams(userID string) shardstreams.Config
IngestionRateStrategy() string
IngestionRateBytes(userID string) float64
IngestionBurstSizeBytes(userID string) int
diff --git a/pkg/distributor/ratestore_test.go b/pkg/distributor/ratestore_test.go
index af9fa9f0adb70..5bfacf96ebd46 100644
--- a/pkg/distributor/ratestore_test.go
+++ b/pkg/distributor/ratestore_test.go
@@ -341,15 +341,15 @@ type fakeOverrides struct {
func (c *fakeOverrides) AllByUserID() map[string]*validation.Limits {
return map[string]*validation.Limits{
"ingester0": {
- ShardStreams: &shardstreams.Config{
+ ShardStreams: shardstreams.Config{
Enabled: c.enabled,
},
},
}
}
-func (c *fakeOverrides) ShardStreams(_ string) *shardstreams.Config {
- return &shardstreams.Config{
+func (c *fakeOverrides) ShardStreams(_ string) shardstreams.Config {
+ return shardstreams.Config{
Enabled: c.enabled,
}
}
diff --git a/pkg/distributor/shardstreams/config.go b/pkg/distributor/shardstreams/config.go
index 1bf1f89f961c6..5c39fcc28d6c5 100644
--- a/pkg/distributor/shardstreams/config.go
+++ b/pkg/distributor/shardstreams/config.go
@@ -7,12 +7,13 @@ import (
)
type Config struct {
- Enabled bool `yaml:"enabled" json:"enabled"`
- LoggingEnabled bool `yaml:"logging_enabled" json:"logging_enabled"`
+ Enabled bool `yaml:"enabled" json:"enabled" doc:"description=Automatically shard streams to keep them under the per-stream rate limit. Sharding is dictated by the desired rate."`
+
+ LoggingEnabled bool `yaml:"logging_enabled" json:"logging_enabled" doc:"description=Whether to log sharding streams behavior or not. Not recommended for production environments."`
// DesiredRate is the threshold used to shard the stream into smaller pieces.
// Expected to be in bytes.
- DesiredRate flagext.ByteSize `yaml:"desired_rate" json:"desired_rate"`
+ DesiredRate flagext.ByteSize `yaml:"desired_rate" json:"desired_rate" doc:"description=Threshold used to cut a new shard. Default (1536KB) means if a rate is above 1536KB/s, it will be sharded into two streams."`
}
func (cfg *Config) RegisterFlagsWithPrefix(prefix string, fs *flag.FlagSet) {
diff --git a/pkg/ingester/instance_test.go b/pkg/ingester/instance_test.go
index acc5864fc5573..88b613aa8db2d 100644
--- a/pkg/ingester/instance_test.go
+++ b/pkg/ingester/instance_test.go
@@ -1049,7 +1049,7 @@ func (f fakeLimits) AllByUserID() map[string]*validation.Limits {
func TestStreamShardingUsage(t *testing.T) {
setupCustomTenantLimit := func(perStreamLimit string) *validation.Limits {
- shardStreamsCfg := &shardstreams.Config{Enabled: true, LoggingEnabled: true}
+ shardStreamsCfg := shardstreams.Config{Enabled: true, LoggingEnabled: true}
shardStreamsCfg.DesiredRate.Set("6MB") //nolint:errcheck
customTenantLimits := &validation.Limits{}
diff --git a/pkg/ingester/limiter.go b/pkg/ingester/limiter.go
index 193209a54f6b9..94c77a30be7e3 100644
--- a/pkg/ingester/limiter.go
+++ b/pkg/ingester/limiter.go
@@ -27,7 +27,7 @@ type Limits interface {
MaxLocalStreamsPerUser(userID string) int
MaxGlobalStreamsPerUser(userID string) int
PerStreamRateLimit(userID string) validation.RateLimit
- ShardStreams(userID string) *shardstreams.Config
+ ShardStreams(userID string) shardstreams.Config
}
// Limiter implements primitives to get the maximum number of streams
diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
index aef0bd440d56d..ff9c00e0ed598 100644
--- a/pkg/loki/loki.go
+++ b/pkg/loki/loki.go
@@ -97,7 +97,7 @@ type Config struct {
CompactorConfig compactor.Config `yaml:"compactor,omitempty"`
CompactorHTTPClient compactorclient.HTTPConfig `yaml:"compactor_client,omitempty" doc:"hidden"`
CompactorGRPCClient compactorclient.GRPCConfig `yaml:"compactor_grpc_client,omitempty"`
- LimitsConfig validation.Limits `yaml:"limits_config,omitempty"`
+ LimitsConfig validation.Limits `yaml:"limits_config"`
Worker worker.Config `yaml:"frontend_worker,omitempty"`
TableManager index.TableManagerConfig `yaml:"table_manager,omitempty"`
MemberlistKV memberlist.KVConfig `yaml:"memberlist"`
diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go
index 27e6702dc9889..a4280a2ee9e36 100644
--- a/pkg/validation/limits.go
+++ b/pkg/validation/limits.go
@@ -187,7 +187,7 @@ type Limits struct {
// Deprecated
CompactorDeletionEnabled bool `yaml:"allow_deletes" json:"allow_deletes" doc:"deprecated|description=Use deletion_mode per tenant configuration instead."`
- ShardStreams *shardstreams.Config `yaml:"shard_streams" json:"shard_streams"`
+ ShardStreams shardstreams.Config `yaml:"shard_streams" json:"shard_streams" doc:"description=Define streams sharding behavior."`
BlockedQueries []*validation.BlockedQuery `yaml:"blocked_queries,omitempty" json:"blocked_queries,omitempty"`
@@ -388,7 +388,6 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
),
)
- l.ShardStreams = &shardstreams.Config{}
l.ShardStreams.RegisterFlagsWithPrefix("shard-streams", f)
f.IntVar(&l.VolumeMaxSeries, "limits.volume-max-series", 1000, "The default number of aggregated series or labels that can be returned from a log-volume endpoint")
@@ -900,7 +899,7 @@ func (o *Overrides) DeletionMode(userID string) string {
return o.getOverridesForUser(userID).DeletionMode
}
-func (o *Overrides) ShardStreams(userID string) *shardstreams.Config {
+func (o *Overrides) ShardStreams(userID string) shardstreams.Config {
return o.getOverridesForUser(userID).ShardStreams
}
|
chore
|
Call `shardstreams.Config` by value instead of by reference (#12915)
|
9f0834793b24d9324a6948c11dba2147f6541d09
|
2023-02-10 02:46:50
|
Ed Welch
|
loki: set a maximum number of shards for "limited" queries instead of fixed number (#8487)
| false
|
diff --git a/pkg/querier/queryrange/querysharding.go b/pkg/querier/queryrange/querysharding.go
index cea3eb7126271..83b5b12a7dbba 100644
--- a/pkg/querier/queryrange/querysharding.go
+++ b/pkg/querier/queryrange/querysharding.go
@@ -36,7 +36,7 @@ func NewQueryShardMiddleware(
middlewareMetrics *queryrangebase.InstrumentMiddlewareMetrics,
shardingMetrics *logql.MapperMetrics,
limits Limits,
- resolver logql.ShardResolver,
+ maxShards int,
) queryrangebase.Middleware {
noshards := !hasShards(confs)
@@ -50,7 +50,7 @@ func NewQueryShardMiddleware(
}
mapperware := queryrangebase.MiddlewareFunc(func(next queryrangebase.Handler) queryrangebase.Handler {
- return newASTMapperware(confs, next, logger, shardingMetrics, limits, resolver)
+ return newASTMapperware(confs, next, logger, shardingMetrics, limits, maxShards)
})
return queryrangebase.MiddlewareFunc(func(next queryrangebase.Handler) queryrangebase.Handler {
@@ -72,27 +72,27 @@ func newASTMapperware(
logger log.Logger,
metrics *logql.MapperMetrics,
limits Limits,
- resolver logql.ShardResolver,
+ maxShards int,
) *astMapperware {
return &astMapperware{
- confs: confs,
- logger: log.With(logger, "middleware", "QueryShard.astMapperware"),
- limits: limits,
- next: next,
- ng: logql.NewDownstreamEngine(logql.EngineOpts{LogExecutingQuery: false}, DownstreamHandler{next: next, limits: limits}, limits, logger),
- metrics: metrics,
- shardResolver: resolver,
+ confs: confs,
+ logger: log.With(logger, "middleware", "QueryShard.astMapperware"),
+ limits: limits,
+ next: next,
+ ng: logql.NewDownstreamEngine(logql.EngineOpts{LogExecutingQuery: false}, DownstreamHandler{next: next, limits: limits}, limits, logger),
+ metrics: metrics,
+ maxShards: maxShards,
}
}
type astMapperware struct {
- confs ShardingConfigs
- logger log.Logger
- limits Limits
- next queryrangebase.Handler
- ng *logql.DownstreamEngine
- metrics *logql.MapperMetrics
- shardResolver logql.ShardResolver
+ confs ShardingConfigs
+ logger log.Logger
+ limits Limits
+ next queryrangebase.Handler
+ ng *logql.DownstreamEngine
+ metrics *logql.MapperMetrics
+ maxShards int
}
func (ast *astMapperware) Do(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
@@ -119,23 +119,18 @@ func (ast *astMapperware) Do(ctx context.Context, r queryrangebase.Request) (que
return nil, err
}
- var resolver logql.ShardResolver
- if ast.shardResolver != nil {
- resolver = ast.shardResolver
- } else {
- var ok bool
- resolver, ok = shardResolverForConf(
- ctx,
- conf,
- ast.ng.Opts().MaxLookBackPeriod,
- ast.logger,
- MinWeightedParallelism(ctx, tenants, ast.confs, ast.limits, model.Time(r.GetStart()), model.Time(r.GetEnd())),
- r,
- ast.next,
- )
- if !ok {
- return ast.next.Do(ctx, r)
- }
+ resolver, ok := shardResolverForConf(
+ ctx,
+ conf,
+ ast.ng.Opts().MaxLookBackPeriod,
+ ast.logger,
+ MinWeightedParallelism(ctx, tenants, ast.confs, ast.limits, model.Time(r.GetStart()), model.Time(r.GetEnd())),
+ ast.maxShards,
+ r,
+ ast.next,
+ )
+ if !ok {
+ return ast.next.Do(ctx, r)
}
mapper := logql.NewShardMapper(resolver, ast.metrics)
diff --git a/pkg/querier/queryrange/querysharding_test.go b/pkg/querier/queryrange/querysharding_test.go
index ff04c6956c08b..dec954e386191 100644
--- a/pkg/querier/queryrange/querysharding_test.go
+++ b/pkg/querier/queryrange/querysharding_test.go
@@ -167,7 +167,7 @@ func Test_astMapper(t *testing.T) {
log.NewNopLogger(),
nilShardingMetrics,
fakeLimits{maxSeries: math.MaxInt32, maxQueryParallelism: 1, queryTimeout: time.Second},
- nil,
+ 0,
)
resp, err := mware.Do(user.InjectOrgID(context.Background(), "1"), defaultReq().WithQuery(`{food="bar"}`))
@@ -201,7 +201,7 @@ func Test_ShardingByPass(t *testing.T) {
log.NewNopLogger(),
nilShardingMetrics,
fakeLimits{maxSeries: math.MaxInt32, maxQueryParallelism: 1},
- nil,
+ 0,
)
_, err := mware.Do(user.InjectOrgID(context.Background(), "1"), defaultReq().WithQuery(`1+1`))
@@ -275,7 +275,7 @@ func Test_InstantSharding(t *testing.T) {
maxQueryParallelism: 10,
queryTimeout: time.Second,
},
- nil)
+ 0)
response, err := sharding.Wrap(queryrangebase.HandlerFunc(func(c context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
lock.Lock()
defer lock.Unlock()
@@ -556,7 +556,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) {
log.NewNopLogger(),
nilShardingMetrics,
fakeLimits{maxSeries: math.MaxInt32, maxQueryParallelism: 1, queryTimeout: time.Second},
- nil,
+ 0,
)
resp, err := mware.Do(user.InjectOrgID(context.Background(), "1"), tc.req)
diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go
index 3c2945deed3db..564e8d2840728 100644
--- a/pkg/querier/queryrange/roundtrip.go
+++ b/pkg/querier/queryrange/roundtrip.go
@@ -321,7 +321,7 @@ func NewLogFilterTripperware(
metrics.InstrumentMiddlewareMetrics, // instrumentation is included in the sharding middleware
metrics.MiddlewareMapperMetrics.shardMapper,
limits,
- nil,
+ 0, // 0 is unlimited shards
),
)
}
@@ -391,7 +391,7 @@ func NewLimitedTripperware(
limits,
// Too many shards on limited queries results in slowing down this type of query
// and overwhelming the frontend, therefore we fix the number of shards to prevent this.
- logql.ConstantShards(32),
+ 32,
),
)
}
@@ -565,7 +565,7 @@ func NewMetricTripperware(
metrics.InstrumentMiddlewareMetrics, // instrumentation is included in the sharding middleware
metrics.MiddlewareMapperMetrics.shardMapper,
limits,
- nil,
+ 0, // 0 is unlimited shards
),
)
}
@@ -613,7 +613,7 @@ func NewInstantMetricTripperware(
metrics.InstrumentMiddlewareMetrics, // instrumentation is included in the sharding middleware
metrics.MiddlewareMapperMetrics.shardMapper,
limits,
- nil,
+ 0, // 0 is unlimited shards
),
)
}
diff --git a/pkg/querier/queryrange/shard_resolver.go b/pkg/querier/queryrange/shard_resolver.go
index 245d1537329af..1cff18cd3b0ea 100644
--- a/pkg/querier/queryrange/shard_resolver.go
+++ b/pkg/querier/queryrange/shard_resolver.go
@@ -28,6 +28,7 @@ func shardResolverForConf(
defaultLookback time.Duration,
logger log.Logger,
maxParallelism int,
+ maxShards int,
r queryrangebase.Request,
handler queryrangebase.Handler,
) (logql.ShardResolver, bool) {
@@ -39,6 +40,7 @@ func shardResolverForConf(
from: model.Time(r.GetStart()),
through: model.Time(r.GetEnd()),
maxParallelism: maxParallelism,
+ maxShards: maxShards,
defaultLookback: defaultLookback,
}, true
}
@@ -55,6 +57,7 @@ type dynamicShardResolver struct {
from, through model.Time
maxParallelism int
+ maxShards int
defaultLookback time.Duration
}
@@ -122,6 +125,9 @@ func (r *dynamicShardResolver) Shards(e syntax.Expr) (int, error) {
combined := stats.MergeStats(results...)
factor := guessShardFactor(combined)
+ if r.maxShards >= 0 && factor > r.maxShards {
+ factor = r.maxShards
+ }
var bytesPerShard = combined.Bytes
if factor > 0 {
bytesPerShard = combined.Bytes / uint64(factor)
| loki | set a maximum number of shards for "limited" queries instead of fixed number (#8487) |
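
The clamp this commit adds to dynamicShardResolver.Shards can be sketched in isolation. One interpretive note: the roundtrip.go comments treat 0 as "unlimited shards", so this sketch only applies positive caps, even though the patched condition itself reads maxShards >= 0:

package main

import "fmt"

// capShards sketches the clamp added above: limited queries now pass
// maxShards=32 while the other tripperwares pass 0 ("unlimited" per the
// roundtrip.go comments), so only a positive cap limits the guessed factor.
func capShards(factor, maxShards int) int {
	if maxShards > 0 && factor > maxShards {
		return maxShards
	}
	return factor
}

func main() {
	fmt.Println(capShards(64, 32)) // limited query: clamped to 32
	fmt.Println(capShards(64, 0))  // unlimited: guessed factor kept, 64
}
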
| 6d49d911dc4bd802f350a99f5598c903cf236a90 | 2025-02-04 21:21:51 | renovate[bot] | fix(deps): update module golang.org/x/sys to v0.30.0 (main) (#16087) | false |
diff --git a/go.mod b/go.mod
index 2ae12f5d18c2e..aaf1cc9a29a46 100644
--- a/go.mod
+++ b/go.mod
@@ -101,7 +101,7 @@ require (
golang.org/x/crypto v0.32.0
golang.org/x/net v0.34.0
golang.org/x/sync v0.11.0
- golang.org/x/sys v0.29.0
+ golang.org/x/sys v0.30.0
golang.org/x/time v0.9.0
google.golang.org/api v0.219.0
google.golang.org/grpc v1.70.0
diff --git a/go.sum b/go.sum
index 0e1a136281545..aaca3e8d03276 100644
--- a/go.sum
+++ b/go.sum
@@ -1458,8 +1458,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
-golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
+golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go
index 02609d5b21d56..9c105f23afcdc 100644
--- a/vendor/golang.org/x/sys/cpu/cpu.go
+++ b/vendor/golang.org/x/sys/cpu/cpu.go
@@ -72,6 +72,9 @@ var X86 struct {
HasSSSE3 bool // Supplemental streaming SIMD extension 3
HasSSE41 bool // Streaming SIMD extension 4 and 4.1
HasSSE42 bool // Streaming SIMD extension 4 and 4.2
+ HasAVXIFMA bool // Advanced vector extension Integer Fused Multiply Add
+ HasAVXVNNI bool // Advanced vector extension Vector Neural Network Instructions
+ HasAVXVNNIInt8 bool // Advanced vector extension Vector Neural Network Int8 instructions
_ CacheLinePad
}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go
index 600a6807861e7..1e642f3304fa8 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_x86.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go
@@ -53,6 +53,9 @@ func initOptions() {
{Name: "sse41", Feature: &X86.HasSSE41},
{Name: "sse42", Feature: &X86.HasSSE42},
{Name: "ssse3", Feature: &X86.HasSSSE3},
+ {Name: "avxifma", Feature: &X86.HasAVXIFMA},
+ {Name: "avxvnni", Feature: &X86.HasAVXVNNI},
+ {Name: "avxvnniint8", Feature: &X86.HasAVXVNNIInt8},
// These capabilities should always be enabled on amd64:
{Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"},
@@ -106,7 +109,7 @@ func archInit() {
return
}
- _, ebx7, ecx7, edx7 := cpuid(7, 0)
+ eax7, ebx7, ecx7, edx7 := cpuid(7, 0)
X86.HasBMI1 = isSet(3, ebx7)
X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
X86.HasBMI2 = isSet(8, ebx7)
@@ -134,14 +137,24 @@ func archInit() {
X86.HasAVX512VAES = isSet(9, ecx7)
X86.HasAVX512VBMI2 = isSet(6, ecx7)
X86.HasAVX512BITALG = isSet(12, ecx7)
-
- eax71, _, _, _ := cpuid(7, 1)
- X86.HasAVX512BF16 = isSet(5, eax71)
}
X86.HasAMXTile = isSet(24, edx7)
X86.HasAMXInt8 = isSet(25, edx7)
X86.HasAMXBF16 = isSet(22, edx7)
+
+ // These features depend on the second level of extended features.
+ if eax7 >= 1 {
+ eax71, _, _, edx71 := cpuid(7, 1)
+ if X86.HasAVX512 {
+ X86.HasAVX512BF16 = isSet(5, eax71)
+ }
+ if X86.HasAVX {
+ X86.HasAVXIFMA = isSet(23, eax71)
+ X86.HasAVXVNNI = isSet(4, eax71)
+ X86.HasAVXVNNIInt8 = isSet(4, edx71)
+ }
+ }
}
func isSet(bitpos uint, value uint32) bool {
diff --git a/vendor/golang.org/x/sys/unix/auxv.go b/vendor/golang.org/x/sys/unix/auxv.go
new file mode 100644
index 0000000000000..37a82528f580f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/auxv.go
@@ -0,0 +1,36 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos)
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+//go:linkname runtime_getAuxv runtime.getAuxv
+func runtime_getAuxv() []uintptr
+
+// Auxv returns the ELF auxiliary vector as a sequence of key/value pairs.
+// The returned slice is always a fresh copy, owned by the caller.
+// It returns an error on non-ELF platforms, or if the auxiliary vector cannot be accessed,
+// which happens in some locked-down environments and build modes.
+func Auxv() ([][2]uintptr, error) {
+ vec := runtime_getAuxv()
+ vecLen := len(vec)
+
+ if vecLen == 0 {
+ return nil, syscall.ENOENT
+ }
+
+ if vecLen%2 != 0 {
+ return nil, syscall.EINVAL
+ }
+
+ result := make([]uintptr, vecLen)
+ copy(result, vec)
+ return unsafe.Slice((*[2]uintptr)(unsafe.Pointer(&result[0])), vecLen/2), nil
+}
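
The unix.Auxv helper vendored in above can be exercised roughly as follows (a sketch assuming Linux and go1.21+; AT_PAGESZ is the standard ELF auxv key for the kernel page size and is exported by x/sys/unix):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	pairs, err := unix.Auxv() // fresh copy of the ELF auxiliary vector
	if err != nil {
		// Locked-down environments and pre-go1.21 builds return an error.
		fmt.Println("auxv unavailable:", err)
		return
	}
	for _, kv := range pairs {
		if kv[0] == unix.AT_PAGESZ {
			fmt.Println("kernel page size:", kv[1])
		}
	}
}
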
diff --git a/vendor/golang.org/x/sys/unix/auxv_unsupported.go b/vendor/golang.org/x/sys/unix/auxv_unsupported.go
new file mode 100644
index 0000000000000..1200487f2e86c
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/auxv_unsupported.go
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos)
+
+package unix
+
+import "syscall"
+
+func Auxv() ([][2]uintptr, error) {
+ return nil, syscall.ENOTSUP
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go
index 21974af064ddc..abc3955477c7d 100644
--- a/vendor/golang.org/x/sys/unix/syscall_solaris.go
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -1102,3 +1102,90 @@ func (s *Strioctl) SetInt(i int) {
func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) {
return ioctlPtrRet(fd, req, unsafe.Pointer(s))
}
+
+// Ucred Helpers
+// See ucred(3c) and getpeerucred(3c)
+
+//sys getpeerucred(fd uintptr, ucred *uintptr) (err error)
+//sys ucredFree(ucred uintptr) = ucred_free
+//sys ucredGet(pid int) (ucred uintptr, err error) = ucred_get
+//sys ucredGeteuid(ucred uintptr) (uid int) = ucred_geteuid
+//sys ucredGetegid(ucred uintptr) (gid int) = ucred_getegid
+//sys ucredGetruid(ucred uintptr) (uid int) = ucred_getruid
+//sys ucredGetrgid(ucred uintptr) (gid int) = ucred_getrgid
+//sys ucredGetsuid(ucred uintptr) (uid int) = ucred_getsuid
+//sys ucredGetsgid(ucred uintptr) (gid int) = ucred_getsgid
+//sys ucredGetpid(ucred uintptr) (pid int) = ucred_getpid
+
+// Ucred is an opaque struct that holds user credentials.
+type Ucred struct {
+ ucred uintptr
+}
+
+// We need to ensure that ucredFree is called on the underlying ucred
+// when the Ucred is garbage collected.
+func ucredFinalizer(u *Ucred) {
+ ucredFree(u.ucred)
+}
+
+func GetPeerUcred(fd uintptr) (*Ucred, error) {
+ var ucred uintptr
+ err := getpeerucred(fd, &ucred)
+ if err != nil {
+ return nil, err
+ }
+ result := &Ucred{
+ ucred: ucred,
+ }
+ // set the finalizer on the result so that the ucred will be freed
+ runtime.SetFinalizer(result, ucredFinalizer)
+ return result, nil
+}
+
+func UcredGet(pid int) (*Ucred, error) {
+ ucred, err := ucredGet(pid)
+ if err != nil {
+ return nil, err
+ }
+ result := &Ucred{
+ ucred: ucred,
+ }
+ // set the finalizer on the result so that the ucred will be freed
+ runtime.SetFinalizer(result, ucredFinalizer)
+ return result, nil
+}
+
+func (u *Ucred) Geteuid() int {
+ defer runtime.KeepAlive(u)
+ return ucredGeteuid(u.ucred)
+}
+
+func (u *Ucred) Getruid() int {
+ defer runtime.KeepAlive(u)
+ return ucredGetruid(u.ucred)
+}
+
+func (u *Ucred) Getsuid() int {
+ defer runtime.KeepAlive(u)
+ return ucredGetsuid(u.ucred)
+}
+
+func (u *Ucred) Getegid() int {
+ defer runtime.KeepAlive(u)
+ return ucredGetegid(u.ucred)
+}
+
+func (u *Ucred) Getrgid() int {
+ defer runtime.KeepAlive(u)
+ return ucredGetrgid(u.ucred)
+}
+
+func (u *Ucred) Getsgid() int {
+ defer runtime.KeepAlive(u)
+ return ucredGetsgid(u.ucred)
+}
+
+func (u *Ucred) Getpid() int {
+ defer runtime.KeepAlive(u)
+ return ucredGetpid(u.ucred)
+}
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index 6ebc48b3fecd7..4f432bfe8feee 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -1245,6 +1245,7 @@ const (
FAN_REPORT_DFID_NAME = 0xc00
FAN_REPORT_DFID_NAME_TARGET = 0x1e00
FAN_REPORT_DIR_FID = 0x400
+ FAN_REPORT_FD_ERROR = 0x2000
FAN_REPORT_FID = 0x200
FAN_REPORT_NAME = 0x800
FAN_REPORT_PIDFD = 0x80
@@ -1330,8 +1331,10 @@ const (
FUSE_SUPER_MAGIC = 0x65735546
FUTEXFS_SUPER_MAGIC = 0xbad1dea
F_ADD_SEALS = 0x409
+ F_CREATED_QUERY = 0x404
F_DUPFD = 0x0
F_DUPFD_CLOEXEC = 0x406
+ F_DUPFD_QUERY = 0x403
F_EXLCK = 0x4
F_GETFD = 0x1
F_GETFL = 0x3
@@ -1551,6 +1554,7 @@ const (
IPPROTO_ROUTING = 0x2b
IPPROTO_RSVP = 0x2e
IPPROTO_SCTP = 0x84
+ IPPROTO_SMC = 0x100
IPPROTO_TCP = 0x6
IPPROTO_TP = 0x1d
IPPROTO_UDP = 0x11
@@ -1623,6 +1627,8 @@ const (
IPV6_UNICAST_IF = 0x4c
IPV6_USER_FLOW = 0xe
IPV6_V6ONLY = 0x1a
+ IPV6_VERSION = 0x60
+ IPV6_VERSION_MASK = 0xf0
IPV6_XFRM_POLICY = 0x23
IP_ADD_MEMBERSHIP = 0x23
IP_ADD_SOURCE_MEMBERSHIP = 0x27
@@ -1867,6 +1873,7 @@ const (
MADV_UNMERGEABLE = 0xd
MADV_WILLNEED = 0x3
MADV_WIPEONFORK = 0x12
+ MAP_DROPPABLE = 0x8
MAP_FILE = 0x0
MAP_FIXED = 0x10
MAP_FIXED_NOREPLACE = 0x100000
@@ -1967,6 +1974,7 @@ const (
MSG_PEEK = 0x2
MSG_PROXY = 0x10
MSG_RST = 0x1000
+ MSG_SOCK_DEVMEM = 0x2000000
MSG_SYN = 0x400
MSG_TRUNC = 0x20
MSG_TRYHARD = 0x4
@@ -2083,6 +2091,7 @@ const (
NFC_ATR_REQ_MAXSIZE = 0x40
NFC_ATR_RES_GB_MAXSIZE = 0x2f
NFC_ATR_RES_MAXSIZE = 0x40
+ NFC_ATS_MAXSIZE = 0x14
NFC_COMM_ACTIVE = 0x0
NFC_COMM_PASSIVE = 0x1
NFC_DEVICE_NAME_MAXSIZE = 0x8
@@ -2163,6 +2172,7 @@ const (
NFNL_SUBSYS_QUEUE = 0x3
NFNL_SUBSYS_ULOG = 0x4
NFS_SUPER_MAGIC = 0x6969
+ NFT_BITWISE_BOOL = 0x0
NFT_CHAIN_FLAGS = 0x7
NFT_CHAIN_MAXNAMELEN = 0x100
NFT_CT_MAX = 0x17
@@ -2491,6 +2501,7 @@ const (
PR_GET_PDEATHSIG = 0x2
PR_GET_SECCOMP = 0x15
PR_GET_SECUREBITS = 0x1b
+ PR_GET_SHADOW_STACK_STATUS = 0x4a
PR_GET_SPECULATION_CTRL = 0x34
PR_GET_TAGGED_ADDR_CTRL = 0x38
PR_GET_THP_DISABLE = 0x2a
@@ -2499,6 +2510,7 @@ const (
PR_GET_TIMING = 0xd
PR_GET_TSC = 0x19
PR_GET_UNALIGN = 0x5
+ PR_LOCK_SHADOW_STACK_STATUS = 0x4c
PR_MCE_KILL = 0x21
PR_MCE_KILL_CLEAR = 0x0
PR_MCE_KILL_DEFAULT = 0x2
@@ -2525,6 +2537,8 @@ const (
PR_PAC_GET_ENABLED_KEYS = 0x3d
PR_PAC_RESET_KEYS = 0x36
PR_PAC_SET_ENABLED_KEYS = 0x3c
+ PR_PMLEN_MASK = 0x7f000000
+ PR_PMLEN_SHIFT = 0x18
PR_PPC_DEXCR_CTRL_CLEAR = 0x4
PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10
PR_PPC_DEXCR_CTRL_EDITABLE = 0x1
@@ -2592,6 +2606,7 @@ const (
PR_SET_PTRACER = 0x59616d61
PR_SET_SECCOMP = 0x16
PR_SET_SECUREBITS = 0x1c
+ PR_SET_SHADOW_STACK_STATUS = 0x4b
PR_SET_SPECULATION_CTRL = 0x35
PR_SET_SYSCALL_USER_DISPATCH = 0x3b
PR_SET_TAGGED_ADDR_CTRL = 0x37
@@ -2602,6 +2617,9 @@ const (
PR_SET_UNALIGN = 0x6
PR_SET_VMA = 0x53564d41
PR_SET_VMA_ANON_NAME = 0x0
+ PR_SHADOW_STACK_ENABLE = 0x1
+ PR_SHADOW_STACK_PUSH = 0x4
+ PR_SHADOW_STACK_WRITE = 0x2
PR_SME_GET_VL = 0x40
PR_SME_SET_VL = 0x3f
PR_SME_SET_VL_ONEXEC = 0x40000
@@ -2911,7 +2929,6 @@ const (
RTM_NEWNEXTHOP = 0x68
RTM_NEWNEXTHOPBUCKET = 0x74
RTM_NEWNSID = 0x58
- RTM_NEWNVLAN = 0x70
RTM_NEWPREFIX = 0x34
RTM_NEWQDISC = 0x24
RTM_NEWROUTE = 0x18
@@ -2920,6 +2937,7 @@ const (
RTM_NEWTCLASS = 0x28
RTM_NEWTFILTER = 0x2c
RTM_NEWTUNNEL = 0x78
+ RTM_NEWVLAN = 0x70
RTM_NR_FAMILIES = 0x1b
RTM_NR_MSGTYPES = 0x6c
RTM_SETDCB = 0x4f
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index c0d45e320505f..75207613c785d 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -116,6 +116,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -304,6 +306,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index c731d24f02529..c68acda53522d 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -116,6 +116,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -305,6 +307,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index 680018a4a7c9f..a8c607ab86b51 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -310,6 +312,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index a63909f308d6d..18563dd8d33a0 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -109,6 +109,7 @@ const (
F_SETOWN = 0x8
F_UNLCK = 0x2
F_WRLCK = 0x1
+ GCS_MAGIC = 0x47435300
HIDIOCGRAWINFO = 0x80084803
HIDIOCGRDESC = 0x90044802
HIDIOCGRDESCSIZE = 0x80044801
@@ -119,6 +120,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -302,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
index 9b0a2573fe3fb..22912cdaa9448 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
@@ -116,6 +116,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -297,6 +299,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index 958e6e0645acd..29344eb37ab55 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
+ IPV6_FLOWINFO_MASK = 0xfffffff
+ IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -303,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index 50c7f25bd16c6..20d51fb96a897 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
+ IPV6_FLOWINFO_MASK = 0xfffffff
+ IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -303,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index ced21d66d955a..321b60902ae5c 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -303,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index 226c044190235..9bacdf1e27910 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -303,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
index 3122737cd464f..c2242726156a9 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
+ IPV6_FLOWINFO_MASK = 0xfffffff
+ IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x80
IUCLC = 0x1000
IXOFF = 0x400
@@ -358,6 +360,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index eb5d3467edf0c..6270c8ee13e3f 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
+ IPV6_FLOWINFO_MASK = 0xfffffff
+ IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x80
IUCLC = 0x1000
IXOFF = 0x400
@@ -362,6 +364,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index e921ebc60b714..9966c1941f830 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x80
IUCLC = 0x1000
IXOFF = 0x400
@@ -362,6 +364,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index 38ba81c55c1fd..848e5fcc42e6f 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -294,6 +296,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index 71f0400977b36..669b2adb80b77 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
+ IPV6_FLOWINFO_MASK = 0xfffffff
+ IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -366,6 +368,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index c44a313322c54..4834e57514e44 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -119,6 +119,8 @@ const (
IN_CLOEXEC = 0x400000
IN_NONBLOCK = 0x4000
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
+ IPV6_FLOWINFO_MASK = 0xfffffff
+ IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -357,6 +359,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x38
SCM_TIMESTAMPING_PKTINFO = 0x3c
SCM_TIMESTAMPNS = 0x21
+ SCM_TS_OPT_ID = 0x5a
SCM_TXTIME = 0x3f
SCM_WIFI_STATUS = 0x25
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
index 829b87feb8da6..c6545413c45b4 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
@@ -141,6 +141,16 @@ import (
//go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so"
//go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so"
//go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so"
+//go:cgo_import_dynamic libc_getpeerucred getpeerucred "libc.so"
+//go:cgo_import_dynamic libc_ucred_get ucred_get "libc.so"
+//go:cgo_import_dynamic libc_ucred_geteuid ucred_geteuid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getegid ucred_getegid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getruid ucred_getruid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getrgid ucred_getrgid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getsuid ucred_getsuid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getsgid ucred_getsgid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getpid ucred_getpid "libc.so"
+//go:cgo_import_dynamic libc_ucred_free ucred_free "libc.so"
//go:cgo_import_dynamic libc_port_create port_create "libc.so"
//go:cgo_import_dynamic libc_port_associate port_associate "libc.so"
//go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so"
@@ -280,6 +290,16 @@ import (
//go:linkname procgetpeername libc_getpeername
//go:linkname procsetsockopt libc_setsockopt
//go:linkname procrecvfrom libc_recvfrom
+//go:linkname procgetpeerucred libc_getpeerucred
+//go:linkname procucred_get libc_ucred_get
+//go:linkname procucred_geteuid libc_ucred_geteuid
+//go:linkname procucred_getegid libc_ucred_getegid
+//go:linkname procucred_getruid libc_ucred_getruid
+//go:linkname procucred_getrgid libc_ucred_getrgid
+//go:linkname procucred_getsuid libc_ucred_getsuid
+//go:linkname procucred_getsgid libc_ucred_getsgid
+//go:linkname procucred_getpid libc_ucred_getpid
+//go:linkname procucred_free libc_ucred_free
//go:linkname procport_create libc_port_create
//go:linkname procport_associate libc_port_associate
//go:linkname procport_dissociate libc_port_dissociate
@@ -420,6 +440,16 @@ var (
procgetpeername,
procsetsockopt,
procrecvfrom,
+ procgetpeerucred,
+ procucred_get,
+ procucred_geteuid,
+ procucred_getegid,
+ procucred_getruid,
+ procucred_getrgid,
+ procucred_getsuid,
+ procucred_getsgid,
+ procucred_getpid,
+ procucred_free,
procport_create,
procport_associate,
procport_dissociate,
@@ -2029,6 +2059,90 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func getpeerucred(fd uintptr, ucred *uintptr) (err error) {
+ _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetpeerucred)), 2, uintptr(fd), uintptr(unsafe.Pointer(ucred)), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGet(pid int) (ucred uintptr, err error) {
+ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procucred_get)), 1, uintptr(pid), 0, 0, 0, 0, 0)
+ ucred = uintptr(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGeteuid(ucred uintptr) (uid int) {
+ r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_geteuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+ uid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetegid(ucred uintptr) (gid int) {
+ r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getegid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+ gid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetruid(ucred uintptr) (uid int) {
+ r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getruid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+ uid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetrgid(ucred uintptr) (gid int) {
+ r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getrgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+ gid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetsuid(ucred uintptr) (uid int) {
+ r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+ uid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetsgid(ucred uintptr) (gid int) {
+ r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+ gid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetpid(ucred uintptr) (pid int) {
+ r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getpid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+ pid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredFree(ucred uintptr) {
+ sysvicall6(uintptr(unsafe.Pointer(&procucred_free)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func port_create() (n int, err error) {
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0)
n = int(r0)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
index 524b0820cbc2e..c79aaff306ae3 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
@@ -458,4 +458,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
index f485dbf456567..5eb450695e95a 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
@@ -381,4 +381,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
index 70b35bf3b09f6..05e5029744586 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
@@ -422,4 +422,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
index 1893e2fe88404..38c53ec51bb3e 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
@@ -325,4 +325,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
index 16a4017da0ab2..31d2e71a18e17 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
@@ -321,4 +321,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
index 7e567f1efff21..f4184a336b0e0 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
@@ -442,4 +442,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 4460
SYS_LSM_LIST_MODULES = 4461
SYS_MSEAL = 4462
+ SYS_SETXATTRAT = 4463
+ SYS_GETXATTRAT = 4464
+ SYS_LISTXATTRAT = 4465
+ SYS_REMOVEXATTRAT = 4466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
index 38ae55e5ef856..05b9962278f27 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
@@ -372,4 +372,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 5460
SYS_LSM_LIST_MODULES = 5461
SYS_MSEAL = 5462
+ SYS_SETXATTRAT = 5463
+ SYS_GETXATTRAT = 5464
+ SYS_LISTXATTRAT = 5465
+ SYS_REMOVEXATTRAT = 5466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
index 55e92e60a82ab..43a256e9e6758 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
@@ -372,4 +372,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 5460
SYS_LSM_LIST_MODULES = 5461
SYS_MSEAL = 5462
+ SYS_SETXATTRAT = 5463
+ SYS_GETXATTRAT = 5464
+ SYS_LISTXATTRAT = 5465
+ SYS_REMOVEXATTRAT = 5466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
index 60658d6a021f6..eea5ddfc22077 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
@@ -442,4 +442,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 4460
SYS_LSM_LIST_MODULES = 4461
SYS_MSEAL = 4462
+ SYS_SETXATTRAT = 4463
+ SYS_GETXATTRAT = 4464
+ SYS_LISTXATTRAT = 4465
+ SYS_REMOVEXATTRAT = 4466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
index e203e8a7ed4b2..0d777bfbb1408 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
@@ -449,4 +449,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
index 5944b97d54604..b44636502561e 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
@@ -421,4 +421,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
index c66d416dad1cc..0c7d21c188165 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
@@ -421,4 +421,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
index a5459e766f59d..8405391698787 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
@@ -326,4 +326,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
index 01d86825bb926..fcf1b790d6cfd 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
@@ -387,4 +387,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
index 7b703e77cda84..52d15b5f9d459 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
@@ -400,4 +400,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 5537148dcbb3d..a46abe6472054 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -4747,7 +4747,7 @@ const (
NL80211_ATTR_MAC_HINT = 0xc8
NL80211_ATTR_MAC_MASK = 0xd7
NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca
- NL80211_ATTR_MAX = 0x14c
+ NL80211_ATTR_MAX = 0x14d
NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4
NL80211_ATTR_MAX_CSA_COUNTERS = 0xce
NL80211_ATTR_MAX_MATCH_SETS = 0x85
@@ -5519,7 +5519,7 @@ const (
NL80211_MNTR_FLAG_CONTROL = 0x3
NL80211_MNTR_FLAG_COOK_FRAMES = 0x5
NL80211_MNTR_FLAG_FCSFAIL = 0x1
- NL80211_MNTR_FLAG_MAX = 0x6
+ NL80211_MNTR_FLAG_MAX = 0x7
NL80211_MNTR_FLAG_OTHER_BSS = 0x4
NL80211_MNTR_FLAG_PLCPFAIL = 0x2
NL80211_MPATH_FLAG_ACTIVE = 0x1
@@ -6174,3 +6174,5 @@ type SockDiagReq struct {
Family uint8
Protocol uint8
}
+
+const RTM_NEWNVLAN = 0x70
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 92cceb83f10b3..c72c711d622c4 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1951,7 +1951,7 @@ golang.org/x/oauth2/jwt
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
golang.org/x/sync/singleflight
-# golang.org/x/sys v0.29.0
+# golang.org/x/sys v0.30.0
## explicit; go 1.18
golang.org/x/sys/cpu
golang.org/x/sys/plan9
|
fix
|
update module golang.org/x/sys to v0.30.0 (main) (#16087)
|
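The vendored golang.org/x/sys bump above is generated code, but two of the new constant families reward a closer look. The per-architecture IPV6_FLOWINFO_MASK / IPV6_FLOWLABEL_MASK values differ between big-endian targets (0xfffff) and little-endian ones (0xffff0f00) because the kernel exposes the flowinfo field in network byte order; applying the mask generated for the host architecture therefore extracts the 20-bit flow label directly. A minimal sketch, assuming a Linux target and x/sys v0.30.0 or newer; the flowinfo value is a placeholder:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// flowinfo as the kernel hands it back (network byte order); the
	// per-architecture mask accounts for host endianness, so a single
	// AND yields the IPv6 flow label bits.
	var flowinfo uint32 = 0x000b1e2d // placeholder value for illustration
	label := flowinfo & unix.IPV6_FLOWLABEL_MASK
	fmt.Printf("flow label bits: %#x\n", label)
}
```

The new SYS_*XATTRAT syscall numbers and the Solaris ucred wrappers follow the same pattern: generated per-platform tables that the package's higher-level helpers build on.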
f06eabbf0e2c3db3ec899c224d6c947c5edd7d6a
|
2024-06-21 15:26:26
|
Christian Haudum
|
feat: Add settings for cpu/mutex/block profiling options (#13278)
| false
|
diff --git a/cmd/loki/main.go b/cmd/loki/main.go
index 128efe6bc3690..bb839c6cf3ec8 100644
--- a/cmd/loki/main.go
+++ b/cmd/loki/main.go
@@ -106,6 +106,8 @@ func main() {
}()
}
+ setProfilingOptions(config.Profiling)
+
// Allocate a block of memory to reduce the frequency of garbage collection.
// The larger the ballast, the lower the garbage collection frequency.
// https://github.com/grafana/loki/issues/781
@@ -127,3 +129,15 @@ func main() {
err = t.Run(loki.RunOpts{StartTime: startTime})
util_log.CheckFatal("running loki", err, util_log.Logger)
}
+
+func setProfilingOptions(cfg loki.ProfilingConfig) {
+ if cfg.BlockProfileRate > 0 {
+ runtime.SetBlockProfileRate(cfg.BlockProfileRate)
+ }
+ if cfg.CPUProfileRate > 0 {
+ runtime.SetCPUProfileRate(cfg.CPUProfileRate)
+ }
+ if cfg.MutexProfileFraction > 0 {
+ runtime.SetMutexProfileFraction(cfg.MutexProfileFraction)
+ }
+}
diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md
index 00ccded5ef905..89d4615418459 100644
--- a/docs/sources/shared/configuration.md
+++ b/docs/sources/shared/configuration.md
@@ -567,6 +567,9 @@ compactor_grpc_client:
# Configuration for analytics.
[analytics: <analytics>]
+# Configuration for profiling options.
+[profiling: <profiling>]
+
# Common configuration to be shared between multiple modules. If a more specific
# configuration is given in other sections, the related configuration within
# this section will be ignored.
@@ -3850,6 +3853,24 @@ chunks:
[row_shards: <int> | default = 16]
```
+### profiling
+
+Configuration for `profiling` options.
+
+```yaml
+# Sets the value for runtime.SetBlockProfileRate
+# CLI flag: -profiling.block-profile-rate
+[block_profile_rate: <int> | default = 0]
+
+# Sets the value for runtime.SetCPUProfileRate
+# CLI flag: -profiling.cpu-profile-rate
+[cpu_profile_rate: <int> | default = 0]
+
+# Sets the value for runtime.SetMutexProfileFraction
+# CLI flag: -profiling.mutex-profile-fraction
+[mutex_profile_fraction: <int> | default = 0]
+```
+
### querier
Configures the `querier`. Only appropriate when running all modules or just the querier.
diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
index 68b210de4a772..0b2f2a3c91058 100644
--- a/pkg/loki/loki.go
+++ b/pkg/loki/loki.go
@@ -108,6 +108,7 @@ type Config struct {
OperationalConfig runtime.Config `yaml:"operational_config,omitempty"`
Tracing tracing.Config `yaml:"tracing"`
Analytics analytics.Config `yaml:"analytics"`
+ Profiling ProfilingConfig `yaml:"profiling,omitempty"`
LegacyReadTarget bool `yaml:"legacy_read_target,omitempty" doc:"hidden|deprecated"`
@@ -179,6 +180,7 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) {
c.QueryScheduler.RegisterFlags(f)
c.Analytics.RegisterFlags(f)
c.OperationalConfig.RegisterFlags(f)
+ c.Profiling.RegisterFlags(f)
}
func (c *Config) registerServerFlagsWithChangedDefaultValues(fs *flag.FlagSet) {
diff --git a/pkg/loki/profiling_config.go b/pkg/loki/profiling_config.go
new file mode 100644
index 0000000000000..30162f2b00bd0
--- /dev/null
+++ b/pkg/loki/profiling_config.go
@@ -0,0 +1,21 @@
+package loki
+
+import "flag"
+
+type ProfilingConfig struct {
+ BlockProfileRate int `yaml:"block_profile_rate"`
+ CPUProfileRate int `yaml:"cpu_profile_rate"`
+ MutexProfileFraction int `yaml:"mutex_profile_fraction"`
+}
+
+// RegisterFlags registers flags.
+func (c *ProfilingConfig) RegisterFlags(f *flag.FlagSet) {
+ c.RegisterFlagsWithPrefix("profiling.", f)
+}
+
+// RegisterFlagsWithPrefix registers flags with a common prefix.
+func (c *ProfilingConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
+ f.IntVar(&c.BlockProfileRate, prefix+"block-profile-rate", 0, "Sets the value for runtime.SetBlockProfileRate")
+ f.IntVar(&c.CPUProfileRate, prefix+"cpu-profile-rate", 0, "Sets the value for runtime.SetCPUProfileRate")
+ f.IntVar(&c.MutexProfileFraction, prefix+"mutex-profile-fraction", 0, "Sets the value for runtime.SetMutexProfileFraction")
+}
diff --git a/tools/doc-generator/parse/root_blocks.go b/tools/doc-generator/parse/root_blocks.go
index fa3da5ab28b71..85e81705848cc 100644
--- a/tools/doc-generator/parse/root_blocks.go
+++ b/tools/doc-generator/parse/root_blocks.go
@@ -23,6 +23,7 @@ import (
"github.com/grafana/loki/v3/pkg/ingester"
ingester_client "github.com/grafana/loki/v3/pkg/ingester/client"
"github.com/grafana/loki/v3/pkg/loghttp/push"
+ "github.com/grafana/loki/v3/pkg/loki"
"github.com/grafana/loki/v3/pkg/loki/common"
frontend "github.com/grafana/loki/v3/pkg/lokifrontend"
"github.com/grafana/loki/v3/pkg/querier"
@@ -168,6 +169,11 @@ var (
StructType: []reflect.Type{reflect.TypeOf(analytics.Config{})},
Desc: "Configuration for analytics.",
},
+ {
+ Name: "profiling",
+ StructType: []reflect.Type{reflect.TypeOf(loki.ProfilingConfig{})},
+ Desc: "Configuration for profiling options.",
+ },
{
Name: "common",
|
feat
|
Add settings for cpu/mutex/block profiling options (#13278)
|
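The ProfilingConfig added above is a thin pass-through to Go's runtime profiling knobs, applied at startup only when a value is non-zero. A minimal standalone sketch of what those three calls do, independent of Loki; the rates are illustrative, and /debug/pprof is served by net/http/pprof:

```go
package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof/* on the default mux
	"runtime"
)

func main() {
	// Counterparts of block_profile_rate, cpu_profile_rate and
	// mutex_profile_fraction from the config block above; values are
	// illustrative, and leaving them at zero keeps the defaults.
	runtime.SetBlockProfileRate(10000)  // one sample per 10000ns spent blocked
	runtime.SetCPUProfileRate(200)      // CPU samples per second while profiling
	runtime.SetMutexProfileFraction(10) // report ~1 in 10 mutex contention events

	log.Fatal(http.ListenAndServe(":6060", nil))
}
```

In Loki itself these take effect once at startup via setProfilingOptions, so any pprof endpoint exposed by the server picks them up without further wiring.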
af22f25b02ea253a3adfb6d65812f1c52f87ca02
|
2024-12-05 23:07:18
|
J Stickler
|
docs: Release notes for 3.2.2 (#15277)
| false
|
diff --git a/docs/sources/release-notes/v3-2.md b/docs/sources/release-notes/v3-2.md
index 89b0b48d2674d..c061ab2eae07c 100644
--- a/docs/sources/release-notes/v3-2.md
+++ b/docs/sources/release-notes/v3-2.md
@@ -63,6 +63,7 @@ For important upgrade guidance, refer to the [Upgrade Guide](https://grafana.com
- **BREAKING CHANGE - API:** Fail log queries when executed on instant query endpoint ([#13421](https://github.com/grafana/loki/issues/13421)).
- **BREAKING CHANGE - blooms:** Remove bloom compactor component ([#13969](https://github.com/grafana/loki/issues/13969)).
+- **BREAKING CHANGE - docker:** Remove wget from Promtail docker image([#15101](https://github.com/grafana/loki/issues/15101)).
- **BREAKING CHANGE - Helm:** Update Helm chart to support distributed mode and 3.0 ([#12067](https://github.com/grafana/loki/issues/12067)).
- **BREAKING CHANGE - Helm:** Fix how we set imagePullSecrets for enterprise-gateway and admin-api. ([#13761](https://github.com/grafana/loki/issues/13761)) ([3be5a45](https://github.com/grafana/loki/commit/3be5a4576fd0f0dca321e017a637f7a3159c00e5)).
- **BREAKING CHANGE - jsonnet:** Convert read statefulset into deployment for loki-simple-scalable ([#13977](https://github.com/grafana/loki/issues/13977)).
@@ -73,6 +74,14 @@ Out of an abundance of caution, we advise that users with Loki or Grafana Enterp
## Bug fixes
+### 3.2.2 (2024-12-04)
+
+- **BREAKING CHANGE - docker:** Remove wget from Promtail docker image([#15101](https://github.com/grafana/loki/issues/15101)).
+- **docker:** Move from base-nossl to static. This PR removes the inclusion of glibc into most of the Docker images created by the Loki build system. ([#15203](https://github.com/grafana/loki/issues/15203)).
+- **logql:** Updated JSONExpressionParser not to unescape extracted values if it is JSON object. ([#14499](https://github.com/grafana/loki/issues/14499)).
+- **promtail:** Switch Promtail base image from Debian to Ubuntu to fix critical security issues ([#15195](https://github.com/grafana/loki/issues/15195)).
+- **storage:** Have GetObject check for canceled context. S3ObjectClient.GetObject incorrectly returned nil, 0, nil when the provided context is already canceled ([#14420](https://github.com/grafana/loki/issues/14420)).
+
### 3.2.1 (2024-10-17)
- **config:** Copy Alibaba and IBM object storage configuration from common ([#14315](https://github.com/grafana/loki/issues/14315)) ([32a9bc0](https://github.com/grafana/loki/commit/32a9bc0ca852bdc692c2ccebbae448856e191953)).
|
docs
|
Release notes for 3.2.2 (#15277)
|
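Most of the 3.2.2 entries are narrative, but the GetObject note describes a concrete contract fix: a storage read must surface context cancellation as an error rather than returning an empty success. A minimal sketch of that pattern with hypothetical names, not Loki's actual S3ObjectClient code:

```go
package storagesketch

import (
	"context"
	"io"
)

// getObject sketches the contract from the release note: if the caller's
// context is already canceled, return ctx.Err() instead of falling through
// and returning nil, 0, nil. The fetch callback stands in for the real
// object-store call and is purely illustrative.
func getObject(ctx context.Context, fetch func() (io.ReadCloser, int64, error)) (io.ReadCloser, int64, error) {
	if err := ctx.Err(); err != nil {
		return nil, 0, err
	}
	return fetch()
}
```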
9617c06a70a5388d2d93b7a2902fa2f8d44d549e
|
2023-07-14 09:23:23
|
Periklis Tsirakidis
|
operator: Use PodAntiAffinity for all components (#9930)
| false
|
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index 7f7a7269c5c51..cb7fc2f3afed9 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,5 +1,7 @@
## Main
+- [9930](https://github.com/grafana/loki/pull/9930) **periklis**: Use PodAntiAffinity for all components
+- [9860](https://github.com/grafana/loki/pull/9860) **xperimental**: Fix update of labels and annotations of PodTemplates
- [9830](https://github.com/grafana/loki/pull/9830) **periklis**: Expose limits config setting cardinality_limit
- [9600](https://github.com/grafana/loki/pull/9600) **periklis**: Add rules labels filters for openshift-logging application tenant
- [9735](https://github.com/grafana/loki/pull/9735) **JoaoBraveCoding** Adjust 1x.extra-small resources according to findings
diff --git a/operator/internal/manifests/distributor.go b/operator/internal/manifests/distributor.go
index 5b929af9d1ca7..2d8e1ee7c02a1 100644
--- a/operator/internal/manifests/distributor.go
+++ b/operator/internal/manifests/distributor.go
@@ -65,8 +65,7 @@ func NewDistributorDeployment(opts Options) *appsv1.Deployment {
l := ComponentLabels(LabelDistributorComponent, opts.Name)
a := commonAnnotations(opts.ConfigSHA1, opts.CertRotationRequiredAt)
podSpec := corev1.PodSpec{
- Affinity: configureAffinity(LabelDistributorComponent, opts.Name, opts.Gates.DefaultNodeAffinity, opts.Stack.Template.Distributor),
- TopologySpreadConstraints: defaultTopologySpreadConstraints(LabelDistributorComponent, opts.Name),
+ Affinity: configureAffinity(LabelDistributorComponent, opts.Name, opts.Gates.DefaultNodeAffinity, opts.Stack.Template.Distributor),
Volumes: []corev1.Volume{
{
Name: configVolumeName,
diff --git a/operator/internal/manifests/distributor_test.go b/operator/internal/manifests/distributor_test.go
index 5371eba93ddc1..ec169fac5a09e 100644
--- a/operator/internal/manifests/distributor_test.go
+++ b/operator/internal/manifests/distributor_test.go
@@ -101,94 +101,53 @@ func TestBuildDistributor_PodDisruptionBudget(t *testing.T) {
}
func TestNewDistributorDeployment_TopologySpreadConstraints(t *testing.T) {
- for _, tc := range []struct {
- Name string
- Replication *lokiv1.ReplicationSpec
- ExpectedTopologySpreadContraint []corev1.TopologySpreadConstraint
- }{
- {
- Name: "default",
- ExpectedTopologySpreadContraint: []corev1.TopologySpreadConstraint{
- {
- MaxSkew: 1,
- TopologyKey: "kubernetes.io/hostname",
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: map[string]string{
- "app.kubernetes.io/component": "distributor",
- "app.kubernetes.io/instance": "abcd",
- },
- },
- WhenUnsatisfiable: corev1.ScheduleAnyway,
+ d := NewDistributorDeployment(Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Distributor: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
},
},
- },
- {
- Name: "replication_defined",
Replication: &lokiv1.ReplicationSpec{
Zones: []lokiv1.ZoneSpec{
{
TopologyKey: "zone",
- MaxSkew: 3,
+ MaxSkew: 2,
},
{
TopologyKey: "region",
- MaxSkew: 2,
+ MaxSkew: 1,
},
},
Factor: 1,
},
- ExpectedTopologySpreadContraint: []corev1.TopologySpreadConstraint{
- {
- MaxSkew: 1,
- TopologyKey: "kubernetes.io/hostname",
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: map[string]string{
- "app.kubernetes.io/component": "distributor",
- "app.kubernetes.io/instance": "abcd",
- },
- },
- WhenUnsatisfiable: corev1.ScheduleAnyway,
- },
- {
- MaxSkew: 3,
- TopologyKey: "zone",
- WhenUnsatisfiable: "DoNotSchedule",
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: map[string]string{
- "app.kubernetes.io/component": "distributor",
- "app.kubernetes.io/instance": "abcd",
- },
- },
- },
- {
- MaxSkew: 2,
- TopologyKey: "region",
- WhenUnsatisfiable: "DoNotSchedule",
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: map[string]string{
- "app.kubernetes.io/component": "distributor",
- "app.kubernetes.io/instance": "abcd",
- },
- },
+ },
+ })
+
+ require.Equal(t, []corev1.TopologySpreadConstraint{
+ {
+ MaxSkew: 2,
+ TopologyKey: "zone",
+ WhenUnsatisfiable: "DoNotSchedule",
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "app.kubernetes.io/component": "distributor",
+ "app.kubernetes.io/instance": "abcd",
},
},
},
- } {
- t.Run(tc.Name, func(t *testing.T) {
- depl := NewDistributorDeployment(Options{
- Name: "abcd",
- Namespace: "efgh",
- Stack: lokiv1.LokiStackSpec{
- Template: &lokiv1.LokiTemplateSpec{
- Distributor: &lokiv1.LokiComponentSpec{
- Replicas: 1,
- },
- },
- Replication: tc.Replication,
+ {
+ MaxSkew: 1,
+ TopologyKey: "region",
+ WhenUnsatisfiable: "DoNotSchedule",
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "app.kubernetes.io/component": "distributor",
+ "app.kubernetes.io/instance": "abcd",
},
- })
-
- require.Equal(t, tc.ExpectedTopologySpreadContraint, depl.Spec.Template.Spec.TopologySpreadConstraints)
- })
- }
+ },
+ },
+ }, d.Spec.Template.Spec.TopologySpreadConstraints)
}
diff --git a/operator/internal/manifests/gateway.go b/operator/internal/manifests/gateway.go
index 7cb8e79299eb9..76cd7268d6eeb 100644
--- a/operator/internal/manifests/gateway.go
+++ b/operator/internal/manifests/gateway.go
@@ -102,9 +102,8 @@ func NewGatewayDeployment(opts Options, sha1C string) *appsv1.Deployment {
l := ComponentLabels(LabelGatewayComponent, opts.Name)
a := commonAnnotations(sha1C, opts.CertRotationRequiredAt)
podSpec := corev1.PodSpec{
- ServiceAccountName: GatewayName(opts.Name),
- Affinity: configureAffinity(LabelGatewayComponent, opts.Name, opts.Gates.DefaultNodeAffinity, opts.Stack.Template.Gateway),
- TopologySpreadConstraints: defaultTopologySpreadConstraints(LabelGatewayComponent, opts.Name),
+ ServiceAccountName: GatewayName(opts.Name),
+ Affinity: configureAffinity(LabelGatewayComponent, opts.Name, opts.Gates.DefaultNodeAffinity, opts.Stack.Template.Gateway),
Volumes: []corev1.Volume{
{
Name: "rbac",
@@ -221,6 +220,10 @@ func NewGatewayDeployment(opts Options, sha1C string) *appsv1.Deployment {
podSpec.NodeSelector = opts.Stack.Template.Gateway.NodeSelector
}
+ if opts.Stack.Replication != nil {
+ podSpec.TopologySpreadConstraints = append(podSpec.TopologySpreadConstraints, topologySpreadConstraints(*opts.Stack.Replication, LabelGatewayComponent, opts.Name)...)
+ }
+
return &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
diff --git a/operator/internal/manifests/gateway_test.go b/operator/internal/manifests/gateway_test.go
index 95af2966f6f32..b2a49c2f53142 100644
--- a/operator/internal/manifests/gateway_test.go
+++ b/operator/internal/manifests/gateway_test.go
@@ -971,6 +971,19 @@ func TestBuildGateway_TopologySpreadConstraint(t *testing.T) {
LokiStackGateway: true,
},
Stack: lokiv1.LokiStackSpec{
+ Replication: &lokiv1.ReplicationSpec{
+ Zones: []lokiv1.ZoneSpec{
+ {
+ TopologyKey: "zone",
+ MaxSkew: 2,
+ },
+ {
+ TopologyKey: "region",
+ MaxSkew: 1,
+ },
+ },
+ Factor: 1,
+ },
Template: &lokiv1.LokiTemplateSpec{
Gateway: &lokiv1.LokiComponentSpec{
Replicas: rand.Int31(),
@@ -985,15 +998,26 @@ func TestBuildGateway_TopologySpreadConstraint(t *testing.T) {
require.EqualValues(t, dpl.Spec.Template.Spec.TopologySpreadConstraints, []corev1.TopologySpreadConstraint{
{
- MaxSkew: 1,
- TopologyKey: kubernetesNodeHostnameLabel,
+ MaxSkew: 2,
+ TopologyKey: "zone",
+ WhenUnsatisfiable: "DoNotSchedule",
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "app.kubernetes.io/component": "lokistack-gateway",
+ "app.kubernetes.io/instance": "abcd",
+ },
+ },
+ },
+ {
+ MaxSkew: 1,
+ TopologyKey: "region",
+ WhenUnsatisfiable: "DoNotSchedule",
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app.kubernetes.io/component": "lokistack-gateway",
"app.kubernetes.io/instance": "abcd",
},
},
- WhenUnsatisfiable: corev1.ScheduleAnyway,
},
})
}
diff --git a/operator/internal/manifests/node_placement_test.go b/operator/internal/manifests/node_placement_test.go
index e10c4a379ac17..085c565e1600b 100644
--- a/operator/internal/manifests/node_placement_test.go
+++ b/operator/internal/manifests/node_placement_test.go
@@ -355,37 +355,63 @@ func TestDefaultNodeAffinityForEachComponent(t *testing.T) {
})
}
-func TestPodAntiAffinityForEachComponent(t *testing.T) {
- paTerm := []corev1.WeightedPodAffinityTerm{
- {
- Weight: 100,
- PodAffinityTerm: corev1.PodAffinityTerm{
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: map[string]string{
- "foo": "bar",
- },
- },
- TopologyKey: "foo",
- },
+var podAntiAffinityTestTable = []struct {
+ component string
+ generator func(Options) *corev1.Affinity
+}{
+ {
+ component: "lokistack-gateway",
+ generator: func(opts Options) *corev1.Affinity {
+ return NewGatewayDeployment(opts, "").Spec.Template.Spec.Affinity
},
- }
- expectedPATerm := &corev1.PodAntiAffinity{
- PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
- {
- Weight: 100,
- PodAffinityTerm: corev1.PodAffinityTerm{
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: map[string]string{
- "foo": "bar",
- },
- },
- TopologyKey: "foo",
- },
- },
+ },
+ {
+ component: "distributor",
+ generator: func(opts Options) *corev1.Affinity {
+ return NewDistributorDeployment(opts).Spec.Template.Spec.Affinity
},
- }
- optsWithNoPodAntiAffinity := Options{
- // We need to set name here to propperly validate default PodAntiAffinity
+ },
+ {
+ component: "query-frontend",
+ generator: func(opts Options) *corev1.Affinity {
+ return NewQueryFrontendDeployment(opts).Spec.Template.Spec.Affinity
+ },
+ },
+ {
+ component: "querier",
+ generator: func(opts Options) *corev1.Affinity {
+ return NewQuerierDeployment(opts).Spec.Template.Spec.Affinity
+ },
+ },
+ {
+ component: "ingester",
+ generator: func(opts Options) *corev1.Affinity {
+ return NewIngesterStatefulSet(opts).Spec.Template.Spec.Affinity
+ },
+ },
+ {
+ component: "compactor",
+ generator: func(opts Options) *corev1.Affinity {
+ return NewCompactorStatefulSet(opts).Spec.Template.Spec.Affinity
+ },
+ },
+ {
+ component: "index-gateway",
+ generator: func(opts Options) *corev1.Affinity {
+ return NewIndexGatewayStatefulSet(opts).Spec.Template.Spec.Affinity
+ },
+ },
+ {
+ component: "ruler",
+ generator: func(opts Options) *corev1.Affinity {
+ return NewRulerStatefulSet(opts).Spec.Template.Spec.Affinity
+ },
+ },
+}
+
+func TestDefaultPodAntiAffinity(t *testing.T) {
+ opts := Options{
+ // We need to set name here to properly validate default PodAntiAffinity
Name: "abcd",
Stack: lokiv1.LokiStackSpec{
Template: &lokiv1.LokiTemplateSpec{
@@ -395,6 +421,9 @@ func TestPodAntiAffinityForEachComponent(t *testing.T) {
Distributor: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
+ Gateway: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
Ingester: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
@@ -413,127 +442,85 @@ func TestPodAntiAffinityForEachComponent(t *testing.T) {
},
},
}
- optsWithPodAntiAffinity := Options{
+
+ for _, tc := range podAntiAffinityTestTable {
+ tc := tc
+ t.Run(tc.component, func(t *testing.T) {
+ t.Parallel()
+
+ wantAffinity := defaultPodAntiAffinity(tc.component, "abcd")
+
+ affinity := tc.generator(opts)
+ assert.Equal(t, wantAffinity, affinity.PodAntiAffinity)
+ })
+ }
+}
+
+func TestCustomPodAntiAffinity(t *testing.T) {
+ paTerm := []corev1.WeightedPodAffinityTerm{
+ {
+ Weight: 100,
+ PodAffinityTerm: corev1.PodAffinityTerm{
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "foo": "bar",
+ },
+ },
+ TopologyKey: "foo",
+ },
+ },
+ }
+
+ wantAffinity := &corev1.PodAntiAffinity{
+ PreferredDuringSchedulingIgnoredDuringExecution: paTerm,
+ }
+
+ opts := Options{
Stack: lokiv1.LokiStackSpec{
Template: &lokiv1.LokiTemplateSpec{
Compactor: &lokiv1.LokiComponentSpec{
- Replicas: 1,
- PodAntiAffinity: &corev1.PodAntiAffinity{
- PreferredDuringSchedulingIgnoredDuringExecution: paTerm,
- },
+ Replicas: 1,
+ PodAntiAffinity: wantAffinity,
},
Distributor: &lokiv1.LokiComponentSpec{
- Replicas: 1,
- PodAntiAffinity: &corev1.PodAntiAffinity{
- PreferredDuringSchedulingIgnoredDuringExecution: paTerm,
- },
+ Replicas: 1,
+ PodAntiAffinity: wantAffinity,
+ },
+ Gateway: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ PodAntiAffinity: wantAffinity,
},
Ingester: &lokiv1.LokiComponentSpec{
- Replicas: 1,
- PodAntiAffinity: &corev1.PodAntiAffinity{
- PreferredDuringSchedulingIgnoredDuringExecution: paTerm,
- },
+ Replicas: 1,
+ PodAntiAffinity: wantAffinity,
},
Querier: &lokiv1.LokiComponentSpec{
- Replicas: 1,
- PodAntiAffinity: &corev1.PodAntiAffinity{
- PreferredDuringSchedulingIgnoredDuringExecution: paTerm,
- },
+ Replicas: 1,
+ PodAntiAffinity: wantAffinity,
},
QueryFrontend: &lokiv1.LokiComponentSpec{
- Replicas: 1,
- PodAntiAffinity: &corev1.PodAntiAffinity{
- PreferredDuringSchedulingIgnoredDuringExecution: paTerm,
- },
+ Replicas: 1,
+ PodAntiAffinity: wantAffinity,
},
IndexGateway: &lokiv1.LokiComponentSpec{
- Replicas: 1,
- PodAntiAffinity: &corev1.PodAntiAffinity{
- PreferredDuringSchedulingIgnoredDuringExecution: paTerm,
- },
+ Replicas: 1,
+ PodAntiAffinity: wantAffinity,
},
Ruler: &lokiv1.LokiComponentSpec{
- Replicas: 1,
- PodAntiAffinity: &corev1.PodAntiAffinity{
- PreferredDuringSchedulingIgnoredDuringExecution: paTerm,
- },
+ Replicas: 1,
+ PodAntiAffinity: wantAffinity,
},
},
},
}
- t.Run("distributor", func(t *testing.T) {
- assert.Equal(t, expectedPATerm, NewDistributorDeployment(optsWithPodAntiAffinity).Spec.Template.Spec.Affinity.PodAntiAffinity)
- affinity := NewDistributorDeployment(optsWithNoPodAntiAffinity).Spec.Template.Spec.Affinity
- if affinity != nil {
- assert.Empty(t, affinity.PodAntiAffinity)
- }
- })
-
- t.Run("query_frontend", func(t *testing.T) {
- assert.Equal(t, expectedPATerm, NewQueryFrontendDeployment(optsWithPodAntiAffinity).Spec.Template.Spec.Affinity.PodAntiAffinity)
- affinity := NewQueryFrontendDeployment(optsWithNoPodAntiAffinity).Spec.Template.Spec.Affinity
- if affinity != nil {
- assert.Equal(t, expectedDefaultPodAntiAffinity("query-frontend"), affinity.PodAntiAffinity)
- }
- })
-
- t.Run("querier", func(t *testing.T) {
- assert.Equal(t, expectedPATerm, NewQuerierDeployment(optsWithPodAntiAffinity).Spec.Template.Spec.Affinity.PodAntiAffinity)
- affinity := NewQuerierDeployment(optsWithNoPodAntiAffinity).Spec.Template.Spec.Affinity
- if affinity != nil {
- assert.Empty(t, affinity.PodAntiAffinity)
- }
- })
-
- t.Run("ingester", func(t *testing.T) {
- assert.Equal(t, expectedPATerm, NewIngesterStatefulSet(optsWithPodAntiAffinity).Spec.Template.Spec.Affinity.PodAntiAffinity)
- affinity := NewIngesterStatefulSet(optsWithNoPodAntiAffinity).Spec.Template.Spec.Affinity
- if affinity != nil {
- assert.Equal(t, expectedDefaultPodAntiAffinity("ingester"), affinity.PodAntiAffinity)
- }
- })
-
- t.Run("compactor", func(t *testing.T) {
- assert.Equal(t, expectedPATerm, NewCompactorStatefulSet(optsWithPodAntiAffinity).Spec.Template.Spec.Affinity.PodAntiAffinity)
- affinity := NewCompactorStatefulSet(optsWithNoPodAntiAffinity).Spec.Template.Spec.Affinity
- if affinity != nil {
- assert.Empty(t, affinity.PodAntiAffinity)
- }
- })
-
- t.Run("index_gateway", func(t *testing.T) {
- assert.Equal(t, expectedPATerm, NewIndexGatewayStatefulSet(optsWithPodAntiAffinity).Spec.Template.Spec.Affinity.PodAntiAffinity)
- affinity := NewIndexGatewayStatefulSet(optsWithNoPodAntiAffinity).Spec.Template.Spec.Affinity
- if affinity != nil {
- assert.Empty(t, affinity.PodAntiAffinity)
- }
- })
-
- t.Run("ruler", func(t *testing.T) {
- assert.Equal(t, expectedPATerm, NewRulerStatefulSet(optsWithPodAntiAffinity).Spec.Template.Spec.Affinity.PodAntiAffinity)
- affinity := NewRulerStatefulSet(optsWithNoPodAntiAffinity).Spec.Template.Spec.Affinity
- if affinity != nil {
- assert.Equal(t, expectedDefaultPodAntiAffinity("ruler"), affinity.PodAntiAffinity)
- }
- })
-}
+ for _, tc := range podAntiAffinityTestTable {
+ tc := tc
+ t.Run(tc.component, func(t *testing.T) {
+ t.Parallel()
-func expectedDefaultPodAntiAffinity(component string) *corev1.PodAntiAffinity {
- return &corev1.PodAntiAffinity{
- PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
- {
- Weight: 100,
- PodAffinityTerm: corev1.PodAffinityTerm{
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: map[string]string{
- "app.kubernetes.io/instance": "abcd",
- "app.kubernetes.io/component": component,
- },
- },
- TopologyKey: "kubernetes.io/hostname",
- },
- },
- },
+ affinity := tc.generator(opts)
+ assert.Equal(t, wantAffinity, affinity.PodAntiAffinity)
+ })
}
}
diff --git a/operator/internal/manifests/querier.go b/operator/internal/manifests/querier.go
index d5c9380ec19ef..33015218acdcf 100644
--- a/operator/internal/manifests/querier.go
+++ b/operator/internal/manifests/querier.go
@@ -71,8 +71,7 @@ func NewQuerierDeployment(opts Options) *appsv1.Deployment {
l := ComponentLabels(LabelQuerierComponent, opts.Name)
a := commonAnnotations(opts.ConfigSHA1, opts.CertRotationRequiredAt)
podSpec := corev1.PodSpec{
- Affinity: configureAffinity(LabelQuerierComponent, opts.Name, opts.Gates.DefaultNodeAffinity, opts.Stack.Template.Querier),
- TopologySpreadConstraints: defaultTopologySpreadConstraints(LabelQuerierComponent, opts.Name),
+ Affinity: configureAffinity(LabelQuerierComponent, opts.Name, opts.Gates.DefaultNodeAffinity, opts.Stack.Template.Querier),
Volumes: []corev1.Volume{
{
Name: configVolumeName,
diff --git a/operator/internal/manifests/querier_test.go b/operator/internal/manifests/querier_test.go
index 104cd4460fa6b..3a3ca656ea2b8 100644
--- a/operator/internal/manifests/querier_test.go
+++ b/operator/internal/manifests/querier_test.go
@@ -186,94 +186,53 @@ func TestBuildQuerier_PodDisruptionBudget(t *testing.T) {
}
func TestNewQuerierDeployment_TopologySpreadConstraints(t *testing.T) {
- for _, tc := range []struct {
- Name string
- Replication *lokiv1.ReplicationSpec
- ExpectedTopologySpreadContraint []corev1.TopologySpreadConstraint
- }{
- {
- Name: "default",
- ExpectedTopologySpreadContraint: []corev1.TopologySpreadConstraint{
- {
- MaxSkew: 1,
- TopologyKey: "kubernetes.io/hostname",
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: map[string]string{
- "app.kubernetes.io/component": "querier",
- "app.kubernetes.io/instance": "abcd",
- },
- },
- WhenUnsatisfiable: corev1.ScheduleAnyway,
+ d := NewQuerierDeployment(Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Querier: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
},
},
- },
- {
- Name: "replication_defined",
Replication: &lokiv1.ReplicationSpec{
Zones: []lokiv1.ZoneSpec{
{
TopologyKey: "zone",
- MaxSkew: 3,
+ MaxSkew: 2,
},
{
TopologyKey: "region",
- MaxSkew: 2,
+ MaxSkew: 1,
},
},
Factor: 1,
},
- ExpectedTopologySpreadContraint: []corev1.TopologySpreadConstraint{
- {
- MaxSkew: 1,
- TopologyKey: "kubernetes.io/hostname",
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: map[string]string{
- "app.kubernetes.io/component": "querier",
- "app.kubernetes.io/instance": "abcd",
- },
- },
- WhenUnsatisfiable: corev1.ScheduleAnyway,
- },
- {
- MaxSkew: 3,
- TopologyKey: "zone",
- WhenUnsatisfiable: "DoNotSchedule",
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: map[string]string{
- "app.kubernetes.io/component": "querier",
- "app.kubernetes.io/instance": "abcd",
- },
- },
- },
- {
- MaxSkew: 2,
- TopologyKey: "region",
- WhenUnsatisfiable: "DoNotSchedule",
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: map[string]string{
- "app.kubernetes.io/component": "querier",
- "app.kubernetes.io/instance": "abcd",
- },
- },
+ },
+ })
+
+ require.Equal(t, []corev1.TopologySpreadConstraint{
+ {
+ MaxSkew: 2,
+ TopologyKey: "zone",
+ WhenUnsatisfiable: "DoNotSchedule",
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "app.kubernetes.io/component": "querier",
+ "app.kubernetes.io/instance": "abcd",
},
},
},
- } {
- t.Run(tc.Name, func(t *testing.T) {
- depl := NewQuerierDeployment(Options{
- Name: "abcd",
- Namespace: "efgh",
- Stack: lokiv1.LokiStackSpec{
- Template: &lokiv1.LokiTemplateSpec{
- Querier: &lokiv1.LokiComponentSpec{
- Replicas: 1,
- },
- },
- Replication: tc.Replication,
+ {
+ MaxSkew: 1,
+ TopologyKey: "region",
+ WhenUnsatisfiable: "DoNotSchedule",
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "app.kubernetes.io/component": "querier",
+ "app.kubernetes.io/instance": "abcd",
},
- })
-
- require.Equal(t, tc.ExpectedTopologySpreadContraint, depl.Spec.Template.Spec.TopologySpreadConstraints)
- })
- }
+ },
+ },
+ }, d.Spec.Template.Spec.TopologySpreadConstraints)
}
diff --git a/operator/internal/manifests/var.go b/operator/internal/manifests/var.go
index b0aa732d824d0..2a5552d61f751 100644
--- a/operator/internal/manifests/var.go
+++ b/operator/internal/manifests/var.go
@@ -123,13 +123,8 @@ const (
var (
defaultTimeoutConfig = calculateHTTPTimeouts(lokiDefaultQueryTimeout)
- defaultConfigMapMode = int32(420)
- volumeFileSystemMode = corev1.PersistentVolumeFilesystem
- podAntiAffinityComponents = map[string]struct{}{
- LabelIngesterComponent: {},
- LabelRulerComponent: {},
- LabelQueryFrontendComponent: {},
- }
+ defaultConfigMapMode = int32(420)
+ volumeFileSystemMode = corev1.PersistentVolumeFilesystem
)
func commonAnnotations(configHash, rotationRequiredAt string) map[string]string {
@@ -155,19 +150,6 @@ func componentInstaceLabels(component string, stackName string) map[string]strin
}
}
-// defaultTopologySpreadConstraints returns a topology spread contraint that will
-// instruct the scheduler to try and schedule pods from the same component in different nodes
-func defaultTopologySpreadConstraints(component string, stackName string) []corev1.TopologySpreadConstraint {
- return []corev1.TopologySpreadConstraint{{
- MaxSkew: 1,
- TopologyKey: kubernetesNodeHostnameLabel,
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: componentInstaceLabels(component, stackName),
- },
- WhenUnsatisfiable: corev1.ScheduleAnyway,
- }}
-}
-
func serviceAnnotations(serviceName string, enableSigningService bool) map[string]string {
annotations := map[string]string{}
if enableSigningService {
@@ -532,10 +514,6 @@ func configureAffinity(componentLabel, stackName string, enableNodeAffinity bool
if cSpec.PodAntiAffinity != nil {
affinity.PodAntiAffinity = cSpec.PodAntiAffinity
}
-
- if affinity.NodeAffinity == nil && affinity.PodAntiAffinity == nil {
- return nil
- }
return affinity
}
@@ -567,11 +545,6 @@ func defaultNodeAffinity(enableNodeAffinity bool) *corev1.NodeAffinity {
// defaultPodAntiAffinity for components in podAntiAffinityComponents will
// configure pods, of a LokiStack, to preferably not run on the same node
func defaultPodAntiAffinity(componentLabel, stackName string) *corev1.PodAntiAffinity {
- _, enablePodAntiAffinity := podAntiAffinityComponents[componentLabel]
- if !enablePodAntiAffinity {
- return nil
- }
-
return &corev1.PodAntiAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
{
|
operator
|
Use PodAntiAffinity for all components (#9930)
|
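The operator change above drops the per-component allowlist and the hostname topology-spread default, leaving pod anti-affinity as the single default mechanism for every component. The shape it renders can be reconstructed from the test expectations removed in the diff; a sketch with example inputs, not the operator's exact source:

```go
package manifestsketch

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// defaultPodAntiAffinitySketch mirrors the affinity the operator now applies
// to all components: a soft (preferred) rule steering replicas of the same
// component and LokiStack instance onto different nodes.
func defaultPodAntiAffinitySketch(component, stackName string) *corev1.PodAntiAffinity {
	return &corev1.PodAntiAffinity{
		PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{{
			Weight: 100,
			PodAffinityTerm: corev1.PodAffinityTerm{
				LabelSelector: &metav1.LabelSelector{
					MatchLabels: map[string]string{
						"app.kubernetes.io/instance":  stackName,
						"app.kubernetes.io/component": component,
					},
				},
				TopologyKey: "kubernetes.io/hostname", // spread across nodes, best effort
			},
		}},
	}
}
```

Because the rule is preferred rather than required, the scheduler may still co-locate pods when no spare nodes exist, which is why it can safely replace the ScheduleAnyway topology-spread constraint removed above.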
611cb01b70bcd52ec16be087b27d77ffac52e88a
|
2024-03-22 23:28:21
|
Callum Styan
|
fix: quantile sharding; fix bug where sharding would always happen (#12283)
| false
|
diff --git a/pkg/logql/downstream_test.go b/pkg/logql/downstream_test.go
index 41b46024bb2e5..b3b8c6f37e48a 100644
--- a/pkg/logql/downstream_test.go
+++ b/pkg/logql/downstream_test.go
@@ -97,6 +97,11 @@ func TestMappingEquivalence(t *testing.T) {
ctx := user.InjectOrgID(context.Background(), "fake")
mapper := NewShardMapper(ConstantShards(shards), nilShardMetrics, []string{})
+ // TODO (callum) refactor this test so that we won't need to set every
+ // possible sharding config option to true when we have multiple in the future
+ if tc.approximate {
+ mapper.quantileOverTimeSharding = true
+ }
_, _, mapped, err := mapper.Parse(params.GetExpression())
require.NoError(t, err)
diff --git a/pkg/logql/shardmapper.go b/pkg/logql/shardmapper.go
index e8d78a438c9bb..28918ab1b7419 100644
--- a/pkg/logql/shardmapper.go
+++ b/pkg/logql/shardmapper.go
@@ -451,6 +451,10 @@ func (m ShardMapper) mapRangeAggregationExpr(expr *syntax.RangeAggregationExpr,
}, bytesPerShard, nil
case syntax.OpRangeTypeQuantile:
+ if !m.quantileOverTimeSharding {
+ return noOp(expr, m.shards)
+ }
+
potentialConflict := syntax.ReducesLabels(expr)
if !potentialConflict && (expr.Grouping == nil || expr.Grouping.Noop()) {
return m.mapSampleExpr(expr, r)
@@ -460,7 +464,7 @@ func (m ShardMapper) mapRangeAggregationExpr(expr *syntax.RangeAggregationExpr,
if err != nil {
return nil, 0, err
}
- if shards == 0 || !m.quantileOverTimeSharding {
+ if shards == 0 {
return noOp(expr, m.shards)
}
diff --git a/pkg/logql/shardmapper_test.go b/pkg/logql/shardmapper_test.go
index 0e345291eed3b..f92ef909e8b85 100644
--- a/pkg/logql/shardmapper_test.go
+++ b/pkg/logql/shardmapper_test.go
@@ -363,6 +363,10 @@ func TestMappingStrings(t *testing.T) {
)
)`,
},
+ { // This should result in the same downstream sharding of the max and not the inner quantile regardless of whether quantile sharding is turned on.
+ in: `max by (status)(quantile_over_time(0.70, {a=~".+"} | logfmt | unwrap value [1s]))`,
+ out: `maxby(status)(downstream<maxby(status)(quantile_over_time(0.7,{a=~".+"}|logfmt|unwrapvalue[1s])),shard=0_of_2>++downstream<maxby(status)(quantile_over_time(0.7,{a=~".+"}|logfmt|unwrapvalue[1s])),shard=1_of_2>)`,
+ },
// should be noop if VectorExpr
{
in: `vector(0)`,
@@ -417,6 +421,35 @@ func TestMappingStrings(t *testing.T) {
}
}
+// Test that mapping of queries for operation types that have probabilistic
+// sharding options, but whose sharding is turned off, are not sharded on those operations.
+func TestMappingStrings_NoProbabilisticSharding(t *testing.T) {
+ m := NewShardMapper(ConstantShards(2), nilShardMetrics, []string{})
+ for _, tc := range []struct {
+ in string
+ out string
+ }{
+ { // This should result in the same downstream sharding of the max and not the inner quantile regardless of whether quantile sharding is turned on.
+ in: `max by (status)(quantile_over_time(0.70, {a=~".+"} | logfmt | unwrap value [1s]))`,
+ out: `maxby(status)(downstream<maxby(status)(quantile_over_time(0.7,{a=~".+"}|logfmt|unwrapvalue[1s])),shard=0_of_2>++downstream<maxby(status)(quantile_over_time(0.7,{a=~".+"}|logfmt|unwrapvalue[1s])),shard=1_of_2>)`,
+ },
+ {
+ in: `quantile_over_time(0.70, {a=~".+"} | logfmt | unwrap value [1s])`,
+ out: `quantile_over_time(0.7,{a=~".+"}|logfmt|unwrapvalue[1s])`,
+ },
+ } {
+ t.Run(tc.in, func(t *testing.T) {
+ ast, err := syntax.ParseExpr(tc.in)
+ require.Nil(t, err)
+
+ mapped, _, err := m.Map(ast, nilShardMetrics.downstreamRecorder())
+ require.Nil(t, err)
+
+ require.Equal(t, removeWhiteSpace(tc.out), removeWhiteSpace(mapped.String()))
+ })
+ }
+}
+
func TestMapping(t *testing.T) {
m := NewShardMapper(ConstantShards(2), nilShardMetrics, []string{})
|
fix
|
quantile sharding; fix bug where sharding would always happen (#12283)
|
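The fix above is a guard-clause move: the old code checked quantileOverTimeSharding only on the fallback branch, after the potentialConflict fast path had already returned a sharded mapping, so quantile_over_time was sharded even with the feature disabled. Hoisting the check to the top of the case covers both branches. An abstract, runnable sketch of the fixed flow; the function name and return strings are illustrative, not Loki's API:

```go
package main

import "fmt"

// mapQuantile sketches the fixed control flow from shardmapper.go: the
// feature flag is now checked before any branch can shard the expression.
func mapQuantile(shardingEnabled, labelConflict bool, shards int) string {
	if !shardingEnabled {
		return "noop" // the fix: previously the fast path below ran even when disabled
	}
	if !labelConflict {
		return "sharded (fast path)"
	}
	if shards == 0 {
		return "noop"
	}
	return "sharded (probabilistic)"
}

func main() {
	// The buggy version printed "sharded (fast path)" for this input.
	fmt.Println(mapQuantile(false, false, 2)) // noop
}
```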