Dataset columns:

  hash                   string  (40 characters; commit SHA-1)
  date                   string  (datetime, 2018-12-11 14:31:19 to 2025-03-22 02:45:31)
  author                 string  (280 distinct values)
  commit_message         string  (14 to 176 characters)
  is_merge               bool    (1 distinct value)
  git_diff               string  (198 characters to 25.8M)
  type                   string  (83 distinct values)
  masked_commit_message  string  (8 to 170 characters)
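
The column summary above is the dataset viewer's own statistics. To peek at rows programmatically without downloading the whole dataset, one option is the Hub's datasets-server REST API — a minimal sketch in Go, assuming the dataset is public on the Hub; the ORG/DATASET id and the default config / train split are placeholders, not the real names:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// "ORG/DATASET" is a hypothetical id — substitute the dataset's real Hub repository.
	url := "https://datasets-server.huggingface.co/rows" +
		"?dataset=ORG%2FDATASET&config=default&split=train&offset=0&length=1"

	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// At the time of writing, /rows wraps each record as {"rows": [{"row": {...}}, ...]}.
	var payload struct {
		Rows []struct {
			Row map[string]any `json:"row"`
		} `json:"rows"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil {
		panic(err)
	}

	for _, r := range payload.Rows {
		// A single git_diff value can reach 25.8M characters, so print only the small fields.
		fmt.Println(r.Row["hash"], r.Row["date"], r.Row["author"])
		fmt.Println(r.Row["commit_message"])
	}
}

Paging with offset/length keeps memory bounded even for the largest diffs.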
Example row:

  hash:            bba6c14459bea3e5ef6907c30e9f4341f6365d7d
  date:            2022-08-31 02:33:19
  author:          Dylan Guedes
  commit_message:  loki: Modify ingesters to return rate-limited streams to distributors (#6977)
  is_merge:        false
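
What this commit does: the ingester stops returning a bare httpgrpc error from stream.Push and instead has instance.Push build a gRPC status whose details carry one logproto.RateLimitedStream per rate-limited stream, so the distributor can see exactly which streams were limited. A condensed, hypothetical sketch of the producer side (simplified from errorForFailedEntries in the diff below — rateLimitedError is our name, not Loki's):

package ingestersketch

import (
	"net/http"

	spb "github.com/gogo/googleapis/google/rpc"
	"github.com/gogo/protobuf/types"
	"github.com/gogo/status"

	"github.com/grafana/loki/pkg/logproto"
)

// rateLimitedError builds a gRPC status carrying one RateLimitedStream
// detail per limited stream, as the commit's errorForFailedEntries does.
func rateLimitedError(message string, limitedStreamLabels []string) error {
	var details []*types.Any
	for _, labels := range limitedStreamLabels {
		detail, err := types.MarshalAny(&logproto.RateLimitedStream{Labels: labels})
		if err != nil {
			continue // the real code logs the marshalling failure and drops the detail
		}
		details = append(details, detail)
	}
	return status.ErrorProto(&spb.Status{
		Code:    http.StatusTooManyRequests, // 429, as in the commit
		Message: message,
		Details: details,
	})
}

Status details are the standard gRPC way to attach typed payloads to an error; the commit reuses the gogo googleapis rpc.Status type for this.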
git_diff:

diff --git a/go.mod b/go.mod
index cd0948025d588..63da30a9cc8c9 100644
--- a/go.mod
+++ b/go.mod
@@ -113,6 +113,7 @@ require (
 )
 
 require (
+	github.com/gogo/googleapis v1.4.0
 	github.com/grafana/groupcache_exporter v0.0.0-20220629095919-59a8c6428a43
 	github.com/heroku/x v0.0.50
 	github.com/mailgun/groupcache/v2 v2.3.2
@@ -179,7 +180,6 @@ require (
 	github.com/go-stack/stack v1.8.1 // indirect
 	github.com/go-zookeeper/zk v1.0.2 // indirect
 	github.com/gofrs/flock v0.7.1 // indirect
-	github.com/gogo/googleapis v1.4.0 // indirect
 	github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/google/btree v1.0.1 // indirect
diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go
index 6549891426ed8..92a16f35da661 100644
--- a/pkg/ingester/ingester_test.go
+++ b/pkg/ingester/ingester_test.go
@@ -10,6 +10,8 @@ import (
 	"testing"
 	"time"
 
+	"github.com/gogo/protobuf/types"
+	"github.com/gogo/status"
 	"github.com/grafana/dskit/flagext"
 	"github.com/grafana/dskit/services"
 	"github.com/prometheus/common/model"
@@ -19,6 +21,7 @@ import (
 	"github.com/weaveworks/common/middleware"
 	"github.com/weaveworks/common/user"
 	"golang.org/x/net/context"
+	"golang.org/x/time/rate"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials/insecure"
 	"google.golang.org/grpc/metadata"
@@ -40,6 +43,68 @@ import (
 	"github.com/grafana/loki/pkg/validation"
 )
 
+func TestRateLimitedStreamsReturn(t *testing.T) {
+	// setup.
+	ingesterConfig := defaultIngesterTestConfig(t)
+	limits := defaultLimitsTestConfig()
+	limits.PerStreamRateLimit.Set("1") //nolint:errcheck
+	limits.IngestionBurstSizeMB = 1
+	limits.PerStreamRateLimitBurst.Set("1") //nolint:errcheck
+	overrides, err := validation.NewOverrides(limits, nil)
+	limit := overrides.PerStreamRateLimit("test")
+	require.Equal(t, limit.Limit, rate.Limit(1))
+	require.NoError(t, err)
+
+	store := &mockStore{
+		chunks: map[string][]chunk.Chunk{},
+	}
+
+	i, err := New(ingesterConfig, client.Config{}, store, overrides, runtime.DefaultTenantConfigs(), nil)
+	require.NoError(t, err)
+	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
+
+	req := logproto.PushRequest{
+		Streams: []logproto.Stream{
+			{
+				Labels: `{bar="baz1", foo="bar"}`,
+			},
+			{
+				Labels: `{bar="baz2", foo="bar"}`,
+			},
+		},
+	}
+	for i := 0; i < 10; i++ {
+		req.Streams[0].Entries = append(req.Streams[0].Entries, logproto.Entry{
+			Timestamp: time.Unix(0, 0),
+			Line:      fmt.Sprintf("line %d", i),
+		})
+		req.Streams[1].Entries = append(req.Streams[1].Entries, logproto.Entry{
+			Timestamp: time.Unix(0, 0),
+			Line:      fmt.Sprintf("line %d", i),
+		})
+	}
+
+	ctx := user.InjectOrgID(context.Background(), "test")
+	_, err = i.Push(ctx, &req)
+	require.Error(t, err)
+
+	s, ok := status.FromError(err)
+	require.True(t, ok)
+	details := s.Proto().Details
+
+	var rateLimitedLabels []string
+	for _, detail := range details {
+		rls := &logproto.RateLimitedStream{}
+		err := types.UnmarshalAny(detail, rls)
+		require.NoError(t, err)
+		rateLimitedLabels = append(rateLimitedLabels, rls.Labels)
+	}
+
+	// note that streams[0], although rate-limited, isn't present in the details.
+	// that's because push as of now is only returning the last errored stream.
+	require.EqualValues(t, []string{req.Streams[1].Labels}, rateLimitedLabels)
+}
+
 func TestIngester(t *testing.T) {
 	ingesterConfig := defaultIngesterTestConfig(t)
 	limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go
index 1c8e9d8d0ae68..05bd3d6ec3911 100644
--- a/pkg/ingester/instance.go
+++ b/pkg/ingester/instance.go
@@ -1,7 +1,9 @@
 package ingester
 
 import (
+	"bytes"
 	"context"
+	fmt "fmt"
 	"net/http"
 	"os"
 	"sync"
@@ -9,6 +11,9 @@ import (
 	"time"
 
 	"github.com/go-kit/log/level"
+	spb "github.com/gogo/googleapis/google/rpc"
+	"github.com/gogo/protobuf/types"
+	"github.com/gogo/status"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
@@ -19,6 +24,7 @@ import (
 	"github.com/weaveworks/common/httpgrpc"
 	"go.uber.org/atomic"
 
+	"github.com/grafana/loki/pkg/chunkenc"
 	"github.com/grafana/loki/pkg/ingester/index"
 	"github.com/grafana/loki/pkg/iter"
 	"github.com/grafana/loki/pkg/logproto"
@@ -158,6 +164,11 @@ func (i *instance) consumeChunk(ctx context.Context, ls labels.Labels, chunk *lo
 	return err
 }
 
+// Push will iterate over the given streams present in the PushRequest and attempt to store them.
+//
+// Although multiple streams are part of the PushRequest, the returned error only reflects what
+// happened to *the last stream in the request*. Ex: if three streams are part of the PushRequest
+// and all three failed, the returned error only describes what happened to the last processed stream.
 func (i *instance) Push(ctx context.Context, req *logproto.PushRequest) error {
 	record := recordPool.GetRecord()
 	record.UserID = i.instanceID
@@ -185,10 +196,9 @@ func (i *instance) Push(ctx context.Context, req *logproto.PushRequest) error {
 			continue
 		}
 
-		_, err = s.Push(ctx, reqStream.Entries, record, 0, false)
-		if err != nil {
-			appendErr = err
-		}
+		_, failedEntriesWithError := s.Push(ctx, reqStream.Entries, record, 0, false)
+		appendErr = errorForFailedEntries(s, failedEntriesWithError, len(reqStream.Entries))
+
 		s.chunkMtx.Unlock()
 	}
 
@@ -211,6 +221,73 @@ func (i *instance) Push(ctx context.Context, req *logproto.PushRequest) error {
 	return appendErr
 }
 
+// errorForFailedEntries mounts an error to be returned in a GRPC call based on entries that couldn't be ingested.
+//
+// The returned error is enriched with the gRPC error status and with a list of details.
+// As of now, the list of details is a list of all streams that were rate-limited. This list can be used
+// by the distributor to fine tune streams that were limited.
+func errorForFailedEntries(s *stream, failedEntriesWithError []entryWithError, totalEntries int) error {
+	if len(failedEntriesWithError) == 0 {
+		return nil
+	}
+
+	lastEntryWithErr := failedEntriesWithError[len(failedEntriesWithError)-1]
+	_, ok := lastEntryWithErr.e.(*validation.ErrStreamRateLimit)
+	outOfOrder := chunkenc.IsOutOfOrderErr(lastEntryWithErr.e)
+	if !outOfOrder && !ok {
+		return lastEntryWithErr.e
+	}
+
+	var statusCode int
+	if outOfOrder {
+		statusCode = http.StatusBadRequest
+	}
+	if ok {
+		// per-stream or ingestion limited.
+		statusCode = http.StatusTooManyRequests
+	}
+
+	// Return a http status 4xx request response with all failed entries.
+	buf := bytes.Buffer{}
+	streamName := s.labelsString
+
+	limitedFailedEntries := failedEntriesWithError
+	if maxIgnore := s.cfg.MaxReturnedErrors; maxIgnore > 0 && len(limitedFailedEntries) > maxIgnore {
+		limitedFailedEntries = limitedFailedEntries[:maxIgnore]
+	}
+
+	for _, entryWithError := range limitedFailedEntries {
+		fmt.Fprintf(&buf,
+			"entry with timestamp %s ignored, reason: '%s' for stream: %s,\n",
+			entryWithError.entry.Timestamp.String(), entryWithError.e.Error(), streamName)
+	}
+
+	fmt.Fprintf(&buf, "total ignored: %d out of %d", len(failedEntriesWithError), totalEntries)
+
+	var details []*types.Any
+
+	if statusCode == http.StatusTooManyRequests {
+		details = append(details, mountPerStreamDetails(streamName)...)
+	}
+
+	return status.ErrorProto(&spb.Status{
+		Code:    int32(statusCode),
+		Message: buf.String(),
+		Details: details,
+	})
+}
+
+func mountPerStreamDetails(streamLabels string) []*types.Any {
+	rls := logproto.RateLimitedStream{Labels: streamLabels}
+	marshalledStream, err := types.MarshalAny(&rls)
+	if err == nil {
+		return []*types.Any{marshalledStream}
+	}
+
+	level.Error(util_log.Logger).Log("msg", "error marshalling rate-limited stream", "err", err, "labels", streamLabels)
+	return []*types.Any{}
+}
+
 func (i *instance) createStream(pushReqStream logproto.Stream, record *WALRecord) (*stream, error) {
 	// record is only nil when replaying WAL. We don't want to drop data when replaying a WAL after
 	// reducing the stream limits, for instance.
diff --git a/pkg/ingester/recovery.go b/pkg/ingester/recovery.go
index c3bbc9a8af442..1a0c18786c8e4 100644
--- a/pkg/ingester/recovery.go
+++ b/pkg/ingester/recovery.go
@@ -165,10 +165,13 @@ func (r *ingesterRecoverer) Push(userID string, entries RefEntries) error {
 		}
 
 		// ignore out of order errors here (it's possible for a checkpoint to already have data from the wal segments)
-		bytesAdded, err := s.(*stream).Push(context.Background(), entries.Entries, nil, entries.Counter, true)
+		bytesAdded, entriesWithErrors := s.(*stream).Push(context.Background(), entries.Entries, nil, entries.Counter, true)
 		r.ing.replayController.Add(int64(bytesAdded))
-		if err != nil && err == ErrEntriesExist {
-			r.ing.metrics.duplicateEntriesTotal.Add(float64(len(entries.Entries)))
+		if len(entriesWithErrors) > 0 {
+			lastEntryWithError := entriesWithErrors[len(entriesWithErrors)-1]
+			if errors.Is(lastEntryWithError.e, ErrEntriesExist) {
+				r.ing.metrics.duplicateEntriesTotal.Add(float64(len(entries.Entries)))
+			}
 		}
 		return nil
 	})
diff --git a/pkg/ingester/stream.go b/pkg/ingester/stream.go
index 23a9d3d674573..3d824bd592529 100644
--- a/pkg/ingester/stream.go
+++ b/pkg/ingester/stream.go
@@ -1,18 +1,15 @@
 package ingester
 
 import (
-	"bytes"
 	"context"
-	"fmt"
-	"net/http"
 	"sync"
 	"time"
 
 	"github.com/go-kit/log/level"
+	"github.com/pkg/errors"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/model/labels"
 
-	"github.com/weaveworks/common/httpgrpc"
 
 	"github.com/grafana/loki/pkg/chunkenc"
 	"github.com/grafana/loki/pkg/iter"
@@ -148,7 +145,7 @@ func (s *stream) Push(
 	// Lock chunkMtx while pushing.
 	// If this is false, chunkMtx must be held outside Push.
 	lockChunk bool,
-) (int, error) {
+) (int, []entryWithError) {
 	if lockChunk {
 		s.chunkMtx.Lock()
 		defer s.chunkMtx.Unlock()
@@ -163,7 +160,7 @@ func (s *stream) Push(
 		s.metrics.walReplaySamplesDropped.WithLabelValues(duplicateReason).Add(float64(len(entries)))
 		s.metrics.walReplayBytesDropped.WithLabelValues(duplicateReason).Add(float64(byteCt))
 
-		return 0, ErrEntriesExist
+		return 0, []entryWithError{{entry: &logproto.Entry{}, e: ErrEntriesExist}}
 	}
 
 	var bytesAdded int
@@ -300,41 +297,8 @@ func (s *stream) Push(
 		s.metrics.memoryChunks.Add(float64(len(s.chunks) - prevNumChunks))
 	}
 
-	if len(failedEntriesWithError) > 0 {
-		lastEntryWithErr := failedEntriesWithError[len(failedEntriesWithError)-1]
-		_, ok := lastEntryWithErr.e.(*validation.ErrStreamRateLimit)
-		outOfOrder := chunkenc.IsOutOfOrderErr(lastEntryWithErr.e)
-		if !outOfOrder && !ok {
-			return bytesAdded, lastEntryWithErr.e
-		}
-		var statusCode int
-		if outOfOrder {
-			statusCode = http.StatusBadRequest
-		}
-		if ok {
-			statusCode = http.StatusTooManyRequests
-		}
-		// Return a http status 4xx request response with all failed entries.
-		buf := bytes.Buffer{}
-		streamName := s.labelsString
-
-		limitedFailedEntries := failedEntriesWithError
-		if maxIgnore := s.cfg.MaxReturnedErrors; maxIgnore > 0 && len(limitedFailedEntries) > maxIgnore {
-			limitedFailedEntries = limitedFailedEntries[:maxIgnore]
-		}
-
-		for _, entryWithError := range limitedFailedEntries {
-			fmt.Fprintf(&buf,
-				"entry with timestamp %s ignored, reason: '%s' for stream: %s,\n",
-				entryWithError.entry.Timestamp.String(), entryWithError.e.Error(), streamName)
-		}
-
-		fmt.Fprintf(&buf, "total ignored: %d out of %d", len(failedEntriesWithError), len(entries))
-
-		return bytesAdded, httpgrpc.Errorf(statusCode, buf.String())
-	}
+	return bytesAdded, failedEntriesWithError
 
-	return bytesAdded, nil
 }
 
 func (s *stream) cutChunk(ctx context.Context) *chunkDesc {
diff --git a/pkg/ingester/stream_test.go b/pkg/ingester/stream_test.go
index 2b5e3b8711efc..780317a98ed42 100644
--- a/pkg/ingester/stream_test.go
+++ b/pkg/ingester/stream_test.go
@@ -64,10 +64,10 @@ func TestMaxReturnedStreamsErrors(t *testing.T) {
 				NilMetrics,
 			)
 
-			_, err := s.Push(context.Background(), []logproto.Entry{
+			_, entriesWithErrors := s.Push(context.Background(), []logproto.Entry{
 				{Timestamp: time.Unix(int64(numLogs), 0), Line: "log"},
 			}, recordPool.GetRecord(), 0, true)
-			require.NoError(t, err)
+			require.Empty(t, entriesWithErrors)
 
 			newLines := make([]logproto.Entry, numLogs)
 			for i := 0; i < numLogs; i++ {
@@ -86,9 +86,9 @@ func TestMaxReturnedStreamsErrors(t *testing.T) {
 			fmt.Fprintf(&expected, "total ignored: %d out of %d", numLogs, numLogs)
 			expectErr := httpgrpc.Errorf(http.StatusBadRequest, expected.String())
 
-			_, err = s.Push(context.Background(), newLines, recordPool.GetRecord(), 0, true)
-			require.Error(t, err)
-			require.Equal(t, expectErr.Error(), err.Error())
+			_, entriesWithErrors = s.Push(context.Background(), newLines, recordPool.GetRecord(), 0, true)
+			finalErr := errorForFailedEntries(s, entriesWithErrors, len(newLines))
+			require.Equal(t, expectErr.Error(), finalErr.Error())
 		})
 	}
 }
@@ -110,12 +110,12 @@ func TestPushDeduplication(t *testing.T) {
 		NilMetrics,
 	)
 
-	written, err := s.Push(context.Background(), []logproto.Entry{
+	written, entriesWithErrors := s.Push(context.Background(), []logproto.Entry{
 		{Timestamp: time.Unix(1, 0), Line: "test"},
 		{Timestamp: time.Unix(1, 0), Line: "test"},
 		{Timestamp: time.Unix(1, 0), Line: "newer, better test"},
 	}, recordPool.GetRecord(), 0, true)
-	require.NoError(t, err)
+	require.Empty(t, entriesWithErrors)
 	require.Len(t, s.chunks, 1)
 	require.Equal(t, s.chunks[0].chunk.Size(), 2,
 		"expected exact duplicate to be dropped and newer content with same timestamp to be appended")
@@ -140,27 +140,28 @@ func TestPushRejectOldCounter(t *testing.T) {
 	)
 
 	// counter should be 2 now since the first line will be deduped
-	_, err = s.Push(context.Background(), []logproto.Entry{
+	_, entriesWithErrors := s.Push(context.Background(), []logproto.Entry{
 		{Timestamp: time.Unix(1, 0), Line: "test"},
 		{Timestamp: time.Unix(1, 0), Line: "test"},
 		{Timestamp: time.Unix(1, 0), Line: "newer, better test"},
 	}, recordPool.GetRecord(), 0, true)
-	require.NoError(t, err)
+	require.Empty(t, entriesWithErrors)
 	require.Len(t, s.chunks, 1)
 	require.Equal(t, s.chunks[0].chunk.Size(), 2,
 		"expected exact duplicate to be dropped and newer content with same timestamp to be appended")
 
 	// fail to push with a counter <= the streams internal counter
-	_, err = s.Push(context.Background(), []logproto.Entry{
+	_, entriesWithErrors = s.Push(context.Background(), []logproto.Entry{
 		{Timestamp: time.Unix(1, 0), Line: "test"},
 	}, recordPool.GetRecord(), 2, true)
-	require.Equal(t, ErrEntriesExist, err)
+	require.Len(t, entriesWithErrors, 1)
+	require.Equal(t, entriesWithErrors[0].e, ErrEntriesExist)
 
 	// succeed with a greater counter
-	_, err = s.Push(context.Background(), []logproto.Entry{
+	_, entriesWithErrors = s.Push(context.Background(), []logproto.Entry{
 		{Timestamp: time.Unix(1, 0), Line: "test"},
 	}, recordPool.GetRecord(), 3, true)
-	require.Nil(t, err)
+	require.Empty(t, entriesWithErrors)
 }
@@ -273,11 +274,11 @@ func TestUnorderedPush(t *testing.T) {
 		if x.cutBefore {
 			_ = s.cutChunk(context.Background())
 		}
-		written, err := s.Push(context.Background(), x.entries, recordPool.GetRecord(), 0, true)
+		written, entriesWithErrors := s.Push(context.Background(), x.entries, recordPool.GetRecord(), 0, true)
 		if x.err {
-			require.NotNil(t, err)
+			require.NotEmpty(t, entriesWithErrors)
 		} else {
-			require.Nil(t, err)
+			require.Empty(t, entriesWithErrors)
 		}
 		require.Equal(t, x.written, written)
 	}
@@ -334,8 +335,9 @@ func TestPushRateLimit(t *testing.T) {
 		{Timestamp: time.Unix(1, 0), Line: "aaaaaaaaab"},
 	}
 	// Counter should be 2 now since the first line will be deduped.
-	_, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true)
-	require.Contains(t, err.Error(), (&validation.ErrStreamRateLimit{RateLimit: l.PerStreamRateLimit, Labels: s.labelsString, Bytes: flagext.ByteSize(len(entries[1].Line))}).Error())
+	_, entriesWithErrors := s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true)
+	require.Len(t, entriesWithErrors, 1)
+	require.Contains(t, entriesWithErrors[0].e.Error(), (&validation.ErrStreamRateLimit{RateLimit: l.PerStreamRateLimit, Labels: s.labelsString, Bytes: flagext.ByteSize(len(entries[1].Line))}).Error())
 }
 
 func TestReplayAppendIgnoresValidityWindow(t *testing.T) {
@@ -365,8 +367,8 @@ func TestReplayAppendIgnoresValidityWindow(t *testing.T) {
 	}
 
 	// Push a first entry (it doesn't matter if we look like we're replaying or not)
-	_, err = s.Push(context.Background(), entries, nil, 1, true)
-	require.Nil(t, err)
+	_, entriesWithErrors := s.Push(context.Background(), entries, nil, 1, true)
+	require.Empty(t, entriesWithErrors)
 
 	// Create a sample outside the validity window
 	entries = []logproto.Entry{
@@ -374,12 +376,12 @@ func TestReplayAppendIgnoresValidityWindow(t *testing.T) {
 	}
 
 	// Pretend it's not a replay, ensure we error
-	_, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true)
-	require.NotNil(t, err)
+	_, entriesWithErrors = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true)
+	require.NotEmpty(t, entriesWithErrors)
 
 	// Now pretend it's a replay. The same write should succeed.
-	_, err = s.Push(context.Background(), entries, nil, 2, true)
-	require.Nil(t, err)
+	_, entriesWithErrors = s.Push(context.Background(), entries, nil, 2, true)
+	require.Empty(t, entriesWithErrors)
 }
@@ -420,8 +422,8 @@ func Benchmark_PushStream(b *testing.B) {
 	for n := 0; n < b.N; n++ {
 		rec := recordPool.GetRecord()
-		_, err := s.Push(ctx, e, rec, 0, true)
-		require.NoError(b, err)
+		_, entriesWithErrors := s.Push(ctx, e, rec, 0, true)
+		require.Empty(b, entriesWithErrors)
 		recordPool.PutRecord(rec)
 	}
 }
diff --git a/pkg/logproto/logproto.pb.go b/pkg/logproto/logproto.pb.go
index cb3e65e2f8ab2..ead1e41c3ccdd 100644
--- a/pkg/logproto/logproto.pb.go
+++ b/pkg/logproto/logproto.pb.go
@@ -511,6 +511,49 @@ func (m *LabelRequest) GetEnd() *time.Time {
 	return nil
 }
 
+type RateLimitedStream struct {
+	Labels string `protobuf:"bytes,1,opt,name=labels,proto3" json:"labels,omitempty"`
+}
+
+func (m *RateLimitedStream) Reset()      { *m = RateLimitedStream{} }
+func (*RateLimitedStream) ProtoMessage() {}
+func (*RateLimitedStream) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c28a5f14f1f4c79a, []int{8}
+}
+func (m *RateLimitedStream) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RateLimitedStream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_RateLimitedStream.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *RateLimitedStream) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RateLimitedStream.Merge(m, src)
+}
+func (m *RateLimitedStream) XXX_Size() int {
+	return m.Size()
+}
+func (m *RateLimitedStream) XXX_DiscardUnknown() {
+	xxx_messageInfo_RateLimitedStream.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RateLimitedStream proto.InternalMessageInfo
+
+func (m *RateLimitedStream) GetLabels() string {
+	if m != nil {
+		return m.Labels
+	}
+	return ""
+}
+
 type LabelResponse struct {
 	Values []string `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
 }
 
@@ -518,7 +561,7 @@ type LabelResponse struct {
 func (m *LabelResponse) Reset()      { *m = LabelResponse{} }
 func (*LabelResponse) ProtoMessage() {}
 func (*LabelResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_c28a5f14f1f4c79a, []int{8}
+	return fileDescriptor_c28a5f14f1f4c79a, []int{9}
 }
 func (m *LabelResponse) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)

[... 34 further hunks of the same one-line shape follow, bumping the Descriptor()
 index of every message declared after the new RateLimitedStream by one:
 StreamAdapter 9→10, EntryAdapter 10→11, Sample 11→12, LegacySample 12→13,
 Series 13→14, TailRequest 14→15, TailResponse 15→16, SeriesRequest 16→17,
 SeriesResponse 17→18, SeriesIdentifier 18→19, DroppedStream 19→20,
 TimeSeriesChunk 20→21, LabelPair 21→22, LegacyLabelPair 22→23, Chunk 23→24,
 TransferChunksResponse 24→25, TailersCountRequest 25→26, TailersCountResponse 26→27,
 GetChunkIDsRequest 27→28, GetChunkIDsResponse 28→29, ChunkRef 29→30,
 LabelValuesForMetricNameRequest 30→31, LabelNamesForMetricNameRequest 31→32,
 GetChunkRefRequest 32→33, GetChunkRefResponse 33→34, GetSeriesRequest 34→35,
 GetSeriesResponse 35→36, IndexSeries 36→37, QueryIndexResponse 37→38, Row 38→39,
 QueryIndexRequest 39→40, IndexQuery 40→41, IndexStatsRequest 41→42,
 IndexStatsResponse 42→43 ...]

@@ -2314,6 +2357,7 @@ func init() {
 	proto.RegisterType((*QueryResponse)(nil), "logproto.QueryResponse")
 	proto.RegisterType((*SampleQueryResponse)(nil), "logproto.SampleQueryResponse")
 	proto.RegisterType((*LabelRequest)(nil), "logproto.LabelRequest")
+	proto.RegisterType((*RateLimitedStream)(nil), "logproto.RateLimitedStream")
 	proto.RegisterType((*LabelResponse)(nil), "logproto.LabelResponse")
 	proto.RegisterType((*StreamAdapter)(nil), "logproto.StreamAdapter")
 	proto.RegisterType((*EntryAdapter)(nil), "logproto.EntryAdapter")
@@ -2355,138 +2399,139 @@ func init() {
 
 func init() { proto.RegisterFile("pkg/logproto/logproto.proto", fileDescriptor_c28a5f14f1f4c79a) }
 
 var fileDescriptor_c28a5f14f1f4c79a = []byte{
-	// 2088 bytes of a gzipped FileDescriptorProto
-	[... gzipped descriptor bytes elided ...]
+	// 2098 bytes of a gzipped FileDescriptorProto
+	[... gzipped descriptor bytes elided ...]
 }
 
 func (x Direction) String() string {
@@ -2779,6 +2824,30 @@ func (this *LabelRequest) Equal(that interface{}) bool {
 	}
 	return true
 }
+func (this *RateLimitedStream) Equal(that interface{}) bool {
+	if that == nil {
+		return this == nil
+	}
+
+	that1, ok := that.(*RateLimitedStream)
+	if !ok {
+		that2, ok := that.(RateLimitedStream)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		return this == nil
+	} else if this == nil {
+		return false
+	}
+	if this.Labels != that1.Labels {
+		return false
+	}
+	return true
+}
 func (this *LabelResponse) Equal(that interface{}) bool {
 	if that == nil {
 		return this == nil
@@ -3944,6 +4013,16 @@ func (this *LabelRequest) GoString() string {
 	s = append(s, "}")
 	return strings.Join(s, "")
 }
+func (this *RateLimitedStream) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 5)
+	s = append(s, "&logproto.RateLimitedStream{")
+	s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n")
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
 func (this *LabelResponse) GoString() string {
 	if this == nil {
 		return "nil"
@@ -5396,6 +5475,36 @@ func (m *LabelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	return len(dAtA) - i, nil
 }
 
+func (m *RateLimitedStream) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RateLimitedStream) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RateLimitedStream) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Labels) > 0 {
+		i -= len(m.Labels)
+		copy(dAtA[i:], m.Labels)
+		i = encodeVarintLogproto(dAtA, i, uint64(len(m.Labels)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
 func (m *LabelResponse) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -7007,6 +7116,19 @@ func (m *LabelRequest) Size() (n int) {
 	return n
 }
 
+func (m *RateLimitedStream) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Labels)
+	if l > 0 {
+		n += 1 + l + sovLogproto(uint64(l))
+	}
+	return n
+}
+
func (m *LabelResponse) Size() (n int) { if m == nil { return 0 @@ -7752,6 +7874,16 @@ func (this *LabelRequest) String() string { }, "") return s } +func (this *RateLimitedStream) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RateLimitedStream{`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, + `}`, + }, "") + return s +} func (this *LabelResponse) String() string { if this == nil { return "nil" @@ -9369,6 +9501,91 @@ func (m *LabelRequest) Unmarshal(dAtA []byte) error { } return nil } +func (m *RateLimitedStream) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RateLimitedStream: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RateLimitedStream: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *LabelResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/pkg/logproto/logproto.proto b/pkg/logproto/logproto.proto index ad9631c1e09d9..ca11a736f928d 100644 --- a/pkg/logproto/logproto.proto +++ b/pkg/logproto/logproto.proto @@ -116,6 +116,10 @@ message LabelRequest { ]; } +message RateLimitedStream { + string labels = 1; +} + message LabelResponse { repeated string values = 1; }
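The generated marshal/unmarshal plumbing above exists so that the new `RateLimitedStream` message can travel as a gRPC status detail from ingester to distributor. Below is a minimal sketch of the encoding side, assuming the `WithDetails` API of `github.com/gogo/status`; `rateLimitedError` is a hypothetical helper for illustration, not the actual `Push` error path.

```go
package main

import (
	"fmt"

	"github.com/gogo/status"
	"google.golang.org/grpc/codes"

	"github.com/grafana/loki/pkg/logproto"
)

// rateLimitedError is a hypothetical helper: it attaches the labels of each
// throttled stream to a ResourceExhausted status as RateLimitedStream details,
// so a distributor can later recover them via status.FromError.
func rateLimitedError(streamLabels []string) error {
	s := status.New(codes.ResourceExhausted, "Ingestion rate limit exceeded")
	for _, labels := range streamLabels {
		// WithDetails returns an enriched copy; keep it on success.
		if enriched, err := s.WithDetails(&logproto.RateLimitedStream{Labels: labels}); err == nil {
			s = enriched
		}
	}
	return s.Err()
}

func main() {
	err := rateLimitedError([]string{`{bar="baz1", foo="bar"}`, `{bar="baz2", foo="bar"}`})
	fmt.Println(err)
}
```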
loki
Modify ingesters to return rate-limited streams to distributors (#6977)
c89df1a795b8eaf6f3c765db85e86f1a20f16deb
2019-11-27 00:03:15
Putra Sattvika, I Gusti Ngurah
storage: fix missing logs with batched chunk iterator (#1299)
false
diff --git a/pkg/storage/iterator.go b/pkg/storage/iterator.go index 229a89d5b3055..5acf5ea25d8e8 100644 --- a/pkg/storage/iterator.go +++ b/pkg/storage/iterator.go @@ -77,6 +77,16 @@ type batchChunkIterator struct { // newBatchChunkIterator creates a new batch iterator with the given batchSize. func newBatchChunkIterator(ctx context.Context, chunks []*chunkenc.LazyChunk, batchSize int, matchers []*labels.Matcher, filter logql.Filter, req *logproto.QueryRequest) *batchChunkIterator { + + // __name__ is not something we filter by because it's a constant in loki and only used for upstream compatibility. + // Therefore remove it + for i := range matchers { + if matchers[i].Name == labels.MetricName { + matchers = append(matchers[:i], matchers[i+1:]...) + break + } + } + res := &batchChunkIterator{ batchSize: batchSize, matchers: matchers, @@ -112,6 +122,9 @@ func (it *batchChunkIterator) Next() bool { } func (it *batchChunkIterator) nextBatch() (iter.EntryIterator, error) { + // the first chunk of the batch + headChunk := it.chunks.Peek() + // pop the next batch of chunks and append/preprend previous overlapping chunks // so we can merge/de-dupe overlapping entries. batch := make([]*chunkenc.LazyChunk, 0, it.batchSize+len(it.lastOverlapping)) @@ -130,6 +143,14 @@ func (it *batchChunkIterator) nextBatch() (iter.EntryIterator, error) { // so that overlapping chunks are together if it.req.Direction == logproto.BACKWARD { from = time.Unix(0, nextChunk.Chunk.Through.UnixNano()) + + // we have to reverse the inclusivity of the chunk iterator from + // [from, through) to (from, through] for backward queries, except when + // the batch's `from` is equal to the query's Start. This can be achieved + // by shifting `from` by one nanosecond. + if !from.Equal(it.req.Start) { + from = from.Add(time.Nanosecond) + } } else { through = time.Unix(0, nextChunk.Chunk.From.UnixNano()) } @@ -149,7 +170,7 @@ func (it *batchChunkIterator) nextBatch() (iter.EntryIterator, error) { // └────────────────────┘ // // And nextChunk is # 49, we need to keep references to #47 and #48 as they won't be - // iterated over completely (we're clipping through to #49's from) and then add them to the next batch. + // iterated over completely (we're clipping through to #49's from) and then add them to the next batch. it.lastOverlapping = it.lastOverlapping[:0] for _, c := range batch { if it.req.Direction == logproto.BACKWARD { @@ -162,13 +183,27 @@ func (it *batchChunkIterator) nextBatch() (iter.EntryIterator, error) { } } } + } + + if it.req.Direction == logproto.BACKWARD { + through = time.Unix(0, headChunk.Chunk.Through.UnixNano()) + + if through.After(it.req.End) { + through = it.req.End + } + + // we have to reverse the inclusivity of the chunk iterator from + // [from, through) to (from, through] for backward queries, except when + // the batch's `through` is equal to the query's End. This can be achieved + // by shifting `through` by one nanosecond. 
+ if !through.Equal(it.req.End) { + through = through.Add(time.Nanosecond) + } } else { - if len(it.lastOverlapping) > 0 { - if it.req.Direction == logproto.BACKWARD { - through = time.Unix(0, it.lastOverlapping[0].Chunk.From.UnixNano()) - } else { - from = time.Unix(0, it.lastOverlapping[0].Chunk.Through.UnixNano()) - } + from = time.Unix(0, headChunk.Chunk.From.UnixNano()) + + if from.Before(it.req.Start) { + from = it.req.Start } } @@ -250,12 +285,9 @@ func buildIterators(ctx context.Context, chks map[model.Fingerprint][][]*chunken func buildHeapIterator(ctx context.Context, chks [][]*chunkenc.LazyChunk, filter logql.Filter, direction logproto.Direction, from, through time.Time) (iter.EntryIterator, error) { result := make([]iter.EntryIterator, 0, len(chks)) - if chks[0][0].Chunk.Metric.Has("__name__") { - labelsBuilder := labels.NewBuilder(chks[0][0].Chunk.Metric) - labelsBuilder.Del("__name__") - chks[0][0].Chunk.Metric = labelsBuilder.Labels() - } - labels := chks[0][0].Chunk.Metric.String() + + // __name__ is only used for upstream compatibility and is hardcoded within loki. Strip it from the return label set. + labels := dropLabels(chks[0][0].Chunk.Metric, labels.MetricName).String() for i := range chks { iterators := make([]iter.EntryIterator, 0, len(chks[i])) @@ -400,3 +432,20 @@ outer: return css } + +// dropLabels returns a new label set with certain labels dropped +func dropLabels(ls labels.Labels, removals ...string) (dst labels.Labels) { + toDel := make(map[string]struct{}) + for _, r := range removals { + toDel[r] = struct{}{} + } + + for _, l := range ls { + _, remove := toDel[l.Name] + if !remove { + dst = append(dst, l) + } + } + + return dst +} diff --git a/pkg/storage/iterator_test.go b/pkg/storage/iterator_test.go index 8f04348148dcc..fac34f5fe77aa 100644 --- a/pkg/storage/iterator_test.go +++ b/pkg/storage/iterator_test.go @@ -2,12 +2,15 @@ package storage import ( "context" + "fmt" "testing" "time" "github.com/grafana/loki/pkg/chunkenc" "github.com/grafana/loki/pkg/iter" "github.com/grafana/loki/pkg/logproto" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/stretchr/testify/require" ) func Test_newBatchChunkIterator(t *testing.T) { @@ -18,11 +21,12 @@ func Test_newBatchChunkIterator(t *testing.T) { matchers string start, end time.Time direction logproto.Direction + batchSize int }{ "forward with overlap": { []*chunkenc.LazyChunk{ newLazyChunk(logproto.Stream{ - Labels: fooLabels, + Labels: fooLabelsWithName, Entries: []logproto.Entry{ { Timestamp: from, @@ -35,7 +39,7 @@ func Test_newBatchChunkIterator(t *testing.T) { }, }), newLazyChunk(logproto.Stream{ - Labels: fooLabels, + Labels: fooLabelsWithName, Entries: []logproto.Entry{ { Timestamp: from.Add(time.Millisecond), @@ -48,7 +52,7 @@ func Test_newBatchChunkIterator(t *testing.T) { }, }), newLazyChunk(logproto.Stream{ - Labels: fooLabels, + Labels: fooLabelsWithName, Entries: []logproto.Entry{ { Timestamp: from.Add(time.Millisecond), @@ -61,7 +65,120 @@ func Test_newBatchChunkIterator(t *testing.T) { }, }), newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from.Add(2 * time.Millisecond), + Line: "3", + }, + { + Timestamp: from.Add(3 * time.Millisecond), + Line: "4", + }, + }, + }), + newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from.Add(2 * time.Millisecond), + Line: "3", + }, + { + Timestamp: from.Add(3 * time.Millisecond), + Line: "4", + }, + }, + }), + newLazyChunk(logproto.Stream{ + 
Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from.Add(3 * time.Millisecond), + Line: "4", + }, + { + Timestamp: from.Add(4 * time.Millisecond), + Line: "5", + }, + }, + }), + }, + []*logproto.Stream{ + { Labels: fooLabels, + Entries: []logproto.Entry{ + { + Timestamp: from, + Line: "1", + }, + { + Timestamp: from.Add(time.Millisecond), + Line: "2", + }, + { + Timestamp: from.Add(2 * time.Millisecond), + Line: "3", + }, + { + Timestamp: from.Add(3 * time.Millisecond), + Line: "4", + }, + }, + }, + }, + fooLabelsWithName, + from, from.Add(4 * time.Millisecond), + logproto.FORWARD, + 2, + }, + "forward with overlapping non-continuous entries": { + []*chunkenc.LazyChunk{ + newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from, + Line: "1", + }, + { + Timestamp: from.Add(time.Millisecond), + Line: "2", + }, + { + Timestamp: from.Add(3 * time.Millisecond), + Line: "4", + }, + }, + }), + newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from.Add(time.Millisecond), + Line: "2", + }, + { + Timestamp: from.Add(2 * time.Millisecond), + Line: "3", + }, + }, + }), + newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from.Add(time.Millisecond), + Line: "2", + }, + { + Timestamp: from.Add(3 * time.Millisecond), + Line: "4", + }, + }, + }), + newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, Entries: []logproto.Entry{ { Timestamp: from.Add(2 * time.Millisecond), @@ -93,14 +210,15 @@ func Test_newBatchChunkIterator(t *testing.T) { }, }, }, - fooLabels, + fooLabelsWithName, from, from.Add(3 * time.Millisecond), logproto.FORWARD, + 2, }, "backward with overlap": { []*chunkenc.LazyChunk{ newLazyChunk(logproto.Stream{ - Labels: fooLabels, + Labels: fooLabelsWithName, Entries: []logproto.Entry{ { Timestamp: from, @@ -113,7 +231,7 @@ func Test_newBatchChunkIterator(t *testing.T) { }, }), newLazyChunk(logproto.Stream{ - Labels: fooLabels, + Labels: fooLabelsWithName, Entries: []logproto.Entry{ { Timestamp: from.Add(time.Millisecond), @@ -126,7 +244,7 @@ func Test_newBatchChunkIterator(t *testing.T) { }, }), newLazyChunk(logproto.Stream{ - Labels: fooLabels, + Labels: fooLabelsWithName, Entries: []logproto.Entry{ { Timestamp: from.Add(time.Millisecond), @@ -139,7 +257,7 @@ func Test_newBatchChunkIterator(t *testing.T) { }, }), newLazyChunk(logproto.Stream{ - Labels: fooLabels, + Labels: fooLabelsWithName, Entries: []logproto.Entry{ { Timestamp: from.Add(2 * time.Millisecond), @@ -151,11 +269,41 @@ func Test_newBatchChunkIterator(t *testing.T) { }, }, }), + newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from.Add(2 * time.Millisecond), + Line: "3", + }, + { + Timestamp: from.Add(3 * time.Millisecond), + Line: "4", + }, + }, + }), + newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from.Add(3 * time.Millisecond), + Line: "4", + }, + { + Timestamp: from.Add(4 * time.Millisecond), + Line: "5", + }, + }, + }), }, []*logproto.Stream{ { Labels: fooLabels, Entries: []logproto.Entry{ + { + Timestamp: from.Add(3 * time.Millisecond), + Line: "4", + }, { Timestamp: from.Add(2 * time.Millisecond), Line: "3", @@ -171,14 +319,114 @@ func Test_newBatchChunkIterator(t *testing.T) { }, }, }, - fooLabels, - from, from.Add(3 * time.Millisecond), + fooLabelsWithName, + from, from.Add(4 * time.Millisecond), logproto.BACKWARD, + 
2, }, - "forward without overlap": { + "backward with overlapping non-continuous entries": { []*chunkenc.LazyChunk{ newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from.Add(0 * time.Millisecond), + Line: "0", + }, + { + Timestamp: from.Add(3 * time.Millisecond), + Line: "3", + }, + }, + }), + newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from.Add(1 * time.Millisecond), + Line: "1", + }, + { + Timestamp: from.Add(6 * time.Millisecond), + Line: "6", + }, + }, + }), + newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from.Add(2 * time.Millisecond), + Line: "2", + }, + { + Timestamp: from.Add(5 * time.Millisecond), + Line: "5", + }, + }, + }), + newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from.Add(4 * time.Millisecond), + Line: "4", + }, + { + Timestamp: from.Add(7 * time.Millisecond), + Line: "7", + }, + }, + }), + }, + []*logproto.Stream{ + { Labels: fooLabels, + Entries: []logproto.Entry{ + { + Timestamp: from.Add(7 * time.Millisecond), + Line: "7", + }, + { + Timestamp: from.Add(6 * time.Millisecond), + Line: "6", + }, + { + Timestamp: from.Add(5 * time.Millisecond), + Line: "5", + }, + { + Timestamp: from.Add(4 * time.Millisecond), + Line: "4", + }, + { + Timestamp: from.Add(3 * time.Millisecond), + Line: "3", + }, + { + Timestamp: from.Add(2 * time.Millisecond), + Line: "2", + }, + { + Timestamp: from.Add(1 * time.Millisecond), + Line: "1", + }, + { + Timestamp: from.Add(0 * time.Millisecond), + Line: "0", + }, + }, + }, + }, + fooLabelsWithName, + from, from.Add(8 * time.Millisecond), + logproto.BACKWARD, + 2, + }, + "forward without overlap": { + []*chunkenc.LazyChunk{ + newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, Entries: []logproto.Entry{ { Timestamp: from, @@ -191,7 +439,7 @@ func Test_newBatchChunkIterator(t *testing.T) { }, }), newLazyChunk(logproto.Stream{ - Labels: fooLabels, + Labels: fooLabelsWithName, Entries: []logproto.Entry{ { Timestamp: from.Add(2 * time.Millisecond), @@ -200,7 +448,7 @@ func Test_newBatchChunkIterator(t *testing.T) { }, }), newLazyChunk(logproto.Stream{ - Labels: fooLabels, + Labels: fooLabelsWithName, Entries: []logproto.Entry{ { Timestamp: from.Add(3 * time.Millisecond), @@ -228,14 +476,15 @@ func Test_newBatchChunkIterator(t *testing.T) { }, }, }, - fooLabels, + fooLabelsWithName, from, from.Add(3 * time.Millisecond), logproto.FORWARD, + 2, }, "backward without overlap": { []*chunkenc.LazyChunk{ newLazyChunk(logproto.Stream{ - Labels: fooLabels, + Labels: fooLabelsWithName, Entries: []logproto.Entry{ { Timestamp: from, @@ -248,7 +497,7 @@ func Test_newBatchChunkIterator(t *testing.T) { }, }), newLazyChunk(logproto.Stream{ - Labels: fooLabels, + Labels: fooLabelsWithName, Entries: []logproto.Entry{ { Timestamp: from.Add(2 * time.Millisecond), @@ -257,7 +506,7 @@ func Test_newBatchChunkIterator(t *testing.T) { }, }), newLazyChunk(logproto.Stream{ - Labels: fooLabels, + Labels: fooLabelsWithName, Entries: []logproto.Entry{ { Timestamp: from.Add(3 * time.Millisecond), @@ -285,16 +534,17 @@ func Test_newBatchChunkIterator(t *testing.T) { }, }, }, - fooLabels, + fooLabelsWithName, from, from.Add(3 * time.Millisecond), logproto.BACKWARD, + 2, }, } for name, tt := range tests { tt := tt t.Run(name, func(t *testing.T) { - it := newBatchChunkIterator(context.Background(), tt.chunks, 2, newMatchers(tt.matchers), nil, 
newQuery("", tt.start, tt.end, tt.direction)) + it := newBatchChunkIterator(context.Background(), tt.chunks, tt.batchSize, newMatchers(tt.matchers), nil, newQuery("", tt.start, tt.end, tt.direction)) streams, _, err := iter.ReadBatch(it, 1000) _ = it.Close() if err != nil { @@ -305,5 +555,127 @@ func Test_newBatchChunkIterator(t *testing.T) { }) } +} +func TestPartitionOverlappingchunks(t *testing.T) { + var ( + oneThroughFour = newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from, + Line: "1", + }, + { + Timestamp: from.Add(3 * time.Millisecond), + Line: "4", + }, + }, + }) + two = newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from.Add(1 * time.Millisecond), + Line: "2", + }, + }, + }) + three = newLazyChunk(logproto.Stream{ + Labels: fooLabelsWithName, + Entries: []logproto.Entry{ + { + Timestamp: from.Add(2 * time.Millisecond), + Line: "3", + }, + }, + }) + ) + + for i, tc := range []struct { + input []*chunkenc.LazyChunk + expected [][]*chunkenc.LazyChunk + }{ + { + input: []*chunkenc.LazyChunk{ + oneThroughFour, + two, + three, + }, + expected: [][]*chunkenc.LazyChunk{ + []*chunkenc.LazyChunk{oneThroughFour}, + []*chunkenc.LazyChunk{two, three}, + }, + }, + { + input: []*chunkenc.LazyChunk{ + two, + oneThroughFour, + three, + }, + expected: [][]*chunkenc.LazyChunk{ + []*chunkenc.LazyChunk{oneThroughFour}, + []*chunkenc.LazyChunk{two, three}, + }, + }, + { + input: []*chunkenc.LazyChunk{ + two, + two, + three, + three, + }, + expected: [][]*chunkenc.LazyChunk{ + []*chunkenc.LazyChunk{two, three}, + []*chunkenc.LazyChunk{two, three}, + }, + }, + } { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + out := partitionOverlappingChunks(tc.input) + require.Equal(t, tc.expected, out) + }) + } +} + +func TestDropLabels(t *testing.T) { + + for i, tc := range []struct { + ls labels.Labels + drop []string + expected labels.Labels + }{ + { + ls: labels.Labels{ + labels.Label{ + Name: "a", + Value: "1", + }, + labels.Label{ + Name: "b", + Value: "2", + }, + labels.Label{ + Name: "c", + Value: "3", + }, + }, + drop: []string{"b"}, + expected: labels.Labels{ + labels.Label{ + Name: "a", + Value: "1", + }, + labels.Label{ + Name: "c", + Value: "3", + }, + }, + }, + } { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + dropped := dropLabels(tc.ls, tc.drop...) + require.Equal(t, tc.expected, dropped) + }) + } } diff --git a/pkg/storage/util_test.go b/pkg/storage/util_test.go index 9107438b8f145..57d9f974488de 100644 --- a/pkg/storage/util_test.go +++ b/pkg/storage/util_test.go @@ -19,6 +19,7 @@ import ( "github.com/stretchr/testify/assert" ) +var fooLabelsWithName = "{foo=\"bar\", __name__=\"log\"}" var fooLabels = "{foo=\"bar\"}" var from = time.Unix(0, time.Millisecond.Nanoseconds())
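The core of the fix in the diff above is a one-nanosecond boundary shift: chunk iterators treat time ranges as half-open `[from, through)`, so backward batches must nudge a boundary forward to behave like `(from, through]` without widening past the query's own start and end. The sketch below condenses that adjustment into one helper under stated assumptions; the real code applies the shifts separately to the batch's `from` and the head chunk's `through`.

```go
package main

import (
	"fmt"
	"time"
)

// adjustBackwardBounds is an illustrative helper (not Loki's actual code): it
// shifts batch boundaries by one nanosecond so a half-open [from, through)
// iterator behaves like (from, through] for backward queries, except when a
// boundary coincides with the query's own start or end.
func adjustBackwardBounds(from, through, queryStart, queryEnd time.Time) (time.Time, time.Time) {
	if !from.Equal(queryStart) {
		from = from.Add(time.Nanosecond)
	}
	// Clamp the head chunk's through to the query end before shifting.
	if through.After(queryEnd) {
		through = queryEnd
	}
	if !through.Equal(queryEnd) {
		through = through.Add(time.Nanosecond)
	}
	return from, through
}

func main() {
	start := time.Unix(0, 0)
	end := start.Add(4 * time.Millisecond)
	from, through := adjustBackwardBounds(start.Add(1*time.Millisecond), start.Add(3*time.Millisecond), start, end)
	// Both interior boundaries are shifted by 1ns, so entries exactly at
	// `from` are excluded and entries exactly at `through` are included.
	fmt.Println(from.UnixNano(), through.UnixNano()) // 1000001 3000001
}
```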
storage
fix missing logs with batched chunk iterator (#1299)
2edbe1280f3c32e1d02794343d666edb67296811
2024-03-21 21:58:15
Christian Haudum
fix(blooms): Correctly skip block page in case it exceeds the max page size for querying (#12297)
false
diff --git a/pkg/storage/bloom/v1/block.go b/pkg/storage/bloom/v1/block.go index 84bc71a6b203c..c9eef5fa33027 100644 --- a/pkg/storage/bloom/v1/block.go +++ b/pkg/storage/bloom/v1/block.go @@ -148,25 +148,26 @@ func (bq *BlockQuerier) Seek(fp model.Fingerprint) error { } func (bq *BlockQuerier) Next() bool { - if !bq.series.Next() { - return false - } - - series := bq.series.At() - - bq.blooms.Seek(series.Offset) - if !bq.blooms.Next() { - return false - } - - bloom := bq.blooms.At() - - bq.cur = &SeriesWithBloom{ - Series: &series.Series, - Bloom: bloom, + for bq.series.Next() { + series := bq.series.At() + bq.blooms.Seek(series.Offset) + if !bq.blooms.Next() { + // skip blocks that are too large + if errors.Is(bq.blooms.Err(), ErrPageTooLarge) { + // fmt.Printf("skipping bloom page: %s (%d)\n", series.Fingerprint, series.Chunks.Len()) + bq.blooms.err = nil + continue + } + return false + } + bloom := bq.blooms.At() + bq.cur = &SeriesWithBloom{ + Series: &series.Series, + Bloom: bloom, + } + return true } - return true - + return false } func (bq *BlockQuerier) At() *SeriesWithBloom { diff --git a/pkg/storage/bloom/v1/bloom.go b/pkg/storage/bloom/v1/bloom.go index 6a6c2610e82e2..da0a770fb2579 100644 --- a/pkg/storage/bloom/v1/bloom.go +++ b/pkg/storage/bloom/v1/bloom.go @@ -18,7 +18,7 @@ import ( // Figure out a decent maximum page size that we can process. // TODO(chaudum): Make max page size configurable var maxPageSize = 32 << 20 // 32MB -var errPageTooLarge = "bloom page too large to process: N=%d Offset=%d Len=%d DecompressedLen=%d" +var ErrPageTooLarge = errors.Errorf("bloom page too large: size limit is %.1fMiB", float64(maxPageSize)/float64(1<<20)) type Bloom struct { filter.ScalableBloomFilter @@ -253,9 +253,10 @@ func (b *BloomBlock) BloomPageDecoder(r io.ReadSeeker, pageIdx int) (*BloomPageD } page := b.pageHeaders[pageIdx] + // fmt.Printf("pageIdx=%d page=%+v size=%.2fMiB\n", pageIdx, page, float64(page.Len)/float64(1<<20)) if page.Len > maxPageSize { - return nil, fmt.Errorf(errPageTooLarge, page.N, page.Offset, page.Len, page.DecompressedLen) + return nil, ErrPageTooLarge } if _, err := r.Seek(int64(page.Offset), io.SeekStart); err != nil { diff --git a/tools/bloom/inspector/main.go b/tools/bloom/inspector/main.go new file mode 100644 index 0000000000000..bb81d02b260b1 --- /dev/null +++ b/tools/bloom/inspector/main.go @@ -0,0 +1,37 @@ +package main + +import ( + "fmt" + "os" + + v1 "github.com/grafana/loki/pkg/storage/bloom/v1" +) + +func main() { + if len(os.Args) < 2 { + fmt.Println("Usage: go run main.go BLOCK_DIRECTORY") + os.Exit(2) + } + + path := os.Args[1] + fmt.Printf("Block directory: %s\n", path) + + r := v1.NewDirectoryBlockReader(path) + b := v1.NewBlock(r) + q := v1.NewBlockQuerier(b) + + md, err := q.Metadata() + if err != nil { + panic(err) + } + + fmt.Printf("Metadata: %+v\n", md) + + for q.Next() { + swb := q.At() + fmt.Printf("%s (%d)\n", swb.Series.Fingerprint, swb.Series.Chunks.Len()) + } + if q.Err() != nil { + fmt.Printf("error: %s\n", q.Err()) + } +}
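The `BlockQuerier.Next` rewrite above turns a single failure check into a loop that tolerates oversized pages: when decoding fails with the `ErrPageTooLarge` sentinel, the querier clears the error and advances to the next series instead of aborting the whole scan. Below is a simplified stand-in for that pattern; the types are illustrative, not Loki's real ones.

```go
package main

import (
	"errors"
	"fmt"
)

// ErrPageTooLarge mirrors the sentinel error introduced in the commit above.
var ErrPageTooLarge = errors.New("bloom page too large")

const maxPageSize = 32 << 20 // 32MiB, matching the limit in the commit

// pageIter is a simplified stand-in for BlockQuerier, showing the
// "skip oversized pages instead of failing the scan" loop pattern.
type pageIter struct {
	sizes []int // decoded page sizes, stand-ins for bloom pages
	idx   int
	cur   int
	err   error
}

func (it *pageIter) load(size int) error {
	if size > maxPageSize {
		return ErrPageTooLarge
	}
	return nil
}

func (it *pageIter) Next() bool {
	for it.idx < len(it.sizes) {
		size := it.sizes[it.idx]
		it.idx++
		if err := it.load(size); err != nil {
			if errors.Is(err, ErrPageTooLarge) {
				continue // too large to decode: skip this page, keep iterating
			}
			it.err = err // any other error still aborts the scan
			return false
		}
		it.cur = size
		return true
	}
	return false
}

func main() {
	it := &pageIter{sizes: []int{1 << 20, 64 << 20, 2 << 20}}
	for it.Next() {
		fmt.Printf("processed page of %d bytes\n", it.cur) // 64MiB page is skipped
	}
	if it.err != nil {
		fmt.Println("error:", it.err)
	}
}
```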
fix
Correctly skip block page in case it exceeds the max page size for querying (#12297)
f0ec743ba09d38c4cdb01e88fb8b0a1198b3c25f
2023-12-13 16:21:36
Zirko
helm: add cilium networkpolicies (#11425)
false
diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md index 8252a6fd103a3..e650d0fca2fc2 100644 --- a/docs/sources/setup/install/helm/reference.md +++ b/docs/sources/setup/install/helm/reference.md @@ -3110,6 +3110,15 @@ false <td><pre lang="json"> [] </pre> +</td> + </tr> + <tr> + <td>networkPolicy.flavor</td> + <td>string</td> + <td>Specifies whether the policies created will be standard Network Policies (flavor: kubernetes) or Cilium Network Policies (flavor: cilium)</td> + <td><pre lang="json"> +"kubernetes" +</pre> </td> </tr> <tr> diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md index 78571b3de600d..96bebdf5aebc9 100644 --- a/production/helm/loki/CHANGELOG.md +++ b/production/helm/loki/CHANGELOG.md @@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang [//]: # (<AUTOMATED_UPDATES_LOCATOR> : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.) +## 5.41.2 + +- [FEATURE] Add ciliumnetworkpolicies. + ## 5.41.1 - [FEATURE] Allow topology spread constraints for Loki read deployment component. diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml index d9cf011e4f23e..fc7e0fbacbc6e 100644 --- a/production/helm/loki/Chart.yaml +++ b/production/helm/loki/Chart.yaml @@ -3,7 +3,7 @@ name: loki description: Helm chart for Grafana Loki in simple, scalable mode type: application appVersion: 2.9.3 -version: 5.41.1 +version: 5.41.2 home: https://grafana.github.io/helm-charts sources: - https://github.com/grafana/loki diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md index 3caad398ada44..e1da365b5bf92 100644 --- a/production/helm/loki/README.md +++ b/production/helm/loki/README.md @@ -1,6 +1,6 @@ # loki -![Version: 5.41.1](https://img.shields.io/badge/Version-5.41.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.3](https://img.shields.io/badge/AppVersion-2.9.3-informational?style=flat-square) +![Version: 5.41.2](https://img.shields.io/badge/Version-5.41.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.3](https://img.shields.io/badge/AppVersion-2.9.3-informational?style=flat-square) Helm chart for Grafana Loki in simple, scalable mode diff --git a/production/helm/loki/templates/ciliumnetworkpolicy.yaml b/production/helm/loki/templates/ciliumnetworkpolicy.yaml new file mode 100644 index 0000000000000..5633ae1945206 --- /dev/null +++ b/production/helm/loki/templates/ciliumnetworkpolicy.yaml @@ -0,0 +1,184 @@ +{{- if and (.Values.networkPolicy.enabled) (eq .Values.networkPolicy.flavor "cilium") }} +--- +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + name: {{ include "loki.name" . }}-namespace-only + namespace: {{ $.Release.Namespace }} + labels: + {{- include "loki.labels" . | nindent 4 }} +spec: + endpointSelector: {} + egress: + - toEndpoints: + - {} + ingress: + - fromEndpoints: + - {} + +--- +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + name: {{ include "loki.name" . }}-egress-dns + namespace: {{ $.Release.Namespace }} + labels: + {{- include "loki.labels" . 
| nindent 4 }} +spec: + endpointSelector: + matchLabels: + {{- include "loki.selectorLabels" . | nindent 6 }} + egress: + - toPorts: + - ports: + - port: dns + protocol: UDP + toEndpoints: + - namespaceSelector: {} + +--- +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + name: {{ include "loki.name" . }}-ingress + namespace: {{ $.Release.Namespace }} + labels: + {{- include "loki.labels" . | nindent 4 }} +spec: + endpointSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + {{- if .Values.gateway.enabled }} + - gateway + {{- else }} + - read + - write + {{- end }} + matchLabels: + {{- include "loki.selectorLabels" . | nindent 6 }} + ingress: + - toPorts: + - port: http + protocol: TCP + {{- if .Values.networkPolicy.ingress.namespaceSelector }} + fromEndpoints: + - matchLabels: + {{- toYaml .Values.networkPolicy.ingress.namespaceSelector | nindent 8 }} + {{- if .Values.networkPolicy.ingress.podSelector }} + {{- toYaml .Values.networkPolicy.ingress.podSelector | nindent 8 }} + {{- end }} + {{- end }} + +--- +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + name: {{ include "loki.name" . }}-ingress-metrics + namespace: {{ $.Release.Namespace }} + labels: + {{- include "loki.labels" . | nindent 4 }} +spec: + endpointSelector: + matchLabels: + {{- include "loki.selectorLabels" . | nindent 6 }} + ingress: + - toPorts: + - port: http-metrics + protocol: TCP + {{- if .Values.networkPolicy.metrics.cidrs }} + {{- range $cidr := .Values.networkPolicy.metrics.cidrs }} + toCIDR: + - {{ $cidr }} + {{- end }} + {{- if .Values.networkPolicy.metrics.namespaceSelector }} + fromEndpoints: + - matchLabels: + {{- toYaml .Values.networkPolicy.metrics.namespaceSelector | nindent 8 }} + {{- if .Values.networkPolicy.metrics.podSelector }} + {{- toYaml .Values.networkPolicy.metrics.podSelector | nindent 8 }} + {{- end }} + {{- end }} + {{- end }} + +--- +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + name: {{ include "loki.name" . }}-egress-alertmanager + namespace: {{ $.Release.Namespace }} + labels: + {{- include "loki.labels" . | nindent 4 }} +spec: + endpointSelector: + matchLabels: + {{- include "loki.backendSelectorLabels" . | nindent 6 }} + egress: + - toPorts: + - port: {{ .Values.networkPolicy.alertmanager.port }} + protocol: TCP + {{- if .Values.networkPolicy.alertmanager.namespaceSelector }} + toEndpoints: + - matchLabels: + {{- toYaml .Values.networkPolicy.alertmanager.namespaceSelector | nindent 8 }} + {{- if .Values.networkPolicy.alertmanager.podSelector }} + {{- toYaml .Values.networkPolicy.alertmanager.podSelector | nindent 8 }} + {{- end }} + {{- end }} + +{{- if .Values.networkPolicy.externalStorage.ports }} +--- +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + name: {{ include "loki.name" . }}-egress-external-storage + namespace: {{ $.Release.Namespace }} + labels: + {{- include "loki.labels" . | nindent 4 }} +spec: + endpointSelector: + matchLabels: + {{- include "loki.selectorLabels" . | nindent 6 }} + egress: + - toPorts: + {{- range $port := .Values.networkPolicy.externalStorage.ports }} + - port: {{ $port }} + protocol: TCP + {{- end }} + {{- if .Values.networkPolicy.externalStorage.cidrs }} + {{- range $cidr := .Values.networkPolicy.externalStorage.cidrs }} + toCIDR: + - {{ $cidr }} + {{- end }} + {{- end }} +{{- end }} + +{{- end }} + +{{- if .Values.networkPolicy.discovery.port }} +--- +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + name: {{ include "loki.name" . 
}}-egress-discovery + namespace: {{ $.Release.Namespace }} + labels: + {{- include "loki.labels" . | nindent 4 }} +spec: + endpointSelector: + matchLabels: + {{- include "loki.selectorLabels" . | nindent 6 }} + egress: + - toPorts: + - port: {{ .Values.networkPolicy.discovery.port }} + protocol: TCP + {{- if .Values.networkPolicy.discovery.namespaceSelector }} + toEndpoints: + - matchLabels: + {{- toYaml .Values.networkPolicy.discovery.namespaceSelector | nindent 8 }} + {{- if .Values.networkPolicy.discovery.podSelector }} + {{- toYaml .Values.networkPolicy.discovery.podSelector | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/production/helm/loki/templates/networkpolicy.yaml b/production/helm/loki/templates/networkpolicy.yaml index 4424d90db08d4..27c85280eb08c 100644 --- a/production/helm/loki/templates/networkpolicy.yaml +++ b/production/helm/loki/templates/networkpolicy.yaml @@ -1,4 +1,4 @@ -{{- if .Values.networkPolicy.enabled }} +{{- if and (.Values.networkPolicy.enabled) (eq .Values.networkPolicy.flavor "kubernetes") }} --- apiVersion: networking.k8s.io/v1 kind: NetworkPolicy diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml index 738cf6ea25ae7..e82967a4efb3b 100644 --- a/production/helm/loki/values.yaml +++ b/production/helm/loki/values.yaml @@ -1465,6 +1465,9 @@ gateway: networkPolicy: # -- Specifies whether Network Policies should be created enabled: false + # -- Specifies whether the policies created will be standard Network Policies (flavor: kubernetes) + # or Cilium Network Policies (flavor: cilium) + flavor: kubernetes metrics: # -- Specifies the Pods which are allowed to access the metrics port. # As this is cross-namespace communication, you also need the namespaceSelector.
helm
add cilium networkpolicies (#11425)
86f6c2eb8d549438df6628be4d1efacae5444831
2022-09-01 23:08:04
Gerard Vanloo
operator: Adding Lokistack Gateway Request Errors Alert (#6999)
false
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md index dd721d52f0c1a..d36dd519d4071 100644 --- a/operator/CHANGELOG.md +++ b/operator/CHANGELOG.md @@ -1,5 +1,6 @@ ## Main +- [6999](https://github.com/grafana/loki/pull/6999) **Red-GV**: Adding LokiStack Gateway alerts - [7000](https://github.com/grafana/loki/pull/7000) **xperimental**: Configure default node affinity for all pods - [6923](https://github.com/grafana/loki/pull/6923) **xperimental**: Reconcile owner reference for existing objects - [6907](https://github.com/grafana/loki/pull/6907) **Red-GV**: Adding valid subscription annotation to operator metadata diff --git a/operator/docs/lokistack/sop.md b/operator/docs/lokistack/sop.md index 3e1ff566c0035..9f2cdaa8b2e7b 100644 --- a/operator/docs/lokistack/sop.md +++ b/operator/docs/lokistack/sop.md @@ -34,8 +34,8 @@ A service(s) is failing to process at least 10% of all incoming requests. - Console access to the cluster - Edit access to the deployed operator and Loki namespace: - OpenShift - - `openshift-logging` - - `openshift-operators-redhat` + - `openshift-logging` (LokiStack) + - `openshift-operators-redhat` (Loki Operator) ### Steps @@ -46,6 +46,69 @@ A service(s) is failing to process at least 10% of all incoming requests. - `loki_ingester_wal_disk_full_failures_total` - `loki_ingester_wal_corruptions_total` +## LokiStack Write Request Errors + +### Impact + +The LokiStack Gateway component is unable to perform its duties for a number of write requests, resulting in potential loss of data. + +### Summary + +The LokiStack Gateway is failing to process at least 10% of all incoming write requests. + +### Severity + +`Critical` + +### Access Required + +- Console access to the cluster +- Edit access to the deployed operator and Loki namespace: + - OpenShift + - `openshift-logging` (LokiStack) + - `openshift-operators-redhat` (Loki Operator) + +### Steps + +- Ensure that the LokiStack Gateway component is ready and available +- Ensure that the `distributor`, `ingester`, and `index-gateway` components are ready and available +- Ensure that store services (`ingester`, `querier`, `index-gateway`, `compactor`) can communicate with backend storage +- Examine metrics for signs of failure + - WAL Complications + - `loki_ingester_wal_disk_full_failures_total` + - `loki_ingester_wal_corruptions_total` + +## LokiStack Read Request Errors + +### Impact + +The LokiStack Gateway component is unable to perform its duties for a number of query requests, resulting in a potential disruption. + +### Summary + +The LokiStack Gateway is failing to process at least 10% of all incoming query requests. + +### Severity + +`Critical` + +### Access Required + +- Console access to the cluster +- Edit access to the deployed operator and Loki namespace: + - OpenShift + - `openshift-logging` (LokiStack) + - `openshift-operators-redhat` (Loki Operator) + +### Steps + +- Ensure that the LokiStack Gateway component is ready and available +- Ensure that the `query-frontend`, `querier`, `ingester`, and `index-gateway` components are ready and available +- Ensure that store services (`ingester`, `querier`, `index-gateway`, `compactor`) can communicate with backend storage +- Examine metrics for signs of failure + - WAL Complications + - `loki_ingester_wal_disk_full_failures_total` + - `loki_ingester_wal_corruptions_total` ## Loki Request Panics @@ -66,8 +129,8 @@ A service(s) has crashed. 
- Console access to the cluster - Edit access to the deployed operator and Loki namespace: - OpenShift - - `openshift-logging` - - `openshift-operators-redhat` + - `openshift-logging` (LokiStack) + - `openshift-operators-redhat` (Loki Operator) ### Steps diff --git a/operator/internal/manifests/internal/alerts/build.go b/operator/internal/manifests/internal/alerts/build.go index b1bf76558af16..4e860664f2266 100644 --- a/operator/internal/manifests/internal/alerts/build.go +++ b/operator/internal/manifests/internal/alerts/build.go @@ -13,7 +13,7 @@ import ( const ( // RunbookDefaultURL is the default url for the documentation of the Prometheus alerts - RunbookDefaultURL = "https://github.com/grafana/loki/tree/main/operator/docs/alerts.md" + RunbookDefaultURL = "https://github.com/grafana/loki/blob/main/operator/docs/lokistack/sop.md" ) var ( diff --git a/operator/internal/manifests/internal/alerts/prometheus-alerts.yaml b/operator/internal/manifests/internal/alerts/prometheus-alerts.yaml index 8635c347e9e3e..0e408200b16a0 100644 --- a/operator/internal/manifests/internal/alerts/prometheus-alerts.yaml +++ b/operator/internal/manifests/internal/alerts/prometheus-alerts.yaml @@ -10,21 +10,67 @@ groups: runbook_url: "[[ .RunbookURL ]]#Loki-Request-Errors" expr: | sum( - rate( - loki_request_duration_seconds_count{status_code=~"5.."}[1m] - ) + rate( + loki_request_duration_seconds_count{status_code=~"5.."}[1m] + ) ) by (namespace, job, route) / sum( - rate( - loki_request_duration_seconds_count[1m] - ) + rate( + loki_request_duration_seconds_count[1m] + ) ) by (namespace, job, route) * 100 > 10 for: 15m labels: severity: critical + - alert: LokiStackWriteRequestErrors + annotations: + message: |- + {{ printf "%.2f" $value }}% of write requests from {{ $labels.job }} are returned with server errors. + summary: "At least 10% of write requests to the lokistack-gateway are responded with 5xx server errors." + runbook_url: "[[ .RunbookURL ]]#LokiStack-Write-Request-Errors" + expr: | + sum( + rate( + http_requests_total{code=~"5..", group="logsv1", handler="push"}[1m] + ) + ) by (namespace, job, tenant) + / + sum( + rate( + http_requests_total{group="logsv1", handler="push"}[1m] + ) + ) by (namespace, job, tenant) + * 100 + > 10 + for: 15m + labels: + severity: critical + - alert: LokiStackReadRequestErrors + annotations: + message: |- + {{ printf "%.2f" $value }}% of query requests from {{ $labels.job }} are returned with server errors. + summary: "At least 10% of query requests to the lokistack-gateway are responded with 5xx server errors." 
+ runbook_url: "[[ .RunbookURL ]]#LokiStack-Read-Request-Errors" + expr: | + sum( + rate( + http_requests_total{code=~"5..", group="logsv1", handler=~"query|query_range|label|labels|label_values"}[1m] + ) + ) by (namespace, job, tenant) + / + sum( + rate( + http_requests_total{group="logsv1", handler=~"query|query_range|label|labels|label_values"}[1m] + ) + ) by (namespace, job, tenant) + * 100 + > 10 + for: 15m + labels: + severity: critical - alert: LokiRequestPanics annotations: message: |- @@ -33,9 +79,9 @@ groups: runbook_url: "[[ .RunbookURL ]]#Loki-Request-Panics" expr: | sum( - increase( - loki_panic_total[10m] - ) + increase( + loki_panic_total[10m] + ) ) by (namespace, job) > 0 labels: diff --git a/operator/internal/manifests/internal/alerts/testdata/test.yaml b/operator/internal/manifests/internal/alerts/testdata/test.yaml index 295c97fe2c11f..6de3c7266fba8 100644 --- a/operator/internal/manifests/internal/alerts/testdata/test.yaml +++ b/operator/internal/manifests/internal/alerts/testdata/test.yaml @@ -11,13 +11,20 @@ tests: values: '1+1x20' - series: 'loki_request_duration_seconds_count{status_code="200", namespace="my-ns", job="ingester", route="my-route"}' values: '1+3x20' + - series: 'http_requests_total{code="500", namespace="my-ns", job="gateway", handler="push", group="logsv1"}' + values: '1+1x20' + - series: 'http_requests_total{code="200", namespace="my-ns", job="gateway", handler="push", group="logsv1"}' + values: '1+3x20' + - series: 'http_requests_total{code="500", namespace="my-ns", job="gateway", handler="query", group="logsv1"}' + values: '1+1x20' + - series: 'http_requests_total{code="200", namespace="my-ns", job="gateway", handler="query", group="logsv1"}' + values: '1+3x20' - series: 'loki_panic_total{namespace="my-ns", job="ingester"}' values: '0 1 1 2+0x10' # Unit test for alerting rules. alert_rule_test: - # --------- LokiRequestErrors --------- - eval_time: 16m alertname: LokiRequestErrors exp_alerts: @@ -30,8 +37,28 @@ tests: summary: "At least 10% of requests are responded by 5xx server errors." message: "ingester my-route is experiencing 25.00% errors." runbook_url: "[[ .RunbookURL ]]#Loki-Request-Errors" - - # --------- LokiRequestPanics --------- + - eval_time: 16m + alertname: LokiStackWriteRequestErrors + exp_alerts: + - exp_labels: + namespace: my-ns + job: gateway + severity: critical + exp_annotations: + summary: "At least 10% of write requests to the lokistack-gateway are responded with 5xx server errors." + message: "25.00% of write requests from gateway are returned with server errors." + runbook_url: "[[ .RunbookURL ]]#LokiStack-Write-Request-Errors" + - eval_time: 16m + alertname: LokiStackReadRequestErrors + exp_alerts: + - exp_labels: + namespace: my-ns + job: gateway + severity: critical + exp_annotations: + summary: "At least 10% of query requests to the lokistack-gateway are responded with 5xx server errors." + message: "25.00% of query requests from gateway are returned with server errors." + runbook_url: "[[ .RunbookURL ]]#LokiStack-Read-Request-Errors" - eval_time: 10m alertname: LokiRequestPanics exp_alerts:
operator
Adding Lokistack Gateway Request Errors Alert (#6999)
d0a285926b7257d54cf948ba644c619a4b49a871
2024-05-29 15:24:59
benclive
feat: Increase drain max depth from 8 -> 30 (#13063)
false
diff --git a/pkg/pattern/drain/drain.go b/pkg/pattern/drain/drain.go index 31932832f7010..c3076386c4d65 100644 --- a/pkg/pattern/drain/drain.go +++ b/pkg/pattern/drain/drain.go @@ -130,7 +130,7 @@ func DefaultConfig() *Config { // > message are more likely to be constants. Specifically, Drain // > selects the next internal node by the tokens in the beginning // > positions of the log message - LogClusterDepth: 8, + LogClusterDepth: 30, // SimTh is basically a ratio of matching/total in the cluster. // Cluster tokens: "foo <*> bar fred" // Log line: "foo bar baz qux" diff --git a/pkg/pattern/drain/drain_benchmark_test.go b/pkg/pattern/drain/drain_benchmark_test.go new file mode 100644 index 0000000000000..cf9378025102b --- /dev/null +++ b/pkg/pattern/drain/drain_benchmark_test.go @@ -0,0 +1,78 @@ +package drain + +import ( + "bufio" + "os" + "testing" + + "github.com/stretchr/testify/require" +) + +func BenchmarkDrain_TrainExtractsPatterns(b *testing.B) { + tests := []struct { + name string + drain *Drain + inputFile string + }{ + { + name: `Patterns for agent logfmt logs`, + inputFile: `testdata/agent-logfmt.txt`, + }, + { + name: `Patterns for ingester logfmt logs`, + inputFile: `testdata/ingester-logfmt.txt`, + }, + { + name: `Patterns for Drone json logs`, + inputFile: `testdata/drone-json.txt`, + }, + { + name: "Patterns for distributor logfmt logs", + inputFile: "testdata/distributor-logfmt.txt", + }, + { + name: "Patterns for journald logs", + inputFile: "testdata/journald.txt", + }, + { + name: "Patterns for kafka logs", + inputFile: "testdata/kafka.txt", + }, + { + name: "Patterns for kubernetes logs", + inputFile: "testdata/kubernetes.txt", + }, + { + name: "Patterns for vault logs", + inputFile: "testdata/vault.txt", + }, + { + name: "Patterns for calico logs", + inputFile: "testdata/calico.txt", + }, + } + + for _, tt := range tests { + b.Run(tt.name, func(b *testing.B) { + file, err := os.Open(tt.inputFile) + require.NoError(b, err) + defer file.Close() + + scanner := bufio.NewScanner(file) + var lines []string + for scanner.Scan() { + line := scanner.Text() + lines = append(lines, line) + } + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, line := range lines { + drain := New(DefaultConfig(), nil) + drain.Train(line, 0) + } + } + }) + } +} diff --git a/pkg/pattern/drain/drain_test.go b/pkg/pattern/drain/drain_test.go index e9709aed3fec4..754ac54b3fb71 100644 --- a/pkg/pattern/drain/drain_test.go +++ b/pkg/pattern/drain/drain_test.go @@ -2,16 +2,22 @@ package drain import ( "bufio" + "fmt" "os" "testing" "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" "github.com/grafana/loki/v3/pkg/logql/log/pattern" ) func TestDrain_TrainExtractsPatterns(t *testing.T) { t.Parallel() + + // Set this so the test will print the patterns found, in string slice format for easy copy-paste + outputPatternsForTestUpdate := false + tests := []struct { name string drain *Drain @@ -24,20 +30,38 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { drain: New(DefaultConfig(), nil), inputFile: `testdata/agent-logfmt.txt`, patterns: []string{ - "ts=2024-04-16T15:10:43.192290389Z caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg=\"Adding target\" key=\"/var/log/pods/*19a1cce8-5f04-46e0-a124-292b0dd9b343/testcoordinator/*.log:{batch_kubernetes_io_controller_uid=\\\"25ec5edf-f78e-468b-b6f3-3b9685f0cc8f\\\", batch_kubernetes_io_job_name=\\\"testcoordinator-job-2665838\\\", container=\\\"testcoordinator\\\", 
controller_uid=\\\"25ec5edf-f78e-468b-b6f3-3b9685f0cc8f\\\", job=\\\"k6-cloud/testcoordinator\\\", job_name=\\\"testcoordinator-job-2665838\\\", name=\\\"testcoordinator\\\", namespace=\\\"k6-cloud\\\", pod=\\\"testcoordinator-job-2665838-9g8ds\\\"}\"", - "<_> <_> level=info component=logs logs_config=default <_> target\" <_> <_> <_> <_> <_> <_>", - "<_> caller=filetarget.go:192 level=info component=logs logs_config=default msg=\"filetarget: watcher closed, tailer stopped, positions saved\" <_>", - "<_> caller=tailer.go:164 level=info component=logs logs_config=default component=tailer msg=\"tail routine: tail channel closed, stopping tailer\" <_> reason=null", - "<_> caller=tailer.go:207 level=info component=logs logs_config=default component=tailer msg=\"skipping update of position for a file which does not currently exist\" <_>", - "<_> caller=log.go:168 component=logs logs_config=default level=info msg=\"Successfully reopened <_>", - "<_> caller=logfmt.go:139 level=error component=logs logs_config=default component=file_pipeline component=stage type=logfmt msg=\"failed to decode logfmt\" err=\"bufio.Scanner: token too long\"", - "<_> caller=filetargetmanager.go:181 level=info component=logs logs_config=default msg=\"received file watcher event\" <_> op=CREATE", - "<_> caller=logfmt.go:139 level=error component=logs logs_config=default component=file_pipeline component=stage type=logfmt msg=\"failed to decode logfmt\" err=\"logfmt syntax error at pos <_> on line 1: unexpected '\\\"'\"", - "<_> <_> level=info component=logs logs_config=default <_> <_> <_> <_> <_>", - "<_> caller=log.go:168 component=logs logs_config=default level=info <_> <_> <_> <_> <_>", - "<_> caller=filetarget.go:313 level=info component=logs logs_config=default msg=\"watching new directory\" <_>", - "<_> <_> level=info component=logs logs_config=default <_> target\" <_> conprof=\\\"true\\\", <_> <_> job=\\\"hosted-grafana/grafana\\\", name=\\\"grafana\\\", namespace=\\\"hosted-grafana\\\", <_> plan=\\\"free\\\", <_> <_> <_> <_> <_>", - "<_> level=info msg=\"finished node evaluation\" controller_id=module.http.cloudwatch_pipelines <_> <_>", + `<_> caller=filetarget.go:192 level=info component=logs logs_config=default msg="filetarget: watcher closed, tailer stopped, positions saved" <_>`, + `<_> caller=filetarget.go:313 level=info component=logs logs_config=default msg="watching new directory" <_>`, + `<_> caller=filetarget.go:326 level=info component=logs logs_config=default msg="removing directory from watcher" <_>`, + `<_> caller=filetargetmanager.go:181 level=info component=logs logs_config=default msg="received file watcher event" <_> op=CREATE`, + `<_> caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg="Adding target" <_> container=\"kube-proxy\", <_> namespace=\"kube-system\", pod=\"kube-proxy-gke-ops-us-east-0-main-n2s32-1-1dd39c-32ae1dde-hmhw\", tier=\"node\"}"`, + `<_> caller=filetargetmanager.go:397 level=info component=logs logs_config=default msg="Removing target" <_> conprof=\"true\", container=\"grafana\", <_> job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", <_> plan=\"free\", <_> <_> <_> <_> <_>`, + `<_> caller=filetargetmanager.go:397 level=info component=logs logs_config=default msg="Removing target" <_> conprof=\"true\", container=\"hg-plugins\", <_> job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", <_> plan=\"free\", <_> <_> <_> <_> <_>`, + `<_> caller=filetargetmanager.go:397 level=info component=logs 
logs_config=default msg="Removing target" <_> conprof=\"true\", container=\"hgrun\", <_> job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", <_> plan=\"free\", <_> <_> <_> <_> <_>`, + `<_> caller=filetargetmanager.go:397 level=info component=logs logs_config=default msg="Removing target" <_> conprof=\"true\", container=\"hosted-grafana-security\", <_> job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", <_> plan=\"free\", <_> <_> <_> <_> <_>`, + `<_> caller=log.go:168 component=logs logs_config=default level=info msg="Re-opening moved/deleted file <_> ..."`, + `<_> caller=log.go:168 component=logs logs_config=default level=info msg="Seeked <_> - &{Offset:0 Whence:0}"`, + `<_> caller=log.go:168 component=logs logs_config=default level=info msg="Successfully reopened <_>`, + `<_> caller=log.go:168 component=logs logs_config=default level=info msg="Waiting for <_> to appear..."`, + `<_> caller=logfmt.go:139 level=error component=logs logs_config=default component=file_pipeline component=stage type=logfmt msg="failed to decode logfmt" err="bufio.Scanner: token too long"`, + `<_> caller=logfmt.go:139 level=error component=logs logs_config=default component=file_pipeline component=stage type=logfmt msg="failed to decode logfmt" err="logfmt syntax error at pos <_> on line 1: unexpected '\"'"`, + `<_> caller=tailer.go:118 level=info component=logs logs_config=default component=tailer msg="position timer: exited" <_>`, + `<_> caller=tailer.go:147 level=info component=logs logs_config=default component=tailer msg="tail routine: started" <_>`, + `<_> caller=tailer.go:155 level=info component=logs logs_config=default component=tailer msg="tail routine: exited" <_>`, + `<_> caller=tailer.go:164 level=info component=logs logs_config=default component=tailer msg="tail routine: tail channel closed, stopping tailer" <_> reason=null`, + `<_> caller=tailer.go:207 level=info component=logs logs_config=default component=tailer msg="skipping update of position for a file which does not currently exist" <_>`, + `<_> caller=tailer.go:245 level=info component=logs logs_config=default component=tailer msg="stopped tailing file" <_>`, + `<_> level=info msg="finished node evaluation" controller_id=module.http.cloudwatch_pipelines <_> <_>`, + `ts=2024-04-16T15:10:42.556278698Z caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg="Adding target" key="/var/log/pods/*b92ee988-5c26-4c64-bba3-ff6a01723759/grafana/*.log:{app=\"grafana\", conprof=\"true\", container=\"grafana\", instanceId=\"i1111\", job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=\"orgnamehere\", plan=\"free\", pod=\"orgnamehere-grafana-7c65678f86-9zhlb\", pod_template_hash=\"7c65678f86\", resource_version=\"143638246\", slug=\"orgnamehere\", stackId=\"866772\"}"`, + `ts=2024-04-16T15:10:42.556706613Z caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg="Adding target" key="/var/log/pods/*b92ee988-5c26-4c64-bba3-ff6a01723759/hgrun/*.log:{app=\"grafana\", conprof=\"true\", container=\"hgrun\", instanceId=\"i1111\", job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=\"orgnamehere\", plan=\"free\", pod=\"orgnamehere-grafana-7c65678f86-9zhlb\", pod_template_hash=\"7c65678f86\", resource_version=\"143638246\", slug=\"orgnamehere\", stackId=\"866772\"}"`, + `ts=2024-04-16T15:10:42.556930066Z caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg="Adding 
target" key="/var/log/pods/*b92ee988-5c26-4c64-bba3-ff6a01723759/hg-plugins/*.log:{app=\"grafana\", conprof=\"true\", container=\"hg-plugins\", instanceId=\"i1111\", job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=\"orgnamehere\", plan=\"free\", pod=\"orgnamehere-grafana-7c65678f86-9zhlb\", pod_template_hash=\"7c65678f86\", resource_version=\"143638246\", slug=\"orgnamehere\", stackId=\"866772\"}"`, + `ts=2024-04-16T15:10:42.557102408Z caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg="Adding target" key="/var/log/pods/*b92ee988-5c26-4c64-bba3-ff6a01723759/hosted-grafana-security/*.log:{app=\"grafana\", conprof=\"true\", container=\"hosted-grafana-security\", instanceId=\"i1111\", job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=\"orgnamehere\", plan=\"free\", pod=\"orgnamehere-grafana-7c65678f86-9zhlb\", pod_template_hash=\"7c65678f86\", resource_version=\"143638246\", slug=\"orgnamehere\", stackId=\"866772\"}"`, + `ts=2024-04-16T15:10:43.192290389Z caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg="Adding target" key="/var/log/pods/*19a1cce8-5f04-46e0-a124-292b0dd9b343/testcoordinator/*.log:{batch_kubernetes_io_controller_uid=\"25ec5edf-f78e-468b-b6f3-3b9685f0cc8f\", batch_kubernetes_io_job_name=\"testcoordinator-job-2665838\", container=\"testcoordinator\", controller_uid=\"25ec5edf-f78e-468b-b6f3-3b9685f0cc8f\", job=\"k6-cloud/testcoordinator\", job_name=\"testcoordinator-job-2665838\", name=\"testcoordinator\", namespace=\"k6-cloud\", pod=\"testcoordinator-job-2665838-9g8ds\"}"`, + `ts=2024-04-16T15:10:43.551543875Z caller=filetargetmanager.go:397 level=info component=logs logs_config=default msg="Removing target" key="/var/log/pods/*35649bfd-52ff-4281-9294-5f65fd5a89fc/marketplaces-api/*.log:{container=\"marketplaces-api\", job=\"grafana-com/marketplaces-api\", name=\"marketplaces-api\", namespace=\"grafana-com\", pod=\"marketplaces-api-f67ff7567-gqrvb\", pod_template_hash=\"f67ff7567\"}"`, + `ts=2024-04-16T15:10:43.869370539Z caller=filetargetmanager.go:397 level=info component=logs logs_config=default msg="Removing target" key="/var/log/pods/*37ae8d4e-1a76-40f2-be88-2251a3528a0b/hosted-grafana-security/*.log:{app=\"grafana\", conprof=\"true\", container=\"hosted-grafana-security\", instanceId=\"i2222\", job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=\"someorg\", plan=\"free\", pod=\"someorg-grafana-666bd48cf9-7zrtv\", pod_template_hash=\"666bd48cf9\", resource_version=\"167212086\", slug=\"someorg\", stackId=\"444444\"}"`, + `ts=2024-04-16T15:10:43.869672113Z caller=filetargetmanager.go:397 level=info component=logs logs_config=default msg="Removing target" key="/var/log/pods/*37ae8d4e-1a76-40f2-be88-2251a3528a0b/hgrun/*.log:{app=\"grafana\", conprof=\"true\", container=\"hgrun\", instanceId=\"i2222\", job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=\"someorg\", plan=\"free\", pod=\"someorg-grafana-666bd48cf9-7zrtv\", pod_template_hash=\"666bd48cf9\", resource_version=\"167212086\", slug=\"someorg\", stackId=\"444444\"}"`, + `ts=2024-04-16T15:10:43.869833185Z caller=filetargetmanager.go:397 level=info component=logs logs_config=default msg="Removing target" key="/var/log/pods/*37ae8d4e-1a76-40f2-be88-2251a3528a0b/grafana/*.log:{app=\"grafana\", conprof=\"true\", container=\"grafana\", instanceId=\"i2222\", job=\"hosted-grafana/grafana\", name=\"grafana\", 
namespace=\"hosted-grafana\", org=\"someorg\", plan=\"free\", pod=\"someorg-grafana-666bd48cf9-7zrtv\", pod_template_hash=\"666bd48cf9\", resource_version=\"167212086\", slug=\"someorg\", stackId=\"444444\"}"`, + `ts=2024-04-16T15:10:43.870016638Z caller=filetargetmanager.go:397 level=info component=logs logs_config=default msg="Removing target" key="/var/log/pods/*37ae8d4e-1a76-40f2-be88-2251a3528a0b/hg-plugins/*.log:{app=\"grafana\", conprof=\"true\", container=\"hg-plugins\", instanceId=\"i2222\", job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=\"someorg\", plan=\"free\", pod=\"someorg-grafana-666bd48cf9-7zrtv\", pod_template_hash=\"666bd48cf9\", resource_version=\"167212086\", slug=\"someorg\", stackId=\"444444\"}"`, }, }, { @@ -46,9 +70,9 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { drain: New(DefaultConfig(), nil), inputFile: `testdata/ingester-logfmt.txt`, patterns: []string{ - "<_> caller=head.go:216 level=debug tenant=987678 msg=\"profile is empty after delta computation\" metricName=memory", - "ts=2024-04-17T09:52:46.363974185Z caller=http.go:194 level=debug traceID=1b48f5156a61ca69 msg=\"GET /debug/pprof/delta_mutex (200) 1.161082ms\"", - "<_> caller=http.go:194 level=debug <_> <_> msg=\"POST /ingester.v1.IngesterService/Push (200) <_>", // A perfect log line: Abstracted the variable part but kept the constants. + `<_> caller=head.go:216 level=debug tenant=987678 msg="profile is empty after delta computation" metricName=memory`, + `<_> caller=http.go:194 level=debug <_> <_> msg="POST /ingester.v1.IngesterService/Push (200) <_>`, + `ts=2024-04-17T09:52:46.363974185Z caller=http.go:194 level=debug traceID=1b48f5156a61ca69 msg="GET /debug/pprof/delta_mutex (200) 1.161082ms"`, }, }, { @@ -57,41 +81,41 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { drain: New(DefaultConfig(), nil), inputFile: `testdata/drone-json.txt`, patterns: []string{ - "<_> capacity <_>", - "<_> capacity changes <_>", - "{\"id\":\"D4Oh1ivB6cdLWa08\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:48:52Z\"}", - "{\"id\":\"q62wCcIkEOueqFKF\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T15:03:28Z\"}", - "{\"id\":\"m6SpYHzdXrDAFqDR\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T15:02:58Z\"}", - "{\"id\":\"T0I8Dsnw3uSi3Gal\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T15:02:28Z\"}", - "{\"id\":\"9eA72xOtx8kzMhXn\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T15:01:58Z\"}", - "{\"id\":\"pet7QVfO1yE8fk56\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T15:01:28Z\"}", - 
"{\"id\":\"15eSzaEG0enf86Kl\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T15:00:57Z\"}", - "{\"id\":\"JO1OT5ADoNA8NYqr\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T15:00:27Z\"}", - "{\"id\":\"Xz2OCJhgeBSRFyoN\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:59:57Z\"}", - "{\"id\":\"pPc2ORUhHAhFgBg3\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:59:27Z\"}", - "{\"id\":\"4G6Srn6lSwzYrx19\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:58:57Z\"}", - "{\"id\":\"1Lu90T1fWzsWOKlc\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:58:27Z\"}", - "{\"id\":\"4XjwwNoOwZFaWePQ\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:57:57Z\"}", - "{\"id\":\"IQy23J3NON0BV10V\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:57:26Z\"}", - "{\"id\":\"FQ8wCQfaR9W387cH\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:56:56Z\"}", - "{\"id\":\"Hhwn7ecXjxF67DG6\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:56:26Z\"}", - "{\"id\":\"luflyGZvZnLzhQEH\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:55:56Z\"}", - "{\"id\":\"q20GZcvyzMwrTGx5\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:55:26Z\"}", - "{\"id\":\"3K61Yf6ImKYexoFx\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:54:56Z\"}", - "{\"id\":\"SmbOO0l5aADX9BaQ\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:54:23Z\"}", - 
"{\"id\":\"96TvvsMzSkkaW8oW\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:53:53Z\"}", - "{\"id\":\"C7aYn8cb4NCrkkYI\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:53:23Z\"}", - "{\"id\":\"CMG7ZwwYqNPBonAn\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:52:53Z\"}", - "{\"id\":\"focV9BzODwRbWwKE\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:52:23Z\"}", - "{\"id\":\"HphRnJOM8uYohf1p\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:51:53Z\"}", - "{\"id\":\"m3n8GndhG45uGIQA\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:51:23Z\"}", - "{\"id\":\"nTO38tWtnvRWRl1G\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:50:52Z\"}", - "{\"id\":\"5qEIzErDfiALVPAN\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:50:22Z\"}", - "{\"id\":\"q61oHTtF4MMiQVGH\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:49:52Z\"}", - "{\"id\":\"4rNxIlhDKxGgzBHe\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:49:22Z\"}", - "<_> server <_>", - "<_> unfinished <_>", - "<_> <_> (flow; linux; helm)\"}", + `<_> <_> (flow; linux; helm)"}`, + `<_> capacity <_>`, + `<_> capacity changes <_>`, + `<_> server <_>`, + `<_> unfinished <_>`, + `{"id":"15eSzaEG0enf86Kl","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T15:00:57Z"}`, + `{"id":"1Lu90T1fWzsWOKlc","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:58:27Z"}`, + `{"id":"3K61Yf6ImKYexoFx","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:54:56Z"}`, + `{"id":"4G6Srn6lSwzYrx19","level":"debug","max-pool":4,"min-pool":0,"msg":"check 
capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:58:57Z"}`, + `{"id":"4XjwwNoOwZFaWePQ","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:57:57Z"}`, + `{"id":"4rNxIlhDKxGgzBHe","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:49:22Z"}`, + `{"id":"5qEIzErDfiALVPAN","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:50:22Z"}`, + `{"id":"96TvvsMzSkkaW8oW","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:53:53Z"}`, + `{"id":"9eA72xOtx8kzMhXn","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T15:01:58Z"}`, + `{"id":"C7aYn8cb4NCrkkYI","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:53:23Z"}`, + `{"id":"CMG7ZwwYqNPBonAn","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:52:53Z"}`, + `{"id":"D4Oh1ivB6cdLWa08","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:48:52Z"}`, + `{"id":"FQ8wCQfaR9W387cH","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:56:56Z"}`, + `{"id":"Hhwn7ecXjxF67DG6","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:56:26Z"}`, + `{"id":"HphRnJOM8uYohf1p","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:51:53Z"}`, + `{"id":"IQy23J3NON0BV10V","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:57:26Z"}`, + `{"id":"JO1OT5ADoNA8NYqr","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T15:00:27Z"}`, + `{"id":"SmbOO0l5aADX9BaQ","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:54:23Z"}`, + `{"id":"T0I8Dsnw3uSi3Gal","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T15:02:28Z"}`, + `{"id":"Xz2OCJhgeBSRFyoN","level":"debug","max-pool":4,"min-pool":0,"msg":"check 
capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:59:57Z"}`, + `{"id":"focV9BzODwRbWwKE","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:52:23Z"}`, + `{"id":"luflyGZvZnLzhQEH","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:55:56Z"}`, + `{"id":"m3n8GndhG45uGIQA","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:51:23Z"}`, + `{"id":"m6SpYHzdXrDAFqDR","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T15:02:58Z"}`, + `{"id":"nTO38tWtnvRWRl1G","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:50:52Z"}`, + `{"id":"pPc2ORUhHAhFgBg3","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:59:27Z"}`, + `{"id":"pet7QVfO1yE8fk56","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T15:01:28Z"}`, + `{"id":"q20GZcvyzMwrTGx5","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:55:26Z"}`, + `{"id":"q61oHTtF4MMiQVGH","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:49:52Z"}`, + `{"id":"q62wCcIkEOueqFKF","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T15:03:28Z"}`, }, }, { @@ -107,90 +131,123 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { drain: New(DefaultConfig(), nil), inputFile: "testdata/journald.txt", patterns: []string{ - "2024-05-07T11:59:43.484606Z INFO ExtHandler ExtHandler Downloading agent manifest", - "<_> INFO TelemetryEventsCollector ExtHandler Collected 2 events for extension: Microsoft.Azure.Extensions.CustomScript", - "E0507 11:59:41.375655 4736 kuberuntime_manager.go:1256] container &Container{Name:ruler,Image:grafana/enterprise-metrics:v2.12.0,Command:[],Args:[-target=ruler -config.expand-env=true -config.file=/etc/mimir/mimir.yaml 
-distributor.remote-timeout=10s],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:grpc,HostPort:0,ContainerPort:9095,Protocol:TCP,HostIP:,},ContainerPort{Name:memberlist,HostPort:0,ContainerPort:7946,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:JAEGER_AGENT_HOST,Value:alloy-otlp.alloy-otlp.svc.cluster.local.,ValueFrom:nil,},EnvVar{Name:JAEGER_TAGS,Value:namespace=ge-metrics-federation,cluster=dev-us-central-0,ValueFrom:nil,},EnvVar{Name:JAEGER_SAMPLER_MANAGER_HOST_PORT,Value:http://alloy-otlp.alloy-otlp.svc.cluster.local.:5778/sampling,ValueFrom:nil,},EnvVar{Name:GOOGLE_APPLICATION_CREDENTIALS,Value:/var/secrets/google/credentials.json,ValueFrom:nil,},EnvVar{Name:AM_TOKEN,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:ruler-alertmanager-token,},Key:token,Optional:nil,},},},EnvVar{Name:JAEGER_REPORTER_MAX_QUEUE_SIZE,Value:1000,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{100 -3} {<nil>} 100m DecimalSI},memory: {{134217728 0} {<nil>} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:gcs-credentials,ReadOnly:false,MountPath:/var/secrets/google/,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:config,ReadOnly:false,MountPath:/etc/mimir,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:license,ReadOnly:false,MountPath:/license,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:runtime-config,ReadOnly:false,MountPath:/var/mimir,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:storage,ReadOnly:false,MountPath:/data,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:active-queries,ReadOnly:false,MountPath:/active-query-tracker,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-jtnbs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/ready,Port:{1 0 http-metrics},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:45,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod gem-mimir-ruler-5f56f7846b-fgxdm_ge-metrics-federation(07c06e21-137b-4fdd-b7d3-703f0a567720): CreateContainerConfigError: secret \"ruler-alertmanager-token\" not found", - "\tts=2024-05-07T11:59:32.025687537Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg=\"request", - "time=\"2024-05-07T11:59:38.484586527Z\" level=error msg=\"Failed to delete exec process \\\"d9e0a1867ce73695ad859f2b0a76fe8f5053db8a5e49142d747e53a445729bd4\\\" for container 
\\\"6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408\\\"\" error=\"ttrpc: closed: unknown\"", - "I0507 <_> <_> prober.go:107] \"Probe failed\" probeType=\"Readiness\" <_> <_> <_> probeResult=\"failure\" output=\"HTTP probe failed with statuscode: <_>", - "net_ratelimit: 2 callbacks suppressed", - "kauditd_printk_skb: <_> callbacks suppressed", - "Started cri-containerd-95bf586cd79d43120ff44582d4dbd2476de61744411f8515b9b2c527a41fd5d9.scope.", - "Removed slice libcontainer container kubepods-burstable-pod25cb986c_3d6c_4ed0_abf3_ee59ed6175f9.slice.", - "E0507 11:59:34.923938 3027 kuberuntime_manager.go:1261] container &Container{Name:mysqld-exporter,Image:prom/mysqld-exporter:v0.13.0,Command:[],Args:[--collect.info_schema.innodb_metrics],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:9104,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:MYSQL_USER,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:testcrossplane-user-exporter,},Key:username,Optional:nil,},},},EnvVar{Name:MYSQL_PASSWORD,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:testcrossplane-user-exporter,},Key:password,Optional:nil,},},},EnvVar{Name:MYSQL_HOST,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:testcrossplane-user-exporter,},Key:endpoint,Optional:nil,},},},EnvVar{Name:MYSQL_PORT,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:testcrossplane-user-exporter,},Key:port,Optional:nil,},},},EnvVar{Name:MYSQL_TLS_MODE,Value:preferred,ValueFrom:nil,},EnvVar{Name:DATA_SOURCE_NAME,Value:$(MYSQL_USER):$(MYSQL_PASSWORD)@tcp($(MYSQL_HOST):$(MYSQL_PORT))/?tls=$(MYSQL_TLS_MODE),ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dzx7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod testcrossplane-exporter-c67cfc58f-vbzl4_crossplane-playground(3d49134d-3378-4ec3-824c-5ff4ea2590a5): CreateContainerConfigError: secret \"testcrossplane-user-exporter\" not found", - "I0507 <_> 3224 <_> <_> <_> for volume <_> (UniqueName: <_> <_> <_> <_> <_> <_>", - "E0507 <_> <_> kuberuntime_manager.go:1256] container &Container{Name:ruler,Image:grafana/enterprise-metrics:v2.11.1,Command:[],Args:[-target=ruler -config.expand-env=true <_> {{100 -3} {<nil>} 100m DecimalSI},memory: {{134217728 0} {<nil>} <_> 0 
http-metrics},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:45,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod <_> CreateContainerConfigError: secret \"ruler-alertmanager-token\" not found", - "time=\"2024-05-07T11:59:34.707025668Z\" level=info msg=\"StopPodSandbox for \\\"c605ad2cdc74c6b5288f2532ad71cce81a28ef6965f97a89ff6609deb825553a\\\" returns successfully\"", - "time=\"2024-05-07T11:59:34.706960850Z\" level=info msg=\"TearDown network for sandbox \\\"c605ad2cdc74c6b5288f2532ad71cce81a28ef6965f97a89ff6609deb825553a\\\" successfully\"", - "time=\"2024-05-07T11:59:34.592084495Z\" level=info msg=\"Container to stop \\\"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\\\" must be in running or unknown state, current state \\\"CONTAINER_EXITED\\\"\"", - "time=\"2024-05-07T11:59:34.592005066Z\" level=info msg=\"StopPodSandbox for \\\"c605ad2cdc74c6b5288f2532ad71cce81a28ef6965f97a89ff6609deb825553a\\\"\"", - "time=\"2024-05-07T11:59:34.591282703Z\" level=info msg=\"StopContainer for \\\"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\\\" returns successfully\"", - "time=\"2024-05-07T11:59:34.520032214Z\" level=info msg=\"Stop container \\\"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\\\" with signal terminated\"", - "time=\"2024-05-07T11:59:34.519591759Z\" level=info msg=\"StopContainer for \\\"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\\\" with timeout 30 (s)\"", - "E0507 <_> <_> pod_workers.go:1300] \"Error syncing pod, skipping\" err=\"failed to \\\"StartContainer\\\" for \\\"grafana\\\" with ErrImagePull: \\\"[rpc error: code = NotFound desc = failed to pull and unpack image <_> failed to resolve reference <_> <_> not found, failed to pull and unpack image <_> failed to resolve reference <_> unexpected status from HEAD request to <_> 403 Forbidden]\\\"\" <_> <_>", - "\t\t\t\t\t\twhile [ \"$(pidof plugins-pause)\" = \"\" ]; do sleep 0.5; done;", - "\t\t\t\t\t\tln --force -s /proc/$(pidof hgrun-pause)/root/bin/hgrun /bin/hgrun;", - "\t\t\t\t\t\texec /bin/hgrun -log.level=debug launch -bundledPluginsManifest /proc/$(pidof plugins-pause)/root/manifest.json -bundledPluginsDir /proc/$(pidof <_> -profile-port=6060 <_> {{536870912 0} {<nil>} BinarySI},},Requests:ResourceList{cpu: {{26 -3} {<nil>} 26m DecimalSI},memory: {{293601280 0} {<nil>} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/api/health,Port:{0 80 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:10,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/bin/hgrun 
check],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/bin/hgrun drain -timeout 1m0s -waitTime 55s],},HTTPGet:nil,TCPSocket:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[SYS_PTRACE],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod <_> ErrImagePull: [rpc error: code = NotFound desc = failed to pull and unpack image <_> failed to resolve reference <_> <_> not found, failed to pull and unpack image <_> failed to resolve reference <_> unexpected status from HEAD request to <_> 403 Forbidden]", - "<_> level=error msg=\"PullImage <_> failed\" error=\"failed to pull and unpack image <_> failed to resolve reference <_> unexpected status from HEAD request to <_> 403 Forbidden\"", - "<_> level=error msg=\"PullImage <_> failed\" error=\"rpc error: code = NotFound desc = failed to pull and unpack image <_> failed to resolve reference <_> <_> not found\"", - "<_> level=info msg=\"trying next host - response was http.StatusNotFound\" host=us.gcr.io", - "I0507 11:59:34.518822 3224 kuberuntime_container.go:745] \"Killing container with a grace period\" pod=\"hosted-grafana/hosted-grafana-api-7b6bd9b949-9csb4\" podUID=\"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\" containerName=\"hgapi\" containerID=\"containerd://c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\" gracePeriod=30", - "E0507 <_> <_> prober.go:239] \"Unable to write all bytes from execInContainer\" err=\"short write\" <_> actualBytes=10240", - "I0507 11:59:33.422254 1537502 kubelet_getters.go:187] \"Pod status updated\" pod=\"kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-x28r\" status=\"Running\"", - "<_> level=info msg=\"RemoveContainer for <_> returns successfully\"", - "<_> level=info msg=\"RemoveContainer for <_>", - "E0507 <_> <_> prober.go:104] \"Probe errored\" err=\"rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task <_> not found: not found\" probeType=\"Readiness\" <_> <_> containerName=\"grafana\"", - "<_> level=error msg=\"ExecSync for <_> failed\" error=\"rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task <_> not found: not found\"", - " >", - "E0507 <_> <_> remote_image.go:180] \"PullImage from image service failed\" err=\"rpc error: code = Unknown desc = failed to pull and unpack image <_> failed to resolve reference <_> unexpected status from HEAD request to <_> 403 Forbidden\" <_>", - "E0507 <_> <_> remote_runtime.go:496] \"ExecSync cmd from runtime service failed\" err=\"rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task <_> not found: not found\" <_> cmd=[\"/bin/hgrun\",\"check\"]", - "<_> level=error caller=http_client.go:56 app=hgrun <_> msg=\"request failed\" error=\"Get \\\"http://127.0.0.1:3000/api/health\\\": dial tcp 127.0.0.1:3000: connect: 
connection refused\" method=GET url=http://127.0.0.1:3000/api/health", - "<_> level=warning msg=\"cleaning up after shim disconnected\" <_> namespace=k8s.io", - "<_> level=info msg=\"cleaning up dead shim\" namespace=k8s.io", - "<_> level=info msg=\"shim disconnected\" <_> namespace=k8s.io", - "I0507 11:59:32.409568 581823 cache.go:40] re-using cached key and certificate", - "I0507 <_> <_> <_> <_> (PLEG): <_> <_> <_> <_> <_>", - "<_> level=info msg=\"StartContainer for <_> returns successfully\"", - "audit: type=1400 <_> apparmor=\"DENIED\" operation=\"ptrace\" profile=\"cri-containerd.apparmor.d\" <_> comm=\"pidof\" requested_mask=\"read\" denied_mask=\"read\" peer=\"unconfined\"", - "AVC apparmor=\"DENIED\" operation=\"ptrace\" profile=\"cri-containerd.apparmor.d\" <_> comm=\"pidof\" requested_mask=\"read\" denied_mask=\"read\" peer=\"unconfined\"", - "Started libcontainer container <_>", - "<_> level=info msg=\"StartContainer for <_>", - "<_> level=info msg=\"CreateContainer within sandbox <_> for <_> returns container id <_>", - "<_> level=info msg=\"CreateContainer within sandbox <_> for container <_>", - "<_> level=info msg=\"PullImage <_>", - "<_> level=info msg=\"PullImage <_> returns image reference <_>", - "<_> level=info msg=\"Pulled image <_> with image id <_> repo tag <_> repo digest <_> size <_> in <_>", - "<_> level=info msg=\"ImageUpdate event <_> labels:{key:\\\"io.cri-containerd.image\\\" value:\\\"managed\\\"}\"", - "<_> level=info msg=\"stop pulling image <_> active requests=0, bytes <_>", - "<_> level=info msg=\"ImageCreate event <_> labels:{key:\\\"io.cri-containerd.image\\\" value:\\\"managed\\\"}\"", - "E0507 <_> <_> kuberuntime_manager.go:1256] container <_> set -e; while [ \"$(pidof hgrun-pause)\" = \"\" ]; do sleep 0.5; done;", - "I0507 <_> 6247 prober.go:107] \"Probe failed\" probeType=\"Readiness\" pod=\"grafana-agent/grafana-agent-helm-4\" podUID=\"c36c5200-1cd6-4093-893c-c022f91af996\" containerName=\"grafana-agent\" probeResult=\"failure\" output=\"Get \\\"http://10.0.99.125:3090/-/ready\\\": dial tcp 10.0.99.125:3090: connect: connection refused\"", - "<_> Consumed <_> CPU time.", - "<_> Deactivated successfully.", - "RCV: Reply message on eth0 from fe80::e9:7eff:fedf:3d37.", - "XMT: Renew on eth0, interval 9700ms.", - "PRC: Renewing lease on eth0.", - "I0507 <_> <_> prober.go:107] \"Probe failed\" probeType=\"Readiness\" <_> <_> containerName=\"grafana\" probeResult=\"failure\" output=<", - "I0507 <_> 2791 azure_credentials.go:220] <_> is not from ACR, return empty authentication", - "I0507 <_> <_> <_> \"Cleaned up orphaned pod volumes dir\" <_> <_>", - "XMT: Solicit on eth0, interval <_>", - "I0507 <_> <_> cache.go:40] re-using cached key and certificate", - "ll header: 00000000: 42 01 0a 80 00 <_> 42 01 0a 80 00 01 08 00", - "IPv4: martian source <_> from <_> on dev eth0", - "I0507 11:59:29.320184 1537502 kubelet_pods.go:906] \"Unable to retrieve pull secret, the image pull may not succeed.\" pod=\"logs-endpoint-dev-005/kafka-controller-0\" secret=\"\" err=\"secret \\\"not-needed\\\" not found\"", - "E0507 <_> <_> kuberuntime_manager.go:1256] container &Container{Name:pdc,Image:us.gcr.io/hosted-grafana/pdc:0.1.415,Command:[],Args:[-proxy.auth.ca-keys-dir=/var/run/secrets/pdc-certs -proxy.socks-server.addr=:10443 -proxy.ssh-server.addr=:2222 -proxy.use-socks-username-for-routing -proxy.api.http-address=:9182 -proxy.check-connpool-address-in-ring -memberlist.join=dns+gossip-ring.pdc.svc.cluster.local:7946 -api.http-address=:11443 -distributor.enabled=true 
-distributor.addr=:10444 -distributor.use-socks-username-for-routing -gateway.enabled=true -gateway.addr=:2244 -log.level=debug -certs.ca-private-key-file=/var/run/secrets/pdc-certs/ca.key -certs.ca-cert-file=/var/run/secrets/pdc-certs/ca.crt -certs.ca-pub-file=/var/run/secrets/pdc-certs/ca.pub -certs.cluster=local-k8s -shard-size=3 -graceful-shutdown-period=30s -enable-multiple-networks],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:socks,HostPort:0,ContainerPort:10443,Protocol:TCP,HostIP:,},ContainerPort{Name:ssh,HostPort:0,ContainerPort:2222,Protocol:TCP,HostIP:,},ContainerPort{Name:distributor,HostPort:0,ContainerPort:10444,Protocol:TCP,HostIP:,},ContainerPort{Name:gateway,HostPort:0,ContainerPort:2244,Protocol:TCP,HostIP:,},ContainerPort{Name:api,HostPort:0,ContainerPort:11443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {<nil>} 500m DecimalSI},memory: {{134217728 0} {<nil>} BinarySI},},Requests:ResourceList{cpu: {{250 -3} {<nil>} 250m DecimalSI},memory: {{67108864 0} {<nil>} <_> 11443 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:40,TimeoutSeconds:1,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/bin/sleep 5],},HTTPGet:nil,TCPSocket:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Never,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod <_> ErrImageNeverPull: Container image \"us.gcr.io/hosted-grafana/pdc:0.1.415\" is not present with pull policy of Never", - "I0507 <_> <_> kubelet_pods.go:906] \"Unable to retrieve pull secret, the image pull may not succeed.\" <_> secret=\"\" err=\"secret <_> not found\"", - "I0507 <_> 3224 operation_generator.go:888] UnmountVolume.TearDown succeeded for volume <_> (OuterVolumeSpecName: <_> pod \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\" (UID: \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\"). 
InnerVolumeSpecName <_> PluginName <_> VolumeGidValue \"\"", - "E0507 <_> <_> pod_workers.go:1300] \"Error syncing pod, skipping\" <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_>", - "E0507 <_> <_> pod_workers.go:1300] \"Error syncing pod, skipping\" err=\"failed to \\\"StartContainer\\\" for <_> with CrashLoopBackOff: \\\"back-off <_> restarting failed <_> <_> <_> <_>", - "I0507 <_> <_> <_> <_> <_> <_> <_> <_>", - "I0507 <_> <_> <_> \"SyncLoop <_> source=\"api\" <_>", - "<_> level=error msg=\"ContainerStatus for <_> failed\" error=\"rpc error: code = NotFound desc = an error occurred when try to find container <_> not found\"", - "I0507 <_> <_> scope.go:117] \"RemoveContainer\" <_>", - "E0507 <_> <_> remote_image.go:180] \"PullImage from image service failed\" err=\"rpc error: code = NotFound desc = failed to pull and unpack image <_> failed to resolve reference <_> <_> not found\" <_>", - "I0507 <_> <_> pod_container_deletor.go:53] \"DeleteContainer returned error\" <_> err=\"failed to get container status <_> rpc error: code = NotFound desc = an error occurred when try to find container <_> not found\"", - "E0507 <_> <_> pod_workers.go:1300] \"Error syncing pod, skipping\" err=\"failed to \\\"StartContainer\\\" for \\\"pdc\\\" with ErrImageNeverPull: \\\"Container image \\\\\\\"us.gcr.io/hosted-grafana/pdc:0.1.415\\\\\\\" is not present with pull policy of Never\\\"\" <_> <_>", - "E0507 <_> <_> remote_runtime.go:432] \"ContainerStatus from runtime service failed\" err=\"rpc error: code = NotFound desc = an error occurred when try to find container <_> not found\" <_>", + ` exec /bin/hgrun -log.level=debug launch -bundledPluginsManifest /proc/$(pidof plugins-pause)/root/manifest.json -bundledPluginsDir /proc/$(pidof <_> -profile-port=6060 <_> {{536870912 0} {<nil>} BinarySI},},Requests:ResourceList{cpu: {{26 -3} {<nil>} 26m DecimalSI},memory: {{293601280 0} {<nil>} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/api/health,Port:{0 80 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:10,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/bin/hgrun check],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/bin/hgrun drain -timeout 1m0s -waitTime 55s],},HTTPGet:nil,TCPSocket:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[SYS_PTRACE],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod <_> ErrImagePull: [rpc error: code = NotFound desc = failed to pull and unpack image <_> failed to resolve reference <_> <_> not found, failed to pull and unpack image <_> failed to resolve reference <_> unexpected status from HEAD request to <_> 403 Forbidden]`, + ` ln 
--force -s /proc/$(pidof hgrun-pause)/root/bin/hgrun /bin/hgrun;`, + ` while [ "$(pidof plugins-pause)" = "" ]; do sleep 0.5; done;`, + ` ts=2024-05-07T11:59:32.025687537Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request`, + ` >`, + `2024-05-07T11:59:43.484606Z INFO ExtHandler ExtHandler Downloading agent manifest`, + `<_> Consumed <_> CPU time.`, + `<_> Deactivated successfully.`, + `<_> INFO TelemetryEventsCollector ExtHandler Collected 2 events for extension: Microsoft.Azure.Extensions.CustomScript`, + `<_> level=error caller=http_client.go:56 app=hgrun <_> msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health`, + `<_> level=error msg="ContainerStatus for <_> failed" error="rpc error: code = NotFound desc = an error occurred when try to find container <_> not found"`, + `<_> level=error msg="ExecSync for <_> failed" error="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task <_> not found: not found"`, + `<_> level=error msg="PullImage <_> failed" error="failed to pull and unpack image <_> failed to resolve reference <_> unexpected status from HEAD request to <_> 403 Forbidden"`, + `<_> level=error msg="PullImage <_> failed" error="rpc error: code = NotFound desc = failed to pull and unpack image <_> failed to resolve reference <_> <_> not found"`, + `<_> level=info msg="CreateContainer within sandbox <_> for <_> returns container id <_>`, + `<_> level=info msg="CreateContainer within sandbox <_> for container <_>`, + `<_> level=info msg="ImageCreate event <_> labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"`, + `<_> level=info msg="ImageUpdate event <_> labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"`, + `<_> level=info msg="PullImage <_>`, + `<_> level=info msg="PullImage <_> returns image reference <_>`, + `<_> level=info msg="Pulled image <_> with image id <_> repo tag <_> repo digest <_> size <_> in <_>`, + `<_> level=info msg="RemoveContainer for <_>`, + `<_> level=info msg="RemoveContainer for <_> returns successfully"`, + `<_> level=info msg="StartContainer for <_>`, + `<_> level=info msg="StartContainer for <_> returns successfully"`, + `<_> level=info msg="cleaning up dead shim" namespace=k8s.io`, + `<_> level=info msg="shim disconnected" <_> namespace=k8s.io`, + `<_> level=info msg="stop pulling image <_> active requests=0, bytes <_>`, + `<_> level=info msg="trying next host - response was http.StatusNotFound" host=us.gcr.io`, + `<_> level=warning msg="cleaning up after shim disconnected" <_> namespace=k8s.io`, + `AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" <_> comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined"`, + `E0507 11:59:29.725681 3089 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"azure-resourcemanager-exporter\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=azure-resourcemanager-exporter pod=azure-resourcemanager-exporter-6b5b58c666-rsttd_infra-exporters(5a95f801-309c-4f33-864a-406262c6ece6)\"" pod="infra-exporters/azure-resourcemanager-exporter-6b5b58c666-rsttd" podUID="5a95f801-309c-4f33-864a-406262c6ece6"`, + `E0507 11:59:31.554203 4531 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"frontend\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=frontend pod=otel-demo-alt-dev-frontend-79ccf98858-mbj4x_otel-demo-alt(d08e620e-00d0-49f1-a195-820a62e8de8f)\"" pod="otel-demo-alt/otel-demo-alt-dev-frontend-79ccf98858-mbj4x" podUID="d08e620e-00d0-49f1-a195-820a62e8de8f"`, + `E0507 11:59:31.928148 4734 pod_workers.go:1300] "Error syncing pod, skipping" err="unmounted volumes=[terraform-drift-detector-data], unattached volumes=[terraform-drift-detector-data], failed to process volumes=[]: context deadline exceeded" pod="terraform-drift-detector/terraform-drift-detector-d68b4c545-jg2vj" podUID="6c607496-ef26-454e-b2f2-4cb75b233fa3"`, + `E0507 11:59:34.856101 4727 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana-render-security\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-security:0.1.181\\\"\"" pod="integration/grafana-render-service-cbff479fc-cj9tp" podUID="0e3114d1-2f3a-49d6-a71d-dbc75050d8e0"`, + `E0507 11:59:34.923938 3027 kuberuntime_manager.go:1261] container &Container{Name:mysqld-exporter,Image:prom/mysqld-exporter:v0.13.0,Command:[],Args:[--collect.info_schema.innodb_metrics],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:9104,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:MYSQL_USER,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:testcrossplane-user-exporter,},Key:username,Optional:nil,},},},EnvVar{Name:MYSQL_PASSWORD,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:testcrossplane-user-exporter,},Key:password,Optional:nil,},},},EnvVar{Name:MYSQL_HOST,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:testcrossplane-user-exporter,},Key:endpoint,Optional:nil,},},},EnvVar{Name:MYSQL_PORT,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:testcrossplane-user-exporter,},Key:port,Optional:nil,},},},EnvVar{Name:MYSQL_TLS_MODE,Value:preferred,ValueFrom:nil,},EnvVar{Name:DATA_SOURCE_NAME,Value:$(MYSQL_USER):$(MYSQL_PASSWORD)@tcp($(MYSQL_HOST):$(MYSQL_PORT))/?tls=$(MYSQL_TLS_MODE),ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dzx7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod testcrossplane-exporter-c67cfc58f-vbzl4_crossplane-playground(3d49134d-3378-4ec3-824c-5ff4ea2590a5): CreateContainerConfigError: secret "testcrossplane-user-exporter" not found`, + `E0507 11:59:34.923984 3027 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysqld-exporter\" with CreateContainerConfigError: \"secret \\\"testcrossplane-user-exporter\\\" not found\"" 
pod="crossplane-playground/testcrossplane-exporter-c67cfc58f-vbzl4" podUID="3d49134d-3378-4ec3-824c-5ff4ea2590a5"`, + `E0507 11:59:35.928465 4734 pod_workers.go:1300] "Error syncing pod, skipping" err="unmounted volumes=[custom-grafana-agent], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="loki-dev-010/custom-grafana-agent-856948968f-6jfks" podUID="17b244cc-ecb9-4fbc-beaa-8fa47fafe013"`, + `E0507 11:59:37.252214 4736 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ksm\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=ksm pod=new-relic-nri-bundle-nrk8s-ksm-6c785668f5-jcxh2_integration(f7cc3cca-2ffb-4fde-a73e-a4ba8b0f6b3c)\"" pod="integration/new-relic-nri-bundle-nrk8s-ksm-6c785668f5-jcxh2" podUID="f7cc3cca-2ffb-4fde-a73e-a4ba8b0f6b3c"`, + `E0507 11:59:39.149450 4729 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-agent\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=cluster-agent pod=appdynamics-cluster-agent-appdynamics-cluster-agent-56667dmbnkv_integration(69bc5e6c-0451-443e-af8a-c831871afbb8)\"" pod="integration/appdynamics-cluster-agent-appdynamics-cluster-agent-56667dmbnkv" podUID="69bc5e6c-0451-443e-af8a-c831871afbb8"`, + `E0507 11:59:41.375655 4736 kuberuntime_manager.go:1256] container &Container{Name:ruler,Image:grafana/enterprise-metrics:v2.12.0,Command:[],Args:[-target=ruler -config.expand-env=true -config.file=/etc/mimir/mimir.yaml -distributor.remote-timeout=10s],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:grpc,HostPort:0,ContainerPort:9095,Protocol:TCP,HostIP:,},ContainerPort{Name:memberlist,HostPort:0,ContainerPort:7946,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:JAEGER_AGENT_HOST,Value:alloy-otlp.alloy-otlp.svc.cluster.local.,ValueFrom:nil,},EnvVar{Name:JAEGER_TAGS,Value:namespace=ge-metrics-federation,cluster=dev-us-central-0,ValueFrom:nil,},EnvVar{Name:JAEGER_SAMPLER_MANAGER_HOST_PORT,Value:http://alloy-otlp.alloy-otlp.svc.cluster.local.:5778/sampling,ValueFrom:nil,},EnvVar{Name:GOOGLE_APPLICATION_CREDENTIALS,Value:/var/secrets/google/credentials.json,ValueFrom:nil,},EnvVar{Name:AM_TOKEN,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:ruler-alertmanager-token,},Key:token,Optional:nil,},},},EnvVar{Name:JAEGER_REPORTER_MAX_QUEUE_SIZE,Value:1000,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{100 -3} {<nil>} 100m DecimalSI},memory: {{134217728 0} {<nil>} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:gcs-credentials,ReadOnly:false,MountPath:/var/secrets/google/,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:config,ReadOnly:false,MountPath:/etc/mimir,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:license,ReadOnly:false,MountPath:/license,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:runtime-config,ReadOnly:false,MountPath:/var/mimir,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:storage,ReadOnly:false,MountPath:/data,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:active-queries,ReadOnly:false,MountPath:/active-query-tracker,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-jtnbs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/ready,Port:{1 0 http-metrics},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:45,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod gem-mimir-ruler-5f56f7846b-fgxdm_ge-metrics-federation(07c06e21-137b-4fdd-b7d3-703f0a567720): CreateContainerConfigError: secret "ruler-alertmanager-token" not found`, + `E0507 <_> 4731 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"overrides-exporter\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/kubernetes-dev/enterprise-logs:callum-shard-firstlast-08\\\"\"" pod="loki-dev-010/overrides-exporter-98c77fd66-6zj6m" podUID="1ff5bf3e-5856-4f6f-ae04-273f2dee170b"`, + `E0507 <_> 4733 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=prometheus pod=bryan-prometheus-0_bryan-prometheus(6dadfe71-eb19-4231-a96e-c64bb5499a1e)\"" pod="bryan-prometheus/bryan-prometheus-0" podUID="6dadfe71-eb19-4231-a96e-c64bb5499a1e"`, + `E0507 <_> <_> kuberuntime_manager.go:1256] container &Container{Name:pdc,Image:us.gcr.io/hosted-grafana/pdc:0.1.415,Command:[],Args:[-proxy.auth.ca-keys-dir=/var/run/secrets/pdc-certs -proxy.socks-server.addr=:10443 -proxy.ssh-server.addr=:2222 -proxy.use-socks-username-for-routing -proxy.api.http-address=:9182 -proxy.check-connpool-address-in-ring -memberlist.join=dns+gossip-ring.pdc.svc.cluster.local:7946 -api.http-address=:11443 -distributor.enabled=true -distributor.addr=:10444 -distributor.use-socks-username-for-routing -gateway.enabled=true -gateway.addr=:2244 -log.level=debug -certs.ca-private-key-file=/var/run/secrets/pdc-certs/ca.key -certs.ca-cert-file=/var/run/secrets/pdc-certs/ca.crt -certs.ca-pub-file=/var/run/secrets/pdc-certs/ca.pub -certs.cluster=local-k8s -shard-size=3 -graceful-shutdown-period=30s 
-enable-multiple-networks],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:socks,HostPort:0,ContainerPort:10443,Protocol:TCP,HostIP:,},ContainerPort{Name:ssh,HostPort:0,ContainerPort:2222,Protocol:TCP,HostIP:,},ContainerPort{Name:distributor,HostPort:0,ContainerPort:10444,Protocol:TCP,HostIP:,},ContainerPort{Name:gateway,HostPort:0,ContainerPort:2244,Protocol:TCP,HostIP:,},ContainerPort{Name:api,HostPort:0,ContainerPort:11443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {<nil>} 500m DecimalSI},memory: {{134217728 0} {<nil>} BinarySI},},Requests:ResourceList{cpu: {{250 -3} {<nil>} 250m DecimalSI},memory: {{67108864 0} {<nil>} <_> 11443 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:40,TimeoutSeconds:1,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/bin/sleep 5],},HTTPGet:nil,TCPSocket:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Never,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod <_> ErrImageNeverPull: Container image "us.gcr.io/hosted-grafana/pdc:0.1.415" is not present with pull policy of Never`, + `E0507 <_> <_> kuberuntime_manager.go:1256] container &Container{Name:ruler,Image:grafana/enterprise-metrics:v2.11.1,Command:[],Args:[-target=ruler -config.expand-env=true <_> {{100 -3} {<nil>} 100m DecimalSI},memory: {{134217728 0} {<nil>} <_> 0 http-metrics},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:45,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod <_> CreateContainerConfigError: secret "ruler-alertmanager-token" not found`, + `E0507 <_> <_> kuberuntime_manager.go:1256] container <_> set -e; while [ "$(pidof hgrun-pause)" = "" ]; do sleep 0.5; done;`, + `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"agent\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=agent <_> <_> <_>`, + `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cortex-gw\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=cortex-gw <_> <_> <_>`, + `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"gcom-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"us.gcr.io/kubernetes-dev/frontend-monitoring:6a8eb5a\\\"\"" <_> <_>`, + `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"goldpinger\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=goldpinger <_> <_> <_>`, + `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off <_> restarting failed container=grafana <_> <_> <_>`, + `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ErrImagePull: \"[rpc error: code = NotFound desc = failed to pull and unpack image <_> failed to resolve reference <_> <_> not found, failed to pull and unpack image <_> failed to resolve reference <_> unexpected status from HEAD request to <_> 403 Forbidden]\"" <_> <_>`, + `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image <_> <_> <_>`, + `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pdc\" with ErrImageNeverPull: \"Container image \\\"us.gcr.io/hosted-grafana/pdc:0.1.415\\\" is not present with pull policy of Never\"" <_> <_>`, + `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ruler\" with CreateContainerConfigError: \"secret \\\"ruler-alertmanager-token\\\" not found\"" <_> <_>`, + `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"support-agent\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=support-agent <_> <_> <_>`, + `E0507 <_> <_> prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task <_> not found: not found" probeType="Readiness" <_> <_> containerName="grafana"`, + `E0507 <_> <_> prober.go:239] "Unable to write all bytes from execInContainer" err="short write" <_> actualBytes=10240`, + `E0507 <_> <_> remote_image.go:180] "PullImage from image service failed" err="rpc error: code = NotFound desc = failed to pull and unpack image <_> failed to resolve reference <_> <_> not found" <_>`, + `E0507 <_> <_> remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = failed to pull and unpack image <_> failed to resolve reference <_> unexpected status from HEAD request to <_> 403 Forbidden" <_>`, + `E0507 <_> <_> remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = an error occurred when try to find container <_> not found" <_>`, + `E0507 <_> <_> remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task <_> not found: not found" <_> cmd=["/bin/hgrun","check"]`, + `I0507 11:59:29.320184 1537502 kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="logs-endpoint-dev-005/kafka-controller-0" secret="" err="secret \"not-needed\" not found"`, + `I0507 11:59:31.815514 2791 azure_credentials.go:220] image(us.gcr.io/hosted-grafana/hosted-grafana-pro) is not from ACR, return empty authentication`, + `I0507 11:59:32.409568 581823 cache.go:40] re-using cached key and certificate`, + `I0507 11:59:33.422254 1537502 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-x28r" status="Running"`, + `I0507 11:59:34.518822 3224 kuberuntime_container.go:745] "Killing container with a grace period" pod="hosted-grafana/hosted-grafana-api-7b6bd9b949-9csb4" podUID="25cb986c-3d6c-4ed0-abf3-ee59ed6175f9" containerName="hgapi" containerID="containerd://c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e" gracePeriod=30`, + `I0507 11:59:34.834734 3224 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-95j2t\" (UniqueName: \"kubernetes.io/projected/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-kube-api-access-95j2t\") pod \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\" (UID: \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\") "`, + `I0507 11:59:34.834794 3224 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"pdc-certs\" (UniqueName: \"kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-pdc-certs\") pod \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\" (UID: \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\") "`, + `I0507 11:59:34.834835 3224 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"gcs-serviceaccount\" (UniqueName: \"kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-gcs-serviceaccount\") pod \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\" (UID: \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\") "`, + `I0507 11:59:34.836955 3224 operation_generator.go:888] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-pdc-certs" (OuterVolumeSpecName: "pdc-certs") pod "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9" (UID: "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9"). InnerVolumeSpecName "pdc-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""`, + `I0507 11:59:34.841404 3224 operation_generator.go:888] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-kube-api-access-95j2t" (OuterVolumeSpecName: "kube-api-access-95j2t") pod "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9" (UID: "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9"). InnerVolumeSpecName "kube-api-access-95j2t". PluginName "kubernetes.io/projected", VolumeGidValue ""`, + `I0507 11:59:34.841447 3224 operation_generator.go:888] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-gcs-serviceaccount" (OuterVolumeSpecName: "gcs-serviceaccount") pod "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9" (UID: "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9"). InnerVolumeSpecName "gcs-serviceaccount". PluginName "kubernetes.io/secret", VolumeGidValue ""`, + `I0507 11:59:34.854084 4727 kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="integration/grafana-render-service-cbff479fc-cj9tp" secret="" err="secret \"us-gcr-io-hosted-grafana\" not found"`, + `I0507 11:59:34.936025 3224 reconciler_common.go:300] "Volume detached for volume \"pdc-certs\" (UniqueName: \"kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-pdc-certs\") on node \"ip-10-60-2-58.us-east-2.compute.internal\" DevicePath \"\""`, + `I0507 11:59:37.133005 3782 prober.go:107] "Probe failed" probeType="Readiness" pod="loki-dev-014/loki-dev-014-rollout-operator-58fc68b876-2qhmp" podUID="e6504036-2514-4ecc-b78c-c47061f60c9f" containerName="rollout-operator" probeResult="failure" output="HTTP probe failed with statuscode: 500"`, + `I0507 11:59:37.915108 4726 prober.go:107] "Probe failed" probeType="Readiness" pod="agent-management-dev-002/agent-management-api-7ff7b9b9-k9nft" podUID="9893f9ac-f3e4-41fb-8da7-592061d2386c" containerName="agent-management-api" probeResult="failure" output="HTTP probe failed with statuscode: 400"`, + `I0507 11:59:38.116658 2791 azure_credentials.go:220] image(us.gcr.io/hosted-grafana/hg-plugins) is not from ACR, return empty authentication`, + `I0507 11:59:39.168633 2776 kubelet.go:2493] "SyncLoop (probe)" probe="readiness" status="" pod="hosted-grafana/dafdeveuwest2-grafana-7845d969b5-f8h5q"`, + `I0507 11:59:39.560605 4739 kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." pod="logs-endpoint-dev-005/kafka-exporter-766c6757b5-bggf6" secret="" err="secret \"not-needed\" not found"`, + `I0507 <_> 2791 azure_credentials.go:220] image(us.gcr.io/hosted-grafana/hgrun) is not from ACR, return empty authentication`, + `I0507 <_> 3224 reconciler_common.go:300] "Volume detached for volume <_> (UniqueName: <_> on node \"ip-10-60-2-58.us-east-2.compute.internal\" DevicePath \"\""`, + `I0507 <_> 6247 prober.go:107] "Probe failed" probeType="Readiness" pod="grafana-agent/grafana-agent-helm-4" podUID="c36c5200-1cd6-4093-893c-c022f91af996" containerName="grafana-agent" probeResult="failure" output="Get \"http://10.0.99.125:3090/-/ready\": dial tcp 10.0.99.125:3090: connect: connection refused"`, + `I0507 <_> <_> <_> "Cleaned up orphaned pod volumes dir" <_> <_>`, + `I0507 <_> <_> <_> "SyncLoop (PLEG): event for pod" <_> <_>`, + `I0507 <_> <_> <_> "SyncLoop DELETE" source="api" <_>`, + `I0507 <_> <_> <_> "SyncLoop REMOVE" source="api" <_>`, + `I0507 <_> <_> generic.go:334] "Generic (PLEG): container finished" <_> <_> exitCode=1`, + `I0507 <_> <_> kubelet.go:2498] "SyncLoop (probe)" probe="liveness" status="unhealthy" <_>`, + `I0507 <_> <_> kubelet.go:2498] "SyncLoop (probe)" probe="readiness" status="ready" <_>`, + `I0507 <_> <_> kubelet_getters.go:187] "Pod status updated" <_> status="Running"`, + `I0507 <_> <_> kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." <_> secret="" err="secret \"dockerhub\" not found"`, + `I0507 <_> <_> kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." 
<_> secret="" err="secret \"gcr\" not found"`, + `I0507 <_> <_> pod_container_deletor.go:53] "DeleteContainer returned error" <_> err="failed to get container status <_> rpc error: code = NotFound desc = an error occurred when try to find container <_> not found"`, + `I0507 <_> <_> prober.go:107] "Probe failed" probeType="Readiness" <_> <_> containerName="grafana" probeResult="failure" output=<`, + `I0507 <_> <_> scope.go:117] "RemoveContainer" <_>`, + `I0507 <_> <_> cache.go:40] re-using cached key and certificate`, + `IPv4: martian source <_> from <_> on dev eth0`, + `PRC: Renewing lease on eth0.`, + `RCV: Reply message on eth0 from fe80::e9:7eff:fedf:3d37.`, + `Removed slice libcontainer container kubepods-burstable-pod25cb986c_3d6c_4ed0_abf3_ee59ed6175f9.slice.`, + `Started cri-containerd-95bf586cd79d43120ff44582d4dbd2476de61744411f8515b9b2c527a41fd5d9.scope.`, + `Started libcontainer container <_>`, + `XMT: Renew on eth0, interval 9700ms.`, + `XMT: Solicit on eth0, interval <_>`, + `audit: type=1400 <_> apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" <_> comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined"`, + `kauditd_printk_skb: <_> callbacks suppressed`, + `ll header: 00000000: 42 01 0a 80 00 <_> 42 01 0a 80 00 01 08 00`, + `net_ratelimit: 2 callbacks suppressed`, + `time="2024-05-07T11:59:34.519591759Z" level=info msg="StopContainer for \"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\" with timeout 30 (s)"`, + `time="2024-05-07T11:59:34.520032214Z" level=info msg="Stop container \"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\" with signal terminated"`, + `time="2024-05-07T11:59:34.591282703Z" level=info msg="StopContainer for \"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\" returns successfully"`, + `time="2024-05-07T11:59:34.592005066Z" level=info msg="StopPodSandbox for \"c605ad2cdc74c6b5288f2532ad71cce81a28ef6965f97a89ff6609deb825553a\""`, + `time="2024-05-07T11:59:34.592084495Z" level=info msg="Container to stop \"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\" must be in running or unknown state, current state \"CONTAINER_EXITED\""`, + `time="2024-05-07T11:59:34.706960850Z" level=info msg="TearDown network for sandbox \"c605ad2cdc74c6b5288f2532ad71cce81a28ef6965f97a89ff6609deb825553a\" successfully"`, + `time="2024-05-07T11:59:34.707025668Z" level=info msg="StopPodSandbox for \"c605ad2cdc74c6b5288f2532ad71cce81a28ef6965f97a89ff6609deb825553a\" returns successfully"`, + `time="2024-05-07T11:59:38.484586527Z" level=error msg="Failed to delete exec process \"d9e0a1867ce73695ad859f2b0a76fe8f5053db8a5e49142d747e53a445729bd4\" for container \"6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408\"" error="ttrpc: closed: unknown"`, }, }, { @@ -198,23 +255,23 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { drain: New(DefaultConfig(), nil), inputFile: "testdata/kafka.txt", patterns: []string{ - `[2024-05-07 <_> INFO [LocalLog partition=mimir-dev-09-aggregations-offsets-0, dir=/bitnami/kafka/data] Deleting segment files <_> size=948, <_> <_> (kafka.log.LocalLog$)`, - `[2024-05-07 <_> INFO [UnifiedLog <_> dir=/bitnami/kafka/data] Deleting segments due to log start offset <_> breach: <_> size=948, <_> <_> size=948, <_> <_> (kafka.log.UnifiedLog)`, - `[2024-05-07 <_> INFO [UnifiedLog <_> dir=/bitnami/kafka/data] Incremented log start offset to <_> due to leader offset increment (kafka.log.UnifiedLog)`, - `[2024-05-07 <_> INFO [UnifiedLog <_> 
dir=/bitnami/kafka/data] Deleting segments due to log start offset <_> breach: <_> <_> <_> <_> (kafka.log.UnifiedLog)`, - `[2024-05-07 <_> INFO [UnifiedLog <_> dir=/bitnami/kafka/data] Incremented log start offset to <_> due to segment deletion (kafka.log.UnifiedLog)`, - `[2024-05-07 <_> INFO [UnifiedLog <_> dir=/bitnami/kafka/data] Deleting segment <_> <_> <_> <_> due to retention size <_> breach. Log size after deletion will be <_> (kafka.log.UnifiedLog)`, - `[2024-05-07 <_> INFO [ProducerStateManager <_> Wrote producer snapshot at offset <_> with 0 producer ids in <_> ms. (kafka.log.ProducerStateManager)`, - `[2024-05-07 10:55:53,038] INFO [LocalLog partition=mimir-dev-09-aggregations-offsets-1, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=447957, size=948, lastModifiedTime=1715059232052, largestRecordTimestamp=Some(1715059232002)),LogSegment(baseOffset=447969, size=948, lastModifiedTime=1715059424352, largestRecordTimestamp=Some(1715059424301)) (kafka.log.LocalLog$)`, - `[2024-05-07 <_> INFO [LocalLog <_> dir=/bitnami/kafka/data] Rolled new log segment at offset <_> in <_> ms. (kafka.log.LocalLog)`, - `[2024-05-07 10:55:40,638] INFO [LocalLog partition=ingest-6, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=180400817, size=16997594, lastModifiedTime=1715075775780, largestRecordTimestamp=Some(1715075775771)),LogSegment(baseOffset=180403261, size=16992344, lastModifiedTime=1715075781053, largestRecordTimestamp=Some(1715075781021)),LogSegment(baseOffset=180405723, size=16989895, lastModifiedTime=1715075786205, largestRecordTimestamp=Some(1715075786174)),LogSegment(baseOffset=180408118, size=16998698, lastModifiedTime=1715075791681, largestRecordTimestamp=Some(1715075791673)),LogSegment(baseOffset=180410608, size=16995676, lastModifiedTime=1715075796438, largestRecordTimestamp=Some(1715075796430)),LogSegment(baseOffset=180412733, size=16963278, lastModifiedTime=1715075800534, largestRecordTimestamp=Some(1715075800511)),LogSegment(baseOffset=180414883, size=16984328, lastModifiedTime=1715075805272, largestRecordTimestamp=Some(1715075805230)),LogSegment(baseOffset=180417063, size=16989109, lastModifiedTime=1715075810381, largestRecordTimestamp=Some(1715075810372)),LogSegment(baseOffset=180419267, size=16996871, lastModifiedTime=1715075815153, largestRecordTimestamp=Some(1715075815125)),LogSegment(baseOffset=180421560, size=16988558, lastModifiedTime=1715075819785, largestRecordTimestamp=Some(1715075819763)),LogSegment(baseOffset=180424008, size=16999292, lastModifiedTime=1715075825336, largestRecordTimestamp=Some(1715075825303)),LogSegment(baseOffset=180426459, size=16990595, lastModifiedTime=1715075830839, largestRecordTimestamp=Some(1715075830827)),LogSegment(baseOffset=180428944, size=16995859, lastModifiedTime=1715075835942, largestRecordTimestamp=Some(1715075835904)),LogSegment(baseOffset=180431327, size=16992294, lastModifiedTime=1715075841219, largestRecordTimestamp=Some(1715075841214)),LogSegment(baseOffset=180433867, size=16966736, lastModifiedTime=1715075846443, largestRecordTimestamp=Some(1715075846401)),LogSegment(baseOffset=180436204, size=16894731, lastModifiedTime=1715075853273, largestRecordTimestamp=Some(1715075853244)),LogSegment(baseOffset=180438984, size=16983529, lastModifiedTime=1715075858911, largestRecordTimestamp=Some(1715075858891)),LogSegment(baseOffset=180441466, size=16996933, lastModifiedTime=1715075863566, largestRecordTimestamp=Some(1715075863554)),LogSegment(baseOffset=180443778, size=16999841, 
lastModifiedTime=1715075866199, largestRecordTimestamp=Some(1715075866185)),LogSegment(baseOffset=180445367, size=16992471, lastModifiedTime=1715075870385, largestRecordTimestamp=Some(1715075870347)),LogSegment(baseOffset=180447366, size=16999996, lastModifiedTime=1715075875102, largestRecordTimestamp=Some(1715075875091)),LogSegment(baseOffset=180449601, size=16994426, lastModifiedTime=1715075879927, largestRecordTimestamp=Some(1715075879926)),LogSegment(baseOffset=180452079, size=16998020, lastModifiedTime=1715075885293, largestRecordTimestamp=Some(1715075885263)),LogSegment(baseOffset=180454546, size=16992231, lastModifiedTime=1715075890424, largestRecordTimestamp=Some(1715075890409)),LogSegment(baseOffset=180456986, size=16970315, lastModifiedTime=1715075895719, largestRecordTimestamp=Some(1715075895690)),LogSegment(baseOffset=180459366, size=16990785, lastModifiedTime=1715075900996, largestRecordTimestamp=Some(1715075900985)),LogSegment(baseOffset=180461885, size=16996655, lastModifiedTime=1715075905847, largestRecordTimestamp=Some(1715075905841)),LogSegment(baseOffset=180464299, size=16982181, lastModifiedTime=1715075911052, largestRecordTimestamp=Some(1715075911028)),LogSegment(baseOffset=180466821, size=16997630, lastModifiedTime=1715075915962, largestRecordTimestamp=Some(1715075915953)),LogSegment(baseOffset=180468968, size=16995723, lastModifiedTime=1715075920325, largestRecordTimestamp=Some(1715075920308)),LogSegment(baseOffset=180471046, size=16979316, lastModifiedTime=1715075924724, largestRecordTimestamp=Some(1715075924697)),LogSegment(baseOffset=180473259, size=16995238, lastModifiedTime=1715075929645, largestRecordTimestamp=Some(1715075929624)),LogSegment(baseOffset=180475486, size=16988461, lastModifiedTime=1715075934288, largestRecordTimestamp=Some(1715075934283)),LogSegment(baseOffset=180477735, size=16993767, lastModifiedTime=1715075939277, largestRecordTimestamp=Some(1715075939270)),LogSegment(baseOffset=180480095, size=16995409, lastModifiedTime=1715075944639, largestRecordTimestamp=Some(1715075944635)),LogSegment(baseOffset=180482560, size=16992784, lastModifiedTime=1715075949760, largestRecordTimestamp=Some(1715075949760)),LogSegment(baseOffset=180484967, size=16990838, lastModifiedTime=1715075954937, largestRecordTimestamp=Some(1715075954929)),LogSegment(baseOffset=180487377, size=16976794, lastModifiedTime=1715075960151, largestRecordTimestamp=Some(1715075960119)),LogSegment(baseOffset=180489919, size=16997379, lastModifiedTime=1715075965116, largestRecordTimestamp=Some(1715075965085)),LogSegment(baseOffset=180492304, size=16956613, lastModifiedTime=1715075970448, largestRecordTimestamp=Some(1715075970424)),LogSegment(baseOffset=180494832, size=16895640, lastModifiedTime=1715075975354, largestRecordTimestamp=Some(1715075975341)),LogSegment(baseOffset=180496930, size=16998328, lastModifiedTime=1715075979813, largestRecordTimestamp=Some(1715075979796)),LogSegment(baseOffset=180499079, size=16995699, lastModifiedTime=1715075984309, largestRecordTimestamp=Some(1715075984285)),LogSegment(baseOffset=180501183, size=16993785, lastModifiedTime=1715075989086, largestRecordTimestamp=Some(1715075989064)),LogSegment(baseOffset=180503431, size=16989600, lastModifiedTime=1715075993713, largestRecordTimestamp=Some(1715075993683)),LogSegment(baseOffset=180505674, size=16984790, lastModifiedTime=1715075998337, largestRecordTimestamp=Some(1715075998318)),LogSegment(baseOffset=180508022, size=16982630, lastModifiedTime=1715076003671, 
largestRecordTimestamp=Some(1715076003660)),LogSegment(baseOffset=180510439, size=16999488, lastModifiedTime=1715076009000, largestRecordTimestamp=Some(1715076008996)),LogSegment(baseOffset=180512848, size=16997845, lastModifiedTime=1715076014033, largestRecordTimestamp=Some(1715076014032)),LogSegment(baseOffset=180515281, size=16990661, lastModifiedTime=1715076019245, largestRecordTimestamp=Some(1715076019216)),LogSegment(baseOffset=180517815, size=16996244, lastModifiedTime=1715076023989, largestRecordTimestamp=Some(1715076023963)),LogSegment(baseOffset=180520112, size=16992012, lastModifiedTime=1715076029243, largestRecordTimestamp=Some(1715076029231)) (kafka.log.LocalLog$)`, - `[2024-05-07 10:55:40,626] INFO [LocalLog partition=ingest-6, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=180391157, size=16991045, lastModifiedTime=1715075754780, largestRecordTimestamp=Some(1715075754774)),LogSegment(baseOffset=180393429, size=16997692, lastModifiedTime=1715075760206, largestRecordTimestamp=Some(1715075760186)),LogSegment(baseOffset=180395889, size=16998200, lastModifiedTime=1715075765542, largestRecordTimestamp=Some(1715075765526)),LogSegment(baseOffset=180398373, size=16977347, lastModifiedTime=1715075770515, largestRecordTimestamp=Some(1715075770504)) (kafka.log.LocalLog$)`, `[2024-05-07 10:55:40,559] INFO [LocalLog partition=ingest-7, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=179133378, size=16987985, lastModifiedTime=1715075760072, largestRecordTimestamp=Some(1715075760047)),LogSegment(baseOffset=179135832, size=16999459, lastModifiedTime=1715075765431, largestRecordTimestamp=Some(1715075765398)),LogSegment(baseOffset=179138321, size=16994485, lastModifiedTime=1715075770425, largestRecordTimestamp=Some(1715075770404)),LogSegment(baseOffset=179140761, size=16996810, lastModifiedTime=1715075775622, largestRecordTimestamp=Some(1715075775619)),LogSegment(baseOffset=179143198, size=16998520, lastModifiedTime=1715075780912, largestRecordTimestamp=Some(1715075780889)),LogSegment(baseOffset=179145674, size=16988474, lastModifiedTime=1715075786051, largestRecordTimestamp=Some(1715075786030)),LogSegment(baseOffset=179148084, size=16956099, lastModifiedTime=1715075791514, largestRecordTimestamp=Some(1715075791486)),LogSegment(baseOffset=179150568, size=16995476, lastModifiedTime=1715075796360, largestRecordTimestamp=Some(1715075796329)),LogSegment(baseOffset=179152727, size=16993313, lastModifiedTime=1715075800440, largestRecordTimestamp=Some(1715075800430)),LogSegment(baseOffset=179154861, size=16992142, lastModifiedTime=1715075805147, largestRecordTimestamp=Some(1715075805135)),LogSegment(baseOffset=179157056, size=16999919, lastModifiedTime=1715075810155, largestRecordTimestamp=Some(1715075810153)),LogSegment(baseOffset=179159230, size=16995021, lastModifiedTime=1715075815018, largestRecordTimestamp=Some(1715075815016)),LogSegment(baseOffset=179161550, size=16966526, lastModifiedTime=1715075819528, largestRecordTimestamp=Some(1715075819521)),LogSegment(baseOffset=179163962, size=16990848, lastModifiedTime=1715075825066, largestRecordTimestamp=Some(1715075825042)),LogSegment(baseOffset=179166414, size=16997833, lastModifiedTime=1715075830662, largestRecordTimestamp=Some(1715075830656)),LogSegment(baseOffset=179168915, size=16992619, lastModifiedTime=1715075835771, largestRecordTimestamp=Some(1715075835741)),LogSegment(baseOffset=179171302, size=16999091, lastModifiedTime=1715075841031, 
largestRecordTimestamp=Some(1715075841022)),LogSegment(baseOffset=179173853, size=16993953, lastModifiedTime=1715075846197, largestRecordTimestamp=Some(1715075846181)),LogSegment(baseOffset=179176191, size=16997479, lastModifiedTime=1715075853192, largestRecordTimestamp=Some(1715075853172)),LogSegment(baseOffset=179179037, size=16997174, lastModifiedTime=1715075858693, largestRecordTimestamp=Some(1715075858682)),LogSegment(baseOffset=179181478, size=16986004, lastModifiedTime=1715075863400, largestRecordTimestamp=Some(1715075863396)),LogSegment(baseOffset=179183786, size=16995316, lastModifiedTime=1715075866123, largestRecordTimestamp=Some(1715075866112)),LogSegment(baseOffset=179185434, size=16990492, lastModifiedTime=1715075870154, largestRecordTimestamp=Some(1715075870146)),LogSegment(baseOffset=179187398, size=16999541, lastModifiedTime=1715075874980, largestRecordTimestamp=Some(1715075874961)),LogSegment(baseOffset=179189664, size=16987383, lastModifiedTime=1715075879670, largestRecordTimestamp=Some(1715075879639)),LogSegment(baseOffset=179192076, size=16991701, lastModifiedTime=1715075885010, largestRecordTimestamp=Some(1715075884995)),LogSegment(baseOffset=179194546, size=16989109, lastModifiedTime=1715075890220, largestRecordTimestamp=Some(1715075890208)),LogSegment(baseOffset=179197009, size=16962782, lastModifiedTime=1715075895466, largestRecordTimestamp=Some(1715075895456)),LogSegment(baseOffset=179199373, size=16974715, lastModifiedTime=1715075900757, largestRecordTimestamp=Some(1715075900746)),LogSegment(baseOffset=179201897, size=16993973, lastModifiedTime=1715075905639, largestRecordTimestamp=Some(1715075905638)),LogSegment(baseOffset=179204346, size=16979828, lastModifiedTime=1715075910798, largestRecordTimestamp=Some(1715075910782)),LogSegment(baseOffset=179206836, size=16992092, lastModifiedTime=1715075915638, largestRecordTimestamp=Some(1715075915632)),LogSegment(baseOffset=179208986, size=16988849, lastModifiedTime=1715075920193, largestRecordTimestamp=Some(1715075920176)),LogSegment(baseOffset=179211133, size=16989206, lastModifiedTime=1715075924352, largestRecordTimestamp=Some(1715075924338)),LogSegment(baseOffset=179213268, size=16989737, lastModifiedTime=1715075929343, largestRecordTimestamp=Some(1715075929332)),LogSegment(baseOffset=179215514, size=16997903, lastModifiedTime=1715075934074, largestRecordTimestamp=Some(1715075934056)),LogSegment(baseOffset=179217793, size=16995100, lastModifiedTime=1715075938937, largestRecordTimestamp=Some(1715075938925)),LogSegment(baseOffset=179220122, size=16981574, lastModifiedTime=1715075944296, largestRecordTimestamp=Some(1715075944288)),LogSegment(baseOffset=179222600, size=16999794, lastModifiedTime=1715075949454, largestRecordTimestamp=Some(1715075949432)),LogSegment(baseOffset=179224988, size=16998870, lastModifiedTime=1715075954567, largestRecordTimestamp=Some(1715075954544)),LogSegment(baseOffset=179227402, size=16986053, lastModifiedTime=1715075959815, largestRecordTimestamp=Some(1715075959813)),LogSegment(baseOffset=179229948, size=16999937, lastModifiedTime=1715075964787, largestRecordTimestamp=Some(1715075964779)),LogSegment(baseOffset=179232368, size=16992995, lastModifiedTime=1715075970109, largestRecordTimestamp=Some(1715075970096)),LogSegment(baseOffset=179234885, size=16995271, lastModifiedTime=1715075975078, largestRecordTimestamp=Some(1715075975066)),LogSegment(baseOffset=179237038, size=16987833, lastModifiedTime=1715075979534, largestRecordTimestamp=Some(1715075979499)),LogSegment(baseOffset=179239147, 
size=16844618, lastModifiedTime=1715075984150, largestRecordTimestamp=Some(1715075984139)),LogSegment(baseOffset=179241334, size=16968482, lastModifiedTime=1715075988727, largestRecordTimestamp=Some(1715075988700)),LogSegment(baseOffset=179243472, size=16991395, lastModifiedTime=1715075993359, largestRecordTimestamp=Some(1715075993333)),LogSegment(baseOffset=179245756, size=16985926, lastModifiedTime=1715075998010, largestRecordTimestamp=Some(1715075998005)),LogSegment(baseOffset=179248096, size=16948574, lastModifiedTime=1715076003328, largestRecordTimestamp=Some(1715076003298)),LogSegment(baseOffset=179250530, size=16986047, lastModifiedTime=1715076008650, largestRecordTimestamp=Some(1715076008628)),LogSegment(baseOffset=179252915, size=16998875, lastModifiedTime=1715076013551, largestRecordTimestamp=Some(1715076013516)),LogSegment(baseOffset=179255312, size=16997990, lastModifiedTime=1715076018832, largestRecordTimestamp=Some(1715076018797)),LogSegment(baseOffset=179257861, size=16999525, lastModifiedTime=1715076023621, largestRecordTimestamp=Some(1715076023601)),LogSegment(baseOffset=179260226, size=16997755, lastModifiedTime=1715076028814, largestRecordTimestamp=Some(1715076028800)),LogSegment(baseOffset=179262715, size=16981492, lastModifiedTime=1715076034150, largestRecordTimestamp=Some(1715076034140)),LogSegment(baseOffset=179265040, size=16998332, lastModifiedTime=1715076038676, largestRecordTimestamp=Some(1715076038657)) (kafka.log.LocalLog$)`, + `[2024-05-07 10:55:40,626] INFO [LocalLog partition=ingest-6, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=180391157, size=16991045, lastModifiedTime=1715075754780, largestRecordTimestamp=Some(1715075754774)),LogSegment(baseOffset=180393429, size=16997692, lastModifiedTime=1715075760206, largestRecordTimestamp=Some(1715075760186)),LogSegment(baseOffset=180395889, size=16998200, lastModifiedTime=1715075765542, largestRecordTimestamp=Some(1715075765526)),LogSegment(baseOffset=180398373, size=16977347, lastModifiedTime=1715075770515, largestRecordTimestamp=Some(1715075770504)) (kafka.log.LocalLog$)`, + `[2024-05-07 10:55:40,638] INFO [LocalLog partition=ingest-6, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=180400817, size=16997594, lastModifiedTime=1715075775780, largestRecordTimestamp=Some(1715075775771)),LogSegment(baseOffset=180403261, size=16992344, lastModifiedTime=1715075781053, largestRecordTimestamp=Some(1715075781021)),LogSegment(baseOffset=180405723, size=16989895, lastModifiedTime=1715075786205, largestRecordTimestamp=Some(1715075786174)),LogSegment(baseOffset=180408118, size=16998698, lastModifiedTime=1715075791681, largestRecordTimestamp=Some(1715075791673)),LogSegment(baseOffset=180410608, size=16995676, lastModifiedTime=1715075796438, largestRecordTimestamp=Some(1715075796430)),LogSegment(baseOffset=180412733, size=16963278, lastModifiedTime=1715075800534, largestRecordTimestamp=Some(1715075800511)),LogSegment(baseOffset=180414883, size=16984328, lastModifiedTime=1715075805272, largestRecordTimestamp=Some(1715075805230)),LogSegment(baseOffset=180417063, size=16989109, lastModifiedTime=1715075810381, largestRecordTimestamp=Some(1715075810372)),LogSegment(baseOffset=180419267, size=16996871, lastModifiedTime=1715075815153, largestRecordTimestamp=Some(1715075815125)),LogSegment(baseOffset=180421560, size=16988558, lastModifiedTime=1715075819785, largestRecordTimestamp=Some(1715075819763)),LogSegment(baseOffset=180424008, size=16999292, lastModifiedTime=1715075825336, 
largestRecordTimestamp=Some(1715075825303)),LogSegment(baseOffset=180426459, size=16990595, lastModifiedTime=1715075830839, largestRecordTimestamp=Some(1715075830827)),LogSegment(baseOffset=180428944, size=16995859, lastModifiedTime=1715075835942, largestRecordTimestamp=Some(1715075835904)),LogSegment(baseOffset=180431327, size=16992294, lastModifiedTime=1715075841219, largestRecordTimestamp=Some(1715075841214)),LogSegment(baseOffset=180433867, size=16966736, lastModifiedTime=1715075846443, largestRecordTimestamp=Some(1715075846401)),LogSegment(baseOffset=180436204, size=16894731, lastModifiedTime=1715075853273, largestRecordTimestamp=Some(1715075853244)),LogSegment(baseOffset=180438984, size=16983529, lastModifiedTime=1715075858911, largestRecordTimestamp=Some(1715075858891)),LogSegment(baseOffset=180441466, size=16996933, lastModifiedTime=1715075863566, largestRecordTimestamp=Some(1715075863554)),LogSegment(baseOffset=180443778, size=16999841, lastModifiedTime=1715075866199, largestRecordTimestamp=Some(1715075866185)),LogSegment(baseOffset=180445367, size=16992471, lastModifiedTime=1715075870385, largestRecordTimestamp=Some(1715075870347)),LogSegment(baseOffset=180447366, size=16999996, lastModifiedTime=1715075875102, largestRecordTimestamp=Some(1715075875091)),LogSegment(baseOffset=180449601, size=16994426, lastModifiedTime=1715075879927, largestRecordTimestamp=Some(1715075879926)),LogSegment(baseOffset=180452079, size=16998020, lastModifiedTime=1715075885293, largestRecordTimestamp=Some(1715075885263)),LogSegment(baseOffset=180454546, size=16992231, lastModifiedTime=1715075890424, largestRecordTimestamp=Some(1715075890409)),LogSegment(baseOffset=180456986, size=16970315, lastModifiedTime=1715075895719, largestRecordTimestamp=Some(1715075895690)),LogSegment(baseOffset=180459366, size=16990785, lastModifiedTime=1715075900996, largestRecordTimestamp=Some(1715075900985)),LogSegment(baseOffset=180461885, size=16996655, lastModifiedTime=1715075905847, largestRecordTimestamp=Some(1715075905841)),LogSegment(baseOffset=180464299, size=16982181, lastModifiedTime=1715075911052, largestRecordTimestamp=Some(1715075911028)),LogSegment(baseOffset=180466821, size=16997630, lastModifiedTime=1715075915962, largestRecordTimestamp=Some(1715075915953)),LogSegment(baseOffset=180468968, size=16995723, lastModifiedTime=1715075920325, largestRecordTimestamp=Some(1715075920308)),LogSegment(baseOffset=180471046, size=16979316, lastModifiedTime=1715075924724, largestRecordTimestamp=Some(1715075924697)),LogSegment(baseOffset=180473259, size=16995238, lastModifiedTime=1715075929645, largestRecordTimestamp=Some(1715075929624)),LogSegment(baseOffset=180475486, size=16988461, lastModifiedTime=1715075934288, largestRecordTimestamp=Some(1715075934283)),LogSegment(baseOffset=180477735, size=16993767, lastModifiedTime=1715075939277, largestRecordTimestamp=Some(1715075939270)),LogSegment(baseOffset=180480095, size=16995409, lastModifiedTime=1715075944639, largestRecordTimestamp=Some(1715075944635)),LogSegment(baseOffset=180482560, size=16992784, lastModifiedTime=1715075949760, largestRecordTimestamp=Some(1715075949760)),LogSegment(baseOffset=180484967, size=16990838, lastModifiedTime=1715075954937, largestRecordTimestamp=Some(1715075954929)),LogSegment(baseOffset=180487377, size=16976794, lastModifiedTime=1715075960151, largestRecordTimestamp=Some(1715075960119)),LogSegment(baseOffset=180489919, size=16997379, lastModifiedTime=1715075965116, largestRecordTimestamp=Some(1715075965085)),LogSegment(baseOffset=180492304, 
size=16956613, lastModifiedTime=1715075970448, largestRecordTimestamp=Some(1715075970424)),LogSegment(baseOffset=180494832, size=16895640, lastModifiedTime=1715075975354, largestRecordTimestamp=Some(1715075975341)),LogSegment(baseOffset=180496930, size=16998328, lastModifiedTime=1715075979813, largestRecordTimestamp=Some(1715075979796)),LogSegment(baseOffset=180499079, size=16995699, lastModifiedTime=1715075984309, largestRecordTimestamp=Some(1715075984285)),LogSegment(baseOffset=180501183, size=16993785, lastModifiedTime=1715075989086, largestRecordTimestamp=Some(1715075989064)),LogSegment(baseOffset=180503431, size=16989600, lastModifiedTime=1715075993713, largestRecordTimestamp=Some(1715075993683)),LogSegment(baseOffset=180505674, size=16984790, lastModifiedTime=1715075998337, largestRecordTimestamp=Some(1715075998318)),LogSegment(baseOffset=180508022, size=16982630, lastModifiedTime=1715076003671, largestRecordTimestamp=Some(1715076003660)),LogSegment(baseOffset=180510439, size=16999488, lastModifiedTime=1715076009000, largestRecordTimestamp=Some(1715076008996)),LogSegment(baseOffset=180512848, size=16997845, lastModifiedTime=1715076014033, largestRecordTimestamp=Some(1715076014032)),LogSegment(baseOffset=180515281, size=16990661, lastModifiedTime=1715076019245, largestRecordTimestamp=Some(1715076019216)),LogSegment(baseOffset=180517815, size=16996244, lastModifiedTime=1715076023989, largestRecordTimestamp=Some(1715076023963)),LogSegment(baseOffset=180520112, size=16992012, lastModifiedTime=1715076029243, largestRecordTimestamp=Some(1715076029231)) (kafka.log.LocalLog$)`, `[2024-05-07 10:55:40,713] INFO [LocalLog partition=ingest-3, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=182526165, size=16998661, lastModifiedTime=1715075758062, largestRecordTimestamp=Some(1715075758061)),LogSegment(baseOffset=182528560, size=16999718, lastModifiedTime=1715075763583, largestRecordTimestamp=Some(1715075763577)),LogSegment(baseOffset=182531056, size=16994792, lastModifiedTime=1715075768711, largestRecordTimestamp=Some(1715075768697)),LogSegment(baseOffset=182533514, size=16987578, lastModifiedTime=1715075773552, largestRecordTimestamp=Some(1715075773536)),LogSegment(baseOffset=182535953, size=16987705, lastModifiedTime=1715075779055, largestRecordTimestamp=Some(1715075779046)),LogSegment(baseOffset=182538482, size=16997466, lastModifiedTime=1715075784005, largestRecordTimestamp=Some(1715075784004)),LogSegment(baseOffset=182540856, size=16981250, lastModifiedTime=1715075789523, largestRecordTimestamp=Some(1715075789487)),LogSegment(baseOffset=182543386, size=16980484, lastModifiedTime=1715075794637, largestRecordTimestamp=Some(1715075794632)),LogSegment(baseOffset=182545622, size=16999738, lastModifiedTime=1715075799008, largestRecordTimestamp=Some(1715075799000)),LogSegment(baseOffset=182547827, size=16872695, lastModifiedTime=1715075803273, largestRecordTimestamp=Some(1715075803251)),LogSegment(baseOffset=182550001, size=16999890, lastModifiedTime=1715075808368, largestRecordTimestamp=Some(1715075808355)),LogSegment(baseOffset=182552113, size=16959982, lastModifiedTime=1715075813294, largestRecordTimestamp=Some(1715075813293)),LogSegment(baseOffset=182554415, size=16988073, lastModifiedTime=1715075817816, largestRecordTimestamp=Some(1715075817783)),LogSegment(baseOffset=182556814, size=16974731, lastModifiedTime=1715075823018, largestRecordTimestamp=Some(1715075823016)),LogSegment(baseOffset=182559282, size=16996090, lastModifiedTime=1715075828672, 
largestRecordTimestamp=Some(1715075828632)),LogSegment(baseOffset=182561708, size=16999327, lastModifiedTime=1715075833742, largestRecordTimestamp=Some(1715075833709)),LogSegment(baseOffset=182564173, size=16992947, lastModifiedTime=1715075839121, largestRecordTimestamp=Some(1715075839114)),LogSegment(baseOffset=182566740, size=16982572, lastModifiedTime=1715075844268, largestRecordTimestamp=Some(1715075844254)),LogSegment(baseOffset=182569086, size=16994786, lastModifiedTime=1715075850659, largestRecordTimestamp=Some(1715075850642)),LogSegment(baseOffset=182571815, size=16998391, lastModifiedTime=1715075856704, largestRecordTimestamp=Some(1715075856684)),LogSegment(baseOffset=182574372, size=16994403, lastModifiedTime=1715075861956, largestRecordTimestamp=Some(1715075861922)),LogSegment(baseOffset=182576828, size=16984546, lastModifiedTime=1715075865194, largestRecordTimestamp=Some(1715075865180)),LogSegment(baseOffset=182578716, size=16987846, lastModifiedTime=1715075868470, largestRecordTimestamp=Some(1715075868460)),LogSegment(baseOffset=182580437, size=16958237, lastModifiedTime=1715075873168, largestRecordTimestamp=Some(1715075873151)),LogSegment(baseOffset=182582637, size=16999432, lastModifiedTime=1715075877858, largestRecordTimestamp=Some(1715075877850)),LogSegment(baseOffset=182585006, size=16938567, lastModifiedTime=1715075882952, largestRecordTimestamp=Some(1715075882938)),LogSegment(baseOffset=182587493, size=16998214, lastModifiedTime=1715075888306, largestRecordTimestamp=Some(1715075888285)),LogSegment(baseOffset=182589965, size=16996264, lastModifiedTime=1715075893370, largestRecordTimestamp=Some(1715075893365)),LogSegment(baseOffset=182592327, size=16991650, lastModifiedTime=1715075898806, largestRecordTimestamp=Some(1715075898802)),LogSegment(baseOffset=182594863, size=16998234, lastModifiedTime=1715075903737, largestRecordTimestamp=Some(1715075903733)),LogSegment(baseOffset=182597289, size=16996241, lastModifiedTime=1715075908805, largestRecordTimestamp=Some(1715075908797)),LogSegment(baseOffset=182599811, size=16993657, lastModifiedTime=1715075913918, largestRecordTimestamp=Some(1715075913915)),LogSegment(baseOffset=182602171, size=16993112, lastModifiedTime=1715075918570, largestRecordTimestamp=Some(1715075918570)),LogSegment(baseOffset=182604245, size=16959963, lastModifiedTime=1715075922720, largestRecordTimestamp=Some(1715075922714)),LogSegment(baseOffset=182606451, size=16998518, lastModifiedTime=1715075927490, largestRecordTimestamp=Some(1715075927484)),LogSegment(baseOffset=182608616, size=16999103, lastModifiedTime=1715075932207, largestRecordTimestamp=Some(1715075932188)),LogSegment(baseOffset=182610888, size=16999389, lastModifiedTime=1715075937118, largestRecordTimestamp=Some(1715075937103)),LogSegment(baseOffset=182613221, size=16982597, lastModifiedTime=1715075942170, largestRecordTimestamp=Some(1715075942153)),LogSegment(baseOffset=182615634, size=16986904, lastModifiedTime=1715075947544, largestRecordTimestamp=Some(1715075947541)),LogSegment(baseOffset=182618074, size=16998820, lastModifiedTime=1715075952370, largestRecordTimestamp=Some(1715075952351)),LogSegment(baseOffset=182620446, size=16985066, lastModifiedTime=1715075957884, largestRecordTimestamp=Some(1715075957865)),LogSegment(baseOffset=182623007, size=16998235, lastModifiedTime=1715075963030, largestRecordTimestamp=Some(1715075963008)),LogSegment(baseOffset=182625520, size=16987568, lastModifiedTime=1715075967944, largestRecordTimestamp=Some(1715075967934)),LogSegment(baseOffset=182627921, 
size=16997118, lastModifiedTime=1715075973216, largestRecordTimestamp=Some(1715075973204)),LogSegment(baseOffset=182630290, size=16978465, lastModifiedTime=1715075978064, largestRecordTimestamp=Some(1715075978053)),LogSegment(baseOffset=182632463, size=16901644, lastModifiedTime=1715075982228, largestRecordTimestamp=Some(1715075982211)),LogSegment(baseOffset=182634546, size=16992477, lastModifiedTime=1715075986935, largestRecordTimestamp=Some(1715075986914)),LogSegment(baseOffset=182636738, size=16951087, lastModifiedTime=1715075991658, largestRecordTimestamp=Some(1715075991636)),LogSegment(baseOffset=182639001, size=16994471, lastModifiedTime=1715075996281, largestRecordTimestamp=Some(1715075996266)),LogSegment(baseOffset=182641298, size=16995754, lastModifiedTime=1715076001319, largestRecordTimestamp=Some(1715076001269)),LogSegment(baseOffset=182643712, size=16992752, lastModifiedTime=1715076006604, largestRecordTimestamp=Some(1715076006583)),LogSegment(baseOffset=182646095, size=16992944, lastModifiedTime=1715076011511, largestRecordTimestamp=Some(1715076011470)),LogSegment(baseOffset=182648504, size=16998993, lastModifiedTime=1715076016908, largestRecordTimestamp=Some(1715076016908)),LogSegment(baseOffset=182651018, size=16996765, lastModifiedTime=1715076021971, largestRecordTimestamp=Some(1715076021968)),LogSegment(baseOffset=182653526, size=16995808, lastModifiedTime=1715076026767, largestRecordTimestamp=Some(1715076026752)),LogSegment(baseOffset=182655860, size=16993535, lastModifiedTime=1715076032181, largestRecordTimestamp=Some(1715076032131)),LogSegment(baseOffset=182658341, size=16971926, lastModifiedTime=1715076037067, largestRecordTimestamp=Some(1715076037053)) (kafka.log.LocalLog$)`, - `[2024-05-07 <_> INFO Deleted producer state snapshot <_> (kafka.log.SnapshotFile)`, - `[2024-05-07 <_> INFO Deleted offset index <_> (kafka.log.LogSegment)`, + `[2024-05-07 10:55:53,038] INFO [LocalLog partition=mimir-dev-09-aggregations-offsets-1, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=447957, size=948, lastModifiedTime=1715059232052, largestRecordTimestamp=Some(1715059232002)),LogSegment(baseOffset=447969, size=948, lastModifiedTime=1715059424352, largestRecordTimestamp=Some(1715059424301)) (kafka.log.LocalLog$)`, `[2024-05-07 <_> INFO Deleted log <_> (kafka.log.LogSegment)`, + `[2024-05-07 <_> INFO Deleted offset index <_> (kafka.log.LogSegment)`, + `[2024-05-07 <_> INFO Deleted producer state snapshot <_> (kafka.log.SnapshotFile)`, `[2024-05-07 <_> INFO Deleted time index <_> (kafka.log.LogSegment)`, + `[2024-05-07 <_> INFO [LocalLog <_> dir=/bitnami/kafka/data] Rolled new log segment at offset <_> in <_> ms. (kafka.log.LocalLog)`, + `[2024-05-07 <_> INFO [LocalLog partition=mimir-dev-09-aggregations-offsets-0, dir=/bitnami/kafka/data] Deleting segment files <_> size=948, <_> <_> (kafka.log.LocalLog$)`, + `[2024-05-07 <_> INFO [ProducerStateManager <_> Wrote producer snapshot at offset <_> with 0 producer ids in <_> ms. (kafka.log.ProducerStateManager)`, + `[2024-05-07 <_> INFO [UnifiedLog <_> dir=/bitnami/kafka/data] Deleting segment <_> <_> <_> <_> due to retention size <_> breach. 
Log size after deletion will be <_> (kafka.log.UnifiedLog)`, + `[2024-05-07 <_> INFO [UnifiedLog <_> dir=/bitnami/kafka/data] Deleting segments due to log start offset <_> breach: <_> <_> <_> <_> (kafka.log.UnifiedLog)`, + `[2024-05-07 <_> INFO [UnifiedLog <_> dir=/bitnami/kafka/data] Deleting segments due to log start offset <_> breach: <_> size=948, <_> <_> size=948, <_> <_> (kafka.log.UnifiedLog)`, + `[2024-05-07 <_> INFO [UnifiedLog <_> dir=/bitnami/kafka/data] Incremented log start offset to <_> due to leader offset increment (kafka.log.UnifiedLog)`, + `[2024-05-07 <_> INFO [UnifiedLog <_> dir=/bitnami/kafka/data] Incremented log start offset to <_> due to segment deletion (kafka.log.UnifiedLog)`, }, }, { @@ -222,32 +279,42 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { drain: New(DefaultConfig(), nil), inputFile: "testdata/kubernetes.txt", patterns: []string{ - "I0507 12:04:17.596484 1 highnodeutilization.go:107] \"Criteria for a node below target utilization\" CPU=50 Mem=50 Pods=100", - "I0507 12:04:17.595169 1 descheduler.go:155] Building a pod evictor", - "I0507 <_> 1 <_> \"Number of <_> <_> <_>", - "I0507 <_> 1 <_> \"Total <_> <_> <_> <_> <_> <_> <_>", - "I0507 <_> 1 <_> <_> <_> <_> <_> <_> <_> <_> <_> <_>", - "I0507 <_> 1 node.go:157] \"Pod does not fit on any other node\" pod:=\"loki-dev-005/querier-burst-6b5f6db455-5zvkm\" <_> error:=\"[insufficient <_> insufficient <_>", - "I0507 <_> 1 node.go:157] \"Pod does not fit on any other node\" pod:=\"loki-dev-005/querier-burst-6b5f6db455-5zvkm\" <_> error:=\"pod node selector does not match the node label\"", - "I0507 <_> 1 <_> \"Pods on node\" <_> <_> <_> <_>", - "I0507 12:02:27.947830 1 nodeutilization.go:274] \"Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers\"", - "I0507 <_> 1 defaultevictor.go:202] \"Pod fails the following checks\" <_> checks=\"pod has local storage and descheduler is not configured with evictLocalStoragePods\"", - "I0507 <_> 1 defaultevictor.go:202] \"Pod fails the following checks\" <_> checks=\"pod is a DaemonSet pod\"", - "I0507 <_> 1 defaultevictor.go:202] \"Pod fails the following checks\" <_> checks=\"[pod is a DaemonSet pod, pod has local storage and descheduler is not configured with evictLocalStoragePods]\"", - "I0507 <_> 1 defaultevictor.go:202] \"Pod fails the following checks\" <_> checks=\"[pod is a mirror pod, pod is a static pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]\"", - "I0507 <_> 1 defaultevictor.go:202] \"Pod fails the following checks\" <_> checks=\"[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]\"", - "I0507 <_> 1 defaultevictor.go:202] \"Pod fails the following checks\" <_> checks=\"[pod <_> <_> <_> <_> pod has higher priority than specified priority class threshold]\"", - "I0507 <_> 1 defaultevictor.go:163] \"pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable\" <_>", - "I0507 <_> 1 node.go:157] \"Pod does not fit on any other node\" <_> <_> error:=\"[pod node selector does not match the node label, pod does not tolerate taints on the node]\"", - "I0507 <_> 1 defaultevictor.go:202] \"Pod fails the following checks\" <_> checks=\"[pod is a DaemonSet pod, pod has system 
critical priority, pod has higher priority than specified priority class threshold]\"", - "I0507 <_> 1 node.go:157] \"Pod does not fit on any other node\" <_> <_> error:=\"[pod node selector does not match the node label, insufficient <_> insufficient <_> insufficient pods]\"", - "I0507 <_> 1 defaultevictor.go:202] \"Pod fails the following checks\" <_> checks=\"[pod <_> <_> <_> <_> pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]\"", - "I0507 <_> 1 node.go:157] \"Pod does not fit on any other node\" <_> <_> error:=\"[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient <_> insufficient <_>", - "I0507 <_> 1 node.go:157] \"Pod does not fit on any other node\" <_> <_> error:=\"insufficient cpu\"", - "I0507 <_> 1 node.go:157] \"Pod does not fit on any other node\" <_> <_> error:=\"[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient <_>", - "I0507 <_> 1 node.go:157] \"Pod does not fit on any other node\" <_> <_> error:=\"[pod node selector does not match the node label, insufficient <_> insufficient <_>", - "I0507 <_> 1 node.go:157] \"Pod does not fit on any other node\" <_> <_> error:=\"[pod node selector does not match the node label, insufficient <_>", - "I0507 <_> 1 <_> <_> <_> <_> <_> <_> <_>", + `I0507 12:02:27.947830 1 nodeutilization.go:274] "Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers"`, + `I0507 12:04:17.595169 1 descheduler.go:155] Building a pod evictor`, + `I0507 12:04:17.596431 1 nodeutilization.go:204] "Node is underutilized" node="gke-dev-eu-west-3-main-n2s8-1-1dd39c-d1c92061-4z2l" usage={"cpu":"984m","memory":"611Mi","pods":"16"} usagePercentage={"cpu":12.44,"memory":2.15,"pods":25}`, + `I0507 12:04:17.596484 1 highnodeutilization.go:107] "Criteria for a node below target utilization" CPU=50 Mem=50 Pods=100`, + `I0507 12:04:17.596504 1 highnodeutilization.go:108] "Number of underutilized nodes" totalNumber=1`, + `I0507 12:04:17.596528 1 nodeutilization.go:260] "Total capacity to be moved" CPU=5060 Mem=112216292800 Pods=163`, + `I0507 12:04:17.596651 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/metrics-server-v0.6.3-68f5b7c4d5-t5mz8" checks="[pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`, + `I0507 12:04:17.596803 1 defaultevictor.go:202] "Pod fails the following checks" pod="gadget/gadget-zjjts" checks="[pod is a DaemonSet pod, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`, + `I0507 <_> 1 <_> "Evicting pods from node" <_> <_>`, + `I0507 <_> 1 <_> "No removable pods on node, try next node" <_>`, + `I0507 <_> 1 <_> "Number of evicted pods" <_>`, + `I0507 <_> 1 <_> "Pods on node" <_> <_> <_> <_>`, + `I0507 <_> 1 <_> "Total number of pods evicted" extension point="Balance" <_>`, + `I0507 <_> 1 <_> <_> Watch close - <_> total <_> items received`, + `I0507 <_> 1 defaultevictor.go:163] "pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable" <_>`, + `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="[pod has system critical priority, pod has higher priority than specified priority class threshold]"`, + `I0507 <_> 1 defaultevictor.go:202] "Pod 
fails the following checks" <_> checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`, + `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]"`, + `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="[pod is a DaemonSet pod, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`, + `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`, + `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold]"`, + `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="[pod is a mirror pod, pod is a static pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`, + `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="pod has local storage and descheduler is not configured with evictLocalStoragePods"`, + `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="pod is a DaemonSet pod"`, + `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" <_> <_> error:="[pod node selector does not match the node label, insufficient <_>`, + `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" <_> <_> error:="[pod node selector does not match the node label, insufficient <_> insufficient <_>`, + `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" <_> <_> error:="[pod node selector does not match the node label, insufficient <_> insufficient <_> insufficient pods]"`, + `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" <_> <_> error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient <_>`, + `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" <_> <_> error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient <_> insufficient <_>`, + `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" <_> <_> error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]"`, + `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" <_> <_> error:="insufficient cpu"`, + `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" <_> error:="[insufficient cpu, insufficient memory]"`, + `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" <_> error:="[insufficient memory, insufficient cpu]"`, + `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" <_> error:="pod node selector does not match the node label"`, + `I0507 <_> 1 node.go:339] "no Pod antiaffinity rule found" <_>`, + `I0507 <_> 1 nodeutilization.go:207] "Node is overutilized" <_> <_> <_>`, }, }, { @@ -263,91 +330,87 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { drain: New(DefaultConfig(), nil), inputFile: "testdata/calico.txt", patterns: []string{ - `2024-05-08 <_> [DEBUG][216945] felix/table.go 870: Found forward-reference <_> ipVersion=0x4 <_> <_> [0:0]" table="nat"`, - `2024-05-08 15:23:58.716 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line="*nat" table="nat"`, - `2024-05-08 15:23:58.716 [DEBUG][216945] felix/table.go 881: Not an append, skipping ipVersion=0x4 line="# Generated by iptables-nft-save v1.8.4 on Wed May 8 15:23:58 2024" table="nat"`, - `2024-05-08 15:23:58.715 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line="# Generated by iptables-nft-save v1.8.4 on Wed May 8 15:23:58 2024" table="nat"`, - `2024-05-08 15:23:58.684 [DEBUG][216945] felix/versionparse.go 118: Parsed kernel version version=5.15.0-1057`, - `2024-05-08 15:23:58.684 [DEBUG][216945] felix/versionparse.go 110: Raw kernel version rawVersion="Linux version 5.15.0-1057-azure (buildd@lcy02-amd64-033) (gcc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0, GNU ld (GNU Binutils for Ubuntu) 2.38) #65-Ubuntu SMP Fri Feb 9 18:39:24 UTC 2024\n"`, - `2024-05-08 15:23:58.684 [DEBUG][216945] felix/feature_detect.go 242: Ran iptables --version rawVersion="iptables v1.8.4 (legacy)\n"`, - `2024-05-08 <_> [DEBUG][216945] felix/feature_detect.go <_> <_> <_> <_> <_>`, - `2024-05-08 <_> [DEBUG][216945] felix/table.go <_> <_> <_> <_> <_> <_> <_> table="nat"`, - `2024-05-08 <_> [DEBUG][3576126] felix/int_dataplane.go <_> <_> <_> for MTU <_> <_> <_>`, - `2024-05-08 15:23:57.969 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "Cz_u1IQiXIMmKD4c", "", "", "", "", "", "", "", "", "", "", "", ""} chainName="INPUT" expectedRuleIDs=[]string{"Cz_u1IQiXIMmKD4c", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="filter"`, - `2024-05-08 15:23:57.969 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "tVnHkvAo15HuiPy0", "", "", "", "", ""} chainName="OUTPUT" expectedRuleIDs=[]string{"tVnHkvAo15HuiPy0", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="filter"`, - `2024-05-08 15:23:57.942 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "", "", "", "", "", "", "", "", "tVnHkvAo15HuiPy0", "", ""} chainName="OUTPUT" expectedRuleIDs=[]string{"tVnHkvAo15HuiPy0", "", "", "", "", "", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="raw"`, - `2024-05-08 15:23:57.942 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "6gwbT8clXdHdC1b1"} chainName="PREROUTING" expectedRuleIDs=[]string{"6gwbT8clXdHdC1b1", "", "", "", ""} ipVersion=0x4 table="raw"`, - `2024-05-08 <_> <_> felix/ipsets.go 607: Skipping expected Calico IP set. family="inet" <_>`, - `2024-05-08 15:23:58.604 [DEBUG][65] felix/ipsets.go <_> <_> <_> <_> <_> <_> <_>`, - `2024-05-08 <_> <_> felix/ipsets.go 366: Finished IPSets resync family="inet" numInconsistenciesFound=0 <_>`, - `2024-05-08 <_> <_> felix/ipsets.go 589: Whitelisting IP sets. <_> family="inet" <_>`, - `2024-05-08 <_> <_> felix/ipsets.go 467: Found member in dataplane <_> family="inet" <_> setID="this-host"`, - `2024-05-08 <_> <_> felix/ipsets.go 234: Asked to resync with the dataplane on next update. family="inet"`, - `bird: Netlink: No route to host`, - `2024-05-08 <_> [DEBUG][76] felix/route_table.go 661: Syncing interface routes <_> <_> ipVersion=0x4 <_>`, - `2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 584: Flag no OIF for full re-sync`, - `2024-05-08 <_> [DEBUG][76] felix/route_table.go <_> <_> <_> <_> <_> <_> <_> ipVersion=0x4 <_>`, - `2024-05-08 15:23:56.617 [DEBUG][76] felix/wireguard.go 1503: Wireguard is disabled and does not exist ifaceName="wireguard.cali" ipVersion=0x4`, - `2024-05-08 15:23:56.615 [DEBUG][76] felix/route_table.go 533: Check interfaces matching regex`, - `2024-05-08 15:23:56.615 [DEBUG][76] felix/wireguard.go 654: Wireguard is not in-sync - verifying wireguard configuration is removed ipVersion=0x4`, - `2024-05-08 <_> <_> felix/ipsets.go 426: Parsing IP set. family="inet" <_>`, - `2024-05-08 <_> <_> felix/ipsets.go <_> <_> <_> <_> <_> family="inet"`, + `2024-05-08 15:23:56.403 [DEBUG][615489] felix/table.go 699: Finished loading iptables state ipVersion=0x4 table="filter"`, + `2024-05-08 15:23:56.614 [DEBUG][76] felix/int_dataplane.go 1777: Refreshing routes`, `2024-05-08 15:23:56.615 [DEBUG][76] felix/route_rule.go 179: Queueing a resync of routing rules. ipVersion=4`, + `2024-05-08 15:23:56.615 [DEBUG][76] felix/route_table.go 480: Queueing a resync of routing table. ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, + `2024-05-08 15:23:56.615 [DEBUG][76] felix/route_table.go 480: Queueing a resync of routing table. ifaceRegex="^wireguard.cali$" ipVersion=0x4 tableIndex=1`, + `2024-05-08 15:23:56.615 [DEBUG][76] felix/route_table.go 533: Check interfaces matching regex`, `2024-05-08 15:23:56.615 [DEBUG][76] felix/wireguard.go 605: Queueing a resync of wireguard configuration ipVersion=0x4`, - `2024-05-08 <_> <_> felix/wireguard.go 652: Wireguard is not enabled, skipping sync ipVersion=0x4`, - `2024-05-08 <_> [DEBUG][76] felix/route_table.go 880: Processing route: 254 <_> <_> <_> ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, - `2024-05-08 15:23:56.615 [DEBUG][76] felix/route_table.go 480: Queueing a resync of routing table. <_> ipVersion=0x4 <_>`, - `2024-05-08 <_> [DEBUG][216945] felix/xdp_state.go <_> <_> <_> <_> <_> <_>`, - `2024-05-08 <_> [DEBUG][65] felix/xdp_state.go <_> <_> <_> <_> <_> <_>`, - `2024-05-08 15:23:58.035 [DEBUG][501368] felix/xdp_state.go <_> <_> <_> <_> <_> <_>`, - `2024-05-08 15:23:57.886 [DEBUG][3503680] felix/xdp_state.go <_> <_> <_> <_> <_> <_>`, - `2024-05-08 15:23:57.239 [DEBUG][732993] felix/xdp_state.go <_> <_> <_> <_> <_> <_>`, - `2024-05-08 <_> [DEBUG][76] felix/xdp_state.go <_> <_> <_> <_> <_> <_>`, - `2024-05-08 <_> [DEBUG][216945] felix/xdp_state.go <_> <_> <_> <_> <_>`, - `2024-05-08 <_> [DEBUG][65] felix/xdp_state.go <_> <_> <_> <_> <_>`, - `2024-05-08 15:23:58.035 [DEBUG][501368] felix/xdp_state.go <_> <_> <_> <_> <_>`, - `2024-05-08 15:23:57.886 [DEBUG][3503680] felix/xdp_state.go <_> <_> <_> <_> <_>`, - `2024-05-08 15:23:57.239 [DEBUG][732993] felix/xdp_state.go <_> <_> <_> <_> <_>`, - `2024-05-08 <_> [DEBUG][76] felix/xdp_state.go <_> <_> <_> <_> <_>`, - `2024-05-08 <_> <_> felix/xdp_state.go 1270: Finished processing pending diff state. bpfActions=intdataplane.xdpBPFActions{CreateMap:set.Typed[string]{}, RemoveMap:set.Typed[string]{}, AddToMap:map[string]map[string]uint32{}, RemoveFromMap:map[string]map[string]uint32{}, InstallXDP:set.Typed[string]{}, UninstallXDP:set.Typed[string]{}, MembersToDrop:map[string]map[string]uint32{}, MembersToAdd:map[string]map[string]uint32{}} family=4 newCS=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}}`, - `2024-05-08 <_> <_> felix/xdp_state.go 1043: Processing pending diff state. cs=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}} family=4`, + `2024-05-08 15:23:56.615 [DEBUG][76] felix/wireguard.go 654: Wireguard is not in-sync - verifying wireguard configuration is removed ipVersion=0x4`, + `2024-05-08 15:23:56.617 [DEBUG][76] felix/wireguard.go 1503: Wireguard is disabled and does not exist ifaceName="wireguard.cali" ipVersion=0x4`, + `2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 584: Flag no OIF for full re-sync`, + `2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 614: Synchronised routes on interface ifaceName="*NoOIF*" ifaceRegex="^wireguard.cali$" ipVersion=0x4 tableIndex=1`, + `2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 661: Syncing interface routes ifaceName="*NoOIF*" ifaceRegex="^wireguard.cali$" ipVersion=0x4 tableIndex=1`, + `2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 686: Reconcile against kernel programming ifaceName="*NoOIF*" ifaceRegex="^wireguard.cali$" ipVersion=0x4 tableIndex=1`, + `2024-05-08 15:23:57.942 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "", "", "", "", "", "", "", "", "tVnHkvAo15HuiPy0", "", ""} chainName="OUTPUT" expectedRuleIDs=[]string{"tVnHkvAo15HuiPy0", "", "", "", "", "", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="raw"`, + `2024-05-08 15:23:57.942 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "6gwbT8clXdHdC1b1"} chainName="PREROUTING" expectedRuleIDs=[]string{"6gwbT8clXdHdC1b1", "", "", "", ""} ipVersion=0x4 table="raw"`, + `2024-05-08 15:23:57.969 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "Cz_u1IQiXIMmKD4c", "", "", "", "", "", "", "", "", "", "", "", ""} chainName="INPUT" expectedRuleIDs=[]string{"Cz_u1IQiXIMmKD4c", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="filter"`, + `2024-05-08 15:23:57.969 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "tVnHkvAo15HuiPy0", "", "", "", "", ""} chainName="OUTPUT" expectedRuleIDs=[]string{"tVnHkvAo15HuiPy0", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="filter"`, + `2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 957: Examining link for MTU calculation mtu=1500 name="eth0"`, + `2024-05-08 15:23:58.680 [DEBUG][216945] felix/int_dataplane.go 1785: Reschedule kick received`, + `2024-05-08 15:23:58.681 [DEBUG][216945] felix/feature_detect.go 112: Refreshing detected iptables features`, + `2024-05-08 15:23:58.681 [DEBUG][216945] felix/table.go 944: Invalidating dataplane cache ipVersion=0x4 reason="refresh timer" table="nat"`, + `2024-05-08 15:23:58.684 [DEBUG][216945] felix/feature_detect.go 242: Ran iptables --version rawVersion="iptables v1.8.4 (legacy)\n"`, + `2024-05-08 15:23:58.684 [DEBUG][216945] felix/feature_detect.go 255: Parsed iptables version version=1.8.4`, + `2024-05-08 15:23:58.684 [DEBUG][216945] felix/table.go 604: Loading current iptables state and checking it is correct. ipVersion=0x4 table="nat"`, + `2024-05-08 15:23:58.684 [DEBUG][216945] felix/versionparse.go 110: Raw kernel version rawVersion="Linux version 5.15.0-1057-azure (buildd@lcy02-amd64-033) (gcc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0, GNU ld (GNU Binutils for Ubuntu) 2.38) #65-Ubuntu SMP Fri Feb 9 18:39:24 UTC 2024\n"`, + `2024-05-08 15:23:58.684 [DEBUG][216945] felix/versionparse.go 118: Parsed kernel version version=5.15.0-1057`, + `2024-05-08 15:23:58.715 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line="# Generated by iptables-nft-save v1.8.4 on Wed May 8 15:23:58 2024" table="nat"`, + `2024-05-08 15:23:58.716 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line="*nat" table="nat"`, + `2024-05-08 15:23:58.716 [DEBUG][216945] felix/table.go 881: Not an append, skipping ipVersion=0x4 line="# Generated by iptables-nft-save v1.8.4 on Wed May 8 15:23:58 2024" table="nat"`, + `2024-05-08 15:23:58.716 [DEBUG][216945] felix/table.go 881: Not an append, skipping ipVersion=0x4 line="*nat" table="nat"`, + `2024-05-08 15:23:58.717 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":POSTROUTING ACCEPT [0:0]" table="nat"`, + `2024-05-08 15:23:58.717 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="POSTROUTING" ipVersion=0x4 line=":POSTROUTING ACCEPT [0:0]" table="nat"`, + `2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":OUTPUT ACCEPT [0:0]" table="nat"`, + `2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":PREROUTING ACCEPT [0:0]" table="nat"`, + `2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="OUTPUT" ipVersion=0x4 line=":OUTPUT ACCEPT [0:0]" table="nat"`, + `2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="PREROUTING" ipVersion=0x4 line=":PREROUTING ACCEPT [0:0]" table="nat"`, `2024-05-08 <_> <_> felix/endpoint_mgr.go 443: Reporting endpoint status. dirtyEndpoints=set.Set{}`, - `2024-05-08 15:23:58.680 [DEBUG][216945] felix/int_dataplane.go <_> <_> <_> <_>`, - `2024-05-08 <_> <_> felix/int_dataplane.go 1807: Applying dataplane updates`, - `2024-05-08 15:23:56.614 [DEBUG][76] felix/int_dataplane.go 1777: Refreshing routes`, - `2024-05-08 <_> <_> felix/sync_client.go <_> <_> <_> <_> Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type=""`, - `2024-05-08 <_> <_> felix/sync_client.go 434: New message from Typha. connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} envelope=syncproto.Envelope{Message:syncproto.MsgPing{Timestamp:time.Date(2024, time.May, 8, 15, 23, <_> <_> time.Local)}} type=""`, + `2024-05-08 <_> <_> felix/health.go 167: Health: <_>`, + `2024-05-08 <_> <_> felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"async_calc_graph", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:20000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, <_> <_> loc:(*time.Location)(0x4ce3aa0)}}`, + `2024-05-08 <_> <_> felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"felix-startup", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:0, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, <_> <_> loc:(*time.Location)(0x4ce3aa0)}}`, + `2024-05-08 <_> <_> felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"int_dataplane", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:90000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, <_> <_> loc:(*time.Location)(0x4ce3aa0)}}`, `2024-05-08 <_> <_> felix/health.go 245: Calculated health summary healthResult=&health.HealthReport{Live:true, Ready:true, Detail:"+------------------+---------+----------------+-----------------+--------+\n| COMPONENT | TIMEOUT | LIVENESS | READINESS | DETAIL |\n+------------------+---------+----------------+-----------------+--------+\n| async_calc_graph | 20s | reporting live | reporting ready | |\n| felix-startup | 0s | reporting live | reporting ready | |\n| int_dataplane | 1m30s | reporting live | reporting ready | |\n+------------------+---------+----------------+-----------------+--------+"}`, - `2024-05-08 <_> <_> felix/health.go 196: Checking state of reporter <_> reports:health.HealthReport{Live:true, Ready:true, Detail:""}, <_> latest:health.HealthReport{Live:true, Ready:true, Detail:""}, <_> <_> loc:(*time.Location)(0x4ce3aa0)}}`, - `2024-05-08 <_> [DEBUG][501368] felix/health.go <_> <_> <_>`, - `2024-05-08 <_> [DEBUG][76] felix/health.go <_> <_> <_>`, - `2024-05-08 <_> [DEBUG][3576126] felix/health.go <_> <_> <_>`, - `2024-05-08 15:23:57.701 [DEBUG][216945] felix/health.go <_> <_> <_>`, - `2024-05-08 <_> [DEBUG][3583983] felix/health.go <_> <_> <_>`, - `2024-05-08 <_> [DEBUG][3596528] felix/health.go <_> <_> <_>`, - `2024-05-08 <_> [DEBUG][65] felix/health.go <_> <_> <_>`, - `2024-05-08 <_> [DEBUG][3383360] felix/health.go <_> <_> <_>`, - `2024-05-08 <_> [DEBUG][3435880] felix/health.go <_> <_> <_>`, - `2024-05-08 <_> [DEBUG][3794357] felix/health.go <_> <_> <_>`, - `2024-05-08 <_> [DEBUG][88347] felix/health.go <_> <_> <_>`, - `2024-05-08 <_> [DEBUG][990568] felix/health.go <_> <_> <_>`, - `2024-05-08 15:23:56.615 [DEBUG][2460733] felix/health.go <_> <_> <_>`, - `2024-05-08 <_> [DEBUG][3503680] felix/health.go <_> <_> <_>`, - `2024-05-08 <_> <_> felix/summary.go 100: Summarising <_> dataplane reconciliation loops over <_> <_> <_> <_>`, - `2024-05-08 <_> [DEBUG][65] felix/int_dataplane.go <_> <_> <_> <_> <_>`, - `2024-05-08 <_> [DEBUG][501368] felix/int_dataplane.go <_> <_> <_> <_> <_>`, - `2024-05-08 <_> [DEBUG][3503680] felix/int_dataplane.go <_> <_> <_> <_> <_>`, - `2024-05-08 <_> [DEBUG][732993] felix/int_dataplane.go <_> <_> <_> <_> <_>`, - `2024-05-08 <_> [DEBUG][216945] felix/int_dataplane.go <_> <_> <_> <_> <_>`, + `2024-05-08 <_> <_> felix/health.go <_> GET <_>`, + `2024-05-08 <_> <_> felix/int_dataplane.go 1773: Refreshing IP sets state`, + `2024-05-08 <_> <_> felix/int_dataplane.go 1807: Applying dataplane updates`, `2024-05-08 <_> <_> felix/int_dataplane.go 2080: Asked to reschedule. <_>`, - `2024-05-08 15:23:58.684 [DEBUG][216945] felix/table.go 604: Loading current iptables state and checking it is correct. ipVersion=0x4 table="nat"`, - `2024-05-08 15:23:58.681 [DEBUG][216945] felix/table.go <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> ipVersion=0x4 <_>`, - `2024-05-08 15:23:58.605 [DEBUG][65] felix/table.go <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> ipVersion=0x4 table="filter"`, - `2024-05-08 15:23:58.604 [DEBUG][65] felix/table.go <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> ipVersion=0x4 <_>`, - `2024-05-08 15:23:58.038 [DEBUG][501368] felix/table.go <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> ipVersion=0x4 <_>`, - `2024-05-08 <_> <_> felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 <_>`, + `2024-05-08 <_> <_> felix/ipsets.go 234: Asked to resync with the dataplane on next update. family="inet"`, + `2024-05-08 <_> <_> felix/ipsets.go 314: Resyncing ipsets with dataplane. family="inet"`, + `2024-05-08 <_> <_> felix/ipsets.go 366: Finished IPSets resync family="inet" numInconsistenciesFound=0 <_>`, + `2024-05-08 <_> <_> felix/ipsets.go 426: Parsing IP set. family="inet" <_>`, + `2024-05-08 <_> <_> felix/ipsets.go 467: Found member in dataplane <_> family="inet" <_> setID="this-host"`, + `2024-05-08 <_> <_> felix/ipsets.go 589: Whitelisting IP sets. ID="all-ipam-pools" family="inet" mainName="cali40all-ipam-pools"`, + `2024-05-08 <_> <_> felix/ipsets.go 589: Whitelisting IP sets. ID="masq-ipam-pools" family="inet" mainName="cali40masq-ipam-pools"`, + `2024-05-08 <_> <_> felix/ipsets.go 589: Whitelisting IP sets. ID="this-host" family="inet" mainName="cali40this-host"`, + `2024-05-08 <_> <_> felix/ipsets.go 607: Skipping expected Calico IP set. family="inet" <_>`, + `2024-05-08 <_> <_> felix/ipsets.go 643: No dirty IP sets. family="inet"`, + `2024-05-08 <_> <_> felix/summary.go 100: Summarising <_> dataplane reconciliation loops over <_> <_> <_> <_>`, + `2024-05-08 <_> <_> felix/sync_client.go 347: Ping received from Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type=""`, + `2024-05-08 <_> <_> felix/sync_client.go 356: Pong sent to Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type=""`, + `2024-05-08 <_> <_> felix/sync_client.go 434: New message from Typha. connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} envelope=syncproto.Envelope{Message:syncproto.MsgPing{Timestamp:time.Date(2024, time.May, 8, 15, 23, <_> <_> time.Local)}} type=""`, `2024-05-08 <_> <_> felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 <_>`, - `2024-05-08 <_> [DEBUG][615489] felix/table.go <_> <_> <_> <_> <_> ipVersion=0x4 table="filter"`, + `2024-05-08 <_> <_> felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 <_>`, + `2024-05-08 <_> <_> felix/wireguard.go 652: Wireguard is not enabled, skipping sync ipVersion=0x4`, + `2024-05-08 <_> <_> felix/xdp_state.go 1004: Updating ipsetIDsToMembers cache. family=4`, + `2024-05-08 <_> <_> felix/xdp_state.go 1043: Processing pending diff state. cs=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}} family=4`, + `2024-05-08 <_> <_> felix/xdp_state.go 1270: Finished processing pending diff state. bpfActions=intdataplane.xdpBPFActions{CreateMap:set.Typed[string]{}, RemoveMap:set.Typed[string]{}, AddToMap:map[string]map[string]uint32{}, RemoveFromMap:map[string]map[string]uint32{}, InstallXDP:set.Typed[string]{}, UninstallXDP:set.Typed[string]{}, MembersToDrop:map[string]map[string]uint32{}, MembersToAdd:map[string]map[string]uint32{}} family=4 newCS=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}}`, + `2024-05-08 <_> <_> felix/xdp_state.go 1605: Getting member changes. family=4 oldMembers=map[string]set.Set[string]{}`, + `2024-05-08 <_> <_> felix/xdp_state.go 1798: Processing BPF actions. family="ipv4"`, + `2024-05-08 <_> <_> felix/xdp_state.go 1932: Finished processing BPF actions. family="ipv4"`, + `2024-05-08 <_> <_> felix/xdp_state.go 968: Processing member updates. family=4`, + `2024-05-08 <_> [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 <_> - [0:0]" table="nat"`, + `2024-05-08 <_> [DEBUG][216945] felix/table.go 870: Found forward-reference <_> ipVersion=0x4 <_> - [0:0]" table="nat"`, + `2024-05-08 <_> [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection <_> <_>`, + `2024-05-08 <_> [DEBUG][615489] felix/table.go 677: Skipping expected chain <_> ipVersion=0x4 table="filter"`, + `2024-05-08 <_> [DEBUG][76] felix/route_table.go 557: Resync: found calico-owned interface <_> ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, + `2024-05-08 <_> [DEBUG][76] felix/route_table.go 614: Synchronised routes on interface <_> ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, + `2024-05-08 <_> [DEBUG][76] felix/route_table.go 661: Syncing interface routes <_> ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, + `2024-05-08 <_> [DEBUG][76] felix/route_table.go 686: Reconcile against kernel programming <_> ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, + `2024-05-08 <_> [DEBUG][76] felix/route_table.go 880: Processing route: 254 <_> <_> <_> ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, + `2024-05-08 <_> [DEBUG][76] felix/route_table.go 915: Route is correct <_> <_> ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, + `bird: Netlink: No route to host`, }, }, } @@ -369,6 +432,13 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { for _, cluster := range clusters { output = append(output, cluster.String()) } + slices.Sort(output) + + if outputPatternsForTestUpdate { + for _, pattern := range output { + fmt.Printf("`%s`,\n", pattern) + } + } require.Equal(t, tt.patterns, output) })
feat
Increase drain max depth from 8 -> 30 (#13063)
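Beyond the depth change, the test diff in this record makes the golden pattern comparison order-independent: extracted patterns are sorted before the assertion, and a debug flag reprints them in copy-pasteable form. A minimal sketch of that discipline, assuming a hypothetical assertPatterns helper and outputPatternsForTestUpdate constant (the real test inlines this logic in its run loop):

```go
package drain_test

import (
	"fmt"
	"slices"
	"testing"
)

// outputPatternsForTestUpdate is a hypothetical switch: flipped to true, the
// test prints the sorted patterns so the golden list in the test table can be
// regenerated by copy-paste instead of edited entry by entry.
const outputPatternsForTestUpdate = false

// assertPatterns sorts the extracted patterns before comparing them to the
// golden list, so the assertion is independent of cluster iteration order.
func assertPatterns(t *testing.T, golden, got []string) {
	t.Helper()
	slices.Sort(got)

	if outputPatternsForTestUpdate {
		for _, pattern := range got {
			fmt.Printf("`%s`,\n", pattern)
		}
	}

	if !slices.Equal(golden, got) {
		t.Fatalf("patterns diverged from golden list: got %d, want %d", len(got), len(golden))
	}
}
```

Keeping both the golden list and the extractor output sorted means a change such as a deeper parse tree shows up as genuinely added or removed patterns, not as reordering noise.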
4e04d07168a8c5cb7086ced8486c6d584faa1045
2024-04-19 13:08:48
Paul Rogers
fix: promtail race fixes (#12656)
false
diff --git a/clients/pkg/promtail/client/client_writeto_test.go b/clients/pkg/promtail/client/client_writeto_test.go index 3693b677f2ccf..4044d1641fb12 100644 --- a/clients/pkg/promtail/client/client_writeto_test.go +++ b/clients/pkg/promtail/client/client_writeto_test.go @@ -29,11 +29,14 @@ func TestClientWriter_LogEntriesAreReconstructedAndForwardedCorrectly(t *testing ch := make(chan api.Entry) defer close(ch) + var mu sync.Mutex var receivedEntries []api.Entry go func() { for e := range ch { + mu.Lock() receivedEntries = append(receivedEntries, e) + mu.Unlock() } }() @@ -72,12 +75,16 @@ func TestClientWriter_LogEntriesAreReconstructedAndForwardedCorrectly(t *testing } require.Eventually(t, func() bool { + mu.Lock() + defer mu.Unlock() return len(receivedEntries) == len(lines) }, time.Second*10, time.Second) + mu.Lock() for _, receivedEntry := range receivedEntries { require.Contains(t, lines, receivedEntry.Line, "entry line was not expected") require.Equal(t, model.LabelValue("test"), receivedEntry.Labels["app"]) } + mu.Unlock() } func TestClientWriter_LogEntriesWithoutMatchingSeriesAreIgnored(t *testing.T) { diff --git a/clients/pkg/promtail/client/manager_test.go b/clients/pkg/promtail/client/manager_test.go index f11821c82120a..2105e6a90e3d9 100644 --- a/clients/pkg/promtail/client/manager_test.go +++ b/clients/pkg/promtail/client/manager_test.go @@ -6,6 +6,7 @@ import ( "net/http" "net/url" "os" + "sync" "testing" "time" @@ -127,10 +128,13 @@ func TestManager_WALEnabled(t *testing.T) { require.NoError(t, err) require.Equal(t, "wal:test-client", manager.Name()) + var mu sync.Mutex receivedRequests := []utils.RemoteWriteRequest{} go func() { for req := range rwReceivedReqs { + mu.Lock() receivedRequests = append(receivedRequests, req) + mu.Unlock() } }() @@ -155,17 +159,21 @@ func TestManager_WALEnabled(t *testing.T) { } require.Eventually(t, func() bool { + mu.Lock() + defer mu.Unlock() return len(receivedRequests) == totalLines }, 5*time.Second, time.Second, "timed out waiting for requests to be received") var seenEntries = map[string]struct{}{} // assert over rw client received entries + mu.Lock() for _, req := range receivedRequests { require.Len(t, req.Request.Streams, 1, "expected 1 stream requests to be received") require.Len(t, req.Request.Streams[0].Entries, 1, "expected 1 entry in the only stream received per request") require.Equal(t, `{wal_enabled="true"}`, req.Request.Streams[0].Labels) seenEntries[req.Request.Streams[0].Entries[0].Line] = struct{}{} } + mu.Unlock() require.Len(t, seenEntries, totalLines) } @@ -182,10 +190,13 @@ func TestManager_WALDisabled(t *testing.T) { require.NoError(t, err) require.Equal(t, "multi:test-client", manager.Name()) + var mu sync.Mutex receivedRequests := []utils.RemoteWriteRequest{} go func() { for req := range rwReceivedReqs { + mu.Lock() receivedRequests = append(receivedRequests, req) + mu.Unlock() } }() @@ -209,17 +220,21 @@ func TestManager_WALDisabled(t *testing.T) { } require.Eventually(t, func() bool { + mu.Lock() + defer mu.Unlock() return len(receivedRequests) == totalLines }, 5*time.Second, time.Second, "timed out waiting for requests to be received") var seenEntries = map[string]struct{}{} // assert over rw client received entries + mu.Lock() for _, req := range receivedRequests { require.Len(t, req.Request.Streams, 1, "expected 1 stream requests to be received") require.Len(t, req.Request.Streams[0].Entries, 1, "expected 1 entry in the only stream received per request") require.Equal(t, `{pizza-flavour="fugazzeta"}`, req.Request.Streams[0].Labels) seenEntries[req.Request.Streams[0].Entries[0].Line] = struct{}{} } + mu.Unlock() require.Len(t, seenEntries, totalLines) } @@ -250,15 +265,20 @@ func TestManager_WALDisabled_MultipleConfigs(t *testing.T) { require.NoError(t, err) require.Equal(t, "multi:test-client,test-client-2", manager.Name()) + var mu sync.Mutex receivedRequests := []utils.RemoteWriteRequest{} ctx, cancel := context.WithCancel(context.Background()) go func(ctx context.Context) { for { select { case req := <-rwReceivedReqs: + mu.Lock() receivedRequests = append(receivedRequests, req) + mu.Unlock() case req := <-rwReceivedReqs2: + mu.Lock() receivedRequests = append(receivedRequests, req) + mu.Unlock() case <-ctx.Done(): return } @@ -289,16 +309,20 @@ func TestManager_WALDisabled_MultipleConfigs(t *testing.T) { // times 2 due to clients being run expectedTotalLines := totalLines * 2 require.Eventually(t, func() bool { + mu.Lock() + defer mu.Unlock() return len(receivedRequests) == expectedTotalLines }, 5*time.Second, time.Second, "timed out waiting for requests to be received") var seenEntries = map[string]struct{}{} // assert over rw client received entries + mu.Lock() for _, req := range receivedRequests { require.Len(t, req.Request.Streams, 1, "expected 1 stream requests to be received") require.Len(t, req.Request.Streams[0].Entries, 1, "expected 1 entry in the only stream received per request") seenEntries[fmt.Sprintf("%s-%s", req.Request.Streams[0].Labels, req.Request.Streams[0].Entries[0].Line)] = struct{}{} } + mu.Unlock() require.Len(t, seenEntries, expectedTotalLines) } diff --git a/clients/pkg/promtail/promtail_wal_test.go b/clients/pkg/promtail/promtail_wal_test.go index dfc7ce7273453..b4027ed2d9091 100644 --- a/clients/pkg/promtail/promtail_wal_test.go +++ b/clients/pkg/promtail/promtail_wal_test.go @@ -59,19 +59,25 @@ func TestPromtailWithWAL_SingleTenant(t *testing.T) { // create receive channel and start a collect routine receivedCh := make(chan utils.RemoteWriteRequest) received := map[string][]push.Entry{} + var mu sync.Mutex + // Create a channel for log messages + logCh := make(chan string, 100) // Buffered channel to avoid blocking + wg.Add(1) go func() { defer wg.Done() for req := range receivedCh { + mu.Lock() // Add some observability to the requests received in the remote write endpoint var counts []string for _, str := range req.Request.Streams { counts = append(counts, fmt.Sprint(len(str.Entries))) } - t.Logf("received request: %s", counts) + logCh <- fmt.Sprintf("received request: %s", counts) for _, stream := range req.Request.Streams { received[stream.Labels] = append(received[stream.Labels], stream.Entries...) } + mu.Unlock() } }() @@ -120,14 +126,23 @@ func TestPromtailWithWAL_SingleTenant(t *testing.T) { for i := 0; i < entriesToWrite; i++ { _, err = logsFile.WriteString(fmt.Sprintf("log line # %d\n", i)) if err != nil { - t.Logf("error writing to log file. Err: %s", err.Error()) + logCh <- fmt.Sprintf("error writing to log file. Err: %s", err.Error()) } // not overkill log file time.Sleep(1 * time.Millisecond) } }() + // Goroutine to handle log messages + go func() { + for msg := range logCh { + t.Log(msg) + } + }() + require.Eventually(t, func() bool { + mu.Lock() + defer mu.Unlock() if seen, ok := received[expectedLabelSet]; ok { return len(seen) == entriesToWrite } @@ -158,11 +173,13 @@ func TestPromtailWithWAL_MultipleTenants(t *testing.T) { receivedCh := make(chan utils.RemoteWriteRequest) // received is a mapping from tenant, string-formatted label set to received entries received := map[string]map[string][]push.Entry{} + var mu sync.Mutex var totalReceived = 0 wg.Add(1) go func() { defer wg.Done() for req := range receivedCh { + mu.Lock() // start received label entries map if first time tenant is seen if _, ok := received[req.TenantID]; !ok { received[req.TenantID] = map[string][]push.Entry{} @@ -173,6 +190,7 @@ func TestPromtailWithWAL_MultipleTenants(t *testing.T) { // increment total count totalReceived += len(stream.Entries) } + mu.Unlock() } }() @@ -250,15 +268,19 @@ func TestPromtailWithWAL_MultipleTenants(t *testing.T) { // wait for all entries to be remote written require.Eventually(t, func() bool { + mu.Lock() + defer mu.Unlock() return totalReceived == entriesToWrite }, time.Second*20, time.Second, "timed out waiting for entries to be remote written") // assert over received entries require.Len(t, received, expectedTenantCounts, "not expected tenant count") + mu.Lock() for tenantID := 0; tenantID < expectedTenantCounts; tenantID++ { // we should've received at least entriesToWrite / expectedTenantCounts require.GreaterOrEqual(t, len(received[fmt.Sprint(tenantID)][expectedLabelSet]), entriesToWrite/expectedTenantCounts) } + mu.Unlock() pr.Shutdown() close(receivedCh) diff --git a/clients/pkg/promtail/targets/cloudflare/util_test.go b/clients/pkg/promtail/targets/cloudflare/util_test.go index 18efefee5cb55..a702bb90f5ddf 100644 --- a/clients/pkg/promtail/targets/cloudflare/util_test.go +++ b/clients/pkg/promtail/targets/cloudflare/util_test.go @@ -3,6 +3,7 @@ package cloudflare import ( "context" "errors" + "sync" "time" "github.com/grafana/cloudflare-go" @@ -13,10 +14,13 @@ var ErrorLogpullReceived = errors.New("error logpull received") type fakeCloudflareClient struct { mock.Mock + mu sync.Mutex } func (f *fakeCloudflareClient) CallCount() int { var actualCalls int + f.mu.Lock() + defer f.mu.Unlock() for _, call := range f.Calls { if call.Method == "LogpullReceived" { actualCalls++ @@ -59,7 +63,9 @@ func newFakeCloudflareClient() *fakeCloudflareClient { } func (f *fakeCloudflareClient) LogpullReceived(ctx context.Context, start, end time.Time) (cloudflare.LogpullReceivedIterator, error) { + f.mu.Lock() r := f.Called(ctx, start, end) + f.mu.Unlock() if r.Get(0) != nil { it := r.Get(0).(cloudflare.LogpullReceivedIterator) if it.Err() == ErrorLogpullReceived { diff --git a/clients/pkg/promtail/targets/file/filetarget.go b/clients/pkg/promtail/targets/file/filetarget.go index 97dc10f148293..0ade51902b492 100644 --- a/clients/pkg/promtail/targets/file/filetarget.go +++ b/clients/pkg/promtail/targets/file/filetarget.go @@ -4,6 +4,7 @@ import ( "flag" "os" "path/filepath" + "sync" "time" "github.com/bmatcuk/doublestar" @@ -92,12 +93,14 @@ type FileTarget struct { fileEventWatcher chan fsnotify.Event targetEventHandler chan fileTargetEvent watches map[string]struct{} + watchesMutex sync.Mutex path string pathExclude string quit chan struct{} done chan struct{} - readers map[string]Reader + readersMutex sync.Mutex targetConfig *Config watchConfig WatchConfig @@ -150,7 +153,7 @@ func NewFileTarget( // Ready if at least one file is being tailed func (t *FileTarget) Ready() bool { - return len(t.readers) > 0 + return t.getReadersLen() > 0 } // Stop the target. @@ -178,17 +181,21 @@ func (t *FileTarget) Labels() model.LabelSet { // Details implements a Target func (t *FileTarget) Details() interface{} { files := map[string]int64{} + t.readersMutex.Lock() for fileName := range t.readers { files[fileName], _ = t.positions.Get(fileName) } + t.readersMutex.Unlock() return files } func (t *FileTarget) run() { defer func() { + t.readersMutex.Lock() for _, v := range t.readers { v.Stop() } + t.readersMutex.Unlock() level.Info(t.logger).Log("msg", "filetarget: watcher closed, tailer stopped, positions saved", "path", t.path) close(t.done) }() @@ -281,15 +288,22 @@ func (t *FileTarget) sync() error { } // Add any directories which are not already being watched. + t.watchesMutex.Lock() toStartWatching := missing(t.watches, dirs) + t.watchesMutex.Unlock() t.startWatching(toStartWatching) // Remove any directories which no longer need watching. + t.watchesMutex.Lock() toStopWatching := missing(dirs, t.watches) + t.watchesMutex.Unlock() + t.stopWatching(toStopWatching) // fsnotify.Watcher doesn't allow us to see what is currently being watched so we have to track it ourselves. + t.watchesMutex.Lock() t.watches = dirs + t.watchesMutex.Unlock() // Check if any running tailers have stopped because of errors and remove them from the running list // (They will be restarted in startTailing) @@ -299,7 +313,9 @@ func (t *FileTarget) sync() error { t.startTailing(matches) // Stop tailing any files which no longer exist + t.readersMutex.Lock() toStopTailing := toStopTailing(matches, t.readers) + t.readersMutex.Unlock() t.stopTailingAndRemovePosition(toStopTailing) return nil @@ -307,9 +323,10 @@ func (t *FileTarget) sync() error { func (t *FileTarget) startWatching(dirs map[string]struct{}) { for dir := range dirs { - if _, ok := t.watches[dir]; ok { + if _, ok := t.getWatch(dir); ok { continue } + level.Info(t.logger).Log("msg", "watching new directory", "directory", dir) t.targetEventHandler <- fileTargetEvent{ path: dir, @@ -320,9 +337,10 @@ func (t *FileTarget) startWatching(dirs map[string]struct{}) { func (t *FileTarget) stopWatching(dirs map[string]struct{}) { for dir := range dirs { - if _, ok := t.watches[dir]; !ok { + if _, ok := t.getWatch(dir); !ok { continue } + level.Info(t.logger).Log("msg", "removing directory from watcher", "directory", dir) t.targetEventHandler <- fileTargetEvent{ path: dir, @@ -333,7 +351,7 @@ func (t *FileTarget) stopWatching(dirs map[string]struct{}) { func (t *FileTarget) startTailing(ps []string) { for _, p := range ps { - if _, ok := t.readers[p]; ok { + if _, ok := t.getReader(p); ok { continue } @@ -387,7 +405,7 @@ func (t *FileTarget) startTailing(ps []string) { } reader = tailer } - t.readers[p] = reader + t.setReader(p, reader) } } @@ -395,10 +413,10 @@ func (t *FileTarget) startTailing(ps []string) { // Call this when a file no longer exists and you want to remove all traces of it. func (t *FileTarget) stopTailingAndRemovePosition(ps []string) { for _, p := range ps { - if reader, ok := t.readers[p]; ok { + if reader, ok := t.getReader(p); ok { reader.Stop() t.positions.Remove(reader.Path()) - delete(t.readers, p) + t.removeReader(p) } } } @@ -406,6 +424,7 @@ func (t *FileTarget) stopTailingAndRemovePosition(ps []string) { // pruneStoppedTailers removes any tailers which have stopped running from // the list of active tailers. This allows them to be restarted if there were errors. func (t *FileTarget) pruneStoppedTailers() { + t.readersMutex.Lock() toRemove := make([]string, 0, len(t.readers)) for k, t := range t.readers { if !t.IsRunning() { @@ -415,6 +434,45 @@ func (t *FileTarget) pruneStoppedTailers() { for _, tr := range toRemove { delete(t.readers, tr) } + t.readersMutex.Unlock() +} + +func (t *FileTarget) getReadersLen() int { + t.readersMutex.Lock() + defer t.readersMutex.Unlock() + return len(t.readers) +} + +func (t *FileTarget) getReader(val string) (Reader, bool) { + t.readersMutex.Lock() + defer t.readersMutex.Unlock() + reader, ok := t.readers[val] + return reader, ok +} + +func (t *FileTarget) setReader(val string, reader Reader) { + t.readersMutex.Lock() + defer t.readersMutex.Unlock() + t.readers[val] = reader +} + +func (t *FileTarget) getWatch(val string) (struct{}, bool) { + t.watchesMutex.Lock() + defer t.watchesMutex.Unlock() + fileTarget, ok := t.watches[val] + return fileTarget, ok +} + +func (t *FileTarget) removeReader(val string) { + t.readersMutex.Lock() + defer t.readersMutex.Unlock() + delete(t.readers, val) +} + +func (t *FileTarget) getWatchesLen() int { + t.watchesMutex.Lock() + defer t.watchesMutex.Unlock() + return len(t.watches) } func toStopTailing(nt []string, et map[string]Reader) []string { @@ -442,7 +500,7 @@ func toStopTailing(nt []string, et map[string]Reader) []string { func (t *FileTarget) reportSize(ms []string) { for _, m := range ms { // Ask the tailer to update the size if a tailer exists, this keeps position and size metrics in sync - if reader, ok := t.readers[m]; ok { + if reader, ok := t.getReader(m); ok { err := reader.MarkPositionAndSize() if err != nil { level.Warn(t.logger).Log("msg", "failed to get file size from tailer, ", "file", m, "error", err) @@ -459,7 +517,6 @@ func (t *FileTarget) reportSize(ms []string) { } t.metrics.totalBytes.WithLabelValues(m).Set(float64(fi.Size())) } - } } diff --git a/clients/pkg/promtail/targets/file/filetarget_test.go b/clients/pkg/promtail/targets/file/filetarget_test.go index 57bc31b0802ee..579ea19e2e56e 100644 --- a/clients/pkg/promtail/targets/file/filetarget_test.go +++ b/clients/pkg/promtail/targets/file/filetarget_test.go @@ -76,10 +76,10 @@ func TestFileTargetSync(t *testing.T) { assert.NoError(t, err) // Start with nothing watched. - if len(target.watches) != 0 { + if target.getWatchesLen() != 0 { t.Fatal("Expected watches to be 0 at this point in the test...") } - if len(target.readers) != 0 { + if target.getReadersLen() != 0 { t.Fatal("Expected tails to be 0 at this point in the test...") } @@ -90,10 +90,10 @@ func TestFileTargetSync(t *testing.T) { err = target.sync() assert.NoError(t, err) - if len(target.watches) != 0 { + if target.getWatchesLen() != 0 { t.Fatal("Expected watches to be 0 at this point in the test...") } - if len(target.readers) != 0 { + if target.getReadersLen() != 0 { t.Fatal("Expected tails to be 0 at this point in the test...") } @@ -106,10 +106,10 @@ func TestFileTargetSync(t *testing.T) { err = target.sync() assert.NoError(t, err) - assert.Equal(t, 1, len(target.watches), + assert.Equal(t, 1, target.getWatchesLen(), "Expected watches to be 1 at this point in the test...", ) - assert.Equal(t, 1, len(target.readers), + assert.Equal(t, 1, target.getReadersLen(), "Expected tails to be 1 at this point in the test...", ) @@ -124,10 +124,10 @@ func TestFileTargetSync(t *testing.T) { err = target.sync() assert.NoError(t, err) - assert.Equal(t, 1, len(target.watches), + assert.Equal(t, 1, target.getWatchesLen(), "Expected watches to be 1 at this point in the test...", ) - assert.Equal(t, 2, len(target.readers), + assert.Equal(t, 2, target.getReadersLen(), "Expected tails to be 2 at this point in the test...", ) @@ -138,10 +138,10 @@ func TestFileTargetSync(t *testing.T) { err = target.sync() assert.NoError(t, err) - assert.Equal(t, 1, len(target.watches), + assert.Equal(t, 1, target.getWatchesLen(), "Expected watches to be 1 at this point in the test...", ) - assert.Equal(t, 1, len(target.readers), + assert.Equal(t, 1, target.getReadersLen(), "Expected tails to be 1 at this point in the test...", ) @@ -152,10 +152,10 @@ func TestFileTargetSync(t *testing.T) { err = target.sync() assert.NoError(t, err) - assert.Equal(t, 0, len(target.watches), + assert.Equal(t, 0, target.getWatchesLen(), "Expected watches to be 0 at this point in the test...", ) - assert.Equal(t, 0, len(target.readers), + assert.Equal(t, 0, target.getReadersLen(), "Expected tails to be 0 at this point in the test...", ) requireEventually(t, func() bool { @@ -198,7 +198,7 @@ func TestFileTarget_StopsTailersCleanly(t *testing.T) { assert.NoError(t, err) requireEventually(t, func() bool { - return len(target.readers) == 1 + return target.getReadersLen() == 1 }, "expected 1 tailer to be created") require.NoError(t, testutil.GatherAndCompare(registry, bytes.NewBufferString(` @@ -208,12 +208,19 @@ func TestFileTarget_StopsTailersCleanly(t *testing.T) { `), "promtail_files_active_total")) // Inject an error to tailer - initailTailer := target.readers[logFile].(*tailer) + + initialReader, _ := target.getReader(logFile) + initailTailer := initialReader.(*tailer) + _ = initailTailer.tail.Tomb.Killf("test: network file systems can be unreliable") // Tailer will be replaced by a new one requireEventually(t, func() bool { - return len(target.readers) == 1 && target.readers[logFile].(*tailer) != initailTailer + currentReader, _ := target.getReader(logFile) + var currentTailer *tailer + if currentReader != nil { + currentTailer = currentReader.(*tailer) + } + return target.getReadersLen() == 1 && currentTailer != initailTailer }, "expected dead tailer to be replaced by a new one") // The old tailer should be stopped: @@ -389,10 +396,10 @@ func TestFileTargetPathExclusion(t *testing.T) { assert.NoError(t, err) // Start with nothing watched. - if len(target.watches) != 0 { + if target.getWatchesLen() != 0 { t.Fatal("Expected watches to be 0 at this point in the test...") } - if len(target.readers) != 0 { + if target.getReadersLen() != 0 { t.Fatal("Expected tails to be 0 at this point in the test...") } @@ -407,10 +414,10 @@ func TestFileTargetPathExclusion(t *testing.T) { err = target.sync() assert.NoError(t, err) - if len(target.watches) != 0 { + if target.getWatchesLen() != 0 { t.Fatal("Expected watches to be 0 at this point in the test...") } - if len(target.readers) != 0 { + if target.getReadersLen() != 0 { t.Fatal("Expected tails to be 0 at this point in the test...") } @@ -425,10 +432,10 @@ func TestFileTargetPathExclusion(t *testing.T) { err = target.sync() assert.NoError(t, err) - assert.Equal(t, 2, len(target.watches), + assert.Equal(t, 2, target.getWatchesLen(), "Expected watches to be 2 at this point in the test...", ) - assert.Equal(t, 3, len(target.readers), + assert.Equal(t, 3, target.getReadersLen(), "Expected tails to be 3 at this point in the test...", ) requireEventually(t, func() bool { @@ -446,10 +453,10 @@ func TestFileTargetPathExclusion(t *testing.T) { err = target.sync() assert.NoError(t, err) - assert.Equal(t, 1, len(target.watches), + assert.Equal(t, 1, target.getWatchesLen(), "Expected watches to be 1 at this point in the test...", ) - assert.Equal(t, 1, len(target.readers), + assert.Equal(t, 1, target.getReadersLen(), "Expected tails to be 1 at this point in the test...", ) requireEventually(t, func() bool { @@ -538,7 +545,7 @@ func TestHandleFileCreationEvent(t *testing.T) { Op: fsnotify.Create, } requireEventually(t, func() bool { - return len(target.readers) == 1 + return target.getReadersLen() == 1 }, "Expected tails to be 1 at this point in the test...") } diff --git a/clients/pkg/promtail/targets/kafka/consumer_test.go b/clients/pkg/promtail/targets/kafka/consumer_test.go index 7420bdf6c1f11..a4d87e7c3c71e 100644 --- a/clients/pkg/promtail/targets/kafka/consumer_test.go +++ b/clients/pkg/promtail/targets/kafka/consumer_test.go @@ -3,6 +3,7 @@ package kafka import ( "context" "errors" + "sync" "testing" "time" @@ -34,7 +35,7 @@ func (f *fakeTarget) Details() interface{} { return nil } func Test_ComsumerConsume(t *testing.T) { var ( - group = &testConsumerGroupHandler{} + group = &testConsumerGroupHandler{mu: &sync.Mutex{}} session = &testSession{} ctx, cancel = context.WithCancel(context.Background()) c = &consumer{ @@ -86,6 +87,7 @@ func Test_ComsumerConsume(t *testing.T) { func Test_ComsumerRetry(_ *testing.T) { var ( group = &testConsumerGroupHandler{ + mu: &sync.Mutex{}, returnErr: errors.New("foo"), } ctx, cancel = context.WithCancel(context.Background()) diff --git a/clients/pkg/promtail/targets/kafka/target_syncer_test.go b/clients/pkg/promtail/targets/kafka/target_syncer_test.go index 1f0255cedf62e..6514afeefcb01 100644 --- a/clients/pkg/promtail/targets/kafka/target_syncer_test.go +++ b/clients/pkg/promtail/targets/kafka/target_syncer_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "reflect" + "sync" "testing" "time" @@ -24,7 +25,7 @@ import ( func Test_TopicDiscovery(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) - group := &testConsumerGroupHandler{} + group := &testConsumerGroupHandler{mu: &sync.Mutex{}} TopicPollInterval = time.Microsecond var closed bool client := &mockKafkaClient{ @@ -52,21 +53,28 @@ func Test_TopicDiscovery(t *testing.T) { } ts.loop() + tmpTopics := []string{} require.Eventually(t, func() bool { if !group.consuming.Load() { return false } + group.mu.Lock() + defer group.mu.Unlock() + tmpTopics = group.topics return reflect.DeepEqual([]string{"topic1"}, group.topics) - }, 200*time.Millisecond, time.Millisecond, "expected topics: %v, got: %v", []string{"topic1"}, group.topics) + }, 200*time.Millisecond, time.Millisecond, "expected topics: %v, got: %v", []string{"topic1"}, tmpTopics) + client.mu.Lock() client.topics = []string{"topic1", "topic2"} // introduce new topics + client.mu.Unlock() require.Eventually(t, func() bool { if !group.consuming.Load() { return false } + tmpTopics = group.topics return reflect.DeepEqual([]string{"topic1", "topic2"}, group.topics) - }, 200*time.Millisecond, time.Millisecond, "expected topics: %v, got: %v", []string{"topic1", "topic2"}, group.topics) + }, 200*time.Millisecond, time.Millisecond, "expected topics: %v, got: %v", []string{"topic1", "topic2"}, tmpTopics) require.NoError(t, ts.Stop()) require.True(t, closed) diff --git a/clients/pkg/promtail/targets/kafka/target_test.go b/clients/pkg/promtail/targets/kafka/target_test.go index 0f8061027de3a..3ffe4ac69f16b 100644 --- a/clients/pkg/promtail/targets/kafka/target_test.go +++ b/clients/pkg/promtail/targets/kafka/target_test.go @@ -21,6 +21,7 @@ type testConsumerGroupHandler struct { handler sarama.ConsumerGroupHandler ctx context.Context topics []string + mu *sync.Mutex returnErr error @@ -32,7 +33,9 @@ func (c *testConsumerGroupHandler) Consume(ctx context.Context, topics []string, return c.returnErr } c.ctx = ctx + c.mu.Lock() c.topics = topics + c.mu.Unlock() c.handler = handler c.consuming.Store(true) <-ctx.Done() diff --git a/clients/pkg/promtail/targets/kafka/topics_test.go b/clients/pkg/promtail/targets/kafka/topics_test.go index e24d8fd1eb604..447a8a0a65afc 100644 --- a/clients/pkg/promtail/targets/kafka/topics_test.go +++ b/clients/pkg/promtail/targets/kafka/topics_test.go @@ -3,12 +3,14 @@ package kafka import ( "errors" "strings" + "sync" "testing" "github.com/stretchr/testify/require" ) type mockKafkaClient struct { + mu sync.Mutex topics []string err error } @@ -18,6 +20,8 @@ func (m *mockKafkaClient) RefreshMetadata(_ ...string) error { } func (m *mockKafkaClient) Topics() ([]string, error) { + m.mu.Lock() + defer m.mu.Unlock() return m.topics, m.err } diff --git a/clients/pkg/promtail/utils/entries_test.go b/clients/pkg/promtail/utils/entries_test.go index c9b098d9ee4a4..0164794a89d2d 100644 --- a/clients/pkg/promtail/utils/entries_test.go +++ b/clients/pkg/promtail/utils/entries_test.go @@ -43,7 +43,14 @@ func TestFanoutEntryHandler_SuccessfulFanout(t *testing.T) { } require.Eventually(t, func() bool { - return len(eh1.Received) == len(expectedLines) && len(eh2.Received) == len(expectedLines) + eh1.mu.Lock() + len1 := len(eh1.Received) + eh1.mu.Unlock() + eh2.mu.Lock() + len2 := len(eh2.Received) + eh2.mu.Unlock() + + return len1 == len(expectedLines) && len2 == len(expectedLines) }, time.Second*10, time.Second, "expected entries to be received by fanned out channels") } @@ -77,6 +84,8 @@ func TestFanoutEntryHandler_TimeoutWaitingForEntriesToBeSent(t *testing.T) { }() require.Eventually(t, func() bool { + controlEH.mu.Lock() + defer controlEH.mu.Unlock() return len(controlEH.Received) == 1 }, time.Second*5, time.Second, "expected control entry handler to receive an entry") @@ -89,6 +98,7 @@ type savingEntryHandler struct { entries chan api.Entry Received []api.Entry wg sync.WaitGroup + mu sync.Mutex } func newSavingEntryHandler() *savingEntryHandler { @@ -99,7 +109,9 @@ func newSavingEntryHandler() *savingEntryHandler { eh.wg.Add(1) go func() { for e := range eh.entries { + eh.mu.Lock() eh.Received = append(eh.Received, e) + eh.mu.Unlock() } eh.wg.Done() }() diff --git a/clients/pkg/promtail/wal/watcher_test.go b/clients/pkg/promtail/wal/watcher_test.go index b41880f5d20ff..adf6dbef32de0 100644 --- a/clients/pkg/promtail/wal/watcher_test.go +++ b/clients/pkg/promtail/wal/watcher_test.go @@ -3,6 +3,7 @@ package wal import ( "fmt" "os" + "sync" "testing" "time" @@ -25,6 +26,7 @@ type testWriteTo struct { series map[uint64]model.LabelSet logger log.Logger ReceivedSeriesReset []int + mu sync.Mutex } func (t *testWriteTo) StoreSeries(series []record.RefSeries, _ int) { @@ -42,10 +44,12 @@ func (t *testWriteTo) AppendEntries(entries wal.RefEntries) error { var entry api.Entry if l, ok := t.series[uint64(entries.Ref)]; ok { entry.Labels = l + t.mu.Lock() for _, e := range entries.Entries { entry.Entry = e t.ReadEntries = append(t.ReadEntries, entry) } + t.mu.Unlock() } else { level.Debug(t.logger).Log("series for entry not found") } @@ -94,11 +98,15 @@ var cases = map[string]watcherTest{ res.notifyWrite() require.Eventually(t, func() bool { + res.writeTo.mu.Lock() + defer res.writeTo.mu.Unlock() return len(res.writeTo.ReadEntries) == 3 }, time.Second*10, time.Second, "expected watcher to catch up with written entries") + res.writeTo.mu.Lock() for _, readEntry := range res.writeTo.ReadEntries { require.Contains(t, lines, readEntry.Line, "not expected log line") } + res.writeTo.mu.Unlock() }, "read entries from WAL, just using backup timer to trigger reads": func(t *testing.T, res *watcherTestResources) { @@ -127,11 +135,15 @@ var cases = map[string]watcherTest{ // do not notify, let the backup timer trigger the watcher reads require.Eventually(t, func() bool { + res.writeTo.mu.Lock() + defer res.writeTo.mu.Unlock() return len(res.writeTo.ReadEntries) == 3 }, time.Second*10, time.Second, "expected watcher to catch up with written entries") + res.writeTo.mu.Lock() for _, readEntry := range res.writeTo.ReadEntries { require.Contains(t, lines, readEntry.Line, "not expected log line") } + res.writeTo.mu.Unlock() }, "continue reading entries in next segment after initial segment is closed": func(t *testing.T, res *watcherTestResources) { @@ -164,11 +176,15 @@ var cases = map[string]watcherTest{ res.notifyWrite() require.Eventually(t, func() bool { + res.writeTo.mu.Lock() + defer res.writeTo.mu.Unlock() return len(res.writeTo.ReadEntries) == 3 }, time.Second*10, time.Second, "expected watcher to catch up with written entries") + res.writeTo.mu.Lock() for _, readEntry := range res.writeTo.ReadEntries { require.Contains(t, lines, readEntry.Line, "not expected log line") } + res.writeTo.mu.Unlock() err := res.nextWALSegment() require.NoError(t, err, "expected no error when moving to next wal segment") @@ -186,12 +202,16 @@ var cases = map[string]watcherTest{ res.notifyWrite() require.Eventually(t, func() bool { + res.writeTo.mu.Lock() + defer res.writeTo.mu.Unlock() return len(res.writeTo.ReadEntries) == 6 }, time.Second*10, time.Second, "expected watcher to catch up after new wal segment is cut") // assert over second half of entries + res.writeTo.mu.Lock() for _, readEntry := range res.writeTo.ReadEntries[3:] { require.Contains(t, linesAfter, readEntry.Line, "not expected log line") } + res.writeTo.mu.Unlock() }, "start reading from last segment": func(t *testing.T, res *watcherTestResources) { @@ -234,12 +254,16 @@ var cases = map[string]watcherTest{ res.notifyWrite() require.Eventually(t, func() bool { + res.writeTo.mu.Lock() + defer res.writeTo.mu.Unlock() return len(res.writeTo.ReadEntries) == 3 }, time.Second*10, time.Second, "expected watcher to catch up after new wal segment is cut") // assert over second half of entries + res.writeTo.mu.Lock() for _, readEntry := range res.writeTo.ReadEntries[3:] { require.Contains(t, linesAfter, readEntry.Line, "not expected log line") } + res.writeTo.mu.Unlock() }, "watcher receives segments reclaimed notifications correctly": func(t *testing.T, res *watcherTestResources) { @@ -259,6 +283,8 @@ var cases = map[string]watcherTest{ require.NoError(t, res.syncWAL()) res.notifyWrite() require.Eventually(t, func() bool { + res.writeTo.mu.Lock() + defer res.writeTo.mu.Unlock() return len(res.writeTo.ReadEntries) == expectedReadEntries }, time.Second*10, time.Second, "expected watcher to catch up with written entries") } @@ -275,6 +301,8 @@ var cases = map[string]watcherTest{ // collecting segment 0 res.notifySegmentReclaimed(0) require.Eventually(t, func() bool { + res.writeTo.mu.Lock() + defer res.writeTo.mu.Unlock() return len(res.writeTo.ReceivedSeriesReset) == 1 && res.writeTo.ReceivedSeriesReset[0] == 0 }, time.Second*10, time.Second, "timed out waiting to receive series reset") @@ -290,6 +318,8 @@ var cases = map[string]watcherTest{ res.notifySegmentReclaimed(2) // Expect second SeriesReset call to have the highest numbered deleted segment, 2 require.Eventually(t, func() bool { + res.writeTo.mu.Lock() + defer res.writeTo.mu.Unlock() t.Logf("received series reset: %v", res.writeTo.ReceivedSeriesReset) return len(res.writeTo.ReceivedSeriesReset) == 2 && res.writeTo.ReceivedSeriesReset[1] == 2 }, time.Second*10, time.Second, "timed out waiting to receive series reset") diff --git a/clients/pkg/promtail/wal/writer_test.go b/clients/pkg/promtail/wal/writer_test.go index a9c637f98b1ce..4dae546044933 100644 --- a/clients/pkg/promtail/wal/writer_test.go +++ b/clients/pkg/promtail/wal/writer_test.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "path/filepath" + "sync" "testing" "time" @@ -77,6 +78,8 @@ func TestWriter_OldSegmentsAreCleanedUp(t *testing.T) { maxSegmentAge := time.Second * 2 + var mu1 sync.Mutex + var mu2 sync.Mutex subscriber1 := []int{} subscriber2 := []int{} @@ -92,10 +95,14 @@ func TestWriter_OldSegmentsAreCleanedUp(t *testing.T) { // add writer events subscriber. Add multiple to test fanout writer.SubscribeCleanup(notifySegmentsCleanedFunc(func(num int) { + mu1.Lock() subscriber1 = append(subscriber1, num) + mu1.Unlock() })) writer.SubscribeCleanup(notifySegmentsCleanedFunc(func(num int) { + mu2.Lock() subscriber2 = append(subscriber2, num) + mu2.Unlock() })) // write entries to wal and sync @@ -148,11 +155,15 @@ func TestWriter_OldSegmentsAreCleanedUp(t *testing.T) { require.ErrorIs(t, err, os.ErrNotExist, "expected file not exists error") // assert all subscribers were notified + mu1.Lock() require.Len(t, subscriber1, 1, "expected one segment reclaimed notification in subscriber1") require.Equal(t, 0, subscriber1[0]) + mu1.Unlock() + mu2.Lock() require.Len(t, subscriber2, 1, "expected one segment reclaimed notification in subscriber2") require.Equal(t, 0, subscriber2[0]) + mu2.Unlock() // Expect last, or "head" segment to still be alive _, err = os.Stat(filepath.Join(dir, "00000001"))
fix
promtail race fixes (#12656)
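The race fixes in this record repeat one locking shape across the promtail test suites: a goroutine appends to a shared slice, the test polls its length, and every access takes the same mutex. A self-contained sketch of that shape under `go test -race`, with illustrative names rather than the patch's own:

```go
package example

import (
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestCollectorIsRaceFree(t *testing.T) {
	ch := make(chan string)
	defer close(ch)

	var mu sync.Mutex
	var received []string

	// Writer side: every append happens under the mutex.
	go func() {
		for s := range ch {
			mu.Lock()
			received = append(received, s)
			mu.Unlock()
		}
	}()

	lines := []string{"a", "b", "c"}
	for _, l := range lines {
		ch <- l
	}

	// Reader side: the polled length check takes the same mutex.
	require.Eventually(t, func() bool {
		mu.Lock()
		defer mu.Unlock()
		return len(received) == len(lines)
	}, 10*time.Second, 10*time.Millisecond)

	// Final assertions also hold the lock while iterating.
	mu.Lock()
	defer mu.Unlock()
	for _, got := range received {
		require.Contains(t, lines, got)
	}
}
```

Locking inside the `require.Eventually` closure matters as much as locking the append: an unguarded `len(received)` read is exactly the kind of access these fixes remove.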
d2e1992366bf4dd34e6401cc6734ce532b38e5b9
2024-11-22 23:32:20
George Robinson
chore: separate usage of partition owner gauge (#15079)
false
diff --git a/pkg/kafka/partition/reader_service.go b/pkg/kafka/partition/reader_service.go index dedcf183485c2..b643182e617b5 100644 --- a/pkg/kafka/partition/reader_service.go +++ b/pkg/kafka/partition/reader_service.go @@ -136,7 +136,8 @@ func (s *ReaderService) starting(ctx context.Context) error { "partition", s.reader.Partition(), "consumer_group", s.reader.ConsumerGroup(), ) - s.metrics.reportStarting(s.reader.Partition()) + s.metrics.reportOwnerOfPartition(s.reader.Partition()) + s.metrics.reportStarting() // Fetch the last committed offset to determine where to start reading lastCommittedOffset, err := s.reader.FetchLastCommittedOffset(ctx) @@ -196,7 +197,7 @@ func (s *ReaderService) running(ctx context.Context) error { "partition", s.reader.Partition(), "consumer_group", s.reader.ConsumerGroup(), ) - s.metrics.reportRunning(s.reader.Partition()) + s.metrics.reportRunning() consumer, err := s.consumerFactory(s.committer) if err != nil { @@ -396,14 +397,16 @@ func (s *ReaderService) startFetchLoop(ctx context.Context) chan []Record { return records } -func (s *serviceMetrics) reportStarting(partition int32) { - s.partition.WithLabelValues(strconv.Itoa(int(partition))).Set(1) +func (s *serviceMetrics) reportOwnerOfPartition(id int32) { + s.partition.WithLabelValues(strconv.Itoa(int(id))).Set(1) +} + +func (s *serviceMetrics) reportStarting() { s.phase.WithLabelValues(phaseStarting).Set(1) s.phase.WithLabelValues(phaseRunning).Set(0) } -func (s *serviceMetrics) reportRunning(partition int32) { - s.partition.WithLabelValues(strconv.Itoa(int(partition))).Set(1) +func (s *serviceMetrics) reportRunning() { s.phase.WithLabelValues(phaseStarting).Set(0) s.phase.WithLabelValues(phaseRunning).Set(1) }
chore
separate usage of partition owner gauge (#15079)
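This refactor is small but easy to misread in flattened form: the partition-owner gauge used to be set inside both `reportStarting` and `reportRunning`; it now has its own method, called once during startup, while the phase methods touch only the phase gauge. A sketch of the resulting shape, with hypothetical metric names rather than the ones Loki registers:

```go
package example

import (
	"strconv"

	"github.com/prometheus/client_golang/prometheus"
)

type serviceMetrics struct {
	partition *prometheus.GaugeVec // 1 for the partition this reader owns
	phase     *prometheus.GaugeVec // 1 for the current lifecycle phase
}

func newServiceMetrics(reg prometheus.Registerer) *serviceMetrics {
	m := &serviceMetrics{
		partition: prometheus.NewGaugeVec(prometheus.GaugeOpts{
			Name: "example_partition_owner",
			Help: "Set to 1 for the partition this reader owns.",
		}, []string{"partition"}),
		phase: prometheus.NewGaugeVec(prometheus.GaugeOpts{
			Name: "example_reader_phase",
			Help: "Set to 1 for the reader's current phase.",
		}, []string{"phase"}),
	}
	reg.MustRegister(m.partition, m.phase)
	return m
}

// reportOwnerOfPartition is called once; ownership is not a per-phase fact.
func (m *serviceMetrics) reportOwnerOfPartition(id int32) {
	m.partition.WithLabelValues(strconv.Itoa(int(id))).Set(1)
}

// The phase reports now flip only the phase gauge.
func (m *serviceMetrics) reportStarting() {
	m.phase.WithLabelValues("starting").Set(1)
	m.phase.WithLabelValues("running").Set(0)
}

func (m *serviceMetrics) reportRunning() {
	m.phase.WithLabelValues("starting").Set(0)
	m.phase.WithLabelValues("running").Set(1)
}
```

Separating the two reports means a phase transition can no longer accidentally re-touch, or skip, the ownership gauge.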
aeaefe6aab3f611684dcf8c05756947d02c6e1e5
2024-02-21 23:44:49
Robert Jacob
feat(operator): Extend Azure secret validation (#12007)
false
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md index 8dae6eced0bfa..e6aaec29b99c7 100644 --- a/operator/CHANGELOG.md +++ b/operator/CHANGELOG.md @@ -1,5 +1,6 @@ ## Main +- [12007](https://github.com/grafana/loki/pull/12007) **xperimental**: Extend Azure secret validation - [12008](https://github.com/grafana/loki/pull/12008) **xperimental**: Support using multiple buckets with AWS STS - [11964](https://github.com/grafana/loki/pull/11964) **xperimental**: Provide Azure region for managed credentials using environment variable - [11920](https://github.com/grafana/loki/pull/11920) **xperimental**: Refactor handling of credentials in managed-auth mode diff --git a/operator/internal/handlers/internal/storage/secrets.go b/operator/internal/handlers/internal/storage/secrets.go index 2492eea4d4191..80dde97b61367 100644 --- a/operator/internal/handlers/internal/storage/secrets.go +++ b/operator/internal/handlers/internal/storage/secrets.go @@ -1,11 +1,14 @@ package storage import ( + "bytes" "context" "crypto/sha1" + "encoding/base64" "encoding/json" "errors" "fmt" + "io" "sort" corev1 "k8s.io/api/core/v1" @@ -33,9 +36,18 @@ var ( errAzureNoCredentials = errors.New("azure storage secret does contain neither account_key or client_id") errAzureMixedCredentials = errors.New("azure storage secret can not contain both account_key and client_id") errAzureManagedIdentityNoOverride = errors.New("when in managed mode, storage secret can not contain credentials") + errAzureInvalidEnvironment = errors.New("azure environment invalid (valid values: AzureGlobal, AzureChinaCloud, AzureGermanCloud, AzureUSGovernment)") + errAzureInvalidAccountKey = errors.New("azure account key is not valid base64") errGCPParseCredentialsFile = errors.New("gcp storage secret cannot be parsed from JSON content") errGCPWrongCredentialSourceFile = errors.New("credential source in secret needs to point to token file") + + azureValidEnvironments = map[string]bool{ + "AzureGlobal": true, + "AzureChinaCloud": true, + "AzureGermanCloud": true, + "AzureUSGovernment": true, + } ) const gcpAccountTypeExternal = "external_account" @@ -159,11 +171,15 @@ func hashSecretData(s *corev1.Secret) (string, error) { func extractAzureConfigSecret(s *corev1.Secret, fg configv1.FeatureGates) (*storage.AzureStorageConfig, error) { // Extract and validate mandatory fields - env := s.Data[storage.KeyAzureEnvironmentName] - if len(env) == 0 { + env := string(s.Data[storage.KeyAzureEnvironmentName]) + if env == "" { return nil, fmt.Errorf("%w: %s", errSecretMissingField, storage.KeyAzureEnvironmentName) } + if !azureValidEnvironments[env] { + return nil, fmt.Errorf("%w: %s", errAzureInvalidEnvironment, env) + } + accountName := s.Data[storage.KeyAzureStorageAccountName] if len(accountName) == 0 { return nil, fmt.Errorf("%w: %s", errSecretMissingField, storage.KeyAzureStorageAccountName) @@ -188,7 +204,7 @@ func extractAzureConfigSecret(s *corev1.Secret, fg configv1.FeatureGates) (*stor } return &storage.AzureStorageConfig{ - Env: string(env), + Env: env, Container: string(container), EndpointSuffix: string(endpointSuffix), Audience: string(audience), @@ -219,6 +235,10 @@ func validateAzureCredentials(s *corev1.Secret, fg configv1.FeatureGates) (workl } if len(accountKey) > 0 { + if err := validateBase64(accountKey); err != nil { + return false, errAzureInvalidAccountKey + } + // have both account_name and account_key -> no workload identity federation return false, nil } @@ -235,6 +255,13 @@ func validateAzureCredentials(s *corev1.Secret, fg configv1.FeatureGates) (workl return true, nil } +func validateBase64(data []byte) error { + buf := bytes.NewBuffer(data) + reader := base64.NewDecoder(base64.StdEncoding, buf) + _, err := io.ReadAll(reader) + return err +} + func extractGCSConfigSecret(s *corev1.Secret) (*storage.GCSStorageConfig, error) { // Extract and validate mandatory fields bucket := s.Data[storage.KeyGCPStorageBucketName] diff --git a/operator/internal/handlers/internal/storage/secrets_test.go b/operator/internal/handlers/internal/storage/secrets_test.go index ca3623b718c1b..647de5632b4bf 100644 --- a/operator/internal/handlers/internal/storage/secrets_test.go +++ b/operator/internal/handlers/internal/storage/secrets_test.go @@ -84,11 +84,20 @@ func TestAzureExtract(t *testing.T) { secret: &corev1.Secret{}, wantError: "missing secret field: environment", }, + { + name: "invalid environment", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "environment": []byte("invalid-environment"), + }, + }, + wantError: "azure environment invalid (valid values: AzureGlobal, AzureChinaCloud, AzureGermanCloud, AzureUSGovernment): invalid-environment", + }, { name: "missing account_name", secret: &corev1.Secret{ Data: map[string][]byte{ - "environment": []byte("here"), + "environment": []byte("AzureGlobal"), }, }, wantError: "missing secret field: account_name", @@ -97,7 +106,7 @@ func TestAzureExtract(t *testing.T) { name: "missing container", secret: &corev1.Secret{ Data: map[string][]byte{ - "environment": []byte("here"), + "environment": []byte("AzureGlobal"), "account_name": []byte("id"), }, }, @@ -108,7 +117,7 @@ func TestAzureExtract(t *testing.T) { secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{Name: "test"}, Data: map[string][]byte{ - "environment": []byte("here"), + "environment": []byte("AzureGlobal"), "container": []byte("this,that"), "account_name": []byte("id"), }, @@ -120,7 +129,7 @@ func TestAzureExtract(t *testing.T) { secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{Name: "test"}, Data: map[string][]byte{ - "environment": []byte("here"), + "environment": []byte("AzureGlobal"), "container": []byte("this,that"), "account_name": []byte("test-account-name"), "account_key": []byte("test-account-key"), @@ -134,7 +143,7 @@ func TestAzureExtract(t *testing.T) { secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{Name: "test"}, Data: map[string][]byte{ - "environment": []byte("here"), + "environment": []byte("AzureGlobal"), "container": []byte("this,that"), "account_name": []byte("test-account-name"), "client_id": []byte("test-client-id"), @@ -147,7 +156,7 @@ func TestAzureExtract(t *testing.T) { secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{Name: "test"}, Data: map[string][]byte{ - "environment": []byte("here"), + "environment": []byte("AzureGlobal"), "container": []byte("this,that"), "account_name": []byte("test-account-name"), "client_id": []byte("test-client-id"), @@ -161,7 +170,7 @@ func TestAzureExtract(t *testing.T) { secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{Name: "test"}, Data: map[string][]byte{ - "environment": []byte("here"), + "environment": []byte("AzureGlobal"), "account_name": []byte("test-account-name"), "container": []byte("this,that"), "region": []byte("test-region"), @@ -184,10 +193,10 @@ func TestAzureExtract(t *testing.T) { secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{Name: "test"}, Data: map[string][]byte{ - "environment": []byte("here"), + "environment": []byte("AzureGlobal"), "container": []byte("this,that"), "account_name": []byte("id"), - "account_key": []byte("secret"), + "account_key": []byte("dGVzdC1hY2NvdW50LWtleQ=="), // test-account-key "audience": []byte("test-audience"), }, }, @@ -198,10 +207,10 @@ func TestAzureExtract(t *testing.T) { secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{Name: "test"}, Data: map[string][]byte{ - "environment": []byte("here"), + "environment": []byte("AzureGlobal"), "container": []byte("this,that"), "account_name": []byte("id"), - "account_key": []byte("secret"), + "account_key": []byte("dGVzdC1hY2NvdW50LWtleQ=="), // test-account-key }, }, wantCredentialMode: lokiv1.CredentialModeStatic, @@ -211,7 +220,7 @@ func TestAzureExtract(t *testing.T) { secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{Name: "test"}, Data: map[string][]byte{ - "environment": []byte("here"), + "environment": []byte("AzureGlobal"), "container": []byte("this,that"), "account_name": []byte("test-account-name"), "client_id": []byte("test-client-id"), @@ -227,7 +236,7 @@ func TestAzureExtract(t *testing.T) { secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{Name: "test"}, Data: map[string][]byte{ - "environment": []byte("here"), + "environment": []byte("AzureGlobal"), "account_name": []byte("test-account-name"), "container": []byte("this,that"), "region": []byte("test-region"), @@ -256,10 +265,10 @@ func TestAzureExtract(t *testing.T) { secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{Name: "test"}, Data: map[string][]byte{ - "environment": []byte("here"), + "environment": []byte("AzureGlobal"), "container": []byte("this,that"), "account_name": []byte("id"), - "account_key": []byte("secret"), + "account_key": []byte("dGVzdC1hY2NvdW50LWtleQ=="), // test-account-key "endpoint_suffix": []byte("suffix"), }, },
feat
Extend Azure secret validation (#12007)
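The validation added in this commit reduces to two checks: the environment name must be on a fixed allow-list, and the account key must decode as base64. Below is a minimal, self-contained Go sketch of both checks, assuming only the standard library. The names (validEnvironments, validateEnvironment, validateAccountKey) are illustrative rather than the operator's actual identifiers, and base64.StdEncoding.DecodeString is used as a simpler stdlib route than the streaming bytes.Buffer/base64.NewDecoder/io.ReadAll combination in the diff; both reject malformed input.

```go
package main

import (
	"encoding/base64"
	"errors"
	"fmt"
)

// Allow-list of environment names, mirroring the map the commit adds.
var validEnvironments = map[string]bool{
	"AzureGlobal":       true,
	"AzureChinaCloud":   true,
	"AzureGermanCloud":  true,
	"AzureUSGovernment": true,
}

var (
	errInvalidEnvironment = errors.New("azure environment invalid")
	errInvalidAccountKey  = errors.New("azure account key is not valid base64")
)

// validateEnvironment rejects any environment not on the allow-list.
func validateEnvironment(env string) error {
	if !validEnvironments[env] {
		return fmt.Errorf("%w: %s", errInvalidEnvironment, env)
	}
	return nil
}

// validateAccountKey checks that the key decodes as standard base64.
func validateAccountKey(key []byte) error {
	if _, err := base64.StdEncoding.DecodeString(string(key)); err != nil {
		return errInvalidAccountKey
	}
	return nil
}

func main() {
	fmt.Println(validateEnvironment("AzureGlobal"))                     // <nil>
	fmt.Println(validateEnvironment("invalid-environment"))             // error
	fmt.Println(validateAccountKey([]byte("dGVzdC1hY2NvdW50LWtleQ=="))) // <nil>
	fmt.Println(validateAccountKey([]byte("not base64!")))              // error
}
```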
3a0be4897512ec16a0e43cd5e0a49774cda87aed
2022-11-23 22:20:56
Kaviraj Kanagaraj
config: Improve error message loading config with ENV variables. (#7759)
false
diff --git a/CHANGELOG.md b/CHANGELOG.md index ff4b1f64c72ef..ba79c01ed7a84 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ * [6360](https://github.com/grafana/loki/pull/6099) **liguozhong**: Hide error message when ctx timeout occurs in s3.getObject * [7602](https://github.com/grafana/loki/pull/7602) **vmax**: Add decolorize filter to easily parse colored logs. * [7731](https://github.com/grafana/loki/pull/7731) **bitkill**: Add healthchecks to the docker-compose example. +* [7759](https://github.com/grafana/loki/pull/7759) **kavirajk**: Improve error message for loading config with ENV variables. ##### Fixes diff --git a/pkg/util/cfg/files.go b/pkg/util/cfg/files.go index 9f25238245a78..6a3faa0f56f05 100644 --- a/pkg/util/cfg/files.go +++ b/pkg/util/cfg/files.go @@ -58,6 +58,7 @@ func YAML(f string, expandEnvVars bool, strict bool) Source { } else { err = dYAML(y)(dst) } + return errors.Wrap(err, f) } } @@ -77,7 +78,6 @@ func dYAML(y []byte) Source { } func ConfigFileLoader(args []string, name string, strict bool) Source { - return func(dst Cloneable) error { freshFlags := flag.NewFlagSet("config-file-loader", flag.ContinueOnError) @@ -112,7 +112,11 @@ func ConfigFileLoader(args []string, name string, strict bool) Source { expandEnv, _ = strconv.ParseBool(expandEnvFlag.Value.String()) // Can ignore error as false returned } if _, err := os.Stat(val); err == nil { - return YAML(val, expandEnv, strict)(dst) + err := YAML(val, expandEnv, strict)(dst) + if err != nil && !expandEnv { + err = fmt.Errorf("%w. Use `-config.expand-env=true` flag if you want to expand environment variables in your config file", err) + } + return err } } return fmt.Errorf("%s does not exist, set %s for custom config path", f.Value.String(), name)
config
Improve error message loading config with ENV variables. (#7759)
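The fix itself is a small error-wrapping pattern: wrap the YAML load error with %w so the cause stays unwrappable, and append an actionable hint only when env expansion was off. A hedged sketch of that pattern follows; loadConfig and the simulated parse failure are hypothetical stand-ins for the real loader in pkg/util/cfg.

```go
package main

import (
	"errors"
	"fmt"
)

// loadConfig stands in for the YAML source loader; the simulated
// unmarshal failure below represents a config containing ${ENV}
// placeholders that were never expanded.
func loadConfig(path string, expandEnv bool) error {
	err := errors.New("yaml: unmarshal errors") // hypothetical parse failure
	if err != nil && !expandEnv {
		// Same shape as the commit: %w preserves the cause for
		// errors.Is/errors.Unwrap, while the message suggests the fix.
		return fmt.Errorf("%w. Use `-config.expand-env=true` flag if you want to expand environment variables in your config file", err)
	}
	return err
}

func main() {
	fmt.Println(loadConfig("loki.yaml", false))
}
```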
e39677f97b4ba27c90d9f8d2991441095e55b06e
2024-04-19 15:34:36
Cyril Tovena
feat: Enable log volume endpoint by default in helm (#12690)
false
diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md index 40efb8ac5ddd5..9d3e04fe9d8ea 100644 --- a/docs/sources/setup/install/helm/reference.md +++ b/docs/sources/setup/install/helm/reference.md @@ -5250,7 +5250,8 @@ null "query_timeout": "300s", "reject_old_samples": true, "reject_old_samples_max_age": "168h", - "split_queries_by_interval": "15m" + "split_queries_by_interval": "15m", + "volume_enabled": true }, "memberlistConfig": {}, "memcached": { @@ -5594,7 +5595,8 @@ null "query_timeout": "300s", "reject_old_samples": true, "reject_old_samples_max_age": "168h", - "split_queries_by_interval": "15m" + "split_queries_by_interval": "15m", + "volume_enabled": true } </pre> </td> diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md index a0ea01d6e2799..ad796a850c3bd 100644 --- a/production/helm/loki/CHANGELOG.md +++ b/production/helm/loki/CHANGELOG.md @@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang [//]: # (<AUTOMATED_UPDATES_LOCATOR> : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.) +## 6.2.4 + +- [ENHANCEMENT] Activate the volume endpoint by default. + ## 6.2.3 - [ENHANCEMENT] Allow minio address to be overridden. diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml index f6e78b8ad7d7b..e8b160550bc92 100644 --- a/production/helm/loki/Chart.yaml +++ b/production/helm/loki/Chart.yaml @@ -3,7 +3,7 @@ name: loki description: Helm chart for Grafana Loki in simple, scalable mode type: application appVersion: 3.0.0 -version: 6.2.3 +version: 6.2.4 home: https://grafana.github.io/helm-charts sources: - https://github.com/grafana/loki diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md index 47213b2917e9e..c892d1b4f364f 100644 --- a/production/helm/loki/README.md +++ b/production/helm/loki/README.md @@ -1,6 +1,6 @@ # loki -![Version: 6.2.3](https://img.shields.io/badge/Version-6.2.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.0.0](https://img.shields.io/badge/AppVersion-3.0.0-informational?style=flat-square) +![Version: 6.2.4](https://img.shields.io/badge/Version-6.2.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.0.0](https://img.shields.io/badge/AppVersion-3.0.0-informational?style=flat-square) Helm chart for Grafana Loki in simple, scalable mode diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml index aa5918cb9ffeb..d9561ef72f845 100644 --- a/production/helm/loki/values.yaml +++ b/production/helm/loki/values.yaml @@ -304,6 +304,7 @@ loki: max_cache_freshness_per_query: 10m split_queries_by_interval: 15m query_timeout: 300s + volume_enabled: true # -- Provides a reloadable runtime configuration file for some specific configuration runtimeConfig: {} # -- Check https://grafana.com/docs/loki/latest/configuration/#common_config for more info on how to provide a common configuration
feat
Enable log volume endpoint by default in helm (#12690)
3b8d993500cb715f27e5618d90afe5c8ac9d501d
2025-01-06 18:58:17
Matt Veitas
feat: Skip writeback for chunks fetched by queriers older than a duration (#15605)
false
diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md index c8bc0e9aaeac5..d352cd6960082 100644 --- a/docs/sources/shared/configuration.md +++ b/docs/sources/shared/configuration.md @@ -1732,6 +1732,11 @@ The `chunk_store_config` block configures how chunks will be cached and how long # The CLI flags prefix for this block configuration is: store.index-cache-write [write_dedupe_cache_config: <cache_config>] +# Chunks fetched from queriers before this duration will not be written to the +# cache. A value of 0 will write all chunks to the cache +# CLI flag: -store.skip-query-writeback-older-than +[skip_query_writeback_cache_older_than: <duration> | default = 0s] + # Chunks will be handed off to the L2 cache after this duration. 0 to disable L2 # cache. # CLI flag: -store.chunks-cache-l2.handoff diff --git a/pkg/storage/chunk/cache/cache_test.go b/pkg/storage/chunk/cache/cache_test.go index 3ff473934cdb1..2607595b30cb9 100644 --- a/pkg/storage/chunk/cache/cache_test.go +++ b/pkg/storage/chunk/cache/cache_test.go @@ -132,7 +132,7 @@ func testChunkFetcher(t *testing.T, c cache.Cache, chunks []chunk.Chunk) { }, } - fetcher, err := fetcher.New(c, nil, false, s, nil, 0) + fetcher, err := fetcher.New(c, nil, false, s, nil, 0, 0) require.NoError(t, err) defer fetcher.Stop() diff --git a/pkg/storage/chunk/fetcher/fetcher.go b/pkg/storage/chunk/fetcher/fetcher.go index 45b6970045a91..cd0e4ce6a90b4 100644 --- a/pkg/storage/chunk/fetcher/fetcher.go +++ b/pkg/storage/chunk/fetcher/fetcher.go @@ -49,7 +49,8 @@ type Fetcher struct { cachel2 cache.Cache cacheStubs bool - l2CacheHandoff time.Duration + l2CacheHandoff time.Duration + skipQueryWritebackCacheOlderThan time.Duration wait sync.WaitGroup decodeRequests chan decodeRequest @@ -69,15 +70,16 @@ type decodeResponse struct { } // New makes a new ChunkFetcher. -func New(cache cache.Cache, cachel2 cache.Cache, cacheStubs bool, schema config.SchemaConfig, storage client.Client, l2CacheHandoff time.Duration) (*Fetcher, error) { +func New(cache cache.Cache, cachel2 cache.Cache, cacheStubs bool, schema config.SchemaConfig, storage client.Client, l2CacheHandoff time.Duration, skipQueryWritebackOlderThan time.Duration) (*Fetcher, error) { c := &Fetcher{ - schema: schema, - storage: storage, - cache: cache, - cachel2: cachel2, - l2CacheHandoff: l2CacheHandoff, - cacheStubs: cacheStubs, - decodeRequests: make(chan decodeRequest), + schema: schema, + storage: storage, + cache: cache, + cachel2: cachel2, + l2CacheHandoff: l2CacheHandoff, + cacheStubs: cacheStubs, + skipQueryWritebackCacheOlderThan: skipQueryWritebackOlderThan, + decodeRequests: make(chan decodeRequest), } c.wait.Add(chunkDecodeParallelism) @@ -138,6 +140,9 @@ func (c *Fetcher) FetchChunks(ctx context.Context, chunks []chunk.Chunk) ([]chun l2OnlyChunks := make([]chunk.Chunk, 0, len(chunks)) for _, m := range chunks { + if c.skipQueryWritebackCacheOlderThan > 0 && m.From.Time().Before(time.Now().UTC().Add(-c.skipQueryWritebackCacheOlderThan)) { + continue + } // Similar to below, this is an optimization to not bother looking in the l1 cache if there isn't a reasonable // expectation to find it there. 
if c.l2CacheHandoff > 0 && m.From.Time().Before(time.Now().UTC().Add(-extendedHandoff)) { @@ -230,6 +235,10 @@ func (c *Fetcher) WriteBackCache(ctx context.Context, chunks []chunk.Chunk) erro keysL2 := make([]string, 0, len(chunks)) bufsL2 := make([][]byte, 0, len(chunks)) for i := range chunks { + if c.skipQueryWritebackCacheOlderThan > 0 && chunks[i].From.Time().Before(time.Now().UTC().Add(-c.skipQueryWritebackCacheOlderThan)) { + continue + } + var encoded []byte var err error if !c.cacheStubs { diff --git a/pkg/storage/chunk/fetcher/fetcher_test.go b/pkg/storage/chunk/fetcher/fetcher_test.go index 2251c93022b29..5aceefb53c691 100644 --- a/pkg/storage/chunk/fetcher/fetcher_test.go +++ b/pkg/storage/chunk/fetcher/fetcher_test.go @@ -25,16 +25,17 @@ import ( func Test(t *testing.T) { now := time.Now() tests := []struct { - name string - handoff time.Duration - storeStart []chunk.Chunk - l1Start []chunk.Chunk - l2Start []chunk.Chunk - fetch []chunk.Chunk - l1KeysRequested int - l1End []chunk.Chunk - l2KeysRequested int - l2End []chunk.Chunk + name string + handoff time.Duration + skipQueryWriteback time.Duration + storeStart []chunk.Chunk + l1Start []chunk.Chunk + l2Start []chunk.Chunk + fetch []chunk.Chunk + l1KeysRequested int + l1End []chunk.Chunk + l2KeysRequested int + l2End []chunk.Chunk }{ { name: "all found in L1 cache", @@ -82,6 +83,19 @@ func Test(t *testing.T) { l2KeysRequested: 3, l2End: makeChunks(now, c{7 * time.Hour, 8 * time.Hour}, c{8 * time.Hour, 9 * time.Hour}, c{9 * time.Hour, 10 * time.Hour}), }, + { + name: "skipQueryWriteback", + handoff: 24 * time.Hour, + skipQueryWriteback: 3 * 24 * time.Hour, + storeStart: makeChunks(now, c{time.Hour, 2 * time.Hour}, c{2 * time.Hour, 3 * time.Hour}, c{3 * time.Hour, 4 * time.Hour}, c{5 * 24 * time.Hour, 6 * 24 * time.Hour}, c{5 * 24 * time.Hour, 6 * 24 * time.Hour}), + l1Start: []chunk.Chunk{}, + l2Start: []chunk.Chunk{}, + fetch: makeChunks(now, c{time.Hour, 2 * time.Hour}, c{2 * time.Hour, 3 * time.Hour}, c{3 * time.Hour, 4 * time.Hour}, c{5 * 24 * time.Hour, 6 * 24 * time.Hour}, c{5 * 24 * time.Hour, 6 * 24 * time.Hour}), + l1KeysRequested: 3, + l1End: makeChunks(now, c{time.Hour, 2 * time.Hour}, c{2 * time.Hour, 3 * time.Hour}, c{3 * time.Hour, 4 * time.Hour}), + l2KeysRequested: 0, + l2End: []chunk.Chunk{}, + }, { name: "writeback l1", handoff: 24 * time.Hour, @@ -194,7 +208,7 @@ func Test(t *testing.T) { assert.NoError(t, chunkClient.PutChunks(context.Background(), test.storeStart)) // Build fetcher - f, err := New(c1, c2, false, sc, chunkClient, test.handoff) + f, err := New(c1, c2, false, sc, chunkClient, test.handoff, test.skipQueryWriteback) assert.NoError(t, err) // Run the test @@ -235,16 +249,17 @@ func BenchmarkFetch(b *testing.B) { fetch = append(fetch, storeStart...) 
test := struct { - name string - handoff time.Duration - storeStart []chunk.Chunk - l1Start []chunk.Chunk - l2Start []chunk.Chunk - fetch []chunk.Chunk - l1KeysRequested int - l1End []chunk.Chunk - l2KeysRequested int - l2End []chunk.Chunk + name string + handoff time.Duration + skipQueryWriteback time.Duration + storeStart []chunk.Chunk + l1Start []chunk.Chunk + l2Start []chunk.Chunk + fetch []chunk.Chunk + l1KeysRequested int + l1End []chunk.Chunk + l2KeysRequested int + l2End []chunk.Chunk }{ name: "some in L1, some in L2", handoff: time.Duration(numchunks/3+100) * time.Hour, @@ -291,7 +306,7 @@ func BenchmarkFetch(b *testing.B) { _ = chunkClient.PutChunks(context.Background(), test.storeStart) // Build fetcher - f, _ := New(c1, c2, false, sc, chunkClient, test.handoff) + f, _ := New(c1, c2, false, sc, chunkClient, test.handoff, test.skipQueryWriteback) for i := 0; i < b.N; i++ { _, err := f.FetchChunks(context.Background(), test.fetch) diff --git a/pkg/storage/config/store.go b/pkg/storage/config/store.go index 8dbd57cdc2503..27c48d4b08a37 100644 --- a/pkg/storage/config/store.go +++ b/pkg/storage/config/store.go @@ -10,9 +10,10 @@ import ( ) type ChunkStoreConfig struct { - ChunkCacheConfig cache.Config `yaml:"chunk_cache_config"` - ChunkCacheConfigL2 cache.Config `yaml:"chunk_cache_config_l2"` - WriteDedupeCacheConfig cache.Config `yaml:"write_dedupe_cache_config" doc:"description=Write dedupe cache is deprecated along with legacy index types (aws, aws-dynamo, bigtable, bigtable-hashed, cassandra, gcp, gcp-columnkey, grpc-store).\nConsider using TSDB index which does not require a write dedupe cache."` + ChunkCacheConfig cache.Config `yaml:"chunk_cache_config"` + ChunkCacheConfigL2 cache.Config `yaml:"chunk_cache_config_l2"` + WriteDedupeCacheConfig cache.Config `yaml:"write_dedupe_cache_config" doc:"description=Write dedupe cache is deprecated along with legacy index types (aws, aws-dynamo, bigtable, bigtable-hashed, cassandra, gcp, gcp-columnkey, grpc-store).\nConsider using TSDB index which does not require a write dedupe cache."` + SkipQueryWritebackOlderThan time.Duration `yaml:"skip_query_writeback_cache_older_than"` L2ChunkCacheHandoff time.Duration `yaml:"l2_chunk_cache_handoff"` CacheLookupsOlderThan model.Duration `yaml:"cache_lookups_older_than"` @@ -38,6 +39,7 @@ func (cfg *ChunkStoreConfig) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.L2ChunkCacheHandoff, "store.chunks-cache-l2.handoff", 0, "Chunks will be handed off to the L2 cache after this duration. 0 to disable L2 cache.") f.BoolVar(&cfg.chunkCacheStubs, "store.chunks-cache.cache-stubs", false, "If true, don't write the full chunk to cache, just a stub entry.") cfg.WriteDedupeCacheConfig.RegisterFlagsWithPrefix("store.index-cache-write.", "", f) + f.DurationVar(&cfg.SkipQueryWritebackOlderThan, "store.skip-query-writeback-older-than", 0, "Chunks fetched from queriers before this duration will not be written to the cache. A value of 0 will write all chunks to the cache") f.Var(&cfg.CacheLookupsOlderThan, "store.cache-lookups-older-than", "Cache index entries older than this period. 
0 to disable.") } diff --git a/pkg/storage/store.go b/pkg/storage/store.go index a8e6a1add3239..8daf27ce265f8 100644 --- a/pkg/storage/store.go +++ b/pkg/storage/store.go @@ -198,7 +198,7 @@ func (s *LokiStore) init() error { if err != nil { return err } - f, err := fetcher.New(s.chunksCache, s.chunksCacheL2, s.storeCfg.ChunkCacheStubs(), s.schemaCfg, chunkClient, s.storeCfg.L2ChunkCacheHandoff) + f, err := fetcher.New(s.chunksCache, s.chunksCacheL2, s.storeCfg.ChunkCacheStubs(), s.schemaCfg, chunkClient, s.storeCfg.L2ChunkCacheHandoff, s.storeCfg.SkipQueryWritebackOlderThan) if err != nil { return err } diff --git a/pkg/storage/stores/series_store_write_test.go b/pkg/storage/stores/series_store_write_test.go index 5ff8a00d99706..a6e0a9a55cb93 100644 --- a/pkg/storage/stores/series_store_write_test.go +++ b/pkg/storage/stores/series_store_write_test.go @@ -160,7 +160,7 @@ func TestChunkWriter_PutOne(t *testing.T) { idx := &mockIndexWriter{} client := &mockChunksClient{} - f, err := fetcher.New(cache, nil, false, schemaConfig, client, 0) + f, err := fetcher.New(cache, nil, false, schemaConfig, client, 0, 0) require.NoError(t, err) cw := NewChunkWriter(f, schemaConfig, idx, true) diff --git a/pkg/storage/util_test.go b/pkg/storage/util_test.go index a0dc75999692f..cc9ded1c53447 100644 --- a/pkg/storage/util_test.go +++ b/pkg/storage/util_test.go @@ -261,7 +261,7 @@ func (m *mockChunkStore) GetChunks(_ context.Context, _ string, _, _ model.Time, panic(err) } - f, err := fetcher.New(cache, nil, false, m.schemas, m.client, 0) + f, err := fetcher.New(cache, nil, false, m.schemas, m.client, 0, 0) if err != nil { panic(err) }
feat
Skip writeback for chunks fetched by queriers older than a duration (#15605)
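The whole feature hangs on a single age predicate, applied both when deciding which chunks to look up in the L1 cache and when writing fetched chunks back: if the chunk's From timestamp is older than the configured duration, skip it, with a zero duration disabling the check entirely. A minimal sketch of that predicate; chunkMeta is a simplified stand-in for the store's chunk type.

```go
package main

import (
	"fmt"
	"time"
)

// chunkMeta is a stand-in for the store's chunk type; only the From
// timestamp matters for this check.
type chunkMeta struct{ From time.Time }

// skipWriteback reports whether a chunk is too old to write back to the
// cache, following the commit's rule: a zero duration disables skipping.
func skipWriteback(c chunkMeta, olderThan time.Duration) bool {
	return olderThan > 0 && c.From.Before(time.Now().UTC().Add(-olderThan))
}

func main() {
	old := chunkMeta{From: time.Now().Add(-5 * 24 * time.Hour)}
	fresh := chunkMeta{From: time.Now().Add(-time.Hour)}
	fmt.Println(skipWriteback(old, 3*24*time.Hour))   // true: skipped
	fmt.Println(skipWriteback(fresh, 3*24*time.Hour)) // false: written back
	fmt.Println(skipWriteback(old, 0))                // false: feature disabled
}
```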
b5ad4f09ee7ca93ab42f7d387eeb4db23767781f
2025-02-17 12:14:48
icylord
fix: change object_store.prefix to object_store.storage_prefix in helm (#16266)
false
diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md index 9195794112a07..7faa744d011d9 100644 --- a/docs/sources/setup/install/helm/reference.md +++ b/docs/sources/setup/install/helm/reference.md @@ -6400,7 +6400,6 @@ null "bucket_name": null, "service_account": null }, - "prefix": null, "s3": { "access_key_id": null, "endpoint": null, @@ -6410,6 +6409,7 @@ null "secret_access_key": null, "sse": {} }, + "storage_prefix": null, "type": "s3" }, "s3": { diff --git a/production/helm/loki/templates/_helpers.tpl b/production/helm/loki/templates/_helpers.tpl index c6f2e6ff3fd88..7434350eb840d 100644 --- a/production/helm/loki/templates/_helpers.tpl +++ b/production/helm/loki/templates/_helpers.tpl @@ -1179,6 +1179,6 @@ azure: account_key: {{ .account_key }} {{- end }} {{- end }} -prefix: {{ .prefix }} +storage_prefix: {{ .storage_prefix }} {{- end }} {{- end }} diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml index 443d2f7d796c6..3c2c85bc5fc48 100644 --- a/production/helm/loki/values.yaml +++ b/production/helm/loki/values.yaml @@ -417,7 +417,7 @@ loki: object_store: # Type of object store. Valid options are: s3, gcs, azure type: s3 - prefix: null # Optional prefix for storage keys + storage_prefix: null # Optional prefix for storage keys # S3 configuration (when type is "s3") s3:
fix
change object_store.prefix to object_store.storage_prefix in helm (#16266)
fc2bc69eaa46c38d77a9f4cfd0ee1daeecc7e101
2021-12-15 15:42:15
Sandy Chen
feat(): add ManagedIdentity in Azure Blob Storage (#4858)
false
diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md index 291bd0554e92f..e95ac0797d98e 100644 --- a/docs/sources/configuration/_index.md +++ b/docs/sources/configuration/_index.md @@ -707,6 +707,10 @@ The `azure_storage_config` configures Azure as a general storage for different d # Maximum time to wait before retrying a request. # CLI flag: -<prefix>.azure.max-retry-delay [max_retry_delay: <duration> | default = 500ms] + +# Use Managed Identity or not. +# CLI flag: -ruler.storage.azure.use-managed-identity +[use_managed_identity: <boolean> | default = false] ``` ## gcs_storage_config diff --git a/docs/sources/storage/_index.md b/docs/sources/storage/_index.md index 5bd90947ddcdb..924c2cc2103ed 100644 --- a/docs/sources/storage/_index.md +++ b/docs/sources/storage/_index.md @@ -307,6 +307,7 @@ storage_config: # See https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blobs-introduction#containers container_name: <container-name> request_timeout: 0 + use_managed_identity: <true|false> boltdb_shipper: active_index_directory: /data/loki/boltdb-shipper-active cache_location: /data/loki/boltdb-shipper-cache diff --git a/go.mod b/go.mod index 2b2ffb212eae2..e2d3221fe9911 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( cloud.google.com/go/storage v1.10.0 github.com/Azure/azure-pipeline-go v0.2.3 github.com/Azure/azure-storage-blob-go v0.13.0 + github.com/Azure/go-autorest/autorest/adal v0.9.17 github.com/Masterminds/sprig/v3 v3.2.2 github.com/NYTimes/gziphandler v1.1.1 github.com/Shopify/sarama v1.30.0 @@ -114,7 +115,6 @@ require ( github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.22 // indirect - github.com/Azure/go-autorest/autorest/adal v0.9.17 // indirect github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 // indirect github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect diff --git a/pkg/storage/chunk/azure/blob_storage_client.go b/pkg/storage/chunk/azure/blob_storage_client.go index 43286a105e809..778c879ef6426 100644 --- a/pkg/storage/chunk/azure/blob_storage_client.go +++ b/pkg/storage/chunk/azure/blob_storage_client.go @@ -14,6 +14,7 @@ import ( "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/Azure/go-autorest/autorest/adal" "github.com/mattn/go-ieproxy" "github.com/prometheus/client_golang/prometheus" @@ -93,6 +94,7 @@ type BlobStorageConfig struct { MaxRetries int `yaml:"max_retries"` MinRetryDelay time.Duration `yaml:"min_retry_delay"` MaxRetryDelay time.Duration `yaml:"max_retry_delay"` + UseManagedIdentity bool `yaml:"use_managed_identity"` } // RegisterFlags adds the flags required to config this to the given FlagSet @@ -113,6 +115,7 @@ func (c *BlobStorageConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagS f.IntVar(&c.MaxRetries, prefix+"azure.max-retries", 5, "Number of retries for a request which times out.") f.DurationVar(&c.MinRetryDelay, prefix+"azure.min-retry-delay", 10*time.Millisecond, "Minimum time to wait before retrying a request.") f.DurationVar(&c.MaxRetryDelay, prefix+"azure.max-retry-delay", 500*time.Millisecond, "Maximum time to wait before retrying a request.") + f.BoolVar(&c.UseManagedIdentity, prefix+"azure.use-managed-identity", false, "Use Managed Identity or not.") } func (c *BlobStorageConfig) ToCortexAzureConfig() 
cortex_azure.BlobStorageConfig { @@ -241,11 +244,7 @@ func (b *BlobStorage) buildContainerURL() (azblob.ContainerURL, error) { } func (b *BlobStorage) newPipeline(hedgingCfg hedging.Config, hedging bool) (pipeline.Pipeline, error) { - credential, err := azblob.NewSharedKeyCredential(b.cfg.AccountName, b.cfg.AccountKey.Value) - if err != nil { - return nil, err - } - + // defining the Azure Pipeline Options opts := azblob.PipelineOptions{ Retry: azblob.RetryOptions{ Policy: azblob.RetryPolicyExponential, @@ -255,6 +254,12 @@ func (b *BlobStorage) newPipeline(hedgingCfg hedging.Config, hedging bool) (pipe MaxRetryDelay: b.cfg.MaxRetryDelay, }, } + + credential, err := azblob.NewSharedKeyCredential(b.cfg.AccountName, b.cfg.AccountKey.Value) + if err != nil { + return nil, err + } + client := defaultClientFactory() opts.HTTPSender = pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { @@ -277,7 +282,61 @@ func (b *BlobStorage) newPipeline(hedgingCfg hedging.Config, hedging bool) (pipe }) } - return azblob.NewPipeline(credential, opts), nil + if !b.cfg.UseManagedIdentity { + return azblob.NewPipeline(credential, opts), nil + } + + tokenCredential, err := b.getOAuthToken() + if err != nil { + return nil, err + } + + return azblob.NewPipeline(*tokenCredential, opts), nil + +} + +func (b *BlobStorage) getOAuthToken() (*azblob.TokenCredential, error) { + spt, err := b.fetchMSIToken() + if err != nil { + return nil, err + } + + // Refresh obtains a fresh token + err = spt.Refresh() + if err != nil { + return nil, err + } + + tc := azblob.NewTokenCredential(spt.Token().AccessToken, func(tc azblob.TokenCredential) time.Duration { + err := spt.Refresh() + if err != nil { + // something went wrong, prevent the refresher from being triggered again + return 0 + } + + // set the new token value + tc.SetToken(spt.Token().AccessToken) + + // get the next token slightly before the current one expires + return time.Until(spt.Token().Expires()) - 10*time.Second + }) + + return &tc, nil +} + +func (b *BlobStorage) fetchMSIToken() (*adal.ServicePrincipalToken, error) { + // msiEndpoint is the well known endpoint for getting MSI authentications tokens + // msiEndpoint := "http://169.254.169.254/metadata/identity/oauth2/token" for production Jobs + msiEndpoint, _ := adal.GetMSIVMEndpoint() + + // both can be empty, systemAssignedMSI scenario + spt, err := adal.NewServicePrincipalTokenFromMSI(msiEndpoint, "https://storage.azure.com/") + + if err != nil { + return nil, err + } + + return spt, spt.Refresh() } // List implements chunk.ObjectClient.
feat
add ManagedIdentity in Azure Blob Storage (#4858)
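The non-boilerplate part of this commit is the token lifecycle: fetch a service-principal token from the MSI metadata endpoint, then hand azblob a refresh callback that renews the token slightly before it expires and returns 0 to stop refreshing after a failure. Below is a dependency-free sketch of just that scheduling rule; token, refreshFunc, and nextRefresh are hypothetical stand-ins, where the real code uses adal.ServicePrincipalToken and azblob.NewTokenCredential.

```go
package main

import (
	"fmt"
	"time"
)

// token is a hypothetical stand-in for an OAuth access token with expiry.
type token struct {
	value   string
	expires time.Time
}

// refreshFunc fetches a fresh token; in the commit this is spt.Refresh()
// against the Azure MSI endpoint.
type refreshFunc func() (token, error)

// nextRefresh implements the scheduling rule from the commit: renew
// slightly (10s) before expiry, and return 0 to stop the refresher when
// a renewal fails.
func nextRefresh(refresh refreshFunc, set func(string)) time.Duration {
	t, err := refresh()
	if err != nil {
		return 0 // stop refreshing, as the commit does on error
	}
	set(t.value)
	return time.Until(t.expires) - 10*time.Second
}

func main() {
	refresh := func() (token, error) {
		return token{value: "fresh", expires: time.Now().Add(time.Hour)}, nil
	}
	d := nextRefresh(refresh, func(v string) { fmt.Println("token set:", v) })
	fmt.Println("next refresh in ~", d.Round(time.Minute))
}
```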
5415b11468c7f22be52eed4a282b37e6a45557f0
2023-09-15 17:44:02
Periklis Tsirakidis
operator: Update Loki operand to v2.9.1 (#10600)
false
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md index bd3ee9a30aa20..1d7e542af56cf 100644 --- a/operator/CHANGELOG.md +++ b/operator/CHANGELOG.md @@ -1,5 +1,6 @@ ## Main +- [10600](https://github.com/grafana/loki/pull/10600) **periklis**: Update Loki operand to v2.9.1 - [10545](https://github.com/grafana/loki/pull/10545) **xperimental**: Update gateway arguments to enable namespace extraction - [10558](https://github.com/grafana/loki/pull/10558) **periklis**: Upgrade dashboards for for Loki v2.9.0 - [10539](https://github.com/grafana/loki/pull/10539) **periklis**: Update Loki operand to v2.9.0 diff --git a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml index dfe1f390c67db..b0af78f20e02e 100644 --- a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml @@ -150,7 +150,7 @@ metadata: categories: OpenShift Optional, Logging & Tracing certified: "false" containerImage: docker.io/grafana/loki-operator:0.4.0 - createdAt: "2023-09-11T12:09:01Z" + createdAt: "2023-09-15T08:01:24Z" description: The Community Loki Operator provides Kubernetes native deployment and management of Loki and related logging components. operators.operatorframework.io/builder: operator-sdk-unknown @@ -1649,7 +1649,7 @@ spec: - /manager env: - name: RELATED_IMAGE_LOKI - value: docker.io/grafana/loki:2.9.0 + value: docker.io/grafana/loki:2.9.1 - name: RELATED_IMAGE_GATEWAY value: quay.io/observatorium/api:latest - name: RELATED_IMAGE_OPA @@ -1772,7 +1772,7 @@ spec: provider: name: Grafana Loki SIG Operator relatedImages: - - image: docker.io/grafana/loki:2.9.0 + - image: docker.io/grafana/loki:2.9.1 name: loki - image: quay.io/observatorium/api:latest name: gateway diff --git a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml index e2ccb15028bca..f0f0ae2a238ac 100644 --- a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml @@ -150,7 +150,7 @@ metadata: categories: OpenShift Optional, Logging & Tracing certified: "false" containerImage: docker.io/grafana/loki-operator:0.4.0 - createdAt: "2023-09-11T12:08:59Z" + createdAt: "2023-09-15T08:01:22Z" description: The Community Loki Operator provides Kubernetes native deployment and management of Loki and related logging components. 
operators.operatorframework.io/builder: operator-sdk-unknown @@ -1636,7 +1636,7 @@ spec: - /manager env: - name: RELATED_IMAGE_LOKI - value: docker.io/grafana/loki:2.9.0 + value: docker.io/grafana/loki:2.9.1 - name: RELATED_IMAGE_GATEWAY value: quay.io/observatorium/api:latest - name: RELATED_IMAGE_OPA @@ -1747,7 +1747,7 @@ spec: provider: name: Grafana Loki SIG Operator relatedImages: - - image: docker.io/grafana/loki:2.9.0 + - image: docker.io/grafana/loki:2.9.1 name: loki - image: quay.io/observatorium/api:latest name: gateway diff --git a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml index e5c5d86d420fc..66db43c1a5946 100644 --- a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml @@ -150,7 +150,7 @@ metadata: categories: OpenShift Optional, Logging & Tracing certified: "false" containerImage: quay.io/openshift-logging/loki-operator:0.1.0 - createdAt: "2023-09-11T12:09:03Z" + createdAt: "2023-09-15T08:01:27Z" description: | The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging. ## Prerequisites and Requirements @@ -1634,7 +1634,7 @@ spec: - /manager env: - name: RELATED_IMAGE_LOKI - value: quay.io/openshift-logging/loki:v2.9.0 + value: quay.io/openshift-logging/loki:v2.9.1 - name: RELATED_IMAGE_GATEWAY value: quay.io/observatorium/api:latest - name: RELATED_IMAGE_OPA @@ -1757,7 +1757,7 @@ spec: provider: name: Red Hat relatedImages: - - image: quay.io/openshift-logging/loki:v2.9.0 + - image: quay.io/openshift-logging/loki:v2.9.1 name: loki - image: quay.io/observatorium/api:latest name: gateway diff --git a/operator/config/overlays/community-openshift/manager_related_image_patch.yaml b/operator/config/overlays/community-openshift/manager_related_image_patch.yaml index 0ffbf9357e9f5..d11ceeaba5236 100644 --- a/operator/config/overlays/community-openshift/manager_related_image_patch.yaml +++ b/operator/config/overlays/community-openshift/manager_related_image_patch.yaml @@ -9,7 +9,7 @@ spec: - name: manager env: - name: RELATED_IMAGE_LOKI - value: docker.io/grafana/loki:2.9.0 + value: docker.io/grafana/loki:2.9.1 - name: RELATED_IMAGE_GATEWAY value: quay.io/observatorium/api:latest - name: RELATED_IMAGE_OPA diff --git a/operator/config/overlays/community/manager_related_image_patch.yaml b/operator/config/overlays/community/manager_related_image_patch.yaml index 0ffbf9357e9f5..d11ceeaba5236 100644 --- a/operator/config/overlays/community/manager_related_image_patch.yaml +++ b/operator/config/overlays/community/manager_related_image_patch.yaml @@ -9,7 +9,7 @@ spec: - name: manager env: - name: RELATED_IMAGE_LOKI - value: docker.io/grafana/loki:2.9.0 + value: docker.io/grafana/loki:2.9.1 - name: RELATED_IMAGE_GATEWAY value: quay.io/observatorium/api:latest - name: RELATED_IMAGE_OPA diff --git a/operator/config/overlays/development/manager_related_image_patch.yaml b/operator/config/overlays/development/manager_related_image_patch.yaml index e8d3ea371e0b3..e96aa0b72ac15 100644 --- a/operator/config/overlays/development/manager_related_image_patch.yaml +++ b/operator/config/overlays/development/manager_related_image_patch.yaml @@ -9,6 +9,6 @@ spec: - name: manager env: - name: RELATED_IMAGE_LOKI - value: docker.io/grafana/loki:2.9.0 + value: docker.io/grafana/loki:2.9.1 - name: RELATED_IMAGE_GATEWAY value: 
quay.io/observatorium/api:latest diff --git a/operator/config/overlays/openshift/manager_related_image_patch.yaml b/operator/config/overlays/openshift/manager_related_image_patch.yaml index 3b4dd25c2a788..f8319085b49d4 100644 --- a/operator/config/overlays/openshift/manager_related_image_patch.yaml +++ b/operator/config/overlays/openshift/manager_related_image_patch.yaml @@ -9,7 +9,7 @@ spec: - name: manager env: - name: RELATED_IMAGE_LOKI - value: quay.io/openshift-logging/loki:v2.9.0 + value: quay.io/openshift-logging/loki:v2.9.1 - name: RELATED_IMAGE_GATEWAY value: quay.io/observatorium/api:latest - name: RELATED_IMAGE_OPA diff --git a/operator/docs/operator/compatibility.md b/operator/docs/operator/compatibility.md index 35ce91b19f6bb..adcf373b99338 100644 --- a/operator/docs/operator/compatibility.md +++ b/operator/docs/operator/compatibility.md @@ -34,3 +34,4 @@ The versions of Loki compatible to be run with the Loki Operator are: * v2.8.0 * v2.8.3 * v2.9.0 +* v2.9.1 diff --git a/operator/hack/addons_dev.yaml b/operator/hack/addons_dev.yaml index 2e74fbbc1841d..5abb90ebd6c5f 100644 --- a/operator/hack/addons_dev.yaml +++ b/operator/hack/addons_dev.yaml @@ -29,7 +29,7 @@ spec: spec: containers: - name: logcli - image: docker.io/grafana/logcli:2.9.0-amd64 + image: docker.io/grafana/logcli:2.9.1-amd64 imagePullPolicy: IfNotPresent command: - /bin/sh @@ -73,7 +73,7 @@ spec: spec: containers: - name: promtail - image: docker.io/grafana/promtail:2.9.0 + image: docker.io/grafana/promtail:2.9.1 args: - -config.file=/etc/promtail/promtail.yaml - -log.level=info diff --git a/operator/hack/addons_ocp.yaml b/operator/hack/addons_ocp.yaml index acdd9fdfb4ecb..c878b59ca5de2 100644 --- a/operator/hack/addons_ocp.yaml +++ b/operator/hack/addons_ocp.yaml @@ -29,7 +29,7 @@ spec: spec: containers: - name: logcli - image: docker.io/grafana/logcli:2.9.0-amd64 + image: docker.io/grafana/logcli:2.9.1-amd64 imagePullPolicy: IfNotPresent command: - /bin/sh @@ -70,7 +70,7 @@ spec: spec: containers: - name: promtail - image: docker.io/grafana/promtail:2.9.0 + image: docker.io/grafana/promtail:2.9.1 args: - -config.file=/etc/promtail/promtail.yaml - -log.level=info diff --git a/operator/internal/manifests/var.go b/operator/internal/manifests/var.go index 649d0bc2bb616..8a728ab664e51 100644 --- a/operator/internal/manifests/var.go +++ b/operator/internal/manifests/var.go @@ -58,7 +58,7 @@ const ( EnvRelatedImageGateway = "RELATED_IMAGE_GATEWAY" // DefaultContainerImage declares the default fallback for loki image. - DefaultContainerImage = "docker.io/grafana/loki:2.9.0" + DefaultContainerImage = "docker.io/grafana/loki:2.9.1" // DefaultLokiStackGatewayImage declares the default image for lokiStack-gateway. 
DefaultLokiStackGatewayImage = "quay.io/observatorium/api:latest" diff --git a/operator/jsonnet/jsonnetfile.json b/operator/jsonnet/jsonnetfile.json index 54dce457bc309..4c40958da2cb5 100644 --- a/operator/jsonnet/jsonnetfile.json +++ b/operator/jsonnet/jsonnetfile.json @@ -8,7 +8,7 @@ "subdir": "production/loki-mixin" } }, - "version": "v2.9.0" + "version": "v2.9.1" } ], "legacyImports": true diff --git a/operator/jsonnet/jsonnetfile.lock.json b/operator/jsonnet/jsonnetfile.lock.json index f29fd0e9f4de4..a6fb089abc698 100644 --- a/operator/jsonnet/jsonnetfile.lock.json +++ b/operator/jsonnet/jsonnetfile.lock.json @@ -38,7 +38,7 @@ "subdir": "production/loki-mixin" } }, - "version": "1485f6eee846a22e4cf68f9fff1e2d6c0f71efc7", + "version": "1e450c51dcad77b236971b026dde455637ce0081", "sum": "a/71V1QzEB46ewPIE2nyNp2HlYFwmDqmSddNulZPP40=" }, {
operator
Update Loki operand to v2.9.1 (#10600)
ce7b2e89d9470e4e6a61a94f2b51ff8b938b5a5e
2024-09-30 18:50:52
Periklis Tsirakidis
fix(operator): Add missing groupBy label for all rules on OpenShift (#14279)
false
diff --git a/operator/internal/manifests/openshift/alertingrule.go b/operator/internal/manifests/openshift/alertingrule.go index 22923ed15482e..e4869e9f7ca5c 100644 --- a/operator/internal/manifests/openshift/alertingrule.go +++ b/operator/internal/manifests/openshift/alertingrule.go @@ -5,23 +5,32 @@ import lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" func AlertingRuleTenantLabels(ar *lokiv1.AlertingRule) { switch ar.Spec.TenantID { case tenantApplication: - for groupIdx, group := range ar.Spec.Groups { - group := group - for ruleIdx, rule := range group.Rules { - rule := rule - if rule.Labels == nil { - rule.Labels = map[string]string{} - } - rule.Labels[opaDefaultLabelMatcher] = ar.Namespace - group.Rules[ruleIdx] = rule - } - ar.Spec.Groups[groupIdx] = group - } - case tenantInfrastructure, tenantAudit: - // Do nothing - case tenantNetwork: - // Do nothing + appendAlertingRuleLabels(ar, map[string]string{ + opaDefaultLabelMatcher: ar.Namespace, + ocpMonitoringGroupByLabel: ar.Namespace, + }) + case tenantInfrastructure, tenantAudit, tenantNetwork: + appendAlertingRuleLabels(ar, map[string]string{ + ocpMonitoringGroupByLabel: ar.Namespace, + }) default: // Do nothing } } + +func appendAlertingRuleLabels(ar *lokiv1.AlertingRule, labels map[string]string) { + for groupIdx, group := range ar.Spec.Groups { + for ruleIdx, rule := range group.Rules { + if rule.Labels == nil { + rule.Labels = map[string]string{} + } + + for name, value := range labels { + rule.Labels[name] = value + } + + group.Rules[ruleIdx] = rule + } + ar.Spec.Groups[groupIdx] = group + } +} diff --git a/operator/internal/manifests/openshift/alertingrule_test.go b/operator/internal/manifests/openshift/alertingrule_test.go index 91da560e2a6df..2a1d032e8ed47 100644 --- a/operator/internal/manifests/openshift/alertingrule_test.go +++ b/operator/internal/manifests/openshift/alertingrule_test.go @@ -46,7 +46,8 @@ func TestAlertingRuleTenantLabels(t *testing.T) { { Alert: "alert", Labels: map[string]string{ - opaDefaultLabelMatcher: "test-ns", + opaDefaultLabelMatcher: "test-ns", + ocpMonitoringGroupByLabel: "test-ns", }, }, }, @@ -57,6 +58,9 @@ func TestAlertingRuleTenantLabels(t *testing.T) { }, { rule: &lokiv1.AlertingRule{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-ns", + }, Spec: lokiv1.AlertingRuleSpec{ TenantID: tenantInfrastructure, Groups: []*lokiv1.AlertingRuleGroup{ @@ -72,6 +76,9 @@ func TestAlertingRuleTenantLabels(t *testing.T) { }, }, want: &lokiv1.AlertingRule{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-ns", + }, Spec: lokiv1.AlertingRuleSpec{ TenantID: tenantInfrastructure, Groups: []*lokiv1.AlertingRuleGroup{ @@ -80,6 +87,9 @@ func TestAlertingRuleTenantLabels(t *testing.T) { Rules: []*lokiv1.AlertingRuleGroupSpec{ { Alert: "alert", + Labels: map[string]string{ + ocpMonitoringGroupByLabel: "test-ns", + }, }, }, }, @@ -89,6 +99,9 @@ func TestAlertingRuleTenantLabels(t *testing.T) { }, { rule: &lokiv1.AlertingRule{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-ns", + }, Spec: lokiv1.AlertingRuleSpec{ TenantID: tenantAudit, Groups: []*lokiv1.AlertingRuleGroup{ @@ -104,6 +117,9 @@ func TestAlertingRuleTenantLabels(t *testing.T) { }, }, want: &lokiv1.AlertingRule{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-ns", + }, Spec: lokiv1.AlertingRuleSpec{ TenantID: tenantAudit, Groups: []*lokiv1.AlertingRuleGroup{ @@ -112,6 +128,9 @@ func TestAlertingRuleTenantLabels(t *testing.T) { Rules: []*lokiv1.AlertingRuleGroupSpec{ { Alert: "alert", + Labels: map[string]string{ + 
ocpMonitoringGroupByLabel: "test-ns", + }, }, }, }, @@ -121,6 +140,9 @@ func TestAlertingRuleTenantLabels(t *testing.T) { }, { rule: &lokiv1.AlertingRule{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-ns", + }, Spec: lokiv1.AlertingRuleSpec{ TenantID: tenantNetwork, Groups: []*lokiv1.AlertingRuleGroup{ @@ -136,6 +158,9 @@ func TestAlertingRuleTenantLabels(t *testing.T) { }, }, want: &lokiv1.AlertingRule{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-ns", + }, Spec: lokiv1.AlertingRuleSpec{ TenantID: tenantNetwork, Groups: []*lokiv1.AlertingRuleGroup{ @@ -144,6 +169,9 @@ func TestAlertingRuleTenantLabels(t *testing.T) { Rules: []*lokiv1.AlertingRuleGroupSpec{ { Alert: "alert", + Labels: map[string]string{ + ocpMonitoringGroupByLabel: "test-ns", + }, }, }, }, @@ -153,6 +181,9 @@ func TestAlertingRuleTenantLabels(t *testing.T) { }, { rule: &lokiv1.AlertingRule{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-ns", + }, Spec: lokiv1.AlertingRuleSpec{ TenantID: "unknown", Groups: []*lokiv1.AlertingRuleGroup{ @@ -168,6 +199,9 @@ func TestAlertingRuleTenantLabels(t *testing.T) { }, }, want: &lokiv1.AlertingRule{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-ns", + }, Spec: lokiv1.AlertingRuleSpec{ TenantID: "unknown", Groups: []*lokiv1.AlertingRuleGroup{ diff --git a/operator/internal/manifests/openshift/opa_openshift.go b/operator/internal/manifests/openshift/opa_openshift.go index 9175983f89e14..ccf5eac09b7a7 100644 --- a/operator/internal/manifests/openshift/opa_openshift.go +++ b/operator/internal/manifests/openshift/opa_openshift.go @@ -13,14 +13,15 @@ import ( ) const ( - envRelatedImageOPA = "RELATED_IMAGE_OPA" - defaultOPAImage = "quay.io/observatorium/opa-openshift:latest" - opaContainerName = "opa" - opaDefaultPackage = "lokistack" - opaDefaultAPIGroup = "loki.grafana.com" - opaMetricsPortName = "opa-metrics" - opaDefaultLabelMatcher = "kubernetes_namespace_name" - opaNetworkLabelMatchers = "SrcK8S_Namespace,DstK8S_Namespace" + envRelatedImageOPA = "RELATED_IMAGE_OPA" + defaultOPAImage = "quay.io/observatorium/opa-openshift:latest" + opaContainerName = "opa" + opaDefaultPackage = "lokistack" + opaDefaultAPIGroup = "loki.grafana.com" + opaMetricsPortName = "opa-metrics" + opaDefaultLabelMatcher = "kubernetes_namespace_name" + opaNetworkLabelMatchers = "SrcK8S_Namespace,DstK8S_Namespace" + ocpMonitoringGroupByLabel = "namespace" ) func newOPAOpenShiftContainer(mode lokiv1.ModeType, secretVolumeName, tlsDir, minTLSVersion, ciphers string, withTLS bool, adminGroups []string) corev1.Container { diff --git a/operator/internal/manifests/openshift/recordingrule.go b/operator/internal/manifests/openshift/recordingrule.go new file mode 100644 index 0000000000000..97be1bb4a17ec --- /dev/null +++ b/operator/internal/manifests/openshift/recordingrule.go @@ -0,0 +1,36 @@ +package openshift + +import lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" + +func RecordingRuleTenantLabels(r *lokiv1.RecordingRule) { + switch r.Spec.TenantID { + case tenantApplication: + appendRecordingRuleLabels(r, map[string]string{ + opaDefaultLabelMatcher: r.Namespace, + ocpMonitoringGroupByLabel: r.Namespace, + }) + case tenantInfrastructure, tenantAudit, tenantNetwork: + appendRecordingRuleLabels(r, map[string]string{ + ocpMonitoringGroupByLabel: r.Namespace, + }) + default: + // Do nothing + } +} + +func appendRecordingRuleLabels(r *lokiv1.RecordingRule, labels map[string]string) { + for groupIdx, group := range r.Spec.Groups { + for ruleIdx, rule := range group.Rules { + if rule.Labels == nil 
{ + rule.Labels = map[string]string{} + } + + for name, value := range labels { + rule.Labels[name] = value + } + + group.Rules[ruleIdx] = rule + } + r.Spec.Groups[groupIdx] = group + } +} diff --git a/operator/internal/manifests/openshift/recordingrule_test.go b/operator/internal/manifests/openshift/recordingrule_test.go index 49e30de999f35..6a620bc85d8de 100644 --- a/operator/internal/manifests/openshift/recordingrule_test.go +++ b/operator/internal/manifests/openshift/recordingrule_test.go @@ -46,7 +46,8 @@ func TestRecordingRuleTenantLabels(t *testing.T) { { Record: "record", Labels: map[string]string{ - opaDefaultLabelMatcher: "test-ns", + opaDefaultLabelMatcher: "test-ns", + ocpMonitoringGroupByLabel: "test-ns", }, }, }, @@ -57,6 +58,9 @@ func TestRecordingRuleTenantLabels(t *testing.T) { }, { rule: &lokiv1.RecordingRule{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-ns", + }, Spec: lokiv1.RecordingRuleSpec{ TenantID: tenantInfrastructure, Groups: []*lokiv1.RecordingRuleGroup{ @@ -72,6 +76,9 @@ func TestRecordingRuleTenantLabels(t *testing.T) { }, }, want: &lokiv1.RecordingRule{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-ns", + }, Spec: lokiv1.RecordingRuleSpec{ TenantID: tenantInfrastructure, Groups: []*lokiv1.RecordingRuleGroup{ @@ -80,6 +87,9 @@ func TestRecordingRuleTenantLabels(t *testing.T) { Rules: []*lokiv1.RecordingRuleGroupSpec{ { Record: "record", + Labels: map[string]string{ + ocpMonitoringGroupByLabel: "test-ns", + }, }, }, }, @@ -89,6 +99,9 @@ func TestRecordingRuleTenantLabels(t *testing.T) { }, { rule: &lokiv1.RecordingRule{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-ns", + }, Spec: lokiv1.RecordingRuleSpec{ TenantID: tenantAudit, Groups: []*lokiv1.RecordingRuleGroup{ @@ -104,6 +117,9 @@ func TestRecordingRuleTenantLabels(t *testing.T) { }, }, want: &lokiv1.RecordingRule{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-ns", + }, Spec: lokiv1.RecordingRuleSpec{ TenantID: tenantAudit, Groups: []*lokiv1.RecordingRuleGroup{ @@ -112,6 +128,9 @@ func TestRecordingRuleTenantLabels(t *testing.T) { Rules: []*lokiv1.RecordingRuleGroupSpec{ { Record: "record", + Labels: map[string]string{ + ocpMonitoringGroupByLabel: "test-ns", + }, }, }, }, @@ -121,6 +140,9 @@ func TestRecordingRuleTenantLabels(t *testing.T) { }, { rule: &lokiv1.RecordingRule{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-ns", + }, Spec: lokiv1.RecordingRuleSpec{ TenantID: tenantNetwork, Groups: []*lokiv1.RecordingRuleGroup{ @@ -136,6 +158,9 @@ func TestRecordingRuleTenantLabels(t *testing.T) { }, }, want: &lokiv1.RecordingRule{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-ns", + }, Spec: lokiv1.RecordingRuleSpec{ TenantID: tenantNetwork, Groups: []*lokiv1.RecordingRuleGroup{ @@ -144,6 +169,9 @@ func TestRecordingRuleTenantLabels(t *testing.T) { Rules: []*lokiv1.RecordingRuleGroupSpec{ { Record: "record", + Labels: map[string]string{ + ocpMonitoringGroupByLabel: "test-ns", + }, }, }, }, @@ -153,6 +181,9 @@ func TestRecordingRuleTenantLabels(t *testing.T) { }, { rule: &lokiv1.RecordingRule{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-ns", + }, Spec: lokiv1.RecordingRuleSpec{ TenantID: "unknown", Groups: []*lokiv1.RecordingRuleGroup{ @@ -168,6 +199,9 @@ func TestRecordingRuleTenantLabels(t *testing.T) { }, }, want: &lokiv1.RecordingRule{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-ns", + }, Spec: lokiv1.RecordingRuleSpec{ TenantID: "unknown", Groups: []*lokiv1.RecordingRuleGroup{ diff --git a/operator/internal/manifests/openshift/recordngrule.go 
b/operator/internal/manifests/openshift/recordngrule.go deleted file mode 100644 index e4448affeae99..0000000000000 --- a/operator/internal/manifests/openshift/recordngrule.go +++ /dev/null @@ -1,27 +0,0 @@ -package openshift - -import lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" - -func RecordingRuleTenantLabels(r *lokiv1.RecordingRule) { - switch r.Spec.TenantID { - case tenantApplication: - for groupIdx, group := range r.Spec.Groups { - group := group - for ruleIdx, rule := range group.Rules { - rule := rule - if rule.Labels == nil { - rule.Labels = map[string]string{} - } - rule.Labels[opaDefaultLabelMatcher] = r.Namespace - group.Rules[ruleIdx] = rule - } - r.Spec.Groups[groupIdx] = group - } - case tenantInfrastructure, tenantAudit: - // Do nothing - case tenantNetwork: - // Do nothing - default: - // Do nothing - } -}
fix
Add missing groupBy label for all rules on OpenShift (#14279)
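Both refactored helpers (appendAlertingRuleLabels and the new appendRecordingRuleLabels) share one shape: merge a label map into every rule of every group, allocating the Labels map lazily and writing the modified copy back into the slice, because range in Go yields copies of the element. A simplified sketch with stand-in types; rule and ruleGroup here are illustrative, not the operator's API.

```go
package main

import "fmt"

// rule and ruleGroup are simplified stand-ins for the operator's
// AlertingRule/RecordingRule group types.
type rule struct {
	Name   string
	Labels map[string]string
}

type ruleGroup struct{ Rules []rule }

// appendLabels merges the given labels into every rule. The write-back
// of the rule copy (g.Rules[ri] = r) is the essential step: a lazily
// allocated Labels map would otherwise be lost with the loop variable.
func appendLabels(groups []ruleGroup, labels map[string]string) {
	for gi, g := range groups {
		for ri, r := range g.Rules {
			if r.Labels == nil {
				r.Labels = map[string]string{}
			}
			for k, v := range labels {
				r.Labels[k] = v
			}
			g.Rules[ri] = r
		}
		groups[gi] = g
	}
}

func main() {
	groups := []ruleGroup{{Rules: []rule{{Name: "alert"}}}}
	appendLabels(groups, map[string]string{"namespace": "test-ns"})
	fmt.Println(groups[0].Rules[0].Labels) // map[namespace:test-ns]
}
```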
ad60738b56ab1ba58c9b2a1062a881284a3ed309
2024-08-12 19:23:11
renovate[bot]
chore(deps): update grafana/loki-build-image docker tag to v0.33.6 (#13859)
false
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 8a653a1c0d14f..2b6fb69076ca2 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,5 +1,5 @@ { - "image": "grafana/loki-build-image:0.33.5", + "image": "grafana/loki-build-image:0.33.6", "containerEnv": { "BUILD_IN_CONTAINER": "false" }, diff --git a/clients/cmd/docker-driver/Dockerfile b/clients/cmd/docker-driver/Dockerfile index 9ab546a2beda1..d3eddaa93da60 100644 --- a/clients/cmd/docker-driver/Dockerfile +++ b/clients/cmd/docker-driver/Dockerfile @@ -1,4 +1,4 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.33.5 +ARG BUILD_IMAGE=grafana/loki-build-image:0.33.6 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/loki -f cmd/loki/Dockerfile . diff --git a/clients/cmd/promtail/Dockerfile.cross b/clients/cmd/promtail/Dockerfile.cross index 523955d6adbd0..5bf89e71fa16d 100644 --- a/clients/cmd/promtail/Dockerfile.cross +++ b/clients/cmd/promtail/Dockerfile.cross @@ -1,4 +1,4 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.33.5 +ARG BUILD_IMAGE=grafana/loki-build-image:0.33.6 ARG GO_VERSION=1.22 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: diff --git a/clients/cmd/promtail/Dockerfile.debug b/clients/cmd/promtail/Dockerfile.debug index 094296aa43dd3..baf41cc163493 100644 --- a/clients/cmd/promtail/Dockerfile.debug +++ b/clients/cmd/promtail/Dockerfile.debug @@ -2,7 +2,7 @@ # This file is intended to be called from the root like so: # docker build -t grafana/promtail -f clients/cmd/promtail/Dockerfile.debug . -FROM grafana/loki-build-image:0.33.5 as build +FROM grafana/loki-build-image:0.33.6 as build ARG GOARCH="amd64" COPY . /src/loki WORKDIR /src/loki diff --git a/cmd/loki-canary/Dockerfile.cross b/cmd/loki-canary/Dockerfile.cross index cce9f1b842d51..078cb62a37264 100644 --- a/cmd/loki-canary/Dockerfile.cross +++ b/cmd/loki-canary/Dockerfile.cross @@ -1,4 +1,4 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.33.5 +ARG BUILD_IMAGE=grafana/loki-build-image:0.33.6 ARG GO_VERSION=1.22 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: diff --git a/cmd/loki/Dockerfile.debug b/cmd/loki/Dockerfile.debug index 64bcd8bfbf257..d2a3d7c4dbbef 100644 --- a/cmd/loki/Dockerfile.debug +++ b/cmd/loki/Dockerfile.debug @@ -1,4 +1,4 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.33.5 +ARG BUILD_IMAGE=grafana/loki-build-image:0.33.6 ARG GO_VERSION=1.22 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: diff --git a/cmd/querytee/Dockerfile.cross b/cmd/querytee/Dockerfile.cross index 0a8f0a2718bc6..478f69a67e3b1 100644 --- a/cmd/querytee/Dockerfile.cross +++ b/cmd/querytee/Dockerfile.cross @@ -1,4 +1,4 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.33.5 +ARG BUILD_IMAGE=grafana/loki-build-image:0.33.6 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/promtail -f cmd/promtail/Dockerfile . 
diff --git a/operator/Dockerfile.cross b/operator/Dockerfile.cross index 75b5c41d3d714..26c53ff89d077 100644 --- a/operator/Dockerfile.cross +++ b/operator/Dockerfile.cross @@ -1,4 +1,4 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.33.5 +ARG BUILD_IMAGE=grafana/loki-build-image:0.33.6 FROM golang:1.22.6-alpine as goenv RUN go env GOARCH > /goarch && \
chore
update grafana/loki-build-image docker tag to v0.33.6 (#13859)
ae7fab62740b34e0b8381d3b0943e719662d1aea
2024-11-16 01:59:21
renovate[bot]
fix(deps): update module github.com/prometheus/common to v0.60.1 (#14962)
false
diff --git a/go.mod b/go.mod index f2afee0894d06..a340cabda8acb 100644 --- a/go.mod +++ b/go.mod @@ -84,7 +84,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.55.0 + github.com/prometheus/common v0.60.1 github.com/prometheus/prometheus v0.53.2-0.20240726125539-d4f098ae80fb github.com/segmentio/fasthash v1.0.3 github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c diff --git a/go.sum b/go.sum index eff4c8b634fc3..1e136c2f999e9 100644 --- a/go.sum +++ b/go.sum @@ -2418,8 +2418,8 @@ github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+ github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= +github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.11.0 h1:yNTsuZ0aNCNFQ3aFTD2uhPOvr4iD7fdBvKPAEGkNf+g= diff --git a/vendor/github.com/prometheus/common/config/headers.go b/vendor/github.com/prometheus/common/config/headers.go index 4a0be4a10e9c1..7276742ec906f 100644 --- a/vendor/github.com/prometheus/common/config/headers.go +++ b/vendor/github.com/prometheus/common/config/headers.go @@ -52,14 +52,6 @@ var reservedHeaders = map[string]struct{}{ // Headers represents the configuration for HTTP headers. type Headers struct { Headers map[string]Header `yaml:",inline"` - dir string -} - -// Header represents the configuration for a single HTTP header. -type Header struct { - Values []string `yaml:"values,omitempty" json:"values,omitempty"` - Secrets []Secret `yaml:"secrets,omitempty" json:"secrets,omitempty"` - Files []string `yaml:"files,omitempty" json:"files,omitempty"` } func (h Headers) MarshalJSON() ([]byte, error) { @@ -67,32 +59,40 @@ func (h Headers) MarshalJSON() ([]byte, error) { return json.Marshal(h.Headers) } -// SetDirectory records the directory to make headers file relative to the -// configuration file. +// SetDirectory make headers file relative to the configuration file. func (h *Headers) SetDirectory(dir string) { if h == nil { return } - h.dir = dir + for _, h := range h.Headers { + h.SetDirectory(dir) + } } // Validate validates the Headers config. func (h *Headers) Validate() error { - for n, header := range h.Headers { + for n := range h.Headers { if _, ok := reservedHeaders[http.CanonicalHeaderKey(n)]; ok { return fmt.Errorf("setting header %q is not allowed", http.CanonicalHeaderKey(n)) } - for _, v := range header.Files { - f := JoinDir(h.dir, v) - _, err := os.ReadFile(f) - if err != nil { - return fmt.Errorf("unable to read header %q from file %s: %w", http.CanonicalHeaderKey(n), f, err) - } - } } return nil } +// Header represents the configuration for a single HTTP header. 
+type Header struct { + Values []string `yaml:"values,omitempty" json:"values,omitempty"` + Secrets []Secret `yaml:"secrets,omitempty" json:"secrets,omitempty"` + Files []string `yaml:"files,omitempty" json:"files,omitempty"` +} + +// SetDirectory makes headers file relative to the configuration file. +func (h *Header) SetDirectory(dir string) { + for i := range h.Files { + h.Files[i] = JoinDir(dir, h.Files[i]) + } +} + // NewHeadersRoundTripper returns a RoundTripper that sets HTTP headers on // requests as configured. func NewHeadersRoundTripper(config *Headers, next http.RoundTripper) http.RoundTripper { @@ -121,10 +121,9 @@ func (rt *headersRoundTripper) RoundTrip(req *http.Request) (*http.Response, err req.Header.Add(n, string(v)) } for _, v := range h.Files { - f := JoinDir(rt.config.dir, v) - b, err := os.ReadFile(f) + b, err := os.ReadFile(v) if err != nil { - return nil, fmt.Errorf("unable to read headers file %s: %w", f, err) + return nil, fmt.Errorf("unable to read headers file %s: %w", v, err) } req.Header.Add(n, strings.TrimSpace(string(b))) } diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go index 3e3201347765a..e6bdd4c035dab 100644 --- a/vendor/github.com/prometheus/common/config/http_config.go +++ b/vendor/github.com/prometheus/common/config/http_config.go @@ -679,8 +679,8 @@ func NewRoundTripperFromConfigWithContext(ctx context.Context, cfg HTTPClientCon if err != nil { return nil, err } - if tlsSettings.CA == nil || tlsSettings.CA.Immutable() { - // No need for a RoundTripper that reloads the CA file automatically. + if tlsSettings.immutable() { + // No need for a RoundTripper that reloads the files automatically. return newRT(tlsConfig) } return NewTLSRoundTripperWithContext(ctx, tlsConfig, tlsSettings, newRT) @@ -828,7 +828,7 @@ type basicAuthRoundTripper struct { // NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a request unless it has // already been set. 
-func NewBasicAuthRoundTripper(username SecretReader, password SecretReader, rt http.RoundTripper) http.RoundTripper { +func NewBasicAuthRoundTripper(username, password SecretReader, rt http.RoundTripper) http.RoundTripper { return &basicAuthRoundTripper{username, password, rt} } @@ -914,7 +914,7 @@ func (rt *oauth2RoundTripper) newOauth2TokenSource(req *http.Request, secret str if err != nil { return nil, nil, err } - if tlsSettings.CA == nil || tlsSettings.CA.Immutable() { + if tlsSettings.immutable() { t, _ = tlsTransport(tlsConfig) } else { t, err = NewTLSRoundTripperWithContext(req.Context(), tlsConfig, tlsSettings, tlsTransport) @@ -964,7 +964,7 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro } rt.mtx.Lock() - rt.lastSecret = secret + rt.lastSecret = newSecret rt.lastRT.Source = source if rt.client != nil { rt.client.CloseIdleConnections() @@ -1259,6 +1259,10 @@ type TLSRoundTripperSettings struct { Key SecretReader } +func (t *TLSRoundTripperSettings) immutable() bool { + return (t.CA == nil || t.CA.Immutable()) && (t.Cert == nil || t.Cert.Immutable()) && (t.Key == nil || t.Key.Immutable()) +} + func NewTLSRoundTripper( cfg *tls.Config, settings TLSRoundTripperSettings, diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 25cfaa21643e4..1448439b7f722 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -45,7 +45,7 @@ func ResponseFormat(h http.Header) Format { mediatype, params, err := mime.ParseMediaType(ct) if err != nil { - return fmtUnknown + return FmtUnknown } const textType = "text/plain" @@ -53,21 +53,21 @@ func ResponseFormat(h http.Header) Format { switch mediatype { case ProtoType: if p, ok := params["proto"]; ok && p != ProtoProtocol { - return fmtUnknown + return FmtUnknown } if e, ok := params["encoding"]; ok && e != "delimited" { - return fmtUnknown + return FmtUnknown } - return fmtProtoDelim + return FmtProtoDelim case textType: if v, ok := params["version"]; ok && v != TextVersion { - return fmtUnknown + return FmtUnknown } - return fmtText + return FmtText } - return fmtUnknown + return FmtUnknown } // NewDecoder returns a new decoder based on the given input format. 
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index ff5ef7a9d9204..cf0c150c2e184 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -77,18 +77,18 @@ func Negotiate(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return fmtProtoDelim + escapingScheme + return FmtProtoDelim + escapingScheme case "text": - return fmtProtoText + escapingScheme + return FmtProtoText + escapingScheme case "compact-text": - return fmtProtoCompact + escapingScheme + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return fmtText + escapingScheme + return FmtText + escapingScheme } } - return fmtText + escapingScheme + return FmtText + escapingScheme } // NegotiateIncludingOpenMetrics works like Negotiate but includes @@ -110,26 +110,26 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return fmtProtoDelim + escapingScheme + return FmtProtoDelim + escapingScheme case "text": - return fmtProtoText + escapingScheme + return FmtProtoText + escapingScheme case "compact-text": - return fmtProtoCompact + escapingScheme + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return fmtText + escapingScheme + return FmtText + escapingScheme } if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { switch ver { case OpenMetricsVersion_1_0_0: - return fmtOpenMetrics_1_0_0 + escapingScheme + return FmtOpenMetrics_1_0_0 + escapingScheme default: - return fmtOpenMetrics_0_0_1 + escapingScheme + return FmtOpenMetrics_0_0_1 + escapingScheme } } } - return fmtText + escapingScheme + return FmtText + escapingScheme } // NewEncoder returns a new encoder based on content type negotiation. All diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index 051b38cd17813..d942af8edd46d 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -32,24 +32,31 @@ type Format string // it on the wire, new content-type strings will have to be agreed upon and // added here. const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" OpenMetricsType = `application/openmetrics-text` OpenMetricsVersion_0_0_1 = "0.0.1" OpenMetricsVersion_1_0_0 = "1.0.0" - // The Content-Type values for the different wire protocols. Note that these - // values are now unexported. If code was relying on comparisons to these - // constants, instead use FormatType(). 
- fmtUnknown Format = `<unknown>` - fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - fmtProtoDelim Format = protoFmt + ` encoding=delimited` - fmtProtoText Format = protoFmt + ` encoding=text` - fmtProtoCompact Format = protoFmt + ` encoding=compact-text` - fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` - fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` + // The Content-Type values for the different wire protocols. Do not do direct + // comparisons to these constants, instead use the comparison functions. + // Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead. + FmtUnknown Format = `<unknown>` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead. + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead. + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead. + FmtProtoText Format = ProtoFmt + ` encoding=text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) const ( @@ -79,17 +86,17 @@ const ( func NewFormat(t FormatType) Format { switch t { case TypeProtoCompact: - return fmtProtoCompact + return FmtProtoCompact case TypeProtoDelim: - return fmtProtoDelim + return FmtProtoDelim case TypeProtoText: - return fmtProtoText + return FmtProtoText case TypeTextPlain: - return fmtText + return FmtText case TypeOpenMetrics: - return fmtOpenMetrics_1_0_0 + return FmtOpenMetrics_1_0_0 default: - return fmtUnknown + return FmtUnknown } } @@ -97,12 +104,35 @@ func NewFormat(t FormatType) Format { // specified version number. func NewOpenMetricsFormat(version string) (Format, error) { if version == OpenMetricsVersion_0_0_1 { - return fmtOpenMetrics_0_0_1, nil + return FmtOpenMetrics_0_0_1, nil } if version == OpenMetricsVersion_1_0_0 { - return fmtOpenMetrics_1_0_0, nil + return FmtOpenMetrics_1_0_0, nil } - return fmtUnknown, fmt.Errorf("unknown open metrics version string") + return FmtUnknown, fmt.Errorf("unknown open metrics version string") +} + +// WithEscapingScheme returns a copy of Format with the specified escaping +// scheme appended to the end. If an escaping scheme already exists it is +// removed. +func (f Format) WithEscapingScheme(s model.EscapingScheme) Format { + var terms []string + for _, p := range strings.Split(string(f), ";") { + toks := strings.Split(p, "=") + if len(toks) != 2 { + trimmed := strings.TrimSpace(p) + if len(trimmed) > 0 { + terms = append(terms, trimmed) + } + continue + } + key := strings.TrimSpace(toks[0]) + if key != model.EscapingKey { + terms = append(terms, strings.TrimSpace(p)) + } + } + terms = append(terms, model.EscapingKey+"="+s.String()) + return Format(strings.Join(terms, "; ")) } // FormatType deduces an overall FormatType for the given format. 
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 353c5e93f92d8..11c8ff4b9dbc5 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -477,7 +477,7 @@ func writeOpenMetricsNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces, quoted. - if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + if !model.IsValidLegacyMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index f9b8265a9ec2a..4b86434b33275 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -354,7 +354,7 @@ func writeNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces. - if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + if !model.IsValidLegacyMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) { // writeName writes a string as-is if it complies with the legacy naming // scheme, or escapes it in double quotes if not. func writeName(w enhancedWriter, name string) (int, error) { - if model.IsValidLegacyMetricName(model.LabelValue(name)) { + if model.IsValidLegacyMetricName(name) { return w.WriteString(name) } var written int diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 26490211af2a5..f085a923f6cdc 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -22,9 +22,9 @@ import ( "math" "strconv" "strings" + "unicode/utf8" dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" "github.com/prometheus/common/model" @@ -60,6 +60,7 @@ type TextParser struct { currentMF *dto.MetricFamily currentMetric *dto.Metric currentLabelPair *dto.LabelPair + currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line. // The remaining member variables are only used for summaries/histograms. currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' @@ -74,6 +75,9 @@ type TextParser struct { // count and sum of that summary/histogram. currentIsSummaryCount, currentIsSummarySum bool currentIsHistogramCount, currentIsHistogramSum bool + // These indicate if the metric name from the current line being parsed is inside + // braces and if that metric name was found respectively. + currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool } // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange @@ -137,12 +141,15 @@ func (p *TextParser) reset(in io.Reader) { } p.currentQuantile = math.NaN() p.currentBucket = math.NaN() + p.currentMF = nil } // startOfLine represents the state where the next byte read from p.buf is the // start of a line (or whitespace leading up to it). 
func (p *TextParser) startOfLine() stateFn { p.lineCount++ + p.currentMetricIsInsideBraces = false + p.currentMetricInsideBracesIsPresent = false if p.skipBlankTab(); p.err != nil { // This is the only place that we expect to see io.EOF, // which is not an error but the signal that we are done. @@ -158,6 +165,9 @@ func (p *TextParser) startOfLine() stateFn { return p.startComment case '\n': return p.startOfLine // Empty line, start the next one. + case '{': + p.currentMetricIsInsideBraces = true + return p.readingLabels } return p.readingMetricName } @@ -275,6 +285,8 @@ func (p *TextParser) startLabelName() stateFn { return nil // Unexpected end of input. } if p.currentByte == '}' { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } @@ -287,6 +299,45 @@ func (p *TextParser) startLabelName() stateFn { p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) return nil } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '=' { + if p.currentMetricIsInsideBraces { + if p.currentMetricInsideBracesIsPresent { + p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName())) + return nil + } + switch p.currentByte { + case ',': + p.setOrCreateCurrentMF() + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + p.currentMetricInsideBracesIsPresent = true + return p.startLabelName + case '}': + p.setOrCreateCurrentMF() + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte)) + return nil + } + } + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + p.currentLabelPairs = nil + return nil + } p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) @@ -296,23 +347,17 @@ func (p *TextParser) startLabelName() stateFn { // labels to 'real' labels. if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) - } - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '=' { - p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) - return nil + p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. 
labels := make(map[string]struct{}) - for _, l := range p.currentMetric.Label { + for _, l := range p.currentLabelPairs { lName := l.GetName() if _, exists := labels[lName]; !exists { labels[lName] = struct{}{} } else { p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) + p.currentLabelPairs = nil return nil } } @@ -345,6 +390,7 @@ func (p *TextParser) startLabelValue() stateFn { if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } else { @@ -371,12 +417,19 @@ func (p *TextParser) startLabelValue() stateFn { return p.startLabelName case '}': + if p.currentMF == nil { + p.parseError("invalid metric name") + return nil + } + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } return p.readingValue default: p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } @@ -585,6 +638,8 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { p.currentToken.WriteByte(p.currentByte) case 'n': p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) return @@ -610,13 +665,45 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { // but not into p.currentToken. func (p *TextParser) readTokenAsMetricName() { p.currentToken.Reset() + // A UTF-8 metric name must be quoted and may have escaped characters. + quoted := false + escaped := false if !isValidMetricNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') { return } } @@ -628,13 +715,45 @@ func (p *TextParser) readTokenAsMetricName() { // but not into p.currentToken. func (p *TextParser) readTokenAsLabelName() { p.currentToken.Reset() + // A UTF-8 label name must be quoted and may have escaped characters. 
+ quoted := false + escaped := false if !isValidLabelNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') { return } } @@ -660,6 +779,7 @@ func (p *TextParser) readTokenAsLabelValue() { p.currentToken.WriteByte('\n') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + p.currentLabelPairs = nil return } escaped = false @@ -718,19 +838,19 @@ func (p *TextParser) setOrCreateCurrentMF() { } func isValidLabelNameStart(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"' } -func isValidLabelNameContinuation(b byte) bool { - return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +func isValidLabelNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b))) } func isValidMetricNameStart(b byte) bool { return isValidLabelNameStart(b) || b == ':' } -func isValidMetricNameContinuation(b byte) bool { - return isValidLabelNameContinuation(b) || b == ':' +func isValidMetricNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameContinuation(b, quoted) || b == ':' } func isBlankOrTab(b byte) bool { diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index 3317ce22ff73d..73b7aa3e60bdd 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -97,26 +97,35 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") // therewith. type LabelName string -// IsValid returns true iff name matches the pattern of LabelNameRE for legacy -// names, and iff it's valid UTF-8 if NameValidationScheme is set to -// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the -// check but a much faster hardcoded implementation. +// IsValid returns true iff the name matches the pattern of LabelNameRE when +// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if +// NameValidationScheme is set to UTF8Validation. 
func (ln LabelName) IsValid() bool { if len(ln) == 0 { return false } switch NameValidationScheme { case LegacyValidation: - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } + return ln.IsValidLegacy() case UTF8Validation: return utf8.ValidString(string(ln)) default: panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) } +} + +// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for +// legacy names. It does not use LabelNameRE for the check but a much faster +// hardcoded implementation. +func (ln LabelName) IsValidLegacy() bool { + if len(ln) == 0 { + return false + } + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } return true } diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go index 481c47b46e5d4..abb2c9001831c 100644 --- a/vendor/github.com/prometheus/common/model/labelset_string.go +++ b/vendor/github.com/prometheus/common/model/labelset_string.go @@ -11,8 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build go1.21 - package model import ( diff --git a/vendor/github.com/prometheus/common/model/labelset_string_go120.go b/vendor/github.com/prometheus/common/model/labelset_string_go120.go deleted file mode 100644 index c4212685e71fc..0000000000000 --- a/vendor/github.com/prometheus/common/model/labelset_string_go120.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.21 - -package model - -import ( - "fmt" - "sort" - "strings" -) - -// String was optimized using functions not available for go 1.20 -// or lower. We keep the old implementation for compatibility with client_golang. -// Once client golang drops support for go 1.20 (scheduled for August 2024), this -// file can be removed. -func (l LabelSet) String() string { - labelNames := make([]string, 0, len(l)) - for name := range l { - labelNames = append(labelNames, string(name)) - } - sort.Strings(labelNames) - lstrs := make([]string, 0, len(l)) - for _, name := range labelNames { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)])) - } - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index eb865e5a59cf8..f50966bc494e4 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -34,10 +34,13 @@ var ( // goroutines are started. NameValidationScheme = LegacyValidation - // NameEscapingScheme defines the default way that names will be - // escaped when presented to systems that do not support UTF-8 names. 
If the - // Content-Type "escaping" term is specified, that will override this value. - NameEscapingScheme = ValueEncodingEscaping + // NameEscapingScheme defines the default way that names will be escaped when + // presented to systems that do not support UTF-8 names. If the Content-Type + // "escaping" term is specified, that will override this value. + // NameEscapingScheme should not be set to the NoEscaping value. That string + // is used in content negotiation to indicate that a system supports UTF-8 and + // has that feature enabled. + NameEscapingScheme = UnderscoreEscaping ) // ValidationScheme is a Go enum for determining how metric and label names will @@ -161,7 +164,7 @@ func (m Metric) FastFingerprint() Fingerprint { func IsValidMetricName(n LabelValue) bool { switch NameValidationScheme { case LegacyValidation: - return IsValidLegacyMetricName(n) + return IsValidLegacyMetricName(string(n)) case UTF8Validation: if len(n) == 0 { return false @@ -176,7 +179,7 @@ func IsValidMetricName(n LabelValue) bool { // legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. -func IsValidLegacyMetricName(n LabelValue) bool { +func IsValidLegacyMetricName(n string) bool { if len(n) == 0 { return false } @@ -208,7 +211,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF } // If the name is nil, copy as-is, don't try to escape. - if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) { + if v.Name == nil || IsValidLegacyMetricName(v.GetName()) { out.Name = v.Name } else { out.Name = proto.String(EscapeName(v.GetName(), scheme)) @@ -230,7 +233,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF for _, l := range m.Label { if l.GetName() == MetricNameLabel { - if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) { + if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) { escaped.Label = append(escaped.Label, l) continue } @@ -240,7 +243,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF }) continue } - if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) { + if l.Name == nil || IsValidLegacyMetricName(l.GetName()) { escaped.Label = append(escaped.Label, l) continue } @@ -256,10 +259,10 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF func metricNeedsEscaping(m *dto.Metric) bool { for _, l := range m.Label { - if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) { + if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) { return true } - if !IsValidLegacyMetricName(LabelValue(l.GetName())) { + if !IsValidLegacyMetricName(l.GetName()) { return true } } @@ -283,7 +286,7 @@ func EscapeName(name string, scheme EscapingScheme) string { case NoEscaping: return name case UnderscoreEscaping: - if IsValidLegacyMetricName(LabelValue(name)) { + if IsValidLegacyMetricName(name) { return name } for i, b := range name { @@ -309,7 +312,7 @@ func EscapeName(name string, scheme EscapingScheme) string { } return escaped.String() case ValueEncodingEscaping: - if IsValidLegacyMetricName(LabelValue(name)) { + if IsValidLegacyMetricName(name) { return name } escaped.WriteString("U__") @@ -452,6 +455,6 @@ func ToEscapingScheme(s string) (EscapingScheme, error) { case EscapeValues: return ValueEncodingEscaping, nil default: - return NoEscaping, 
fmt.Errorf("unknown format scheme " + s) + return NoEscaping, fmt.Errorf("unknown format scheme %s", s) } } diff --git a/vendor/modules.txt b/vendor/modules.txt index 43102f3c10f48..6eae176ab1d0e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1423,8 +1423,8 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint/validations # github.com/prometheus/client_model v0.6.1 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.55.0 -## explicit; go 1.20 +# github.com/prometheus/common v0.60.1 +## explicit; go 1.21 github.com/prometheus/common/config github.com/prometheus/common/expfmt github.com/prometheus/common/helpers/templates
fix
update module github.com/prometheus/common to v0.60.1 (#14962)
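Of the changes vendored above, the expfmt constant shuffle is the one most likely to touch downstream code: the previously unexported `fmt*` values come back as exported-but-deprecated `Fmt*` names, with `NewFormat` as the supported constructor. A minimal sketch of both spellings plus the new `WithEscapingScheme` helper, assuming only the v0.60.1 API visible in the diff above:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	// Code that compared formats against the unexported fmtText constant had
	// no supported spelling before; v0.60.x re-exports FmtText (deprecated)
	// and recommends constructing formats via NewFormat instead.
	f := expfmt.FmtText                         // deprecated, but exported again
	g := expfmt.NewFormat(expfmt.TypeTextPlain) // preferred constructor
	fmt.Println(f == g)                         // true: both are "text/plain; version=0.0.4; charset=utf-8"

	// WithEscapingScheme is new in this range: it replaces any existing
	// "escaping" term on the content type with the given scheme.
	fmt.Println(g.WithEscapingScheme(model.UnderscoreEscaping))
}
```

Because `NewFormat(TypeTextPlain)` returns the same string as `FmtText`, direct comparisons keep working during migration; new code should still avoid depending on the concrete content-type strings.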
ef0253aa7671c0eedd66dce4f562f45209590356
2025-03-18 23:59:44
renovate[bot]
chore(deps): update terraform google to v6.26.0 (main) (#16821)
false
diff --git a/tools/gcplog/main.tf b/tools/gcplog/main.tf index ad9ad1eab474f..9d82f9292518c 100644 --- a/tools/gcplog/main.tf +++ b/tools/gcplog/main.tf @@ -2,7 +2,7 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = "6.25.0" + version = "6.26.0" } } }
chore
update terraform google to v6.26.0 (main) (#16821)
c99634978cb189744946e6dc388f0cc4183e98f2
2024-06-07 15:11:19
Salva Corts
fix: Fix bloom deleter PR after merge (#13167)
false
diff --git a/pkg/bloombuild/planner/planner.go b/pkg/bloombuild/planner/planner.go index 99cc25a498889..ea2ea5db531b2 100644 --- a/pkg/bloombuild/planner/planner.go +++ b/pkg/bloombuild/planner/planner.go @@ -365,10 +365,7 @@ func (p *Planner) processTenantTaskResults( } combined := append(originalMetas, newMetas...) - outdated, err := outdatedMetas(combined) - if err != nil { - return fmt.Errorf("failed to find outdated metas: %w", err) - } + outdated := outdatedMetas(combined) level.Debug(logger).Log("msg", "found outdated metas", "outdated", len(outdated)) if err := p.deleteOutdatedMetasAndBlocks(ctx, table, tenant, outdated); err != nil { @@ -476,7 +473,7 @@ func (p *Planner) loadTenantWork( // If this is the first this we see this table, initialize the map if tenantTableWork[table] == nil { - tenantTableWork[table] = make(map[string][]v1.FingerprintBounds, tenants.Len()) + tenantTableWork[table] = make(map[string][]v1.FingerprintBounds, tenants.Remaining()) } for tenants.Next() && tenants.Err() == nil && ctx.Err() == nil { diff --git a/pkg/bloombuild/planner/versioned_range.go b/pkg/bloombuild/planner/versioned_range.go index 3a436353954e1..578b5d7ef83a6 100644 --- a/pkg/bloombuild/planner/versioned_range.go +++ b/pkg/bloombuild/planner/versioned_range.go @@ -210,17 +210,30 @@ func (t tsdbTokenRange) reassemble(from int) tsdbTokenRange { return t[:len(t)-(reassembleTo-from)] } -func outdatedMetas(metas []bloomshipper.Meta) (outdated []bloomshipper.Meta, err error) { +func outdatedMetas(metas []bloomshipper.Meta) []bloomshipper.Meta { + var outdated []bloomshipper.Meta + // Sort metas descending by most recent source when checking // for outdated metas (older metas are discarded if they don't change the range). sort.Slice(metas, func(i, j int) bool { - a, err := metas[i].MostRecentSource() - if err != nil { - panic(err.Error()) + a, aExists := metas[i].MostRecentSource() + b, bExists := metas[j].MostRecentSource() + + if !aExists && !bExists { + // stable sort two sourceless metas by their bounds (easier testing) + return metas[i].Bounds.Less(metas[j].Bounds) } - b, err := metas[j].MostRecentSource() - if err != nil { - panic(err.Error()) + + if !aExists { + // If a meta has no sources, it's out of date by definition. 
+ // By convention we sort it to the beginning of the list and will mark it for removal later + return true + } + + if !bExists { + // if a exists but b does not, mark b as lesser, sorting b to the + // front + return false } return !a.TS.Before(b.TS) }) @@ -231,9 +244,11 @@ func outdatedMetas(metas []bloomshipper.Meta) (outdated []bloomshipper.Meta, err ) for _, meta := range metas { - mostRecent, err := meta.MostRecentSource() - if err != nil { - return nil, err + mostRecent, exists := meta.MostRecentSource() + if !exists { + // if the meta exists but does not reference a TSDB, it's out of date + // TODO(owen-d): this shouldn't happen, figure out why + outdated = append(outdated, meta) } version := int(model.TimeFromUnixNano(mostRecent.TS.UnixNano())) tokenRange, added = tokenRange.Add(version, meta.Bounds) @@ -242,6 +257,5 @@ func outdatedMetas(metas []bloomshipper.Meta) (outdated []bloomshipper.Meta, err } } - return outdated, nil - + return outdated } diff --git a/pkg/bloombuild/planner/versioned_range_test.go b/pkg/bloombuild/planner/versioned_range_test.go index 9827e9cd932c5..e58f143842f1c 100644 --- a/pkg/bloombuild/planner/versioned_range_test.go +++ b/pkg/bloombuild/planner/versioned_range_test.go @@ -315,8 +315,7 @@ func Test_OutdatedMetas(t *testing.T) { }, } { t.Run(tc.desc, func(t *testing.T) { - outdated, err := outdatedMetas(tc.metas) - require.NoError(t, err) + outdated := outdatedMetas(tc.metas) require.Equal(t, tc.exp, outdated) }) }
fix
Fix bloom deleter PR after merge (#13167)
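The core of this fix is an ordering convention: `MostRecentSource` now reports presence with a comma-ok boolean instead of an error, and metas with no source deterministically sort to the front so they can be flagged outdated. A self-contained sketch of that comparator, using a simplified stand-in type rather than the real `bloomshipper.Meta`:

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

// meta is a simplified stand-in: a bounds string plus an optional source time.
type meta struct {
	bounds string
	ts     *time.Time // nil models "no sources"
}

func (m meta) mostRecentSource() (time.Time, bool) {
	if m.ts == nil {
		return time.Time{}, false
	}
	return *m.ts, true
}

func main() {
	t1, t2 := time.Unix(100, 0), time.Unix(200, 0)
	metas := []meta{{"b", &t1}, {"d", nil}, {"a", &t2}, {"c", nil}}

	sort.Slice(metas, func(i, j int) bool {
		a, aOK := metas[i].mostRecentSource()
		b, bOK := metas[j].mostRecentSource()
		switch {
		case !aOK && !bOK:
			// Stable-order two sourceless metas by their bounds.
			return metas[i].bounds < metas[j].bounds
		case !aOK:
			// Sourceless metas sort to the front; they are outdated by definition.
			return true
		case !bOK:
			return false
		default:
			// Otherwise sort descending by most recent source.
			return !a.Before(b)
		}
	})

	for _, m := range metas {
		fmt.Println(m.bounds)
	}
	// Prints c, d (sourceless first), then a (ts=200), then b (ts=100).
}
```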
0b947d956fdfd3c0bbb52934a4f3fcbbe3c7e740
2022-05-26 14:34:47
Karen Miller
docs: Reimplement PR 5649 on troubleshooting timeout errors (#6183)
false
diff --git a/docs/sources/operations/troubleshooting.md b/docs/sources/operations/troubleshooting.md index a13345772453f..4250301bca57c 100644 --- a/docs/sources/operations/troubleshooting.md +++ b/docs/sources/operations/troubleshooting.md @@ -44,6 +44,27 @@ Promtail yet. There may be one of many root causes: - Your pods are running with different labels than the ones Promtail is configured to read. Check `scrape_configs` to validate. +## Loki timeout errors + +Loki 504 errors, context canceled, and error processing requests +can have many possible causes. + +- Review Loki configuration + + - Loki configuration `querier.query_timeout` + - `server.http_server_read_timeout` + - `server.http_server_write_timeout` + - `server.http_server_idle_timeout` + +- Check your Loki deployment. +If you have a reverse proxy in front of Loki, that is, between Loki and Grafana, then check any configured timeouts, such as an NGINX proxy read timeout. + +- Other causes. To determine if the issue is related to Loki itself or another system such as Grafana or a client-side error, +attempt to run a [LogCLI](../../tools/logcli/) query in as direct a manner as you can. For example, if running on virtual machines, run the query on the local machine. If running in a Kubernetes cluster, then port forward the Loki HTTP port, and attempt to run the query there. If you do not get a timeout, then consider these causes: + + - Adjust the [Grafana dataproxy timeout](https://grafana.com/docs/grafana/latest/administration/configuration/#dataproxy). Configure Grafana with a large enough dataproxy timeout. + - Check timeouts for reverse proxies or load balancers between your client and Grafana. Queries to Grafana are made from your local browser with Grafana serving as a proxy (a dataproxy). Therefore, connections from your client to Grafana must have their timeout configured as well. + ## Troubleshooting targets Promtail exposes two web pages that can be used to understand how its service
docs
Reimplement PR 5649 on troubleshooting timeout errors (#6183)
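The new troubleshooting section recommends querying Loki as directly as possible to rule out Grafana and intermediate proxies; LogCLI is one option, and a bare request against Loki's query API is an equally direct check. A sketch under the assumption of a local or port-forwarded Loki on its default port 3100 (`/loki/api/v1/query_range` is Loki's standard range-query endpoint; the job selector is a placeholder):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
	"time"
)

func main() {
	// Assumes something like `kubectl port-forward svc/loki 3100:3100` (or a
	// local Loki), so the request skips Grafana, its dataproxy, and any
	// reverse proxies in between.
	q := url.Values{}
	q.Set("query", `{job="example"}`) // placeholder selector
	q.Set("limit", "10")

	client := &http.Client{Timeout: 60 * time.Second}
	resp, err := client.Get("http://localhost:3100/loki/api/v1/query_range?" + q.Encode())
	if err != nil {
		// A timeout here points at Loki itself rather than Grafana or a proxy.
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Println(resp.Status, len(body), "bytes")
}
```

If this direct query succeeds while the Grafana-mediated one times out, the bottleneck is in Grafana's dataproxy or an intermediate proxy, exactly as the docs suggest.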
609bc229335895924759410d7ec9f05eff1c2423
2023-06-08 17:34:45
Dylan Guedes
distributor: Make key configurable when logging failures (#9659)
false
diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md index b975de7cfe7be..d2a9b3ae22070 100644 --- a/docs/sources/configuration/_index.md +++ b/docs/sources/configuration/_index.md @@ -489,6 +489,11 @@ write_failures_logging: # Default: 1KB. # CLI flag: -distributor.write-failures-logging.rate [rate: <int> | default = 1KB] + + # Experimental and subject to change. Whether an insight=true key should be + # logged or not. Default: false. + # CLI flag: -distributor.write-failures-logging.add-insights-label + [add_insights_label: <boolean> | default = false] ``` ### querier diff --git a/pkg/distributor/writefailures/cfg.go b/pkg/distributor/writefailures/cfg.go index 7a5ce02e0cb9c..393000f2573a2 100644 --- a/pkg/distributor/writefailures/cfg.go +++ b/pkg/distributor/writefailures/cfg.go @@ -8,10 +8,14 @@ import ( type Cfg struct { LogRate flagext.ByteSize `yaml:"rate" category:"experimental"` + + AddInsightsLabel bool `yaml:"add_insights_label" category:"experimental"` } // RegisterFlags registers distributor-related flags. func (cfg *Cfg) RegisterFlagsWithPrefix(prefix string, fs *flag.FlagSet) { _ = cfg.LogRate.Set("1KB") fs.Var(&cfg.LogRate, prefix+".rate", "Experimental and subject to change. Log volume allowed (per second). Default: 1KB.") + + fs.BoolVar(&cfg.AddInsightsLabel, prefix+".add-insights-label", false, "Experimental and subject to change. Whether an insight=true key should be logged or not. Default: false.") } diff --git a/pkg/distributor/writefailures/manager.go b/pkg/distributor/writefailures/manager.go index a7da5d392101e..860941833e96e 100644 --- a/pkg/distributor/writefailures/manager.go +++ b/pkg/distributor/writefailures/manager.go @@ -17,7 +17,10 @@ type Manager struct { } func NewManager(logger log.Logger, cfg Cfg, tenants *runtime.TenantConfigs) *Manager { - logger = log.With(logger, "path", "write", "insight", "true") + logger = log.With(logger, "path", "write") + if cfg.AddInsightsLabel { + logger = log.With(logger, "insight", "true") + } strat := newStrategy(cfg.LogRate.Val(), float64(cfg.LogRate.Val()))
distributor
Make key configurable when logging failures (#9659)
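Functionally the manager change is a guard around a single `log.With` call: `path=write` is always attached, and `insight=true` only when the new flag is set. A runnable sketch of the same pattern with go-kit's logger (the logging library the diff uses), standalone rather than wired through the real `Manager`:

```go
package main

import (
	"os"

	"github.com/go-kit/log"
)

// newWriteFailuresLogger mirrors the NewManager change: "path" is always
// attached, while the insight key is gated behind the new config flag.
func newWriteFailuresLogger(addInsightsLabel bool) log.Logger {
	logger := log.NewLogfmtLogger(os.Stdout)
	logger = log.With(logger, "path", "write")
	if addInsightsLabel {
		// Only tag lines for insights tooling when explicitly enabled.
		logger = log.With(logger, "insight", "true")
	}
	return logger
}

func main() {
	_ = newWriteFailuresLogger(false).Log("msg", "write failure") // path=write msg="write failure"
	_ = newWriteFailuresLogger(true).Log("msg", "write failure")  // path=write insight=true msg="write failure"
}
```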
fe223a21e5b4cba648bc8a4393200a2ef3e0599c
2023-09-21 18:46:49
Anton Lindholm
helm: improve gitops compatibility (#10452)
false
diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md index e207293c9e2b5..c00f04b837085 100644 --- a/production/helm/loki/CHANGELOG.md +++ b/production/helm/loki/CHANGELOG.md @@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang [//]: # (<AUTOMATED_UPDATES_LOCATOR> : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.) +## 5.22.1 + +- ENHANCEMENT #10452 Improve gitops compatibility + ## 5.22.0 - [CHANGE] Changed version of Loki to 2.9.1 @@ -27,7 +31,6 @@ Entries should include a reference to the pull request that introduced the chang - [CHANGE] Changed version of Grafana Enterprise Logs to v1.8.0 - ## 5.19.0 - [FEATURE] Add optional sidecard to load rules from ConfigMaps and Secrets. diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml index c8983eee13055..58c5a3ccc020b 100644 --- a/production/helm/loki/Chart.yaml +++ b/production/helm/loki/Chart.yaml @@ -3,7 +3,7 @@ name: loki description: Helm chart for Grafana Loki in simple, scalable mode type: application appVersion: 2.9.1 -version: 5.22.0 +version: 5.22.1 home: https://grafana.github.io/helm-charts sources: - https://github.com/grafana/loki diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md index 93937159f6440..7103f5b6ff81c 100644 --- a/production/helm/loki/README.md +++ b/production/helm/loki/README.md @@ -1,6 +1,6 @@ # loki -![Version: 5.22.0](https://img.shields.io/badge/Version-5.22.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.1](https://img.shields.io/badge/AppVersion-2.9.1-informational?style=flat-square) +![Version: 5.22.1](https://img.shields.io/badge/Version-5.22.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.1](https://img.shields.io/badge/AppVersion-2.9.1-informational?style=flat-square) Helm chart for Grafana Loki in simple, scalable mode diff --git a/production/helm/loki/templates/backend/statefulset-backend.yaml b/production/helm/loki/templates/backend/statefulset-backend.yaml index 7e7cf6105592d..1ec867d5f503d 100644 --- a/production/helm/loki/templates/backend/statefulset-backend.yaml +++ b/production/helm/loki/templates/backend/statefulset-backend.yaml @@ -252,7 +252,9 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} volumeClaimTemplates: - - metadata: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: name: data spec: accessModes: diff --git a/production/helm/loki/templates/monitoring/servicemonitor.yaml b/production/helm/loki/templates/monitoring/servicemonitor.yaml index 67b03c6feee55..4bc56a84740f5 100644 --- a/production/helm/loki/templates/monitoring/servicemonitor.yaml +++ b/production/helm/loki/templates/monitoring/servicemonitor.yaml @@ -38,9 +38,11 @@ spec: {{- end }} relabelings: - sourceLabels: [job] + action: replace replacement: "{{ $.Release.Namespace }}/$1" targetLabel: job - - replacement: "{{ include "loki.clusterLabel" $ }}" + - action: replace + replacement: "{{ include "loki.clusterLabel" $ }}" targetLabel: cluster {{- with .relabelings }} {{- toYaml . 
| nindent 8 }} diff --git a/production/helm/loki/templates/read/statefulset-read.yaml b/production/helm/loki/templates/read/statefulset-read.yaml index 37c35df7767d9..a9213e7f98794 100644 --- a/production/helm/loki/templates/read/statefulset-read.yaml +++ b/production/helm/loki/templates/read/statefulset-read.yaml @@ -162,7 +162,9 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} volumeClaimTemplates: - - metadata: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: name: data spec: accessModes: diff --git a/production/helm/loki/templates/write/statefulset-write.yaml b/production/helm/loki/templates/write/statefulset-write.yaml index a10ba2d883a2c..3c1d84fc3b611 100644 --- a/production/helm/loki/templates/write/statefulset-write.yaml +++ b/production/helm/loki/templates/write/statefulset-write.yaml @@ -177,7 +177,9 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} volumeClaimTemplates: - - metadata: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: name: data spec: accessModes:
helm
improve gitops compatibility (#10452)
4e1b210017a19519631db2c451858782de28835a
2024-02-15 23:28:53
Robert Jacob
operator: Provide Azure region for managed credentials using environment variable (#11964)
false
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md index 59afb29708782..d504e4ee31b52 100644 --- a/operator/CHANGELOG.md +++ b/operator/CHANGELOG.md @@ -1,5 +1,6 @@ ## Main +- [11964](https://github.com/grafana/loki/pull/11964) **xperimental**: Provide Azure region for managed credentials using environment variable - [11920](https://github.com/grafana/loki/pull/11920) **xperimental**: Refactor handling of credentials in managed-auth mode - [11869](https://github.com/grafana/loki/pull/11869) **periklis**: Add support for running with Google Workload Identity - [11868](https://github.com/grafana/loki/pull/11868) **xperimental**: Integrate support for OpenShift-managed credentials in Azure diff --git a/operator/internal/config/managed_auth.go b/operator/internal/config/managed_auth.go index 73598e7032f8f..76f9d72f3c262 100644 --- a/operator/internal/config/managed_auth.go +++ b/operator/internal/config/managed_auth.go @@ -26,6 +26,7 @@ func discoverManagedAuthConfig() *ManagedAuthConfig { clientID := os.Getenv("CLIENTID") tenantID := os.Getenv("TENANTID") subscriptionID := os.Getenv("SUBSCRIPTIONID") + region := os.Getenv("REGION") switch { case roleARN != "": @@ -40,6 +41,7 @@ func discoverManagedAuthConfig() *ManagedAuthConfig { ClientID: clientID, SubscriptionID: subscriptionID, TenantID: tenantID, + Region: region, }, } } diff --git a/operator/internal/handlers/credentialsrequest_create.go b/operator/internal/handlers/credentialsrequest.go similarity index 67% rename from operator/internal/handlers/credentialsrequest_create.go rename to operator/internal/handlers/credentialsrequest.go index 50e06375ffd8b..0d562332dc9d5 100644 --- a/operator/internal/handlers/credentialsrequest_create.go +++ b/operator/internal/handlers/credentialsrequest.go @@ -2,12 +2,10 @@ package handlers import ( "context" - "errors" "fmt" "github.com/ViaQ/logerr/v2/kverrors" "github.com/go-logr/logr" - corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" @@ -19,11 +17,8 @@ import ( "github.com/grafana/loki/operator/internal/external/k8s" "github.com/grafana/loki/operator/internal/manifests" "github.com/grafana/loki/operator/internal/manifests/openshift" - "github.com/grafana/loki/operator/internal/manifests/storage" ) -var errAzureNoRegion = errors.New("can not create CredentialsRequest: missing secret field: region") - // CreateCredentialsRequest creates a new CredentialsRequest resource for a Lokistack // to request a cloud credentials Secret resource from the OpenShift cloud-credentials-operator. func CreateCredentialsRequest(ctx context.Context, log logr.Logger, scheme *runtime.Scheme, managedAuth *config.ManagedAuthConfig, k k8s.Client, req ctrl.Request) error { @@ -39,32 +34,6 @@ func CreateCredentialsRequest(ctx context.Context, log logr.Logger, scheme *runt return kverrors.Wrap(err, "failed to lookup LokiStack", "name", req.String()) } - if managedAuth.Azure != nil && managedAuth.Azure.Region == "" { - // Managed environment for Azure does not provide Region, but we need this for the CredentialsRequest. 
- // This looks like an oversight when creating the UI in OpenShift, but for now we need to pull this data - // from somewhere else -> the Azure Storage Secret - storageSecretName := client.ObjectKey{ - Namespace: stack.Namespace, - Name: stack.Spec.Storage.Secret.Name, - } - storageSecret := &corev1.Secret{} - if err := k.Get(ctx, storageSecretName, storageSecret); err != nil { - if apierrors.IsNotFound(err) { - // Skip this error here as it will be picked up by the LokiStack handler instead - ll.Error(err, "could not find secret for LokiStack", "name", req.String()) - return nil - } - return err - } - - region := storageSecret.Data[storage.KeyAzureRegion] - if len(region) == 0 { - return errAzureNoRegion - } - - managedAuth.Azure.Region = string(region) - } - opts := openshift.Options{ BuildOpts: openshift.BuildOptions{ LokiStackName: stack.Name, diff --git a/operator/internal/handlers/credentialsrequest_create_test.go b/operator/internal/handlers/credentialsrequest_test.go similarity index 71% rename from operator/internal/handlers/credentialsrequest_create_test.go rename to operator/internal/handlers/credentialsrequest_test.go index 626302a113274..dd6dfb50d77dc 100644 --- a/operator/internal/handlers/credentialsrequest_create_test.go +++ b/operator/internal/handlers/credentialsrequest_test.go @@ -6,7 +6,6 @@ import ( cloudcredentialv1 "github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1" "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -19,7 +18,7 @@ import ( "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes" ) -func credentialsRequestFakeClient(cr *cloudcredentialv1.CredentialsRequest, lokistack *lokiv1.LokiStack, secret *corev1.Secret) *k8sfakes.FakeClient { +func credentialsRequestFakeClient(cr *cloudcredentialv1.CredentialsRequest, lokistack *lokiv1.LokiStack) *k8sfakes.FakeClient { k := &k8sfakes.FakeClient{} k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { switch object.(type) { @@ -33,11 +32,6 @@ func credentialsRequestFakeClient(cr *cloudcredentialv1.CredentialsRequest, loki return errors.NewNotFound(schema.GroupResource{}, name.Name) } k.SetClientObject(object, lokistack) - case *corev1.Secret: - if secret == nil { - return errors.NewNotFound(schema.GroupResource{}, name.Name) - } - k.SetClientObject(object, secret) } return nil } @@ -58,7 +52,7 @@ func TestCreateCredentialsRequest_CreateNewResource(t *testing.T) { }, } - k := credentialsRequestFakeClient(nil, lokistack, nil) + k := credentialsRequestFakeClient(nil, lokistack) req := ctrl.Request{ NamespacedName: client.ObjectKey{Name: "my-stack", Namespace: "ns"}, } @@ -89,13 +83,8 @@ func TestCreateCredentialsRequest_CreateNewResourceAzure(t *testing.T) { Namespace: "ns", }, } - secret := &corev1.Secret{ - Data: map[string][]byte{ - "region": []byte(wantRegion), - }, - } - k := credentialsRequestFakeClient(nil, lokistack, secret) + k := credentialsRequestFakeClient(nil, lokistack) req := ctrl.Request{ NamespacedName: client.ObjectKey{Name: "my-stack", Namespace: "ns"}, } @@ -105,6 +94,7 @@ func TestCreateCredentialsRequest_CreateNewResourceAzure(t *testing.T) { ClientID: "test-client-id", SubscriptionID: "test-tenant-id", TenantID: "test-subscription-id", + Region: "test-region", }, } @@ -122,47 +112,6 @@ func TestCreateCredentialsRequest_CreateNewResourceAzure(t *testing.T) { 
require.Equal(t, wantRegion, providerSpec.AzureRegion) } -func TestCreateCredentialsRequest_CreateNewResourceAzure_Errors(t *testing.T) { - lokistack := &lokiv1.LokiStack{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack", - Namespace: "ns", - }, - } - req := ctrl.Request{ - NamespacedName: client.ObjectKey{Name: "my-stack", Namespace: "ns"}, - } - - tt := []struct { - secret *corev1.Secret - wantError string - }{ - { - secret: &corev1.Secret{}, - wantError: errAzureNoRegion.Error(), - }, - } - - for _, tc := range tt { - tc := tc - t.Run(tc.wantError, func(t *testing.T) { - t.Parallel() - - managedAuth := &config.ManagedAuthConfig{ - Azure: &config.AzureEnvironment{ - ClientID: "test-client-id", - SubscriptionID: "test-tenant-id", - TenantID: "test-subscription-id", - }, - } - k := credentialsRequestFakeClient(nil, lokistack, tc.secret) - - err := CreateCredentialsRequest(context.Background(), logger, scheme, managedAuth, k, req) - require.EqualError(t, err, tc.wantError) - }) - } -} - func TestCreateCredentialsRequest_DoNothing_WhenCredentialsRequestExist(t *testing.T) { req := ctrl.Request{ NamespacedName: client.ObjectKey{Name: "my-stack", Namespace: "ns"}, @@ -187,7 +136,7 @@ func TestCreateCredentialsRequest_DoNothing_WhenCredentialsRequestExist(t *testi }, } - k := credentialsRequestFakeClient(cr, lokistack, nil) + k := credentialsRequestFakeClient(cr, lokistack) err := CreateCredentialsRequest(context.Background(), logger, scheme, managedAuth, k, req) require.NoError(t, err) diff --git a/operator/internal/handlers/internal/storage/secrets.go b/operator/internal/handlers/internal/storage/secrets.go index 99bafb911ec26..2492eea4d4191 100644 --- a/operator/internal/handlers/internal/storage/secrets.go +++ b/operator/internal/handlers/internal/storage/secrets.go @@ -182,18 +182,11 @@ func extractAzureConfigSecret(s *corev1.Secret, fg configv1.FeatureGates) (*stor // Extract and validate optional fields endpointSuffix := s.Data[storage.KeyAzureStorageEndpointSuffix] audience := s.Data[storage.KeyAzureAudience] - region := s.Data[storage.KeyAzureRegion] if !workloadIdentity && len(audience) > 0 { return nil, fmt.Errorf("%w: %s", errSecretFieldNotAllowed, storage.KeyAzureAudience) } - if fg.OpenShift.ManagedAuthEnv { - if len(region) == 0 { - return nil, fmt.Errorf("%w: %s", errSecretMissingField, storage.KeyAzureRegion) - } - } - return &storage.AzureStorageConfig{ Env: string(env), Container: string(container), diff --git a/operator/internal/handlers/internal/storage/secrets_test.go b/operator/internal/handlers/internal/storage/secrets_test.go index 1363cd4a660a6..ca3623b718c1b 100644 --- a/operator/internal/handlers/internal/storage/secrets_test.go +++ b/operator/internal/handlers/internal/storage/secrets_test.go @@ -156,27 +156,6 @@ func TestAzureExtract(t *testing.T) { }, wantError: "missing secret field: subscription_id", }, - { - name: "managed auth - no region", - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Data: map[string][]byte{ - "environment": []byte("here"), - "account_name": []byte("test-account-name"), - "container": []byte("this,that"), - }, - }, - managedSecret: &corev1.Secret{ - Data: map[string][]byte{}, - }, - featureGates: configv1.FeatureGates{ - OpenShift: configv1.OpenShiftFeatureGates{ - Enabled: true, - ManagedAuthEnv: true, - }, - }, - wantError: "missing secret field: region", - }, { name: "managed auth - no auth override", secret: &corev1.Secret{ diff --git a/operator/internal/manifests/openshift/credentialsrequest.go 
b/operator/internal/manifests/openshift/credentialsrequest.go index 0e97dd97c2b19..0c0a19adc98d3 100644 --- a/operator/internal/manifests/openshift/credentialsrequest.go +++ b/operator/internal/manifests/openshift/credentialsrequest.go @@ -12,6 +12,8 @@ import ( "github.com/grafana/loki/operator/internal/manifests/storage" ) +const azureFallbackRegion = "centralus" + func BuildCredentialsRequest(opts Options) (*cloudcredentialv1.CredentialsRequest, error) { stack := client.ObjectKey{Name: opts.BuildOpts.LokiStackName, Namespace: opts.BuildOpts.LokiStackNamespace} @@ -62,6 +64,15 @@ func encodeProviderSpec(env *config.ManagedAuthConfig) (*runtime.RawExtension, e } case env.Azure != nil: azure := env.Azure + if azure.Region == "" { + // The OpenShift Console currently does not provide a UI to configure the Azure Region + // for an operator using managed credentials. Because the CredentialsRequest is currently + // not used to create a Managed Identity, the region is actually never used. + // We default to the US region if nothing is set, so that the CredentialsRequest can be + // created. This should have no effect on the generated credential secret. + // The region can be configured by setting an environment variable on the operator Subscription. + azure.Region = azureFallbackRegion + } spec = &cloudcredentialv1.AzureProviderSpec{ Permissions: []string{ diff --git a/operator/internal/manifests/storage/options.go b/operator/internal/manifests/storage/options.go index 6693d2261e978..56e2b8e870df1 100644 --- a/operator/internal/manifests/storage/options.go +++ b/operator/internal/manifests/storage/options.go @@ -63,7 +63,6 @@ type AzureStorageConfig struct { Container string EndpointSuffix string Audience string - Region string WorkloadIdentity bool } diff --git a/operator/internal/manifests/storage/var.go b/operator/internal/manifests/storage/var.go index cbd944a821c34..1f236406bdd09 100644 --- a/operator/internal/manifests/storage/var.go +++ b/operator/internal/manifests/storage/var.go @@ -88,8 +88,6 @@ const ( KeyAzureStorageEndpointSuffix = "endpoint_suffix" // KeyAzureEnvironmentName is the secret data key for the Azure cloud environment name. KeyAzureEnvironmentName = "environment" - // KeyAzureRegion is the secret data key for storing the Azure cloud region. - KeyAzureRegion = "region" // KeyAzureAudience is the secret data key for customizing the audience used for the ServiceAccount token. KeyAzureAudience = "audience"
operator
Provide Azure region for managed credentials using environment variable (#11964)
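The fallback-region change above boils down to a defaulting step performed just before the provider spec is encoded: if no region was supplied, substitute a harmless placeholder so object construction never fails on the missing optional input. A minimal, runnable sketch of that pattern follows, assuming stand-in AzureEnvironment and ProviderSpec types rather than the operator's real cloudcredentialv1 types.

package main

import "fmt"

// azureFallbackRegion mirrors the constant in the diff; the types below are
// illustrative stand-ins, not the operator's real API types.
const azureFallbackRegion = "centralus"

type AzureEnvironment struct {
	ClientID, TenantID, SubscriptionID, Region string
}

type ProviderSpec struct {
	AzureRegion string
}

func buildProviderSpec(env AzureEnvironment) ProviderSpec {
	if env.Region == "" {
		// The region is only needed so the CredentialsRequest can be created;
		// per the diff's comment, it has no effect on the generated secret.
		env.Region = azureFallbackRegion
	}
	return ProviderSpec{AzureRegion: env.Region}
}

func main() {
	spec := buildProviderSpec(AzureEnvironment{ClientID: "test-client-id"})
	fmt.Println(spec.AzureRegion) // centralus
}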
a95e900a763eaaf37a9e7dcd372785b80f438d55
2022-09-14 22:44:50
Robert Jacob
operator: Skip enforcing matcher for certain tenants on OpenShift (#7037)
false
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md index 8b40fd3d800a1..ca623159edd65 100644 --- a/operator/CHANGELOG.md +++ b/operator/CHANGELOG.md @@ -1,5 +1,6 @@ ## Main +- [7037](https://github.com/grafana/loki/pull/7037) **xperimental**: Skip enforcing matcher for certain tenants on OpenShift - [7106](https://github.com/grafana/loki/pull/7106) **xperimental**: Manage global stream-based retention - [7092](https://github.com/grafana/loki/pull/7092) **aminesnow**: Configure kube-rbac-proxy sidecar to use Intermediate TLS security profile in OCP - [6870](https://github.com/grafana/loki/pull/6870) **aminesnow**: Configure gateway to honor the global tlsSecurityProfile on Openshift diff --git a/operator/internal/manifests/gateway_tenants_test.go b/operator/internal/manifests/gateway_tenants_test.go index c8782f1e7249a..19de9dd6a1af5 100644 --- a/operator/internal/manifests/gateway_tenants_test.go +++ b/operator/internal/manifests/gateway_tenants_test.go @@ -354,6 +354,8 @@ func TestConfigureDeploymentForMode(t *testing.T) { Image: "quay.io/observatorium/opa-openshift:latest", Args: []string{ "--log.level=warn", + "--opa.skip-tenants=audit,infrastructure", + "--opa.admin-groups=system:cluster-admins,cluster-admin,dedicated-admin", "--web.listen=:8082", "--web.internal.listen=:8083", "--web.healthchecks.url=http://localhost:8082", @@ -530,6 +532,8 @@ func TestConfigureDeploymentForMode(t *testing.T) { Image: "quay.io/observatorium/opa-openshift:latest", Args: []string{ "--log.level=warn", + "--opa.skip-tenants=audit,infrastructure", + "--opa.admin-groups=system:cluster-admins,cluster-admin,dedicated-admin", "--web.listen=:8082", "--web.internal.listen=:8083", "--web.healthchecks.url=http://localhost:8082", @@ -726,6 +730,8 @@ func TestConfigureDeploymentForMode(t *testing.T) { Image: "quay.io/observatorium/opa-openshift:latest", Args: []string{ "--log.level=warn", + "--opa.skip-tenants=audit,infrastructure", + "--opa.admin-groups=system:cluster-admins,cluster-admin,dedicated-admin", "--web.listen=:8082", "--web.internal.listen=:8083", "--web.healthchecks.url=http://localhost:8082", @@ -896,6 +902,8 @@ func TestConfigureDeploymentForMode(t *testing.T) { Image: "quay.io/observatorium/opa-openshift:latest", Args: []string{ "--log.level=warn", + "--opa.skip-tenants=audit,infrastructure", + "--opa.admin-groups=system:cluster-admins,cluster-admin,dedicated-admin", "--web.listen=:8082", "--web.internal.listen=:8083", "--web.healthchecks.url=http://localhost:8082", @@ -1069,6 +1077,8 @@ func TestConfigureDeploymentForMode(t *testing.T) { Image: "quay.io/observatorium/opa-openshift:latest", Args: []string{ "--log.level=warn", + "--opa.skip-tenants=audit,infrastructure", + "--opa.admin-groups=system:cluster-admins,cluster-admin,dedicated-admin", "--web.listen=:8082", "--web.internal.listen=:8083", "--web.healthchecks.url=http://localhost:8082", @@ -1262,6 +1272,8 @@ func TestConfigureDeploymentForMode(t *testing.T) { Image: "quay.io/observatorium/opa-openshift:latest", Args: []string{ "--log.level=warn", + "--opa.skip-tenants=audit,infrastructure", + "--opa.admin-groups=system:cluster-admins,cluster-admin,dedicated-admin", "--web.listen=:8082", "--web.internal.listen=:8083", "--web.healthchecks.url=http://localhost:8082", diff --git a/operator/internal/manifests/openshift/opa_openshift.go b/operator/internal/manifests/openshift/opa_openshift.go index c66093ee9b2a8..40faf39788182 100644 --- a/operator/internal/manifests/openshift/opa_openshift.go +++ 
b/operator/internal/manifests/openshift/opa_openshift.go @@ -37,6 +37,8 @@ func newOPAOpenShiftContainer(mode lokiv1.ModeType, secretVolumeName, tlsDir, ce uriScheme = corev1.URISchemeHTTP args = []string{ "--log.level=warn", + "--opa.skip-tenants=audit,infrastructure", + "--opa.admin-groups=system:cluster-admins,cluster-admin,dedicated-admin", fmt.Sprintf("--web.listen=:%d", GatewayOPAHTTPPort), fmt.Sprintf("--web.internal.listen=:%d", GatewayOPAInternalPort), fmt.Sprintf("--web.healthchecks.url=http://localhost:%d", GatewayOPAHTTPPort),
operator
Skip enforcing matcher for certain tenants on OpenShift (#7037)
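The gateway change above amounts to prepending two static authorization flags to the OPA sidecar's argument list before the port-derived flags are formatted. A small sketch of how such an argument list can be assembled, assuming illustrative constant names and port values (the real values live in the openshift manifest package):

package main

import "fmt"

const (
	// Values copied from the diff; the constant names are assumptions.
	opaSkipTenants         = "audit,infrastructure"
	opaAdminGroups         = "system:cluster-admins,cluster-admin,dedicated-admin"
	gatewayOPAHTTPPort     = 8082
	gatewayOPAInternalPort = 8083
)

// opaArgs builds the sidecar argument list: static policy flags first,
// then flags derived from the configured ports.
func opaArgs() []string {
	return []string{
		"--log.level=warn",
		fmt.Sprintf("--opa.skip-tenants=%s", opaSkipTenants),
		fmt.Sprintf("--opa.admin-groups=%s", opaAdminGroups),
		fmt.Sprintf("--web.listen=:%d", gatewayOPAHTTPPort),
		fmt.Sprintf("--web.internal.listen=:%d", gatewayOPAInternalPort),
		fmt.Sprintf("--web.healthchecks.url=http://localhost:%d", gatewayOPAHTTPPort),
	}
}

func main() {
	for _, a := range opaArgs() {
		fmt.Println(a)
	}
}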
e3e1f096ea50cdcea8d64e726827368af14702a8
2025-03-20 10:42:20
Sandeep Sukhani
feat: store details of processed streams while processing delete requests (#16825)
false
diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go index e6e05abcd5a9d..efd7fe7471719 100644 --- a/pkg/compactor/compactor.go +++ b/pkg/compactor/compactor.go @@ -13,14 +13,14 @@ import ( "unsafe" "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/model" - "github.com/grafana/dskit/backoff" "github.com/grafana/dskit/kv" "github.com/grafana/dskit/ring" "github.com/grafana/dskit/services" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" "github.com/grafana/loki/v3/pkg/analytics" "github.com/grafana/loki/v3/pkg/compactor/deletion" @@ -406,13 +406,10 @@ func (c *Compactor) initDeletes(objectClient client.ObjectClient, indexUpdatePro c.DeleteRequestsGRPCHandler = deletion.NewGRPCRequestHandler(c.deleteRequestsStore, limits) - c.deleteRequestsManager = deletion.NewDeleteRequestsManager( - c.deleteRequestsStore, - c.cfg.DeleteRequestCancelPeriod, - c.cfg.DeleteBatchSize, - limits, - r, - ) + c.deleteRequestsManager, err = deletion.NewDeleteRequestsManager(deletionWorkDir, c.deleteRequestsStore, c.cfg.DeleteRequestCancelPeriod, c.cfg.DeleteBatchSize, limits, r) + if err != nil { + return err + } c.expirationChecker = newExpirationChecker(retention.NewExpirationChecker(limits), c.deleteRequestsManager) return nil @@ -853,12 +850,12 @@ func newExpirationChecker(retentionExpiryChecker, deletionExpiryChecker retentio return &expirationChecker{retentionExpiryChecker, deletionExpiryChecker} } -func (e *expirationChecker) Expired(ref retention.ChunkEntry, now model.Time) (bool, filter.Func) { - if expired, nonDeletedIntervals := e.retentionExpiryChecker.Expired(ref, now); expired { +func (e *expirationChecker) Expired(userID []byte, chk retention.Chunk, lbls labels.Labels, seriesID []byte, tableName string, now model.Time) (bool, filter.Func) { + if expired, nonDeletedIntervals := e.retentionExpiryChecker.Expired(userID, chk, lbls, seriesID, tableName, now); expired { return expired, nonDeletedIntervals } - return e.deletionExpiryChecker.Expired(ref, now) + return e.deletionExpiryChecker.Expired(userID, chk, lbls, seriesID, tableName, now) } func (e *expirationChecker) MarkPhaseStarted() { @@ -885,8 +882,12 @@ func (e *expirationChecker) IntervalMayHaveExpiredChunks(interval model.Interval return e.retentionExpiryChecker.IntervalMayHaveExpiredChunks(interval, userID) || e.deletionExpiryChecker.IntervalMayHaveExpiredChunks(interval, userID) } -func (e *expirationChecker) DropFromIndex(ref retention.ChunkEntry, tableEndTime model.Time, now model.Time) bool { - return e.retentionExpiryChecker.DropFromIndex(ref, tableEndTime, now) || e.deletionExpiryChecker.DropFromIndex(ref, tableEndTime, now) +func (e *expirationChecker) DropFromIndex(userID []byte, chk retention.Chunk, labels labels.Labels, tableEndTime model.Time, now model.Time) bool { + return e.retentionExpiryChecker.DropFromIndex(userID, chk, labels, tableEndTime, now) || e.deletionExpiryChecker.DropFromIndex(userID, chk, labels, tableEndTime, now) +} + +func (e *expirationChecker) CanSkipSeries(userID []byte, lbls labels.Labels, seriesID []byte, seriesStart model.Time, tableName string, now model.Time) bool { + return e.retentionExpiryChecker.CanSkipSeries(userID, lbls, seriesID, seriesStart, tableName, now) && e.deletionExpiryChecker.CanSkipSeries(userID, lbls, seriesID, seriesStart, tableName, now) } func (c *Compactor) 
OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, _ string, instanceDesc ring.InstanceDesc) (ring.InstanceState, ring.Tokens) { diff --git a/pkg/compactor/deletion/delete_request.go b/pkg/compactor/deletion/delete_request.go index 5dea77bb32a71..4fc16c9fc66f0 100644 --- a/pkg/compactor/deletion/delete_request.go +++ b/pkg/compactor/deletion/delete_request.go @@ -109,14 +109,14 @@ func allMatch(matchers []*labels.Matcher, labels labels.Labels) bool { // IsDeleted checks if the given ChunkEntry will be deleted by this DeleteRequest. // It returns a filter.Func if the chunk is supposed to be deleted partially or the delete request contains line filters. // If the filter.Func is nil, the whole chunk is supposed to be deleted. -func (d *DeleteRequest) IsDeleted(entry retention.ChunkEntry) (bool, filter.Func) { - if d.UserID != unsafeGetString(entry.UserID) { +func (d *DeleteRequest) IsDeleted(userID []byte, lbls labels.Labels, chunk retention.Chunk) (bool, filter.Func) { + if d.UserID != unsafeGetString(userID) { return false, nil } if !intervalsOverlap(model.Interval{ - Start: entry.From, - End: entry.Through, + Start: chunk.From, + End: chunk.Through, }, model.Interval{ Start: d.StartTime, End: d.EndTime, @@ -137,16 +137,16 @@ func (d *DeleteRequest) IsDeleted(entry retention.ChunkEntry) (bool, filter.Func } } - if !labels.Selector(d.matchers).Matches(entry.Labels) { + if !labels.Selector(d.matchers).Matches(lbls) { return false, nil } - if d.StartTime <= entry.From && d.EndTime >= entry.Through && !d.logSelectorExpr.HasFilter() { + if d.StartTime <= chunk.From && d.EndTime >= chunk.Through && !d.logSelectorExpr.HasFilter() { // Delete request covers the whole chunk and there are no line filters in the logSelectorExpr so the whole chunk will be deleted return true, nil } - ff, err := d.FilterFunction(entry.Labels) + ff, err := d.FilterFunction(lbls) if err != nil { // The query in the delete request is checked when added to the table. // So this error should not occur. 
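IsDeleted's first gate is a pure interval-overlap test between the chunk's [From, Through] range and the request's [StartTime, EndTime]; only a full cover with no line filters deletes the chunk outright. intervalsOverlap itself is not shown in the diff, so the sketch below assumes the conventional closed-interval implementation consistent with how it is called:

package main

import "fmt"

type Interval struct{ Start, End int64 }

// intervalsOverlap reports whether two closed intervals intersect.
func intervalsOverlap(a, b Interval) bool {
	return a.Start <= b.End && b.Start <= a.End
}

func main() {
	chunk := Interval{Start: 10, End: 20}
	req := Interval{Start: 0, End: 15}

	// The chunk is a deletion candidate: the ranges intersect.
	fmt.Println(intervalsOverlap(chunk, req)) // true

	// But it is not fully covered, so only a partial (filtered) delete applies.
	fmt.Println(req.Start <= chunk.Start && req.End >= chunk.End) // false
}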
diff --git a/pkg/compactor/deletion/delete_request_test.go b/pkg/compactor/deletion/delete_request_test.go index d8b64f2031f2d..325a2c002f7e5 100644 --- a/pkg/compactor/deletion/delete_request_test.go +++ b/pkg/compactor/deletion/delete_request_test.go @@ -33,13 +33,9 @@ func TestDeleteRequest_IsDeleted(t *testing.T) { lblWithStructuredMetadataFilter := `{foo="bar", fizz="buzz"} | ping="pong"` lblWithLineAndStructuredMetadataFilter := `{foo="bar", fizz="buzz"} | ping="pong" |= "filter"` - chunkEntry := retention.ChunkEntry{ - ChunkRef: retention.ChunkRef{ - UserID: []byte(user1), - From: now.Add(-3 * time.Hour), - Through: now.Add(-time.Hour), - }, - Labels: mustParseLabel(lbl), + chunkEntry := retention.Chunk{ + From: now.Add(-3 * time.Hour), + Through: now.Add(-time.Hour), } type resp struct { @@ -275,7 +271,7 @@ func TestDeleteRequest_IsDeleted(t *testing.T) { t.Run(tc.name, func(t *testing.T) { require.NoError(t, tc.deleteRequest.SetQuery(tc.deleteRequest.Query)) tc.deleteRequest.Metrics = newDeleteRequestsManagerMetrics(nil) - isExpired, filterFunc := tc.deleteRequest.IsDeleted(chunkEntry) + isExpired, filterFunc := tc.deleteRequest.IsDeleted([]byte(user1), mustParseLabel(lbl), chunkEntry) require.Equal(t, tc.expectedResp.isDeleted, isExpired) if tc.expectedResp.expectedFilter == nil { require.Nil(t, filterFunc) diff --git a/pkg/compactor/deletion/delete_requests_manager.go b/pkg/compactor/deletion/delete_requests_manager.go index 230a67ce79882..55a566679bc38 100644 --- a/pkg/compactor/deletion/delete_requests_manager.go +++ b/pkg/compactor/deletion/delete_requests_manager.go @@ -2,12 +2,16 @@ package deletion import ( "context" + "encoding/json" "fmt" + "os" + "path/filepath" "sort" "sync" "time" "github.com/go-kit/log/level" + "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" @@ -21,6 +25,8 @@ import ( const ( statusSuccess = "success" statusFail = "fail" + + seriesProgressFilename = "series_progress.json" ) type userDeleteRequests struct { @@ -30,6 +36,7 @@ type userDeleteRequests struct { } type DeleteRequestsManager struct { + workingDir string deleteRequestsStore DeleteRequestsStore deleteRequestCancelPeriod time.Duration @@ -41,10 +48,12 @@ type DeleteRequestsManager struct { done chan struct{} batchSize int limits Limits + processedSeries map[string]struct{} } -func NewDeleteRequestsManager(store DeleteRequestsStore, deleteRequestCancelPeriod time.Duration, batchSize int, limits Limits, registerer prometheus.Registerer) *DeleteRequestsManager { +func NewDeleteRequestsManager(workingDir string, store DeleteRequestsStore, deleteRequestCancelPeriod time.Duration, batchSize int, limits Limits, registerer prometheus.Registerer) (*DeleteRequestsManager, error) { dm := &DeleteRequestsManager{ + workingDir: workingDir, deleteRequestsStore: store, deleteRequestCancelPeriod: deleteRequestCancelPeriod, deleteRequestsToProcess: map[string]*userDeleteRequests{}, @@ -52,6 +61,13 @@ func NewDeleteRequestsManager(store DeleteRequestsStore, deleteRequestCancelPeri done: make(chan struct{}), batchSize: batchSize, limits: limits, + processedSeries: map[string]struct{}{}, + } + + var err error + dm.processedSeries, err = loadSeriesProgress(workingDir) + if err != nil { + return nil, err } go dm.loop() @@ -60,7 +76,7 @@ func NewDeleteRequestsManager(store DeleteRequestsStore, deleteRequestCancelPeri level.Error(util_log.Logger).Log("msg", "failed to merge sharded requests", "err", err) } - 
return dm + return dm, nil } func (d *DeleteRequestsManager) loop() { @@ -76,6 +92,10 @@ func (d *DeleteRequestsManager) loop() { if err := d.updateMetrics(); err != nil { level.Error(util_log.Logger).Log("msg", "failed to update metrics", "err", err) } + + if err := d.storeSeriesProgress(); err != nil { + level.Error(util_log.Logger).Log("msg", "failed to store series progress", "err", err) + } case <-d.done: return } @@ -85,6 +105,27 @@ func (d *DeleteRequestsManager) loop() { func (d *DeleteRequestsManager) Stop() { close(d.done) d.wg.Wait() + if err := d.storeSeriesProgress(); err != nil { + level.Error(util_log.Logger).Log("msg", "failed to store series progress", "err", err) + } +} + +func (d *DeleteRequestsManager) storeSeriesProgress() error { + if len(d.processedSeries) == 0 { + return nil + } + + data, err := json.Marshal(d.processedSeries) + if err != nil { + return errors.Wrap(err, "failed to json encode series progress") + } + + err = os.WriteFile(filepath.Join(d.workingDir, seriesProgressFilename), data, 0640) + if err != nil { + return errors.Wrap(err, "failed to store series progress to the file") + } + + return nil } func (d *DeleteRequestsManager) updateMetrics() error { @@ -180,6 +221,13 @@ func (d *DeleteRequestsManager) loadDeleteRequestsToProcess() error { break } + if deleteRequest.logSelectorExpr == nil { + err := deleteRequest.SetQuery(deleteRequest.Query) + if err != nil { + return errors.Wrapf(err, "failed to init log selector expr for request_id=%s, user_id=%s", deleteRequest.RequestID, deleteRequest.UserID) + } + } + level.Info(util_log.Logger).Log( "msg", "Started processing delete request for user", "delete_request_id", deleteRequest.RequestID, @@ -278,14 +326,39 @@ func (d *DeleteRequestsManager) shouldProcessRequest(dr DeleteRequest) (bool, er return mode == deletionmode.FilterAndDelete, nil } -func (d *DeleteRequestsManager) Expired(ref retention.ChunkEntry, _ model.Time) (bool, filter.Func) { +func (d *DeleteRequestsManager) CanSkipSeries(userID []byte, lbls labels.Labels, seriesID []byte, _ model.Time, tableName string, _ model.Time) bool { + userIDStr := unsafeGetString(userID) + + d.deleteRequestsToProcessMtx.Lock() + defer d.deleteRequestsToProcessMtx.Unlock() + + if d.deleteRequestsToProcess[userIDStr] == nil { + return true + } + + for _, deleteRequest := range d.deleteRequestsToProcess[userIDStr].requests { + // if the delete request does not touch the series, continue looking for other matching requests + if !labels.Selector(deleteRequest.matchers).Matches(lbls) { + continue + } + + // The delete request touches the series. Do not skip if the series is not processed yet. 
+ if _, ok := d.processedSeries[buildProcessedSeriesKey(deleteRequest.RequestID, seriesID, tableName)]; !ok { + return false + } + } + + return true +} + +func (d *DeleteRequestsManager) Expired(userID []byte, chk retention.Chunk, lbls labels.Labels, seriesID []byte, tableName string, _ model.Time) (bool, filter.Func) { d.deleteRequestsToProcessMtx.Lock() defer d.deleteRequestsToProcessMtx.Unlock() - userIDStr := unsafeGetString(ref.UserID) + userIDStr := unsafeGetString(userID) if d.deleteRequestsToProcess[userIDStr] == nil || !intervalsOverlap(d.deleteRequestsToProcess[userIDStr].requestsInterval, model.Interval{ - Start: ref.From, - End: ref.Through, + Start: chk.From, + End: chk.Through, }) { return false, nil } @@ -293,7 +366,10 @@ func (d *DeleteRequestsManager) Expired(ref retention.ChunkEntry, _ model.Time) var filterFuncs []filter.Func for _, deleteRequest := range d.deleteRequestsToProcess[userIDStr].requests { - isDeleted, ff := deleteRequest.IsDeleted(ref) + if _, ok := d.processedSeries[buildProcessedSeriesKey(deleteRequest.RequestID, seriesID, tableName)]; ok { + continue + } + isDeleted, ff := deleteRequest.IsDeleted(userID, lbls, chk) if !isDeleted { continue } @@ -304,9 +380,9 @@ func (d *DeleteRequestsManager) Expired(ref retention.ChunkEntry, _ model.Time) "delete_request_id", deleteRequest.RequestID, "sequence_num", deleteRequest.SequenceNum, "user", deleteRequest.UserID, - "chunkID", string(ref.ChunkID), + "chunkID", string(chk.ChunkID), ) - d.metrics.deleteRequestsChunksSelectedTotal.WithLabelValues(string(ref.UserID)).Inc() + d.metrics.deleteRequestsChunksSelectedTotal.WithLabelValues(string(userID)).Inc() return true, nil } filterFuncs = append(filterFuncs, ff) @@ -316,7 +392,7 @@ func (d *DeleteRequestsManager) Expired(ref retention.ChunkEntry, _ model.Time) return false, nil } - d.metrics.deleteRequestsChunksSelectedTotal.WithLabelValues(string(ref.UserID)).Inc() + d.metrics.deleteRequestsChunksSelectedTotal.WithLabelValues(string(userID)).Inc() return true, func(ts time.Time, s string, structuredMetadata ...labels.Label) bool { for _, ff := range filterFuncs { if ff(ts, s, structuredMetadata...) 
{ @@ -401,6 +477,11 @@ func (d *DeleteRequestsManager) MarkPhaseFinished() { if err := d.deleteRequestsStore.MergeShardedRequests(context.Background()); err != nil { level.Error(util_log.Logger).Log("msg", "failed to merge sharded requests", "err", err) } + + d.processedSeries = map[string]struct{}{} + if err := os.Remove(filepath.Join(d.workingDir, seriesProgressFilename)); err != nil && !os.IsNotExist(err) { + level.Error(util_log.Logger).Log("msg", "failed to remove series progress file", "err", err) + } } func (d *DeleteRequestsManager) IntervalMayHaveExpiredChunks(_ model.Interval, userID string) bool { @@ -417,10 +498,35 @@ func (d *DeleteRequestsManager) IntervalMayHaveExpiredChunks(_ model.Interval, u return len(d.deleteRequestsToProcess) != 0 } -func (d *DeleteRequestsManager) DropFromIndex(_ retention.ChunkEntry, _ model.Time, _ model.Time) bool { +func (d *DeleteRequestsManager) DropFromIndex(_ []byte, _ retention.Chunk, _ labels.Labels, _ model.Time, _ model.Time) bool { return false } +func (d *DeleteRequestsManager) MarkSeriesAsProcessed(userID, seriesID []byte, lbls labels.Labels, tableName string) error { + userIDStr := unsafeGetString(userID) + if d.deleteRequestsToProcess[userIDStr] == nil { + return nil + } + + for _, req := range d.deleteRequestsToProcess[userIDStr].requests { + // if the delete request does not touch the series, do not waste space in storing the marker + if !labels.Selector(req.matchers).Matches(lbls) { + continue + } + processedSeriesKey := buildProcessedSeriesKey(req.RequestID, seriesID, tableName) + if _, ok := d.processedSeries[processedSeriesKey]; ok { + return fmt.Errorf("series already marked as processed for [table: %s, series: %s, user: %s, req: %s]", tableName, seriesID, userID, req.RequestID) + } + d.processedSeries[processedSeriesKey] = struct{}{} + } + + return nil +} + +func buildProcessedSeriesKey(requestID string, seriesID []byte, tableName string) string { + return fmt.Sprintf("%s/%s/%s", requestID, tableName, seriesID) +} + func getMaxRetentionInterval(userID string, limits Limits) time.Duration { maxRetention := model.Duration(limits.RetentionPeriod(userID)) if maxRetention == 0 { @@ -438,3 +544,19 @@ func getMaxRetentionInterval(userID string, limits Limits) time.Duration { return time.Duration(maxRetention) } + +func loadSeriesProgress(workingDir string) (map[string]struct{}, error) { + data, err := os.ReadFile(filepath.Join(workingDir, seriesProgressFilename)) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + + processedSeries := map[string]struct{}{} + if len(data) > 0 { + if err := json.Unmarshal(data, &processedSeries); err != nil { + return nil, err + } + } + + return processedSeries, nil +} diff --git a/pkg/compactor/deletion/delete_requests_manager_test.go b/pkg/compactor/deletion/delete_requests_manager_test.go index 24123a703ed43..f586cdf0e1923 100644 --- a/pkg/compactor/deletion/delete_requests_manager_test.go +++ b/pkg/compactor/deletion/delete_requests_manager_test.go @@ -2,6 +2,7 @@ package deletion import ( "context" + "path/filepath" "strings" "testing" "time" @@ -31,13 +32,9 @@ func TestDeleteRequestsManager_Expired(t *testing.T) { streamSelectorWithStructuredMetadataFilters := lblFoo.String() + `| ping="pong"` streamSelectorWithLineAndStructuredMetadataFilters := lblFoo.String() + `| ping="pong" |= "fizz"` - chunkEntry := retention.ChunkEntry{ - ChunkRef: retention.ChunkRef{ - UserID: []byte(testUserID), - From: now.Add(-12 * time.Hour), - Through: now.Add(-time.Hour), - }, - Labels: lblFoo, + chunkEntry := retention.Chunk{ +
From: now.Add(-12 * time.Hour), + Through: now.Add(-time.Hour), } for _, tc := range []struct { @@ -936,10 +933,11 @@ func TestDeleteRequestsManager_Expired(t *testing.T) { } { t.Run(tc.name, func(t *testing.T) { mockDeleteRequestsStore := &mockDeleteRequestsStore{deleteRequests: tc.deleteRequestsFromStore} - mgr := NewDeleteRequestsManager(mockDeleteRequestsStore, time.Hour, tc.batchSize, &fakeLimits{defaultLimit: limit{ + mgr, err := NewDeleteRequestsManager(t.TempDir(), mockDeleteRequestsStore, time.Hour, tc.batchSize, &fakeLimits{defaultLimit: limit{ retentionPeriod: 7 * 24 * time.Hour, deletionMode: tc.deletionMode.String(), }}, nil) + require.NoError(t, err) require.NoError(t, mgr.loadDeleteRequestsToProcess()) for _, deleteRequests := range mgr.deleteRequestsToProcess { @@ -948,7 +946,7 @@ func TestDeleteRequestsManager_Expired(t *testing.T) { } } - isExpired, filterFunc := mgr.Expired(chunkEntry, model.Now()) + isExpired, filterFunc := mgr.Expired([]byte(testUserID), chunkEntry, lblFoo, nil, "", model.Now()) require.Equal(t, tc.expectedResp.isExpired, isExpired) if tc.expectedResp.expectedFilter == nil { require.Nil(t, filterFunc) @@ -1007,7 +1005,8 @@ func TestDeleteRequestsManager_IntervalMayHaveExpiredChunks(t *testing.T) { } for _, tc := range tt { - mgr := NewDeleteRequestsManager(&mockDeleteRequestsStore{deleteRequests: tc.deleteRequestsFromStore}, time.Hour, 70, &fakeLimits{defaultLimit: limit{deletionMode: deletionmode.FilterAndDelete.String()}}, nil) + mgr, err := NewDeleteRequestsManager(t.TempDir(), &mockDeleteRequestsStore{deleteRequests: tc.deleteRequestsFromStore}, time.Hour, 70, &fakeLimits{defaultLimit: limit{deletionMode: deletionmode.FilterAndDelete.String()}}, nil) + require.NoError(t, err) require.NoError(t, mgr.loadDeleteRequestsToProcess()) interval := model.Interval{Start: 300, End: 600} @@ -1015,6 +1014,208 @@ func TestDeleteRequestsManager_IntervalMayHaveExpiredChunks(t *testing.T) { } } +func TestDeleteRequestsManager_SeriesProgress(t *testing.T) { + user1 := []byte("user1") + user2 := []byte("user2") + lblFooBar := mustParseLabel(`{foo="bar"}`) + lblFizzBuzz := mustParseLabel(`{fizz="buzz"}`) + deleteRequestsStore := &mockDeleteRequestsStore{deleteRequests: []DeleteRequest{ + {RequestID: "1", Query: lblFooBar.String(), UserID: string(user1), StartTime: 0, EndTime: 100, Status: StatusReceived}, + {RequestID: "2", Query: lblFooBar.String(), UserID: string(user2), StartTime: 0, EndTime: 100, Status: StatusReceived}, + }} + type markSeriesProcessed struct { + userID, seriesID []byte + lbls labels.Labels + tableName string + } + + type chunkEntry struct { + userID []byte + chk retention.Chunk + lbls labels.Labels + seriesID []byte + tableName string + } + + for _, tc := range []struct { + name string + seriesToMarkProcessed []markSeriesProcessed + chunkEntry chunkEntry + expSkipSeries bool + expExpired bool + }{ + { + name: "no series marked as processed", + chunkEntry: chunkEntry{ + userID: user1, + chk: retention.Chunk{ + From: 10, + Through: 20, + }, + lbls: lblFooBar, + seriesID: []byte(lblFooBar.String()), + tableName: "t1", + }, + expSkipSeries: false, + expExpired: true, + }, + { + name: "chunk's series marked as processed", + seriesToMarkProcessed: []markSeriesProcessed{ + { + userID: user1, + seriesID: []byte(lblFooBar.String()), + lbls: lblFooBar, + tableName: "t1", + }, + }, + chunkEntry: chunkEntry{ + userID: user1, + chk: retention.Chunk{ + From: 10, + Through: 20, + }, + lbls: lblFooBar, + seriesID: []byte(lblFooBar.String()), + tableName: "t1", 
+ }, + expSkipSeries: true, + expExpired: false, + }, + { + name: "a different series marked as processed", + seriesToMarkProcessed: []markSeriesProcessed{ + { + userID: user1, + seriesID: []byte(lblFizzBuzz.String()), + lbls: lblFizzBuzz, + tableName: "t1", + }, + }, + chunkEntry: chunkEntry{ + userID: user1, + chk: retention.Chunk{ + From: 10, + Through: 20, + }, + lbls: lblFooBar, + seriesID: []byte(lblFooBar.String()), + tableName: "t1", + }, + expSkipSeries: false, + expExpired: true, + }, + { + name: "a different users series marked as processed", + seriesToMarkProcessed: []markSeriesProcessed{ + { + userID: user2, + seriesID: []byte(lblFooBar.String()), + lbls: lblFooBar, + tableName: "t1", + }, + }, + chunkEntry: chunkEntry{ + userID: user1, + chk: retention.Chunk{ + From: 10, + Through: 20, + }, + lbls: lblFooBar, + seriesID: []byte(lblFooBar.String()), + tableName: "t1", + }, + expSkipSeries: false, + expExpired: true, + }, + { + name: "series from different table marked as processed", + seriesToMarkProcessed: []markSeriesProcessed{ + { + userID: user1, + seriesID: []byte(lblFooBar.String()), + lbls: lblFooBar, + tableName: "t2", + }, + }, + chunkEntry: chunkEntry{ + userID: user1, + chk: retention.Chunk{ + From: 10, + Through: 20, + }, + lbls: lblFooBar, + seriesID: []byte(lblFooBar.String()), + tableName: "t1", + }, + expSkipSeries: false, + expExpired: true, + }, + { + name: "multiple series marked as processed", + seriesToMarkProcessed: []markSeriesProcessed{ + { + userID: user1, + seriesID: []byte(lblFooBar.String()), + lbls: lblFooBar, + tableName: "t1", + }, + { + userID: user1, + seriesID: []byte(lblFooBar.String()), + lbls: lblFooBar, + tableName: "t2", + }, + { + userID: user2, + seriesID: []byte(lblFooBar.String()), + lbls: lblFooBar, + tableName: "t1", + }, + }, + chunkEntry: chunkEntry{ + userID: user1, + chk: retention.Chunk{ + From: 10, + Through: 20, + }, + lbls: lblFooBar, + seriesID: []byte(lblFooBar.String()), + tableName: "t1", + }, + expSkipSeries: true, + expExpired: false, + }, + } { + t.Run(tc.name, func(t *testing.T) { + workingDir := t.TempDir() + mgr, err := NewDeleteRequestsManager(workingDir, deleteRequestsStore, time.Hour, 70, &fakeLimits{defaultLimit: limit{deletionMode: deletionmode.FilterAndDelete.String()}}, nil) + require.NoError(t, err) + require.NoError(t, mgr.loadDeleteRequestsToProcess()) + + for _, m := range tc.seriesToMarkProcessed { + require.NoError(t, mgr.MarkSeriesAsProcessed(m.userID, m.seriesID, m.lbls, m.tableName)) + } + + require.Equal(t, tc.expSkipSeries, mgr.CanSkipSeries(tc.chunkEntry.userID, tc.chunkEntry.lbls, tc.chunkEntry.seriesID, 0, tc.chunkEntry.tableName, 0)) + isExpired, _ := mgr.Expired(tc.chunkEntry.userID, tc.chunkEntry.chk, tc.chunkEntry.lbls, tc.chunkEntry.seriesID, tc.chunkEntry.tableName, 0) + require.Equal(t, tc.expExpired, isExpired) + + // see if stopping the manager properly retains the progress and loads back when initialized + storedSeriesProgress := mgr.processedSeries + mgr.Stop() + mgr, err = NewDeleteRequestsManager(workingDir, deleteRequestsStore, time.Hour, 70, &fakeLimits{defaultLimit: limit{deletionMode: deletionmode.FilterAndDelete.String()}}, nil) + require.NoError(t, err) + require.Equal(t, storedSeriesProgress, mgr.processedSeries) + + // when the mark phase ends, series progress should get cleared + mgr.MarkPhaseFinished() + require.Len(t, mgr.processedSeries, 0) + require.NoFileExists(t, filepath.Join(workingDir, seriesProgressFilename)) + }) + } +} + type storeAddReqDetails struct { userID, 
query string startTime, endTime model.Time diff --git a/pkg/compactor/retention/expiration.go b/pkg/compactor/retention/expiration.go index a1d2415aceb95..b1e9fe0f5e831 100644 --- a/pkg/compactor/retention/expiration.go +++ b/pkg/compactor/retention/expiration.go @@ -23,13 +23,14 @@ type IntervalFilter struct { } type ExpirationChecker interface { - Expired(ref ChunkEntry, now model.Time) (bool, filter.Func) + Expired(userID []byte, chk Chunk, lbls labels.Labels, seriesID []byte, tableName string, now model.Time) (bool, filter.Func) IntervalMayHaveExpiredChunks(interval model.Interval, userID string) bool MarkPhaseStarted() MarkPhaseFailed() MarkPhaseTimedOut() MarkPhaseFinished() - DropFromIndex(ref ChunkEntry, tableEndTime model.Time, now model.Time) bool + DropFromIndex(userID []byte, chk Chunk, labels labels.Labels, tableEndTime model.Time, now model.Time) bool + CanSkipSeries(userID []byte, lbls labels.Labels, seriesID []byte, seriesStart model.Time, tableName string, now model.Time) bool } type expirationChecker struct { @@ -52,22 +53,22 @@ func NewExpirationChecker(limits Limits) ExpirationChecker { } // Expired tells if a ref chunk is expired based on retention rules. -func (e *expirationChecker) Expired(ref ChunkEntry, now model.Time) (bool, filter.Func) { - userID := unsafeGetString(ref.UserID) - period := e.tenantsRetention.RetentionPeriodFor(userID, ref.Labels) +func (e *expirationChecker) Expired(userID []byte, chk Chunk, lbls labels.Labels, _ []byte, _ string, now model.Time) (bool, filter.Func) { + userIDStr := unsafeGetString(userID) + period := e.tenantsRetention.RetentionPeriodFor(userIDStr, lbls) // The 0 value should disable retention if period <= 0 { return false, nil } - return now.Sub(ref.Through) > period, nil + return now.Sub(chk.Through) > period, nil } // DropFromIndex tells if it is okay to drop the chunk entry from index table. // We check if tableEndTime is out of retention period, calculated using the labels from the chunk. // If the tableEndTime is out of retention then we can drop the chunk entry without removing the chunk from the store. -func (e *expirationChecker) DropFromIndex(ref ChunkEntry, tableEndTime model.Time, now model.Time) bool { - userID := unsafeGetString(ref.UserID) - period := e.tenantsRetention.RetentionPeriodFor(userID, ref.Labels) +func (e *expirationChecker) DropFromIndex(userID []byte, _ Chunk, labels labels.Labels, tableEndTime model.Time, now model.Time) bool { + userIDStr := unsafeGetString(userID) + period := e.tenantsRetention.RetentionPeriodFor(userIDStr, labels) // The 0 value should disable retention if period <= 0 { return false @@ -84,6 +85,16 @@ func (e *expirationChecker) MarkPhaseStarted() { func (e *expirationChecker) MarkPhaseFailed() {} func (e *expirationChecker) MarkPhaseTimedOut() {} func (e *expirationChecker) MarkPhaseFinished() {} +func (e *expirationChecker) CanSkipSeries(userID []byte, lbls labels.Labels, _ []byte, seriesStart model.Time, _ string, now model.Time) bool { + userIDStr := unsafeGetString(userID) + period := e.tenantsRetention.RetentionPeriodFor(userIDStr, lbls) + // The 0 value should disable retention + if period <= 0 { + return true + } + + return now.Sub(seriesStart) < period +} func (e *expirationChecker) IntervalMayHaveExpiredChunks(interval model.Interval, userID string) bool { // when userID is empty, it means we are checking for common index table. In this case we use e.overallLatestRetentionStartTime. 
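The retention-side CanSkipSeries shown above reduces to one comparison: a series is skippable when its oldest data still falls inside the tenant's retention period, and always skippable when retention is disabled (a zero period). A self-contained sketch with the per-tenant limits stubbed as a plain map, noting that the real checker resolves limits through TenantsRetention and per-stream overrides:

package main

import (
	"fmt"
	"time"
)

// Stubbed per-tenant retention limits; the tenant IDs and values here just
// mirror the test fixtures in the diff.
var retentionByTenant = map[string]time.Duration{
	"1": 0,              // retention disabled
	"2": 24 * time.Hour, // 24h override
}

func canSkipSeries(userID string, seriesStart, now time.Time) bool {
	period := retentionByTenant[userID]
	if period <= 0 {
		return true // retention disabled: nothing can expire, always skip
	}
	// Skip only while the series' oldest chunk is still within retention.
	return now.Sub(seriesStart) < period
}

func main() {
	now := time.Now()
	fmt.Println(canSkipSeries("1", now.Add(-48*time.Hour), now)) // true: no retention
	fmt.Println(canSkipSeries("2", now.Add(-1*time.Hour), now))  // true: inside retention
	fmt.Println(canSkipSeries("2", now.Add(-48*time.Hour), now)) // false: expired data
}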
@@ -109,7 +120,7 @@ func NeverExpiringExpirationChecker(_ Limits) ExpirationChecker { type neverExpiringExpirationChecker struct{} -func (e *neverExpiringExpirationChecker) Expired(_ ChunkEntry, _ model.Time) (bool, filter.Func) { +func (e *neverExpiringExpirationChecker) Expired(_ []byte, _ Chunk, _ labels.Labels, _ []byte, _ string, _ model.Time) (bool, filter.Func) { return false, nil } func (e *neverExpiringExpirationChecker) IntervalMayHaveExpiredChunks(_ model.Interval, _ string) bool { @@ -119,9 +130,12 @@ func (e *neverExpiringExpirationChecker) MarkPhaseStarted() {} func (e *neverExpiringExpirationChecker) MarkPhaseFailed() {} func (e *neverExpiringExpirationChecker) MarkPhaseTimedOut() {} func (e *neverExpiringExpirationChecker) MarkPhaseFinished() {} -func (e *neverExpiringExpirationChecker) DropFromIndex(_ ChunkEntry, _ model.Time, _ model.Time) bool { +func (e *neverExpiringExpirationChecker) DropFromIndex(_ []byte, _ Chunk, _ labels.Labels, _ model.Time, _ model.Time) bool { return false } +func (e *neverExpiringExpirationChecker) CanSkipSeries(_ []byte, _ labels.Labels, _ []byte, _ model.Time, _ string, _ model.Time) bool { + return true +} type TenantsRetention struct { limits Limits diff --git a/pkg/compactor/retention/expiration_test.go b/pkg/compactor/retention/expiration_test.go index 09e04c4517082..154b2eebc845a 100644 --- a/pkg/compactor/retention/expiration_test.go +++ b/pkg/compactor/retention/expiration_test.go @@ -108,20 +108,22 @@ func Test_expirationChecker_Expired(t *testing.T) { e := NewExpirationChecker(o) tests := []struct { - name string - ref ChunkEntry - want bool + name string + userID string + labels string + chunk Chunk + want bool }{ - {"expired tenant", newChunkEntry("1", `{foo="buzz"}`, model.Now().Add(-3*time.Hour), model.Now().Add(-2*time.Hour)), true}, - {"just expired tenant", newChunkEntry("1", `{foo="buzz"}`, model.Now().Add(-3*time.Hour), model.Now().Add(-1*time.Hour+(10*time.Millisecond))), false}, - {"not expired tenant", newChunkEntry("1", `{foo="buzz"}`, model.Now().Add(-3*time.Hour), model.Now().Add(-30*time.Minute)), false}, - {"not expired tenant by far", newChunkEntry("2", `{foo="buzz"}`, model.Now().Add(-72*time.Hour), model.Now().Add(-3*time.Hour)), false}, - {"expired stream override", newChunkEntry("2", `{foo="bar"}`, model.Now().Add(-12*time.Hour), model.Now().Add(-10*time.Hour)), true}, - {"non expired stream override", newChunkEntry("1", `{foo="bar"}`, model.Now().Add(-3*time.Hour), model.Now().Add(-90*time.Minute)), false}, + {"expired tenant", "1", `{foo="buzz"}`, Chunk{From: model.Now().Add(-3 * time.Hour), Through: model.Now().Add(-2 * time.Hour)}, true}, + {"just expired tenant", "1", `{foo="buzz"}`, Chunk{From: model.Now().Add(-3 * time.Hour), Through: model.Now().Add(-1*time.Hour + (10 * time.Millisecond))}, false}, + {"not expired tenant", "1", `{foo="buzz"}`, Chunk{From: model.Now().Add(-3 * time.Hour), Through: model.Now().Add(-30 * time.Minute)}, false}, + {"not expired tenant by far", "2", `{foo="buzz"}`, Chunk{From: model.Now().Add(-72 * time.Hour), Through: model.Now().Add(-3 * time.Hour)}, false}, + {"expired stream override", "2", `{foo="bar"}`, Chunk{From: model.Now().Add(-12 * time.Hour), Through: model.Now().Add(-10 * time.Hour)}, true}, + {"non expired stream override", "1", `{foo="bar"}`, Chunk{From: model.Now().Add(-3 * time.Hour), Through: model.Now().Add(-90 * time.Minute)}, false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - actual, nonDeletedIntervalFilters := e.Expired(tt.ref, 
model.Now()) + actual, nonDeletedIntervalFilters := e.Expired([]byte(tt.userID), tt.chunk, mustParseLabels(tt.labels), nil, "", model.Now()) require.Equal(t, tt.want, actual) require.Nil(t, nonDeletedIntervalFilters) }) @@ -183,18 +185,20 @@ func Test_expirationChecker_Expired_zeroValue(t *testing.T) { require.NoError(t, err) e := NewExpirationChecker(o) tests := []struct { - name string - ref ChunkEntry - want bool + name string + userID string + labels string + chunk Chunk + want bool }{ - {"tenant with no override should not delete", newChunkEntry("1", `{foo="buzz"}`, model.Now().Add(-3*time.Hour), model.Now().Add(-2*time.Hour)), false}, - {"tenant with no override, REALLY old chunk should not delete", newChunkEntry("1", `{foo="buzz"}`, model.Now().Add(-10000*time.Hour+(1*time.Hour)), model.Now().Add(-10000*time.Hour)), false}, - {"tenant with override chunk less than retention should not delete", newChunkEntry("2", `{foo="buzz"}`, model.Now().Add(-3*time.Hour), model.Now().Add(-2*time.Hour)), false}, - {"tenant with override should delete", newChunkEntry("2", `{foo="buzz"}`, model.Now().Add(-31*time.Hour), model.Now().Add(-30*time.Hour)), true}, + {"tenant with no override should not delete", "1", `{foo="buzz"}`, Chunk{From: model.Now().Add(-3 * time.Hour), Through: model.Now().Add(-2 * time.Hour)}, false}, + {"tenant with no override, REALLY old chunk should not delete", "1", `{foo="buzz"}`, Chunk{From: model.Now().Add(-10000*time.Hour + (1 * time.Hour)), Through: model.Now().Add(-10000 * time.Hour)}, false}, + {"tenant with override chunk less than retention should not delete", "2", `{foo="buzz"}`, Chunk{From: model.Now().Add(-3 * time.Hour), Through: model.Now().Add(-2 * time.Hour)}, false}, + {"tenant with override should delete", "2", `{foo="buzz"}`, Chunk{From: model.Now().Add(-31 * time.Hour), Through: model.Now().Add(-30 * time.Hour)}, true}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - actual, nonDeletedIntervalFilters := e.Expired(tt.ref, model.Now()) + actual, nonDeletedIntervalFilters := e.Expired([]byte(tt.userID), tt.chunk, mustParseLabels(tt.labels), nil, "", model.Now()) require.Equal(t, tt.want, actual) require.Nil(t, nonDeletedIntervalFilters) }) @@ -229,17 +233,19 @@ func Test_expirationChecker_Expired_zeroValueOverride(t *testing.T) { e := NewExpirationChecker(o) tests := []struct { - name string - ref ChunkEntry - want bool + name string + userID string + labels string + chunk Chunk + want bool }{ - {"tenant with no override should delete", newChunkEntry("1", `{foo="buzz"}`, model.Now().Add(-31*time.Hour), model.Now().Add(-30*time.Hour)), true}, - {"tenant with override should not delete", newChunkEntry("2", `{foo="buzz"}`, model.Now().Add(-31*time.Hour), model.Now().Add(-30*time.Hour)), false}, - {"tenant with zero value without unit should not delete", newChunkEntry("3", `{foo="buzz"}`, model.Now().Add(-31*time.Hour), model.Now().Add(-30*time.Hour)), false}, + {"tenant with no override should delete", "1", `{foo="buzz"}`, Chunk{From: model.Now().Add(-31 * time.Hour), Through: model.Now().Add(-30 * time.Hour)}, true}, + {"tenant with override should not delete", "2", `{foo="buzz"}`, Chunk{From: model.Now().Add(-31 * time.Hour), Through: model.Now().Add(-30 * time.Hour)}, false}, + {"tenant with zero value without unit should not delete", "3", `{foo="buzz"}`, Chunk{From: model.Now().Add(-31 * time.Hour), Through: model.Now().Add(-30 * time.Hour)}, false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - actual, 
nonDeletedIntervalFilters := e.Expired(tt.ref, model.Now()) + actual, nonDeletedIntervalFilters := e.Expired([]byte(tt.userID), tt.chunk, mustParseLabels(tt.labels), nil, "", model.Now()) require.Equal(t, tt.want, actual) require.Nil(t, nonDeletedIntervalFilters) }) @@ -267,17 +273,55 @@ func Test_expirationChecker_DropFromIndex_zeroValue(t *testing.T) { chunkThrough := model.Now().Add(-2 * time.Hour) tests := []struct { name string - ref ChunkEntry + userID string + labels string + chunk Chunk tableEndTime model.Time want bool }{ - {"tenant with no override should not delete", newChunkEntry("1", `{foo="buzz"}`, chunkFrom, chunkThrough), model.Now().Add(-48 * time.Hour), false}, - {"tenant with override tableEndTime within retention period should not delete", newChunkEntry("2", `{foo="buzz"}`, chunkFrom, chunkThrough), model.Now().Add(-1 * time.Hour), false}, - {"tenant with override should delete", newChunkEntry("2", `{foo="buzz"}`, chunkFrom, chunkThrough), model.Now().Add(-48 * time.Hour), true}, + {"tenant with no override should not delete", "1", `{foo="buzz"}`, Chunk{From: chunkFrom, Through: chunkThrough}, model.Now().Add(-48 * time.Hour), false}, + {"tenant with override tableEndTime within retention period should not delete", "2", `{foo="buzz"}`, Chunk{From: chunkFrom, Through: chunkThrough}, model.Now().Add(-1 * time.Hour), false}, + {"tenant with override should delete", "2", `{foo="buzz"}`, Chunk{From: chunkFrom, Through: chunkThrough}, model.Now().Add(-48 * time.Hour), true}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - actual := e.DropFromIndex(tt.ref, tt.tableEndTime, model.Now()) + actual := e.DropFromIndex([]byte(tt.userID), tt.chunk, mustParseLabels(tt.labels), tt.tableEndTime, model.Now()) + require.Equal(t, tt.want, actual) + }) + } +} + +func Test_expirationChecker_CanSkipSeries(t *testing.T) { + // Default retention should be zero + d := defaultLimitsTestConfig() + + // Override tenant 2 to have 24 hour retention + tl := defaultLimitsTestConfig() + oneDay, _ := model.ParseDuration("24h") + tl.RetentionPeriod = oneDay + f := fakeOverrides{ + tenantLimits: map[string]*validation.Limits{ + "2": &tl, + }, + } + o, err := overridesTestConfig(d, f) + require.NoError(t, err) + e := NewExpirationChecker(o) + + tests := []struct { + name string + userID string + labels string + seriesStart model.Time + want bool + }{ + {"tenant with no override should skip series", "1", `{foo="buzz"}`, model.Now().Add(-48 * time.Hour), true}, + {"tenant with override, seriesStart within retention period should skip series", "2", `{foo="buzz"}`, model.Now().Add(-1 * time.Hour), true}, + {"tenant with override, seriesStart outside retention period should not skip series", "2", `{foo="buzz"}`, model.Now().Add(-48 * time.Hour), false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := e.CanSkipSeries([]byte(tt.userID), mustParseLabels(tt.labels), nil, tt.seriesStart, "", model.Now()) require.Equal(t, tt.want, actual) }) } diff --git a/pkg/compactor/retention/retention.go b/pkg/compactor/retention/retention.go index 1e7be7b81d27c..9cdfe0d85b9b2 100644 --- a/pkg/compactor/retention/retention.go +++ b/pkg/compactor/retention/retention.go @@ -31,30 +31,57 @@ const ( MarkersFolder = "markers" ) -type ChunkRef struct { - UserID []byte - SeriesID []byte - ChunkID []byte - From model.Time - Through model.Time +type Chunk struct { + ChunkID []byte + From model.Time + Through model.Time } -func (c ChunkRef) String() string { - return 
fmt.Sprintf("UserID: %s , SeriesID: %s , Time: [%s,%s]", c.UserID, c.SeriesID, c.From, c.Through) +func (c Chunk) String() string { + return fmt.Sprintf("ChunkID: %s", c.ChunkID) } -type ChunkEntry struct { - ChunkRef - Labels labels.Labels +type Series struct { + seriesID, userID []byte + labels labels.Labels + chunks []Chunk } -type ChunkEntryCallback func(ChunkEntry) (deleteChunk bool, err error) +func (s *Series) SeriesID() []byte { + return s.seriesID +} + +func (s *Series) UserID() []byte { + return s.userID +} + +func (s *Series) Labels() labels.Labels { + return s.labels +} + +func (s *Series) Chunks() []Chunk { + return s.chunks +}
-type ChunkIterator interface { - ForEachChunk(ctx context.Context, callback ChunkEntryCallback) error +func (s *Series) Reset(seriesID, userID []byte, labels labels.Labels) { + s.seriesID = seriesID + s.userID = userID + s.labels = labels + s.chunks = s.chunks[:0] } -type SeriesCleaner interface { +func (s *Series) AppendChunks(ref ...Chunk) { + s.chunks = append(s.chunks, ref...) +} + +type SeriesCallback func(series Series) (err error) + +type SeriesIterator interface { + ForEachSeries(ctx context.Context, callback SeriesCallback) error +} + +type IndexCleaner interface { + RemoveChunk(from, through model.Time, userID []byte, labels labels.Labels, chunkID []byte) error // CleanupSeries is for cleaning up the series that do not have any chunks left in the index. // It would only be called for the series that have all their chunks deleted without adding new ones. CleanupSeries(userID []byte, lbls labels.Labels) error @@ -70,9 +97,9 @@ type chunkIndexer interface { } type IndexProcessor interface { - ChunkIterator + SeriesIterator chunkIndexer - SeriesCleaner + IndexCleaner } var errNoChunksFound = errors.New("no chunks found in table, please check if there are really no chunks and manually drop the table or " + @@ -176,57 +203,84 @@ func markForDelete( iterCtx, cancel := ctxForTimeout(timeout) defer cancel() - err := indexFile.ForEachChunk(iterCtx, func(c ChunkEntry) (bool, error) { + err := indexFile.ForEachSeries(iterCtx, func(s Series) error { + chunks := s.Chunks() + if len(chunks) == 0 { + // add the series to series map so that it gets cleaned up + seriesMap.Add(s.SeriesID(), s.UserID(), s.Labels()) + return nil + } + chunksFound = true - seriesMap.Add(c.SeriesID, c.UserID, c.Labels) - - // see if the chunk is deleted completely or partially - if expired, filterFunc := expiration.Expired(c, now); expired { - linesDeleted := true // tracks whether we deleted at least some data from the chunk - if filterFunc != nil { - wroteChunks := false - var err error - wroteChunks, linesDeleted, err = chunkRewriter.rewriteChunk(ctx, c, tableInterval, filterFunc) - if err != nil { - return false, fmt.Errorf("failed to rewrite chunk %s with error %s", c.ChunkID, err) - } + seriesStart := chunks[0].From + for i := 0; i < len(chunks); i++ { + if chunks[i].From < seriesStart { + seriesStart = chunks[i].From + } + } + + if expiration.CanSkipSeries(s.UserID(), s.labels, s.SeriesID(), seriesStart, tableName, now) { + empty = false + return nil + } + seriesMap.Add(s.SeriesID(), s.UserID(), s.Labels()) + + for i := 0; i < len(chunks) && iterCtx.Err() == nil; i++ { + c := chunks[i] + // see if the chunk is deleted completely or partially + if expired, filterFunc := expiration.Expired(s.UserID(), c, s.Labels(), s.SeriesID(), tableName, now); expired { + linesDeleted := true // tracks whether we deleted at least some data from the chunk + if filterFunc != nil { + wroteChunks := false + var err error + wroteChunks, linesDeleted, err = chunkRewriter.rewriteChunk(ctx, s.UserID(), c, tableInterval, filterFunc) + if err != nil { + return fmt.Errorf("failed to rewrite chunk %s with error %s", c.ChunkID, err) + }
- if wroteChunks { - // we have re-written chunk to the storage so the table won't be empty and the series are still being referred. - empty = false - seriesMap.MarkSeriesNotDeleted(c.SeriesID, c.UserID) + if wroteChunks { + // we have re-written chunk to the storage so the table won't be empty and the series are still being referred. + empty = false + seriesMap.MarkSeriesNotDeleted(s.SeriesID(), s.UserID()) + } } - } - if linesDeleted { - modified = true + if linesDeleted { + modified = true - // Mark the chunk for deletion only if it is completely deleted, or this is the last table that the chunk is indexed in. - // For a partially deleted chunk, if we delete the source chunk before all the tables which index it are processed then - // the retention would fail because it would fail to find it in the storage. - if filterFunc == nil || c.From >= tableInterval.Start { - if err := marker.Put(c.ChunkID); err != nil { - return false, err + // Mark the chunk for deletion only if it is completely deleted, or this is the last table that the chunk is indexed in. + // For a partially deleted chunk, if we delete the source chunk before all the tables which index it are processed then + // the retention would fail because it would fail to find it in the storage. + if filterFunc == nil || c.From >= tableInterval.Start { + if err := marker.Put(c.ChunkID); err != nil { + return err + } + } + if err := indexFile.RemoveChunk(c.From, c.Through, s.UserID(), s.Labels(), c.ChunkID); err != nil { + return fmt.Errorf("failed to remove chunk %s from index with error %s", c.ChunkID, err) } + continue } - return true, nil } - } - // The chunk is not deleted, now see if we can drop its index entry based on end time from tableInterval. - // If chunk end time is after the end time of tableInterval, it means the chunk would also be indexed in the next table. - // We would now check if the end time of the tableInterval is out of retention period so that - // we can drop the chunk entry from this table without removing the chunk from the store.
+ if c.Through.After(tableInterval.End) { + if expiration.DropFromIndex(s.UserID(), c, nil, tableInterval.End, now) { + modified = true + if err := indexFile.RemoveChunk(c.From, c.Through, s.UserID(), s.Labels(), c.ChunkID); err != nil { + return fmt.Errorf("failed to remove chunk %s from index with error %s", c.ChunkID, err) + } + continue + } } - } - empty = false - seriesMap.MarkSeriesNotDeleted(c.SeriesID, c.UserID) - return false, nil + empty = false + seriesMap.MarkSeriesNotDeleted(s.SeriesID(), s.UserID()) + } + return iterCtx.Err() }) if err != nil { if errors.Is(err, context.DeadlineExceeded) && errors.Is(iterCtx.Err(), context.DeadlineExceeded) { @@ -366,11 +420,11 @@ func newChunkRewriter(chunkClient client.Client, tableName string, chunkIndexer // If the newChunk is different, linesDeleted would be true. // The newChunk is indexed and uploaded only if it belongs to the current index table being processed, // the status of which is set to wroteChunks. -func (c *chunkRewriter) rewriteChunk(ctx context.Context, ce ChunkEntry, tableInterval model.Interval, filterFunc filter.Func) (wroteChunks bool, linesDeleted bool, err error) { - userID := unsafeGetString(ce.UserID) +func (c *chunkRewriter) rewriteChunk(ctx context.Context, userID []byte, ce Chunk, tableInterval model.Interval, filterFunc filter.Func) (wroteChunks bool, linesDeleted bool, err error) { + userIDStr := unsafeGetString(userID) chunkID := unsafeGetString(ce.ChunkID) - chk, err := chunk.ParseExternalKey(userID, chunkID) + chk, err := chunk.ParseExternalKey(userIDStr, chunkID) if err != nil { return false, false, err } @@ -381,7 +435,7 @@ func (c *chunkRewriter) rewriteChunk(ctx context.Context, ce ChunkEntry, tableIn } if len(chks) != 1 { - return false, false, fmt.Errorf("expected 1 entry for chunk %s but found %d in storage", chunkID, len(chks)) + return false, false, fmt.Errorf("expected 1 entry for chunk %s but found %d in storage", ce.ChunkID, len(chks)) } newChunkData, err := chks[0].Data.Rebound(ce.From, ce.Through, func(ts time.Time, s string, structuredMetadata ...labels.Label) bool { @@ -394,7 +448,7 @@ func (c *chunkRewriter) rewriteChunk(ctx context.Context, ce ChunkEntry, tableIn }) if err != nil { if errors.Is(err, chunk.ErrSliceNoDataInRange) { - level.Info(util_log.Logger).Log("msg", "Delete request filterFunc leaves an empty chunk", "chunk ref", string(ce.ChunkRef.ChunkID)) + level.Info(util_log.Logger).Log("msg", "Delete request filterFunc leaves an empty chunk", "chunk ref", string(ce.ChunkID)) return false, true, nil } return false, false, err @@ -418,7 +472,7 @@ func (c *chunkRewriter) rewriteChunk(ctx context.Context, ce ChunkEntry, tableIn } newChunk := chunk.NewChunk( - userID, chks[0].FingerprintModel(), chks[0].Metric, + userIDStr, chks[0].FingerprintModel(), chks[0].Metric, facade, newChunkStart, newChunkEnd, diff --git a/pkg/compactor/retention/retention_test.go b/pkg/compactor/retention/retention_test.go index cdc7ef61dbc9c..e14097f44bb34 100644 --- a/pkg/compactor/retention/retention_test.go +++ b/pkg/compactor/retention/retention_test.go @@ -259,10 +259,18 @@ func Test_EmptyTable(t *testing.T) { tables := store.indexTables() require.Len(t, tables, 1) + + // disabled retention should not do anything to the table + empty, modified, err := markForDelete(context.Background(), 0, tables[0].name, &noopWriter{}, tables[0], NewExpirationChecker(&fakeLimits{}), nil, util_log.Logger) + require.NoError(t, err) + require.False(t, empty) + require.False(t, modified) + // Set a very low retention to 
make sure all chunks are marked for deletion which will create an empty table. - empty, _, err := markForDelete(context.Background(), 0, tables[0].name, &noopWriter{}, tables[0], NewExpirationChecker(&fakeLimits{perTenant: map[string]retentionLimit{"1": {retentionPeriod: time.Second}, "2": {retentionPeriod: time.Second}}}), nil, util_log.Logger) + empty, modified, err = markForDelete(context.Background(), 0, tables[0].name, &noopWriter{}, tables[0], NewExpirationChecker(&fakeLimits{perTenant: map[string]retentionLimit{"1": {retentionPeriod: time.Second}, "2": {retentionPeriod: time.Second}}}), nil, util_log.Logger) require.NoError(t, err) require.True(t, empty) + require.True(t, modified) _, _, err = markForDelete(context.Background(), 0, tables[0].name, &noopWriter{}, newTable("test"), NewExpirationChecker(&fakeLimits{}), nil, util_log.Logger) require.Equal(t, err, errNoChunksFound) @@ -575,7 +583,11 @@ func TestChunkRewriter(t *testing.T) { for _, indexTable := range indexTables { cr := newChunkRewriter(store.chunkClient, indexTable.name, indexTable) - wroteChunks, linesDeleted, err := cr.rewriteChunk(context.Background(), entryFromChunk(tt.chunk), ExtractIntervalFromTableName(indexTable.name), tt.filterFunc) + wroteChunks, linesDeleted, err := cr.rewriteChunk(context.Background(), []byte(tt.chunk.UserID), Chunk{ + ChunkID: []byte(getChunkID(tt.chunk.ChunkRef)), + From: tt.chunk.From, + Through: tt.chunk.Through, + }, ExtractIntervalFromTableName(indexTable.name), tt.filterFunc) require.NoError(t, err) require.Equal(t, tt.expectedRespByTables[indexTable.name].mustDeleteLines, linesDeleted) require.Equal(t, tt.expectedRespByTables[indexTable.name].mustRewriteChunk, wroteChunks) @@ -657,25 +669,26 @@ type chunkExpiry struct { type mockExpirationChecker struct { ExpirationChecker - chunksExpiry map[string]chunkExpiry - delay time.Duration - calls int - timedOut bool + chunksExpiry map[string]chunkExpiry + skipSeries map[string]bool + delay time.Duration + numExpiryChecks int + timedOut bool } -func newMockExpirationChecker(chunksExpiry map[string]chunkExpiry) *mockExpirationChecker { - return &mockExpirationChecker{chunksExpiry: chunksExpiry} +func newMockExpirationChecker(chunksExpiry map[string]chunkExpiry, skipSeries map[string]bool) *mockExpirationChecker { + return &mockExpirationChecker{chunksExpiry: chunksExpiry, skipSeries: skipSeries} } -func (m *mockExpirationChecker) Expired(ref ChunkEntry, _ model.Time) (bool, filter.Func) { +func (m *mockExpirationChecker) Expired(_ []byte, chk Chunk, _ labels.Labels, _ []byte, _ string, _ model.Time) (bool, filter.Func) { time.Sleep(m.delay) - m.calls++ + m.numExpiryChecks++ - ce := m.chunksExpiry[string(ref.ChunkID)] + ce := m.chunksExpiry[string(chk.ChunkID)] return ce.isExpired, ce.filterFunc } -func (m *mockExpirationChecker) DropFromIndex(_ ChunkEntry, _ model.Time, _ model.Time) bool { +func (m *mockExpirationChecker) DropFromIndex(_ []byte, _ Chunk, _ labels.Labels, _ model.Time, _ model.Time) bool { return false } @@ -683,6 +696,10 @@ func (m *mockExpirationChecker) MarkPhaseTimedOut() { m.timedOut = true } +func (m *mockExpirationChecker) CanSkipSeries(_ []byte, lbls labels.Labels, _ []byte, _ model.Time, _ string, _ model.Time) bool { + return m.skipSeries[lbls.String()] +} + func TestMarkForDelete_SeriesCleanup(t *testing.T) { now := model.Now() schema := allSchemas[2] @@ -690,13 +707,15 @@ func TestMarkForDelete_SeriesCleanup(t *testing.T) { todaysTableInterval := 
ExtractIntervalFromTableName(schema.config.IndexTables.TableFor(now)) for _, tc := range []struct { - name string - chunks []chunk.Chunk - expiry []chunkExpiry - expectedDeletedSeries []map[uint64]struct{} - expectedEmpty []bool - expectedModified []bool - numChunksDeleted []int64 + name string + chunks []chunk.Chunk + expiry []chunkExpiry + skipSeries map[string]bool + expectedDeletedSeries []map[uint64]struct{} + expectedEmpty []bool + expectedModified []bool + numChunksDeleted []int64 + numExpectedExpiryChecks int }{ { name: "no chunk and series deleted", @@ -720,6 +739,7 @@ func TestMarkForDelete_SeriesCleanup(t *testing.T) { numChunksDeleted: []int64{ 0, }, + numExpectedExpiryChecks: 1, }, { name: "chunk deleted with filter but no lines matching", @@ -746,6 +766,7 @@ func TestMarkForDelete_SeriesCleanup(t *testing.T) { numChunksDeleted: []int64{ 0, }, + numExpectedExpiryChecks: 1, }, { name: "only one chunk in store which gets deleted", @@ -769,6 +790,7 @@ func TestMarkForDelete_SeriesCleanup(t *testing.T) { numChunksDeleted: []int64{ 1, }, + numExpectedExpiryChecks: 1, }, { name: "only one chunk in store which gets partially deleted", @@ -800,6 +822,7 @@ func TestMarkForDelete_SeriesCleanup(t *testing.T) { numChunksDeleted: []int64{ 1, }, + numExpectedExpiryChecks: 1, }, { name: "one of two chunks deleted", @@ -827,6 +850,65 @@ func TestMarkForDelete_SeriesCleanup(t *testing.T) { numChunksDeleted: []int64{ 1, }, + numExpectedExpiryChecks: 2, + }, + { + name: "one of two series skipped", + chunks: []chunk.Chunk{ + createChunk(t, userID, labels.Labels{labels.Label{Name: "foo", Value: "1"}}, todaysTableInterval.Start, todaysTableInterval.Start.Add(30*time.Minute)), + createChunk(t, userID, labels.Labels{labels.Label{Name: "foo", Value: "2"}}, todaysTableInterval.Start, todaysTableInterval.Start.Add(30*time.Minute)), + }, + skipSeries: map[string]bool{`{foo="1"}`: true}, + expiry: []chunkExpiry{ + { + isExpired: false, + }, + { + isExpired: true, + }, + }, + expectedDeletedSeries: []map[uint64]struct{}{ + {labels.Labels{labels.Label{Name: "foo", Value: "2"}}.Hash(): struct{}{}}, + }, + expectedEmpty: []bool{ + false, + }, + expectedModified: []bool{ + true, + }, + numChunksDeleted: []int64{ + 1, + }, + numExpectedExpiryChecks: 1, + }, + { + name: "all series skipped", + chunks: []chunk.Chunk{ + createChunk(t, userID, labels.Labels{labels.Label{Name: "foo", Value: "1"}}, todaysTableInterval.Start, todaysTableInterval.Start.Add(30*time.Minute)), + createChunk(t, userID, labels.Labels{labels.Label{Name: "foo", Value: "2"}}, todaysTableInterval.Start, todaysTableInterval.Start.Add(30*time.Minute)), + }, + skipSeries: map[string]bool{`{foo="1"}`: true, `{foo="2"}`: true}, + expiry: []chunkExpiry{ + { + isExpired: false, + }, + { + isExpired: false, + }, + }, + expectedDeletedSeries: []map[uint64]struct{}{ + nil, + }, + expectedEmpty: []bool{ + false, + }, + expectedModified: []bool{ + false, + }, + numChunksDeleted: []int64{ + 0, + }, + numExpectedExpiryChecks: 0, }, { name: "one of two chunks partially deleted", @@ -862,6 +944,7 @@ func TestMarkForDelete_SeriesCleanup(t *testing.T) { numChunksDeleted: []int64{ 1, }, + numExpectedExpiryChecks: 2, }, { name: "one big chunk partially deleted for yesterdays table without rewrite", @@ -888,6 +971,7 @@ func TestMarkForDelete_SeriesCleanup(t *testing.T) { numChunksDeleted: []int64{ 1, 0, }, + numExpectedExpiryChecks: 2, }, { name: "one big chunk partially deleted for yesterdays table with rewrite", @@ -914,6 +998,7 @@ func 
TestMarkForDelete_SeriesCleanup(t *testing.T) { numChunksDeleted: []int64{ 1, 0, }, + numExpectedExpiryChecks: 2, }, } { t.Run(tc.name, func(t *testing.T) { @@ -925,7 +1010,7 @@ func TestMarkForDelete_SeriesCleanup(t *testing.T) { chunksExpiry[getChunkID(chunk.ChunkRef)] = tc.expiry[i] } - expirationChecker := newMockExpirationChecker(chunksExpiry) + expirationChecker := newMockExpirationChecker(chunksExpiry, tc.skipSeries) store.Stop() @@ -945,6 +1030,8 @@ func TestMarkForDelete_SeriesCleanup(t *testing.T) { require.EqualValues(t, tc.expectedDeletedSeries[i], seriesCleanRecorder.deletedSeries[userID]) } + + require.Equal(t, tc.numExpectedExpiryChecks, expirationChecker.numExpiryChecks) }) } } @@ -967,7 +1054,7 @@ func TestDeleteTimeout(t *testing.T) { require.NoError(t, store.Put(context.TODO(), chunks)) store.Stop() - expirationChecker := newMockExpirationChecker(map[string]chunkExpiry{}) + expirationChecker := newMockExpirationChecker(map[string]chunkExpiry{}, nil) expirationChecker.delay = 10 * time.Millisecond table := store.indexTables()[0] @@ -985,7 +1072,7 @@ func TestDeleteTimeout(t *testing.T) { require.NoError(t, err) require.False(t, empty) require.False(t, isModified) - require.Equal(t, tc.calls, expirationChecker.calls) + require.Equal(t, tc.calls, expirationChecker.numExpiryChecks) require.Equal(t, tc.timedOut, expirationChecker.timedOut) } } diff --git a/pkg/compactor/retention/series.go b/pkg/compactor/retention/series.go index e81d80a99ebdc..b0d6b8d7f47b3 100644 --- a/pkg/compactor/retention/series.go +++ b/pkg/compactor/retention/series.go @@ -9,7 +9,7 @@ type userSeries struct { seriesIDLen int } -func newUserSeries(seriesID []byte, userID []byte) userSeries { +func newUserSeries(seriesID, userID []byte) userSeries { key := make([]byte, 0, len(seriesID)+len(userID)) key = append(key, seriesID...) key = append(key, userID...) @@ -31,16 +31,6 @@ func (us userSeries) UserID() []byte { return us.key[us.seriesIDLen:] } -func (us *userSeries) Reset(seriesID []byte, userID []byte) { - if us.key == nil { - us.key = make([]byte, 0, len(seriesID)+len(userID)) - } - us.key = us.key[:0] - us.key = append(us.key, seriesID...) - us.key = append(us.key, userID...) 
- us.seriesIDLen = len(seriesID) -} - type userSeriesInfo struct { userSeries isDeleted bool @@ -53,7 +43,7 @@ func newUserSeriesMap() userSeriesMap { return make(userSeriesMap) } -func (u userSeriesMap) Add(seriesID []byte, userID []byte, lbls labels.Labels) { +func (u userSeriesMap) Add(seriesID, userID []byte, lbls labels.Labels) { us := newUserSeries(seriesID, userID) if _, ok := u[us.Key()]; ok { return @@ -67,7 +57,7 @@ func (u userSeriesMap) Add(seriesID []byte, userID []byte, lbls labels.Labels) { } // MarkSeriesNotDeleted is used to mark series not deleted when it still has some chunks left in the store -func (u userSeriesMap) MarkSeriesNotDeleted(seriesID []byte, userID []byte) { +func (u userSeriesMap) MarkSeriesNotDeleted(seriesID, userID []byte) { us := newUserSeries(seriesID, userID) usi := u[us.Key()] usi.isDeleted = false diff --git a/pkg/compactor/retention/util_test.go b/pkg/compactor/retention/util_test.go index 3597a11565adb..f30713fc042d9 100644 --- a/pkg/compactor/retention/util_test.go +++ b/pkg/compactor/retention/util_test.go @@ -117,51 +117,50 @@ var ( sweepMetrics = newSweeperMetrics(prometheus.DefaultRegisterer) ) -func newChunkEntry(userID, labels string, from, through model.Time) ChunkEntry { +func mustParseLabels(labels string) labels.Labels { lbs, err := syntax.ParseLabels(labels) if err != nil { panic(err) } - return ChunkEntry{ - ChunkRef: ChunkRef{ - UserID: []byte(userID), - SeriesID: labelsSeriesID(lbs), - From: from, - Through: through, - }, - Labels: lbs, - } + + return lbs } type table struct { name string - chunks map[string][]chunk.Chunk + chunks map[string]map[string][]chunk.Chunk } -func (t *table) ForEachChunk(ctx context.Context, callback ChunkEntryCallback) error { - for userID, chks := range t.chunks { - i := 0 - for j := 0; j < len(chks) && ctx.Err() == nil; j++ { - chk := chks[j] - deleteChunk, err := callback(entryFromChunk(chk)) - if err != nil { - return err +func (t *table) ForEachSeries(ctx context.Context, callback SeriesCallback) error { + for userID := range t.chunks { + for seriesID := range t.chunks[userID] { + chunks := make([]Chunk, 0, len(t.chunks[userID][seriesID])) + for _, chk := range t.chunks[userID][seriesID] { + chunks = append(chunks, Chunk{ + ChunkID: []byte(getChunkID(chk.ChunkRef)), + From: chk.From, + Through: chk.Through, + }) } - - if !deleteChunk { - t.chunks[userID][i] = chk - i++ + series := Series{} + series.Reset( + []byte(seriesID), + []byte(userID), + labels.NewBuilder(t.chunks[userID][seriesID][0].Metric).Del(labels.MetricName).Labels(), + ) + series.AppendChunks(chunks...) 
+ if err := callback(series); err != nil { + return err } } - - t.chunks[userID] = t.chunks[userID][:i] } return ctx.Err() } func (t *table) IndexChunk(chunk chunk.Chunk) (bool, error) { - t.chunks[chunk.UserID] = append(t.chunks[chunk.UserID], chunk) + seriesID := string(labelsSeriesID(chunk.Metric)) + t.chunks[chunk.UserID][seriesID] = append(t.chunks[chunk.UserID][seriesID], chunk) return true, nil } @@ -169,19 +168,34 @@ func (t *table) CleanupSeries(_ []byte, _ labels.Labels) error { return nil } +func (t *table) RemoveChunk(_, _ model.Time, userID []byte, lbls labels.Labels, chunkID []byte) error { + seriesID := string(labelsSeriesID(labels.NewBuilder(lbls).Set(labels.MetricName, "logs").Labels())) + for i, chk := range t.chunks[string(userID)][seriesID] { + if getChunkID(chk.ChunkRef) == string(chunkID) { + t.chunks[string(userID)][seriesID] = append(t.chunks[string(userID)][seriesID][:i], t.chunks[string(userID)][seriesID][i+1:]...) + } + } + + return nil +} + func newTable(name string) *table { return &table{ name: name, - chunks: map[string][]chunk.Chunk{}, + chunks: map[string]map[string][]chunk.Chunk{}, } } func (t *table) Put(chk chunk.Chunk) { if _, ok := t.chunks[chk.UserID]; !ok { - t.chunks[chk.UserID] = []chunk.Chunk{} + t.chunks[chk.UserID] = make(map[string][]chunk.Chunk) + } + seriesID := string(labelsSeriesID(chk.Metric)) + if _, ok := t.chunks[chk.UserID][seriesID]; !ok { + t.chunks[chk.UserID][seriesID] = []chunk.Chunk{} } - t.chunks[chk.UserID] = append(t.chunks[chk.UserID], chk) + t.chunks[chk.UserID][seriesID] = append(t.chunks[chk.UserID][seriesID], chk) } func (t *table) GetChunks(userID string, from, through model.Time, metric labels.Labels) []chunk.Chunk { @@ -191,11 +205,13 @@ func (t *table) GetChunks(userID string, from, through model.Time, metric labels matchers = append(matchers, labels.MustNewMatcher(labels.MatchEqual, l.Name, l.Value)) } - for _, chk := range t.chunks[userID] { - if chk.From > through || chk.Through < from || !allMatch(matchers, chk.Metric) { - continue + for seriesID := range t.chunks[userID] { + for _, chk := range t.chunks[userID][seriesID] { + if chk.From > through || chk.Through < from || !allMatch(matchers, chk.Metric) { + continue + } + chunks = append(chunks, chk) } - chunks = append(chunks, chk) } return chunks @@ -311,19 +327,6 @@ func (t *testStore) GetChunks(userID string, from, through model.Time, metric la return fetchedChunk } -func entryFromChunk(c chunk.Chunk) ChunkEntry { - return ChunkEntry{ - ChunkRef: ChunkRef{ - UserID: []byte(c.UserID), - SeriesID: labelsSeriesID(c.Metric), - ChunkID: []byte(getChunkID(c.ChunkRef)), - From: c.From, - Through: c.Through, - }, - Labels: labels.NewBuilder(c.Metric).Del(labels.MetricName).Labels(), - } -} - func getChunkID(c logproto.ChunkRef) string { return schemaCfg.ExternalKey(c) } diff --git a/pkg/compactor/testutil.go b/pkg/compactor/testutil.go index feba141ba514d..c60c98fd94ab3 100644 --- a/pkg/compactor/testutil.go +++ b/pkg/compactor/testutil.go @@ -14,6 +14,7 @@ import ( "github.com/go-kit/log" "github.com/klauspost/compress/gzip" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/require" @@ -159,7 +160,7 @@ func openCompactedIndex(path string) (*compactedIndex, error) { return &compactedIndex{indexFile: idxFile}, nil } -func (c compactedIndex) ForEachChunk(_ context.Context, _ retention.ChunkEntryCallback) error { +func (c compactedIndex) ForEachSeries(_ context.Context, _ retention.SeriesCallback) error { 
return nil } @@ -171,6 +172,10 @@ func (c compactedIndex) CleanupSeries(_ []byte, _ labels.Labels) error { return nil } +func (c compactedIndex) RemoveChunk(_, _ model.Time, _ []byte, _ labels.Labels, _ []byte) error { + return nil +} + func (c compactedIndex) Cleanup() { _ = c.indexFile.Close() } diff --git a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/compacted_index.go b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/compacted_index.go index cb73f9aa95bf0..2ba43fe602621 100644 --- a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/compacted_index.go +++ b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/compacted_index.go @@ -9,6 +9,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "go.etcd.io/bbolt" @@ -136,7 +137,7 @@ func (c *CompactedIndex) setupIndexProcessors() error { return nil } -func (c *CompactedIndex) ForEachChunk(ctx context.Context, callback retention.ChunkEntryCallback) error { +func (c *CompactedIndex) ForEachSeries(ctx context.Context, callback retention.SeriesCallback) error { if err := c.setupIndexProcessors(); err != nil { return err } @@ -146,7 +147,7 @@ func (c *CompactedIndex) ForEachChunk(ctx context.Context, callback retention.Ch return fmt.Errorf("required boltdb bucket not found") } - return ForEachChunk(ctx, bucket, c.periodConfig, callback) + return ForEachSeries(ctx, bucket, c.periodConfig, callback) } func (c *CompactedIndex) IndexChunk(chunk chunk.Chunk) (bool, error) { @@ -165,6 +166,14 @@ func (c *CompactedIndex) CleanupSeries(userID []byte, lbls labels.Labels) error return c.seriesCleaner.CleanupSeries(userID, lbls) } +func (c *CompactedIndex) RemoveChunk(from, through model.Time, userID []byte, labels labels.Labels, chunkID []byte) error { + if err := c.setupIndexProcessors(); err != nil { + return err + } + + return c.seriesCleaner.RemoveChunk(from, through, userID, labels, chunkID) +} + func (c *CompactedIndex) ToIndexFile() (shipperindex.Index, error) { if c.boltdbTx != nil { err := c.boltdbTx.Commit() diff --git a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/compacted_index_test.go b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/compacted_index_test.go index 433835e0ff7e9..36fedee7192e3 100644 --- a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/compacted_index_test.go +++ b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/compacted_index_test.go @@ -47,18 +47,23 @@ func TestCompactedIndex_IndexProcessor(t *testing.T) { // remove c1, c2 chunk and index c4 with same labels as c2 c4 := createChunk(t, chunkfmt, headfmt, "2", labels.Labels{labels.Label{Name: "foo", Value: "bar"}, labels.Label{Name: "fizz", Value: "buzz"}}, tt.from, tt.from.Add(30*time.Minute)) - err = compactedIndex.ForEachChunk(context.Background(), func(entry retention.ChunkEntry) (deleteChunk bool, err error) { - if entry.Labels.Get("fizz") == "buzz" { + err = compactedIndex.ForEachSeries(context.Background(), func(series retention.Series) (err error) { + if series.Labels().Get("fizz") == "buzz" { chunkIndexed, err := compactedIndex.IndexChunk(c4) require.NoError(t, err) require.True(t, chunkIndexed) } - return entry.Labels.Get("foo") == "bar", nil + if series.Labels().Get("foo") == "bar" { + for _, chk := range series.Chunks() { + require.NoError(t, compactedIndex.RemoveChunk(chk.From, chk.Through, series.UserID(), series.Labels(), chk.ChunkID)) + } + } + return nil }) require.NoError(t, err) // 
remove series for c1 since all its chunks are deleted - err = compactedIndex.CleanupSeries(entryFromChunk(testSchema, c1).UserID, c1.Metric) + err = compactedIndex.CleanupSeries([]byte(c1.UserID), c1.Metric) require.NoError(t, err) indexFile, err := compactedIndex.ToIndexFile() @@ -74,7 +79,7 @@ func TestCompactedIndex_IndexProcessor(t *testing.T) { err = modifiedBoltDB.View(func(tx *bbolt.Tx) error { return tx.Bucket(local.IndexBucketName).ForEach(func(k, _ []byte) error { - c1SeriesID := entryFromChunk(testSchema, c1).SeriesID + c1SeriesID := labelsSeriesID(c1.Metric) series, ok, err := parseLabelIndexSeriesID(decodeKey(k)) if !ok { return nil @@ -92,15 +97,15 @@ func TestCompactedIndex_IndexProcessor(t *testing.T) { }) require.NoError(t, err) - expectedChunkEntries := []retention.ChunkEntry{ - entryFromChunk(testSchema, c3), - entryFromChunk(testSchema, c4), + expectedChunkEntries := []retention.Chunk{ + retentionChunkFromChunk(testSchema, c3), + retentionChunkFromChunk(testSchema, c4), } - chunkEntriesFound := []retention.ChunkEntry{} + var chunkEntriesFound []retention.Chunk err = modifiedBoltDB.View(func(tx *bbolt.Tx) error { - return ForEachChunk(context.Background(), tx.Bucket(local.IndexBucketName), tt.config, func(entry retention.ChunkEntry) (deleteChunk bool, err error) { - chunkEntriesFound = append(chunkEntriesFound, entry) - return false, nil + return ForEachSeries(context.Background(), tx.Bucket(local.IndexBucketName), tt.config, func(series retention.Series) (err error) { + chunkEntriesFound = append(chunkEntriesFound, series.Chunks()...) + return nil }) }) require.NoError(t, err) diff --git a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/index.go b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/index.go index 73e87e06e1e76..3e4826cc096a3 100644 --- a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/index.go +++ b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/index.go @@ -7,8 +7,6 @@ import ( "strconv" "github.com/prometheus/common/model" - - "github.com/grafana/loki/v3/pkg/compactor/retention" ) const ( @@ -39,36 +37,44 @@ func (e InvalidIndexKeyError) Is(target error) bool { return target == ErrInvalidIndexKey } -func parseChunkRef(hashKey, rangeKey []byte) (retention.ChunkRef, bool, error) { +type chunkRef struct { + UserID []byte + SeriesID []byte + ChunkID []byte + From model.Time + Through model.Time +} + +func parseChunkRef(hashKey, rangeKey []byte) (chunkRef, bool, error) { componentsRef := getComponents() defer putComponents(componentsRef) components := componentsRef.components components = decodeRangeKey(rangeKey, components) if len(components) == 0 { - return retention.ChunkRef{}, false, newInvalidIndexKeyError(hashKey, rangeKey) + return chunkRef{}, false, newInvalidIndexKeyError(hashKey, rangeKey) } keyType := components[len(components)-1] if len(keyType) == 0 || keyType[0] != chunkTimeRangeKeyV3 { - return retention.ChunkRef{}, false, nil + return chunkRef{}, false, nil } chunkID := components[len(components)-2] userID, hexFrom, hexThrough, ok := parseChunkID(chunkID) if !ok { - return retention.ChunkRef{}, false, newInvalidIndexKeyError(hashKey, rangeKey) + return chunkRef{}, false, newInvalidIndexKeyError(hashKey, rangeKey) } from, err := strconv.ParseInt(unsafeGetString(hexFrom), 16, 64) if err != nil { - return retention.ChunkRef{}, false, err + return chunkRef{}, false, err } through, err := strconv.ParseInt(unsafeGetString(hexThrough), 16, 64) if err != nil { - return retention.ChunkRef{}, false, err + return 
chunkRef{}, false, err } - return retention.ChunkRef{ + return chunkRef{ UserID: userID, SeriesID: seriesFromHash(hashKey), From: model.Time(from), diff --git a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/iterator.go b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/iterator.go index 7b2422fdc1149..f504a3418a51c 100644 --- a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/iterator.go +++ b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/iterator.go @@ -1,6 +1,7 @@ package compactor import ( + "bytes" "context" "fmt" @@ -19,17 +20,17 @@ const ( ) var ( - _ retention.SeriesCleaner = &seriesCleaner{} + _ retention.IndexCleaner = &seriesCleaner{} ) -func ForEachChunk(ctx context.Context, bucket *bbolt.Bucket, config config.PeriodConfig, callback retention.ChunkEntryCallback) error { +func ForEachSeries(ctx context.Context, bucket *bbolt.Bucket, config config.PeriodConfig, callback retention.SeriesCallback) error { labelsMapper, err := newSeriesLabelsMapper(bucket, config) if err != nil { return err } cursor := bucket.Cursor() - var current retention.ChunkEntry + var current retention.Series for key, _ := cursor.First(); key != nil && ctx.Err() == nil; key, _ = cursor.Next() { ref, ok, err := parseChunkRef(decodeKey(key)) @@ -40,17 +41,32 @@ func ForEachChunk(ctx context.Context, bucket *bbolt.Bucket, config config.Perio if !ok { continue } - current.ChunkRef = ref - current.Labels = labelsMapper.Get(ref.SeriesID, ref.UserID) - deleteChunk, err := callback(current) - if err != nil { - return err - } - if deleteChunk { - if err := cursor.Delete(); err != nil { + if len(current.Chunks()) == 0 { + current.Reset(ref.SeriesID, ref.UserID, labelsMapper.Get(ref.SeriesID, ref.UserID)) + } else if bytes.Compare(current.UserID(), ref.UserID) != 0 || bytes.Compare(current.SeriesID(), ref.SeriesID) != 0 { + err = callback(current) + if err != nil { return err } + + current.Reset(ref.SeriesID, ref.UserID, labelsMapper.Get(ref.SeriesID, ref.UserID)) + } + + current.AppendChunks(retention.Chunk{ + ChunkID: ref.ChunkID, + From: ref.From, + Through: ref.Through, + }) + } + if ctx.Err() != nil { + return ctx.Err() + } + + if len(current.Chunks()) != 0 { + err = callback(current) + if err != nil { + return err } } @@ -117,3 +133,32 @@ func (s *seriesCleaner) CleanupSeries(userID []byte, lbls labels.Labels) error { return nil } + +func (s *seriesCleaner) RemoveChunk(from, through model.Time, userID []byte, lbls labels.Labels, chunkID []byte) error { + // We need to add metric name label as well if it is missing since the series ids are calculated including that. + if lbls.Get(labels.MetricName) == "" { + lbls = append(lbls, labels.Label{ + Name: labels.MetricName, + Value: logMetricName, + }) + } + + indexEntries, err := s.schema.GetChunkWriteEntries(from, through, string(userID), logMetricName, lbls, string(chunkID)) + if err != nil { + return err + } + + for _, indexEntry := range indexEntries { + key := make([]byte, 0, len(indexEntry.HashValue)+len(separator)+len(indexEntry.RangeValue)) + key = append(key, []byte(indexEntry.HashValue)...) + key = append(key, []byte(separator)...) + key = append(key, indexEntry.RangeValue...) 
+ + err := s.bucket.Delete(key) + if err != nil { + return err + } + } + + return nil +} diff --git a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/iterator_test.go b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/iterator_test.go index c4acfd33b67ac..d97616af0fab6 100644 --- a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/iterator_test.go +++ b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/iterator_test.go @@ -42,30 +42,34 @@ func Test_ChunkIterator(t *testing.T) { tables := store.indexTables() require.Len(t, tables, 1) - var actual []retention.ChunkEntry + var actual []retention.Chunk err = tables[0].DB.Update(func(tx *bbolt.Tx) error { - return ForEachChunk(context.Background(), tx.Bucket(local.IndexBucketName), tt.config, func(entry retention.ChunkEntry) (deleteChunk bool, err error) { - actual = append(actual, entry) - return len(actual) == 2, nil + seriesCleaner := newSeriesCleaner(tx.Bucket(local.IndexBucketName), tt.config, tables[0].name) + return ForEachSeries(context.Background(), tx.Bucket(local.IndexBucketName), tt.config, func(series retention.Series) (err error) { + actual = append(actual, series.Chunks()...) + if string(series.UserID()) == c2.UserID { + return seriesCleaner.RemoveChunk(actual[1].From, actual[1].Through, series.UserID(), series.Labels(), actual[1].ChunkID) + } + return nil }) }) require.NoError(t, err) - require.Equal(t, []retention.ChunkEntry{ - entryFromChunk(store.schemaCfg, c1), - entryFromChunk(store.schemaCfg, c2), + require.Equal(t, []retention.Chunk{ + retentionChunkFromChunk(store.schemaCfg, c1), + retentionChunkFromChunk(store.schemaCfg, c2), }, actual) // second pass we delete c2 actual = actual[:0] err = tables[0].DB.Update(func(tx *bbolt.Tx) error { - return ForEachChunk(context.Background(), tx.Bucket(local.IndexBucketName), tt.config, func(entry retention.ChunkEntry) (deleteChunk bool, err error) { - actual = append(actual, entry) - return false, nil + return ForEachSeries(context.Background(), tx.Bucket(local.IndexBucketName), tt.config, func(series retention.Series) (err error) { + actual = append(actual, series.Chunks()...) + return nil }) }) require.NoError(t, err) - require.Equal(t, []retention.ChunkEntry{ - entryFromChunk(store.schemaCfg, c1), + require.Equal(t, []retention.Chunk{ + retentionChunkFromChunk(store.schemaCfg, c1), }, actual) }) } @@ -92,12 +96,12 @@ func Test_ChunkIteratorContextCancelation(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var actual []retention.ChunkEntry + var actual []retention.Chunk err = tables[0].DB.Update(func(tx *bbolt.Tx) error { - return ForEachChunk(ctx, tx.Bucket(local.IndexBucketName), schemaCfg.Configs[0], func(entry retention.ChunkEntry) (deleteChunk bool, err error) { - actual = append(actual, entry) + return ForEachSeries(ctx, tx.Bucket(local.IndexBucketName), schemaCfg.Configs[0], func(series retention.Series) (err error) { + actual = append(actual, series.Chunks()...) 
cancel() - return len(actual) == 2, nil + return nil }) }) @@ -110,7 +114,6 @@ func Test_SeriesCleaner(t *testing.T) { t.Run(tt.schema, func(t *testing.T) { cm := storage.NewClientMetrics() defer cm.Unregister() - testSchema := config.SchemaConfig{Configs: []config.PeriodConfig{tt.config}} store := newTestStore(t, cm) chunkfmt, headfmt, err := tt.config.ChunkFormat() require.NoError(t, err) @@ -129,27 +132,33 @@ func Test_SeriesCleaner(t *testing.T) { require.Len(t, tables, 1) // remove c1, c2 chunk err = tables[0].DB.Update(func(tx *bbolt.Tx) error { - return ForEachChunk(context.Background(), tx.Bucket(local.IndexBucketName), tt.config, func(entry retention.ChunkEntry) (deleteChunk bool, err error) { - return entry.Labels.Get("bar") == "foo", nil + seriesCleaner := newSeriesCleaner(tx.Bucket(local.IndexBucketName), tt.config, tables[0].name) + return ForEachSeries(context.Background(), tx.Bucket(local.IndexBucketName), tt.config, func(series retention.Series) (err error) { + if series.Labels().Get("bar") == "foo" { + for _, chk := range series.Chunks() { + require.NoError(t, seriesCleaner.RemoveChunk(chk.From, chk.Through, series.UserID(), series.Labels(), chk.ChunkID)) + } + } + return nil }) }) require.NoError(t, err) err = tables[0].DB.Update(func(tx *bbolt.Tx) error { cleaner := newSeriesCleaner(tx.Bucket(local.IndexBucketName), tt.config, tables[0].name) - if err := cleaner.CleanupSeries(entryFromChunk(testSchema, c2).UserID, c2.Metric); err != nil { + if err := cleaner.CleanupSeries([]byte(c2.UserID), c2.Metric); err != nil { return err } // remove series for c1 without __name__ label, which should work just fine - return cleaner.CleanupSeries(entryFromChunk(testSchema, c1).UserID, labels.NewBuilder(c1.Metric).Del(labels.MetricName).Labels()) + return cleaner.CleanupSeries([]byte(c1.UserID), labels.NewBuilder(c1.Metric).Del(labels.MetricName).Labels()) }) require.NoError(t, err) err = tables[0].DB.View(func(tx *bbolt.Tx) error { return tx.Bucket(local.IndexBucketName).ForEach(func(k, _ []byte) error { - c1SeriesID := entryFromChunk(testSchema, c1).SeriesID - c2SeriesID := entryFromChunk(testSchema, c2).SeriesID + c1SeriesID := labelsSeriesID(c1.Metric) + c2SeriesID := labelsSeriesID(c2.Metric) series, ok, err := parseLabelIndexSeriesID(decodeKey(k)) if !ok { return nil @@ -215,21 +224,14 @@ func labelsString(ls labels.Labels) string { return b.String() } -func entryFromChunk(s config.SchemaConfig, c chunk.Chunk) retention.ChunkEntry { - return retention.ChunkEntry{ - ChunkRef: retention.ChunkRef{ - UserID: []byte(c.UserID), - SeriesID: labelsSeriesID(c.Metric), - ChunkID: []byte(s.ExternalKey(c.ChunkRef)), - From: c.From, - Through: c.Through, - }, - Labels: labels.NewBuilder(c.Metric).Del(labels.MetricName).Labels(), +func retentionChunkFromChunk(s config.SchemaConfig, c chunk.Chunk) retention.Chunk { + return retention.Chunk{ + ChunkID: []byte(s.ExternalKey(c.ChunkRef)), + From: c.From, + Through: c.Through, } } -var chunkEntry retention.ChunkEntry - func Benchmark_ChunkIterator(b *testing.B) { cm := storage.NewClientMetrics() defer cm.Unregister() @@ -249,14 +251,13 @@ func Benchmark_ChunkIterator(b *testing.B) { b.ReportAllocs() b.ResetTimer() - var total int64 + var total int _ = store.indexTables()[0].Update(func(tx *bbolt.Tx) error { bucket := tx.Bucket(local.IndexBucketName) for n := 0; n < b.N; n++ { - err := ForEachChunk(context.Background(), bucket, allSchemas[0].config, func(entry retention.ChunkEntry) (deleteChunk bool, err error) { - chunkEntry = entry - total++ - 
return true, nil + err := ForEachSeries(context.Background(), bucket, allSchemas[0].config, func(series retention.Series) (err error) { + total += len(series.Chunks()) + return nil }) require.NoError(b, err) } diff --git a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/series.go b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/series.go index 2e53a37b44984..4e51bdf762c40 100644 --- a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/series.go +++ b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/series.go @@ -22,15 +22,15 @@ func newUserSeries(seriesID []byte, userID []byte) userSeries { } } -func (us userSeries) Key() string { +func (us *userSeries) Key() string { return unsafeGetString(us.key) } -func (us userSeries) SeriesID() []byte { +func (us *userSeries) SeriesID() []byte { return us.key[:us.seriesIDLen] } -func (us userSeries) UserID() []byte { +func (us *userSeries) UserID() []byte { return us.key[us.seriesIDLen:] } diff --git a/pkg/storage/stores/shipper/indexshipper/tsdb/compactor.go b/pkg/storage/stores/shipper/indexshipper/tsdb/compactor.go index ee9784a02db8e..eafdb4b60edde 100644 --- a/pkg/storage/stores/shipper/indexshipper/tsdb/compactor.go +++ b/pkg/storage/stores/shipper/indexshipper/tsdb/compactor.go @@ -288,24 +288,23 @@ func newCompactedIndex(ctx context.Context, tableName, userID, workingDir string } } -// ForEachChunk iterates over all the chunks in the builder and calls the callback function. -func (c *compactedIndex) ForEachChunk(ctx context.Context, callback retention.ChunkEntryCallback) error { +// ForEachSeries iterates over all the chunks in the builder and calls the callback function. +func (c *compactedIndex) ForEachSeries(ctx context.Context, callback retention.SeriesCallback) error { schemaCfg := config.SchemaConfig{ Configs: []config.PeriodConfig{c.periodConfig}, } - chunkEntry := retention.ChunkEntry{ - ChunkRef: retention.ChunkRef{ - UserID: getUnsafeBytes(c.userID), - }, - } logprotoChunkRef := logproto.ChunkRef{ UserID: c.userID, } + var series retention.Series for seriesID, stream := range c.builder.streams { + series.Reset( + getUnsafeBytes(seriesID), + getUnsafeBytes(c.userID), + withoutTenantLabel(stream.labels), + ) logprotoChunkRef.Fingerprint = uint64(stream.fp) - chunkEntry.SeriesID = getUnsafeBytes(seriesID) - chunkEntry.Labels = withoutTenantLabel(stream.labels) for i := 0; i < len(stream.chunks) && ctx.Err() == nil; i++ { chk := stream.chunks[i] @@ -313,19 +312,19 @@ func (c *compactedIndex) ForEachChunk(ctx context.Context, callback retention.Ch logprotoChunkRef.Through = chk.Through() logprotoChunkRef.Checksum = chk.Checksum - chunkEntry.ChunkID = getUnsafeBytes(schemaCfg.ExternalKey(logprotoChunkRef)) - chunkEntry.From = logprotoChunkRef.From - chunkEntry.Through = logprotoChunkRef.Through - - deleteChunk, err := callback(chunkEntry) - if err != nil { - return err - } + series.AppendChunks(retention.Chunk{ + ChunkID: getUnsafeBytes(schemaCfg.ExternalKey(logprotoChunkRef)), + From: logprotoChunkRef.From, + Through: logprotoChunkRef.Through, + }) + } + if ctx.Err() != nil { + return ctx.Err() + } - if deleteChunk { - // add the chunk to the list of chunks to delete which would be taken care of while building the index. 
- c.deleteChunks[seriesID] = append(c.deleteChunks[seriesID], chk) - } + err := callback(series) + if err != nil { + return err } } @@ -368,6 +367,22 @@ func (c *compactedIndex) CleanupSeries(_ []byte, lbls labels.Labels) error { return nil } +func (c *compactedIndex) RemoveChunk(from, through model.Time, userID []byte, labels labels.Labels, chunkID []byte) error { + chk, err := chunk.ParseExternalKey(string(userID), string(chunkID)) + if err != nil { + return err + } + + seriesID := labels.String() + c.deleteChunks[seriesID] = append(c.deleteChunks[seriesID], tsdbindex.ChunkMeta{ + Checksum: chk.Checksum, + MinTime: int64(from), + MaxTime: int64(through), + }) + + return nil +} + func (c *compactedIndex) Cleanup() {} // ToIndexFile creates an indexFile from the chunksmetas stored in the builder. diff --git a/pkg/storage/stores/shipper/indexshipper/tsdb/compactor_test.go b/pkg/storage/stores/shipper/indexshipper/tsdb/compactor_test.go index be0a343309c5c..f5f8ff27e9a58 100644 --- a/pkg/storage/stores/shipper/indexshipper/tsdb/compactor_test.go +++ b/pkg/storage/stores/shipper/indexshipper/tsdb/compactor_test.go @@ -626,18 +626,13 @@ func TestCompactor_Compact(t *testing.T) { } } -func chunkMetasToChunkEntry(schemaCfg config.SchemaConfig, userID string, lbls labels.Labels, chunkMetas index.ChunkMetas) []retention.ChunkEntry { - chunkEntries := make([]retention.ChunkEntry, 0, len(chunkMetas)) +func chunkMetasToRetentionChunk(schemaCfg config.SchemaConfig, userID string, lbls labels.Labels, chunkMetas index.ChunkMetas) []retention.Chunk { + chunkEntries := make([]retention.Chunk, 0, len(chunkMetas)) for _, chunkMeta := range chunkMetas { - chunkEntries = append(chunkEntries, retention.ChunkEntry{ - ChunkRef: retention.ChunkRef{ - UserID: []byte(userID), - SeriesID: []byte(lbls.String()), - ChunkID: []byte(schemaCfg.ExternalKey(chunkMetaToChunkRef(userID, chunkMeta, lbls))), - From: chunkMeta.From(), - Through: chunkMeta.Through(), - }, - Labels: lbls, + chunkEntries = append(chunkEntries, retention.Chunk{ + ChunkID: []byte(schemaCfg.ExternalKey(chunkMetaToChunkRef(userID, chunkMeta, lbls))), + From: chunkMeta.From(), + Through: chunkMeta.Through(), }) } @@ -658,35 +653,58 @@ func TestCompactedIndex(t *testing.T) { testCtx := setupCompactedIndex(t) for name, tc := range map[string]struct { - deleteChunks map[string]index.ChunkMetas + deleteChunks map[string][]retention.Chunk addChunks []chunk.Chunk deleteSeries []labels.Labels shouldErr bool - finalExpectedChunks map[string]index.ChunkMetas + finalExpectedChunks map[string][]retention.Chunk }{ "no changes": { - finalExpectedChunks: map[string]index.ChunkMetas{ - testCtx.lbls1.String(): buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(10)), - testCtx.lbls2.String(): buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(20)), + finalExpectedChunks: map[string][]retention.Chunk{ + testCtx.lbls1.String(): chunkMetasToRetentionChunk(testCtx.schemaCfg, testCtx.userID, testCtx.lbls1, buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(10))), + testCtx.lbls2.String(): chunkMetasToRetentionChunk(testCtx.schemaCfg, testCtx.userID, testCtx.lbls2, buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(20))), }, }, "delete some chunks from a stream": { - deleteChunks: map[string]index.ChunkMetas{ - testCtx.lbls1.String(): append(buildChunkMetas(testCtx.shiftTableStart(3), testCtx.shiftTableStart(5)), buildChunkMetas(testCtx.shiftTableStart(7), testCtx.shiftTableStart(8))...), + deleteChunks: 
map[string][]retention.Chunk{ + testCtx.lbls1.String(): chunkMetasToRetentionChunk( + testCtx.schemaCfg, testCtx.userID, testCtx.lbls1, + append( + buildChunkMetas(testCtx.shiftTableStart(3), testCtx.shiftTableStart(5)), + buildChunkMetas(testCtx.shiftTableStart(7), testCtx.shiftTableStart(8))..., + ), + ), }, - finalExpectedChunks: map[string]index.ChunkMetas{ - testCtx.lbls1.String(): append(buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(2)), append(buildChunkMetas(testCtx.shiftTableStart(6), testCtx.shiftTableStart(6)), buildChunkMetas(testCtx.shiftTableStart(9), testCtx.shiftTableStart(10))...)...), - testCtx.lbls2.String(): buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(20)), + finalExpectedChunks: map[string][]retention.Chunk{ + testCtx.lbls1.String(): chunkMetasToRetentionChunk( + testCtx.schemaCfg, testCtx.userID, testCtx.lbls1, + append( + buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(2)), + append(buildChunkMetas(testCtx.shiftTableStart(6), testCtx.shiftTableStart(6)), + buildChunkMetas(testCtx.shiftTableStart(9), testCtx.shiftTableStart(10))..., + )..., + ), + ), + testCtx.lbls2.String(): chunkMetasToRetentionChunk( + testCtx.schemaCfg, testCtx.userID, testCtx.lbls2, + buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(20)), + ), }, }, "delete all chunks from a stream": { - deleteChunks: map[string]index.ChunkMetas{ - testCtx.lbls1.String(): buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(10)), + deleteChunks: map[string][]retention.Chunk{ + testCtx.lbls1.String(): chunkMetasToRetentionChunk( + testCtx.schemaCfg, testCtx.userID, testCtx.lbls1, + buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(10)), + ), }, deleteSeries: []labels.Labels{testCtx.lbls1}, - finalExpectedChunks: map[string]index.ChunkMetas{ - testCtx.lbls2.String(): buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(20)), + finalExpectedChunks: map[string][]retention.Chunk{ + testCtx.lbls2.String(): chunkMetasToRetentionChunk( + testCtx.schemaCfg, testCtx.userID, testCtx.lbls2, + buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(20)), + ), }, }, "add some chunks to a stream": { @@ -702,9 +720,15 @@ func TestCompactedIndex(t *testing.T) { Data: dummyChunkData{}, }, }, - finalExpectedChunks: map[string]index.ChunkMetas{ - testCtx.lbls1.String(): buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(12)), - testCtx.lbls2.String(): buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(20)), + finalExpectedChunks: map[string][]retention.Chunk{ + testCtx.lbls1.String(): chunkMetasToRetentionChunk( + testCtx.schemaCfg, testCtx.userID, testCtx.lbls1, + buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(12)), + ), + testCtx.lbls2.String(): chunkMetasToRetentionChunk( + testCtx.schemaCfg, testCtx.userID, testCtx.lbls2, + buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(20)), + ), }, }, "__name__ label should get dropped while indexing chunks": { @@ -720,9 +744,15 @@ func TestCompactedIndex(t *testing.T) { Data: dummyChunkData{}, }, }, - finalExpectedChunks: map[string]index.ChunkMetas{ - testCtx.lbls1.String(): buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(12)), - testCtx.lbls2.String(): buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(20)), + finalExpectedChunks: map[string][]retention.Chunk{ + testCtx.lbls1.String(): chunkMetasToRetentionChunk( + testCtx.schemaCfg, 
testCtx.userID, testCtx.lbls1, + buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(12)), + ), + testCtx.lbls2.String(): chunkMetasToRetentionChunk( + testCtx.schemaCfg, testCtx.userID, testCtx.lbls2, + buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(20)), + ), }, }, "add some chunks out of table interval to a stream": { @@ -749,9 +779,15 @@ func TestCompactedIndex(t *testing.T) { Data: dummyChunkData{}, }, }, - finalExpectedChunks: map[string]index.ChunkMetas{ - testCtx.lbls1.String(): buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(12)), - testCtx.lbls2.String(): buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(20)), + finalExpectedChunks: map[string][]retention.Chunk{ + testCtx.lbls1.String(): chunkMetasToRetentionChunk( + testCtx.schemaCfg, testCtx.userID, testCtx.lbls1, + buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(12)), + ), + testCtx.lbls2.String(): chunkMetasToRetentionChunk( + testCtx.schemaCfg, testCtx.userID, testCtx.lbls2, + buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(20)), + ), }, }, "add and delete some chunks in a stream": { @@ -767,12 +803,24 @@ func TestCompactedIndex(t *testing.T) { Data: dummyChunkData{}, }, }, - deleteChunks: map[string]index.ChunkMetas{ - testCtx.lbls1.String(): buildChunkMetas(testCtx.shiftTableStart(3), testCtx.shiftTableStart(5)), + deleteChunks: map[string][]retention.Chunk{ + testCtx.lbls1.String(): chunkMetasToRetentionChunk( + testCtx.schemaCfg, testCtx.userID, testCtx.lbls1, + buildChunkMetas(testCtx.shiftTableStart(3), testCtx.shiftTableStart(5)), + ), }, - finalExpectedChunks: map[string]index.ChunkMetas{ - testCtx.lbls1.String(): append(buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(2)), buildChunkMetas(testCtx.shiftTableStart(6), testCtx.shiftTableStart(12))...), - testCtx.lbls2.String(): buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(20)), + finalExpectedChunks: map[string][]retention.Chunk{ + testCtx.lbls1.String(): chunkMetasToRetentionChunk( + testCtx.schemaCfg, testCtx.userID, testCtx.lbls1, + append( + buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(2)), + buildChunkMetas(testCtx.shiftTableStart(6), testCtx.shiftTableStart(12))..., + ), + ), + testCtx.lbls2.String(): chunkMetasToRetentionChunk( + testCtx.schemaCfg, testCtx.userID, testCtx.lbls2, + buildChunkMetas(testCtx.shiftTableStart(0), testCtx.shiftTableStart(20)), + ), }, }, "adding chunk to non-existing stream should error": { @@ -789,19 +837,17 @@ func TestCompactedIndex(t *testing.T) { t.Run(name, func(t *testing.T) { compactedIndex := testCtx.buildCompactedIndex() - foundChunkEntries := map[string][]retention.ChunkEntry{} - err := compactedIndex.ForEachChunk(context.Background(), func(chunkEntry retention.ChunkEntry) (deleteChunk bool, err error) { - seriesIDStr := string(chunkEntry.SeriesID) - foundChunkEntries[seriesIDStr] = append(foundChunkEntries[seriesIDStr], chunkEntry) - if chks, ok := tc.deleteChunks[string(chunkEntry.SeriesID)]; ok { + foundChunkEntries := map[string][]retention.Chunk{} + err := compactedIndex.ForEachSeries(context.Background(), func(series retention.Series) error { + seriesIDStr := string(series.SeriesID()) + foundChunkEntries[seriesIDStr] = append(foundChunkEntries[seriesIDStr], series.Chunks()...) 
+ if chks, ok := tc.deleteChunks[string(series.SeriesID())]; ok { for _, chk := range chks { - if chk.MinTime == int64(chunkEntry.From) && chk.MaxTime == int64(chunkEntry.Through) { - return true, nil - } + require.NoError(t, compactedIndex.RemoveChunk(chk.From, chk.Through, series.UserID(), series.Labels(), chk.ChunkID)) } } - return false, nil + return nil }) require.NoError(t, err) @@ -823,9 +869,9 @@ func TestCompactedIndex(t *testing.T) { } require.NoError(t, err) - foundChunks := map[string]index.ChunkMetas{} + foundChunks := map[string][]retention.Chunk{} err = indexFile.(*TSDBFile).Index.(*TSDBIndex).ForSeries(context.Background(), "", nil, 0, math.MaxInt64, func(lbls labels.Labels, _ model.Fingerprint, chks []index.ChunkMeta) (stop bool) { - foundChunks[lbls.String()] = append(index.ChunkMetas{}, chks...) + foundChunks[lbls.String()] = chunkMetasToRetentionChunk(testCtx.schemaCfg, testCtx.userID, lbls, chks) return false }, labels.MustNewMatcher(labels.MatchEqual, "", "")) require.NoError(t, err) @@ -843,11 +889,8 @@ func TestIteratorContextCancelation(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() - var foundChunkEntries []retention.ChunkEntry - err := compactedIndex.ForEachChunk(ctx, func(chunkEntry retention.ChunkEntry) (deleteChunk bool, err error) { - foundChunkEntries = append(foundChunkEntries, chunkEntry) - - return false, nil + err := compactedIndex.ForEachSeries(ctx, func(_ retention.Series) error { + return nil }) require.ErrorIs(t, err, context.Canceled) @@ -860,7 +903,8 @@ type testContext struct { tableInterval model.Interval shiftTableStart func(ms int64) int64 buildCompactedIndex func() *compactedIndex - expectedChunkEntries map[string][]retention.ChunkEntry + expectedChunkEntries map[string][]retention.Chunk + schemaCfg config.SchemaConfig } func setupCompactedIndex(t *testing.T) *testContext { @@ -903,12 +947,12 @@ func setupCompactedIndex(t *testing.T) *testContext { return newCompactedIndex(context.Background(), tableName.Prefix, buildUserID(0), t.TempDir(), periodConfig, builder) } - expectedChunkEntries := map[string][]retention.ChunkEntry{ - lbls1.String(): chunkMetasToChunkEntry(schemaCfg, userID, lbls1, buildChunkMetas(shiftTableStart(0), shiftTableStart(10))), - lbls2.String(): chunkMetasToChunkEntry(schemaCfg, userID, lbls2, buildChunkMetas(shiftTableStart(0), shiftTableStart(20))), + expectedChunkEntries := map[string][]retention.Chunk{ + lbls1.String(): chunkMetasToRetentionChunk(schemaCfg, userID, lbls1, buildChunkMetas(shiftTableStart(0), shiftTableStart(10))), + lbls2.String(): chunkMetasToRetentionChunk(schemaCfg, userID, lbls2, buildChunkMetas(shiftTableStart(0), shiftTableStart(20))), } - return &testContext{lbls1, lbls2, userID, tableInterval, shiftTableStart, buildCompactedIndex, expectedChunkEntries} + return &testContext{lbls1, lbls2, userID, tableInterval, shiftTableStart, buildCompactedIndex, expectedChunkEntries, schemaCfg} } type dummyChunkData struct { diff --git a/pkg/storage/stores/shipper/indexshipper/tsdb/index/chunk.go b/pkg/storage/stores/shipper/indexshipper/tsdb/index/chunk.go index 8094e19af0c5e..b33c019fb6ecd 100644 --- a/pkg/storage/stores/shipper/indexshipper/tsdb/index/chunk.go +++ b/pkg/storage/stores/shipper/indexshipper/tsdb/index/chunk.go @@ -151,7 +151,7 @@ func (c ChunkMetas) Drop(chk ChunkMeta) (ChunkMetas, bool) { return ichk.Checksum >= chk.Checksum }) - if j >= len(c) || c[j] != chk { + if j >= len(c) || c[j].Checksum != chk.Checksum || c[j].MinTime != chk.MinTime || c[j].MaxTime 
!= chk.MaxTime { return c, false } diff --git a/pkg/tool/audit/audit.go b/pkg/tool/audit/audit.go index 017a3a9e93b42..b155f169a10c6 100644 --- a/pkg/tool/audit/audit.go +++ b/pkg/tool/audit/audit.go @@ -101,20 +101,22 @@ func ValidateCompactedIndex(ctx context.Context, objClient client.ObjectClient, g, ctx := errgroup.WithContext(ctx) g.SetLimit(parallelism) - compactedIdx.ForEachChunk(ctx, func(ce retention.ChunkEntry) (deleteChunk bool, err error) { //nolint:errcheck + compactedIdx.ForEachSeries(ctx, func(s retention.Series) (err error) { //nolint:errcheck bar.Add(1) // nolint:errcheck g.Go(func() error { - exists, err := CheckChunkExistance(string(ce.ChunkID), objClient) - if err != nil || !exists { - missingChunks.Add(1) - logger.Log("msg", "chunk is missing", "err", err, "chunk_id", string(ce.ChunkID)) - return nil + for _, c := range s.Chunks() { + exists, err := CheckChunkExistance(string(c.ChunkID), objClient) + if err != nil || !exists { + missingChunks.Add(1) + logger.Log("msg", "chunk is missing", "err", err, "chunk_id", string(c.ChunkID)) + return nil + } + foundChunks.Add(1) } - foundChunks.Add(1) return nil }) - return false, nil + return nil }) g.Wait() // nolint:errcheck diff --git a/pkg/tool/audit/audit_test.go b/pkg/tool/audit/audit_test.go index 4e20b075be857..b8cfb689f5184 100644 --- a/pkg/tool/audit/audit_test.go +++ b/pkg/tool/audit/audit_test.go @@ -44,28 +44,25 @@ func (t testObjClient) GetAttributes(_ context.Context, object string) (client.O type testCompactedIdx struct { compactor.CompactedIndex - chunks []retention.ChunkEntry + chunks []retention.Chunk } -func (t testCompactedIdx) ForEachChunk(_ context.Context, f retention.ChunkEntryCallback) error { - for _, chunk := range t.chunks { - if _, err := f(chunk); err != nil { - return err - } - } - return nil +func (t testCompactedIdx) ForEachSeries(_ context.Context, f retention.SeriesCallback) error { + var series retention.Series + series.AppendChunks(t.chunks...) + return f(series) } func TestAuditIndex(t *testing.T) { ctx := context.Background() objClient := testObjClient{} compactedIdx := testCompactedIdx{ - chunks: []retention.ChunkEntry{ - {ChunkRef: retention.ChunkRef{ChunkID: []byte("found-1")}}, - {ChunkRef: retention.ChunkRef{ChunkID: []byte("found-2")}}, - {ChunkRef: retention.ChunkRef{ChunkID: []byte("found-3")}}, - {ChunkRef: retention.ChunkRef{ChunkID: []byte("found-4")}}, - {ChunkRef: retention.ChunkRef{ChunkID: []byte("missing-1")}}, + chunks: []retention.Chunk{ + {ChunkID: []byte("found-1")}, + {ChunkID: []byte("found-2")}, + {ChunkID: []byte("found-3")}, + {ChunkID: []byte("found-4")}, + {ChunkID: []byte("missing-1")}, }, } logger := log.NewNopLogger() diff --git a/tools/tsdb/tsdb-map/main.go b/tools/tsdb/tsdb-map/main.go index 9f35b53fe48c6..0d06908ad1622 100644 --- a/tools/tsdb/tsdb-map/main.go +++ b/tools/tsdb/tsdb-map/main.go @@ -78,15 +78,19 @@ func main() { // loads everything into memory. 
if err := db.View(func(t *bbolt.Tx) error { - return boltdbcompactor.ForEachChunk(context.Background(), t.Bucket([]byte("index")), periodConfig, func(entry retention.ChunkEntry) (bool, error) { - builder.AddSeries(entry.Labels, model.Fingerprint(entry.Labels.Hash()), []index.ChunkMeta{{ - Checksum: extractChecksumFromChunkID(entry.ChunkID), - MinTime: int64(entry.From), - MaxTime: int64(entry.Through), - KB: ((3 << 20) / 4) / 1024, // guess: 0.75mb, 1/2 of the max size, rounded to KB - Entries: 10000, // guess: 10k entries - }}) - return false, nil + return boltdbcompactor.ForEachSeries(context.Background(), t.Bucket([]byte("index")), periodConfig, func(s retention.Series) error { + chunkMetas := make([]index.ChunkMeta, 0, len(s.Chunks())) + for _, chunk := range s.Chunks() { + chunkMetas = append(chunkMetas, index.ChunkMeta{ + Checksum: extractChecksumFromChunkID(chunk.ChunkID), + MinTime: int64(chunk.From), + MaxTime: int64(chunk.Through), + KB: ((3 << 20) / 4) / 1024, // guess: 0.75mb, 1/2 of the max size, rounded to KB + Entries: 10000, // guess: 10k entries + }) + } + builder.AddSeries(s.Labels(), model.Fingerprint(s.Labels().Hash()), chunkMetas) + return nil }) }); err != nil { panic(err)
feat
store details of processed streams while processing delete requests (#16825)
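The diff above replaces the per-chunk `ForEachChunk`/`ChunkEntryCallback` contract with per-series iteration (`ForEachSeries`/`SeriesCallback`) plus an explicit `RemoveChunk` call, so a callback now receives one `retention.Series` carrying all of that series' chunks instead of deciding chunk-by-chunk via a returned bool. Below is a minimal sketch of the grouping loop that `iterator.go` gains; the types here are trimmed stand-ins (the upstream `Series`/`Chunk` carry labels and `model.Time` fields, and the real loop walks a bbolt cursor with context checks), not the actual definitions:

```go
package main

import (
	"bytes"
	"fmt"
)

// Trimmed stand-ins for retention.Chunk / retention.Series; the real
// types live in pkg/compactor/retention.
type Chunk struct{ ChunkID []byte }

type Series struct {
	seriesID, userID []byte
	chunks           []Chunk
}

func (s *Series) Reset(seriesID, userID []byte) {
	s.seriesID, s.userID, s.chunks = seriesID, userID, nil
}
func (s *Series) AppendChunks(c ...Chunk) { s.chunks = append(s.chunks, c...) }
func (s *Series) Chunks() []Chunk         { return s.chunks }

// One decoded index row: the boltdb index is sorted, so rows for the
// same (series, user) pair arrive contiguously.
type row struct{ userID, seriesID, chunkID []byte }

// forEachSeries mirrors the grouping added to ForEachSeries in
// iterator.go: a change in either key closes the current Series and
// fires the callback once per series; a trailing flush handles the
// last group.
func forEachSeries(rows []row, callback func(Series) error) error {
	var current Series
	for _, r := range rows {
		if len(current.Chunks()) == 0 {
			current.Reset(r.seriesID, r.userID)
		} else if !bytes.Equal(current.userID, r.userID) || !bytes.Equal(current.seriesID, r.seriesID) {
			if err := callback(current); err != nil {
				return err
			}
			current.Reset(r.seriesID, r.userID)
		}
		current.AppendChunks(Chunk{ChunkID: r.chunkID})
	}
	if len(current.Chunks()) != 0 {
		return callback(current)
	}
	return nil
}

func main() {
	rows := []row{
		{[]byte("t1"), []byte("s1"), []byte("c1")},
		{[]byte("t1"), []byte("s1"), []byte("c2")},
		{[]byte("t1"), []byte("s2"), []byte("c3")},
	}
	_ = forEachSeries(rows, func(s Series) error {
		fmt.Printf("series=%s chunks=%d\n", s.seriesID, len(s.Chunks()))
		return nil
	})
}
```

Grouping at the series level is what lets callers like the expiration checker skip an entire series up front (`CanSkipSeries`) before paying for any per-chunk expiry checks, which is exactly what the new `numExpectedExpiryChecks` assertions in the test table verify.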
527510d1a84a981250047dbabba8d492177b8452
2024-07-12 23:54:35
Robert Jacob
fix(operator): Remove duplicate conditions from status (#13497)
false
diff --git a/operator/internal/status/conditions.go b/operator/internal/status/conditions.go index 637a50e6f89f4..e23597c1253f7 100644 --- a/operator/internal/status/conditions.go +++ b/operator/internal/status/conditions.go @@ -2,36 +2,35 @@ package status import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +type conditionKey struct { + Type string + Reason string +} + func mergeConditions(old, active []metav1.Condition, now metav1.Time) []metav1.Condition { + conditions := map[conditionKey]bool{} merged := make([]metav1.Condition, 0, len(old)+len(active)) - for len(old) > 0 { - c := old[0] - found := -1 - for i, ac := range active { - if c.Type == ac.Type && c.Reason == ac.Reason { - found = i - break - } - } + for _, c := range active { + c.Status = metav1.ConditionTrue + c.LastTransitionTime = now + + merged = append(merged, c) + conditions[conditionKey{Type: c.Type, Reason: c.Reason}] = true + } - if found != -1 { - c = active[found] - active = append(active[:found], active[found+1:]...) + for _, c := range old { + if conditions[conditionKey{c.Type, c.Reason}] { + continue + } - c.Status = metav1.ConditionTrue - } else { + if c.Status != metav1.ConditionFalse { + c.LastTransitionTime = now c.Status = metav1.ConditionFalse } - c.LastTransitionTime = now merged = append(merged, c) - old = old[1:] + conditions[conditionKey{c.Type, c.Reason}] = true } - for _, c := range active { - c.Status = metav1.ConditionTrue - c.LastTransitionTime = now - merged = append(merged, c) - } return merged } diff --git a/operator/internal/status/conditions_test.go b/operator/internal/status/conditions_test.go index 3d85942df8753..a3fd76dff5cf6 100644 --- a/operator/internal/status/conditions_test.go +++ b/operator/internal/status/conditions_test.go @@ -11,7 +11,8 @@ import ( ) func TestMergeConditions(t *testing.T) { - now := metav1.NewTime(time.Unix(0, 0)) + oldTime := metav1.NewTime(time.Unix(0, 0)) + now := metav1.NewTime(time.Unix(10, 0)) tt := []struct { desc string old []metav1.Condition @@ -37,12 +38,25 @@ func TestMergeConditions(t *testing.T) { { desc: "reset old condition", old: []metav1.Condition{ - conditionPending, + { + Type: conditionPending.Type, + Status: metav1.ConditionTrue, + LastTransitionTime: oldTime, + Reason: conditionPending.Reason, + Message: conditionPending.Message, + }, }, active: []metav1.Condition{ conditionReady, }, wantMerged: []metav1.Condition{ + { + Type: conditionReady.Type, + Status: metav1.ConditionTrue, + LastTransitionTime: now, + Reason: conditionReady.Reason, + Message: conditionReady.Message, + }, { Type: conditionPending.Type, Status: metav1.ConditionFalse, @@ -50,12 +64,27 @@ func TestMergeConditions(t *testing.T) { Reason: conditionPending.Reason, Message: conditionPending.Message, }, + }, + }, + { + desc: "keep transition time of old condition", + old: []metav1.Condition{ { - Type: conditionReady.Type, - Status: metav1.ConditionTrue, - LastTransitionTime: now, - Reason: conditionReady.Reason, - Message: conditionReady.Message, + Type: conditionPending.Type, + Status: metav1.ConditionFalse, + LastTransitionTime: oldTime, + Reason: conditionPending.Reason, + Message: conditionPending.Message, + }, + }, + active: []metav1.Condition{}, + wantMerged: []metav1.Condition{ + { + Type: conditionPending.Type, + Status: metav1.ConditionFalse, + LastTransitionTime: oldTime, + Reason: conditionPending.Reason, + Message: conditionPending.Message, }, }, }, @@ -72,7 +101,7 @@ func TestMergeConditions(t *testing.T) { { Type: conditionPending.Type, Status: 
metav1.ConditionFalse, - LastTransitionTime: now, + LastTransitionTime: oldTime, Reason: conditionPending.Reason, Message: conditionPending.Message, }, @@ -93,13 +122,56 @@ func TestMergeConditions(t *testing.T) { Reason: conditionReady.Reason, Message: conditionReady.Message, }, + { + Type: string(lokiv1.ConditionWarning), + Status: metav1.ConditionTrue, + LastTransitionTime: now, + Reason: "test-warning", + Message: "test-warning-message", + }, { Type: conditionPending.Type, Status: metav1.ConditionFalse, - LastTransitionTime: now, + LastTransitionTime: oldTime, Reason: conditionPending.Reason, Message: conditionPending.Message, }, + }, + }, + { + desc: "remove duplicates", + old: []metav1.Condition{ + { + Type: conditionReady.Type, + Status: metav1.ConditionTrue, + LastTransitionTime: now, + Reason: conditionReady.Reason, + Message: conditionReady.Message, + }, + { + Type: conditionReady.Type, + Status: metav1.ConditionTrue, + LastTransitionTime: now, + Reason: conditionReady.Reason, + Message: conditionReady.Message, + }, + }, + active: []metav1.Condition{ + conditionReady, + { + Type: string(lokiv1.ConditionWarning), + Reason: "test-warning", + Message: "test-warning-message", + }, + }, + wantMerged: []metav1.Condition{ + { + Type: conditionReady.Type, + Status: metav1.ConditionTrue, + LastTransitionTime: now, + Reason: conditionReady.Reason, + Message: conditionReady.Message, + }, { Type: string(lokiv1.ConditionWarning), Status: metav1.ConditionTrue,
fix
Remove duplicate conditions from status (#13497)
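The `mergeConditions` rewrite above drops the nested scan over `old`/`active` in favor of a single pass per slice with a seen-set keyed on `(Type, Reason)`; the seen-set is also what removes duplicate conditions. A compact sketch of the same approach, using local stand-ins for `metav1.Condition` and `metav1.Time` so it compiles without the apimachinery dependency:

```go
package main

import (
	"fmt"
	"time"
)

// Local stand-in for metav1.Condition; status is a plain string here.
type Condition struct {
	Type, Reason, Status string
	LastTransitionTime   time.Time
}

type conditionKey struct{ Type, Reason string }

// mergeConditions mirrors the diff: active conditions win and are
// marked true with a fresh transition time; old conditions are appended
// at most once, flipped to false if needed, and duplicates are dropped
// via the key map.
func mergeConditions(old, active []Condition, now time.Time) []Condition {
	seen := map[conditionKey]bool{}
	merged := make([]Condition, 0, len(old)+len(active))

	for _, c := range active {
		c.Status, c.LastTransitionTime = "True", now
		merged = append(merged, c)
		seen[conditionKey{c.Type, c.Reason}] = true
	}
	for _, c := range old {
		if seen[conditionKey{c.Type, c.Reason}] {
			continue // duplicate of an active (or earlier old) condition
		}
		if c.Status != "False" {
			// Only a condition being flipped gets a new transition time.
			c.Status, c.LastTransitionTime = "False", now
		}
		merged = append(merged, c)
		seen[conditionKey{c.Type, c.Reason}] = true
	}
	return merged
}

func main() {
	old := []Condition{{Type: "Pending", Reason: "PendingComponents", Status: "True"}}
	res := mergeConditions(old, []Condition{{Type: "Ready", Reason: "ReadyComponents"}}, time.Now())
	fmt.Println(len(res)) // 2: Ready=True first, then Pending flipped to False
}
```

Note the subtlety the new test cases pin down: a condition that is already `False` keeps its old `LastTransitionTime`, while one being flipped from `True` gets bumped to `now`.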
f9350d6415d45c3cc2f9c0b4f7cd6f8f219907f2
2024-04-05 20:16:16
Bayan Taani
fix(operator): Improve validation of provided S3 storage configuration (#12181)
false
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md index a6f7cae8bab2d..ef3d8ccc0cdb8 100644 --- a/operator/CHANGELOG.md +++ b/operator/CHANGELOG.md @@ -1,5 +1,6 @@ ## Main +- [12181](https://github.com/grafana/loki/pull/12181) **btaani**: Improve validation of provided S3 storage configuration - [12370](https://github.com/grafana/loki/pull/12370) **periklis**: Update Loki operand to v2.9.6 - [12333](https://github.com/grafana/loki/pull/12333) **periklis**: Bump max OpenShift version to next release diff --git a/operator/internal/handlers/internal/storage/secrets.go b/operator/internal/handlers/internal/storage/secrets.go index 570415236a404..7188216311dff 100644 --- a/operator/internal/handlers/internal/storage/secrets.go +++ b/operator/internal/handlers/internal/storage/secrets.go @@ -9,7 +9,9 @@ import ( "errors" "fmt" "io" + "net/url" "sort" + "strings" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -38,6 +40,11 @@ var ( errAzureInvalidEnvironment = errors.New("azure environment invalid (valid values: AzureGlobal, AzureChinaCloud, AzureGermanCloud, AzureUSGovernment)") errAzureInvalidAccountKey = errors.New("azure account key is not valid base64") + errS3EndpointUnparseable = errors.New("can not parse S3 endpoint as URL") + errS3EndpointNoURL = errors.New("endpoint for S3 must be an HTTP or HTTPS URL") + errS3EndpointUnsupportedScheme = errors.New("scheme of S3 endpoint URL is unsupported") + errS3EndpointAWSInvalid = errors.New("endpoint for AWS S3 must include correct region") + errGCPParseCredentialsFile = errors.New("gcp storage secret cannot be parsed from JSON content") errGCPWrongCredentialSourceFile = errors.New("credential source in secret needs to point to token file") @@ -49,7 +56,10 @@ var ( } ) -const gcpAccountTypeExternal = "external_account" +const ( + awsEndpointSuffix = ".amazonaws.com" + gcpAccountTypeExternal = "external_account" +) func getSecrets(ctx context.Context, k k8s.Client, stack *lokiv1.LokiStack, fg configv1.FeatureGates) (*corev1.Secret, *corev1.Secret, error) { var ( @@ -416,17 +426,17 @@ func extractS3ConfigSecret(s *corev1.Secret, credentialMode lokiv1.CredentialMod if len(roleArn) != 0 { return nil, fmt.Errorf("%w: %s", errSecretFieldNotAllowed, storage.KeyAWSRoleArn) } + // In the STS case region is not an optional field if len(region) == 0 { return nil, fmt.Errorf("%w: %s", errSecretMissingField, storage.KeyAWSRegion) } - return cfg, nil case lokiv1.CredentialModeStatic: cfg.Endpoint = string(endpoint) - if len(endpoint) == 0 { - return nil, fmt.Errorf("%w: %s", errSecretMissingField, storage.KeyAWSEndpoint) + if err := validateS3Endpoint(string(endpoint), string(region)); err != nil { + return nil, err } if len(id) == 0 { return nil, fmt.Errorf("%w: %s", errSecretMissingField, storage.KeyAWSAccessKeyID) @@ -450,6 +460,38 @@ func extractS3ConfigSecret(s *corev1.Secret, credentialMode lokiv1.CredentialMod } } +func validateS3Endpoint(endpoint string, region string) error { + if len(endpoint) == 0 { + return fmt.Errorf("%w: %s", errSecretMissingField, storage.KeyAWSEndpoint) + } + + parsedURL, err := url.Parse(endpoint) + if err != nil { + return fmt.Errorf("%w: %w", errS3EndpointUnparseable, err) + } + + if parsedURL.Scheme == "" { + // Assume "just a hostname" when scheme is empty and produce a clearer error message + return errS3EndpointNoURL + } + + if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" { + return fmt.Errorf("%w: %s", errS3EndpointUnsupportedScheme, parsedURL.Scheme) + } + + if 
strings.HasSuffix(endpoint, awsEndpointSuffix) { + if len(region) == 0 { + return fmt.Errorf("%w: %s", errSecretMissingField, storage.KeyAWSRegion) + } + + validEndpoint := fmt.Sprintf("https://s3.%s%s", region, awsEndpointSuffix) + if endpoint != validEndpoint { + return fmt.Errorf("%w: %s", errS3EndpointAWSInvalid, validEndpoint) + } + } + return nil +} + func extractS3SSEConfig(d map[string][]byte) (storage.S3SSEConfig, error) { var ( sseType storage.S3SSEType diff --git a/operator/internal/handlers/internal/storage/secrets_test.go b/operator/internal/handlers/internal/storage/secrets_test.go index 80b8b48dd2418..a85e2f6911d6e 100644 --- a/operator/internal/handlers/internal/storage/secrets_test.go +++ b/operator/internal/handlers/internal/storage/secrets_test.go @@ -392,7 +392,8 @@ func TestS3Extract(t *testing.T) { name: "missing access_key_id", secret: &corev1.Secret{ Data: map[string][]byte{ - "endpoint": []byte("here"), + "endpoint": []byte("https://s3.test-region.amazonaws.com"), + "region": []byte("test-region"), "bucketnames": []byte("this,that"), }, }, @@ -402,7 +403,8 @@ func TestS3Extract(t *testing.T) { name: "missing access_key_secret", secret: &corev1.Secret{ Data: map[string][]byte{ - "endpoint": []byte("here"), + "endpoint": []byte("https://s3.test-region.amazonaws.com"), + "region": []byte("test-region"), "bucketnames": []byte("this,that"), "access_key_id": []byte("id"), }, @@ -413,7 +415,7 @@ func TestS3Extract(t *testing.T) { name: "unsupported SSE type", secret: &corev1.Secret{ Data: map[string][]byte{ - "endpoint": []byte("here"), + "endpoint": []byte("https://s3.REGION.amazonaws.com"), "bucketnames": []byte("this,that"), "access_key_id": []byte("id"), "access_key_secret": []byte("secret"), @@ -426,7 +428,8 @@ func TestS3Extract(t *testing.T) { name: "missing SSE-KMS kms_key_id", secret: &corev1.Secret{ Data: map[string][]byte{ - "endpoint": []byte("here"), + "endpoint": []byte("https://s3.test-region.amazonaws.com"), + "region": []byte("test-region"), "bucketnames": []byte("this,that"), "access_key_id": []byte("id"), "access_key_secret": []byte("secret"), @@ -441,7 +444,8 @@ func TestS3Extract(t *testing.T) { secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{Name: "test"}, Data: map[string][]byte{ - "endpoint": []byte("here"), + "endpoint": []byte("https://s3.test-region.amazonaws.com"), + "region": []byte("test-region"), "bucketnames": []byte("this,that"), "access_key_id": []byte("id"), "access_key_secret": []byte("secret"), @@ -456,7 +460,8 @@ func TestS3Extract(t *testing.T) { secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{Name: "test"}, Data: map[string][]byte{ - "endpoint": []byte("here"), + "endpoint": []byte("https://s3.test-region.amazonaws.com"), + "region": []byte("test-region"), "bucketnames": []byte("this,that"), "access_key_id": []byte("id"), "access_key_secret": []byte("secret"), @@ -472,7 +477,8 @@ func TestS3Extract(t *testing.T) { secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{Name: "test"}, Data: map[string][]byte{ - "endpoint": []byte("here"), + "endpoint": []byte("https://s3.test-region.amazonaws.com"), + "region": []byte("test-region"), "bucketnames": []byte("this,that"), "access_key_id": []byte("id"), "access_key_secret": []byte("secret"), @@ -486,7 +492,8 @@ func TestS3Extract(t *testing.T) { secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{Name: "test"}, Data: map[string][]byte{ - "endpoint": []byte("here"), + "endpoint": []byte("https://s3.test-region.amazonaws.com"), + "region": []byte("test-region"), "bucketnames": 
[]byte("this,that"), "access_key_id": []byte("id"), "access_key_secret": []byte("secret"), @@ -530,6 +537,62 @@ func TestS3Extract(t *testing.T) { }, wantCredentialMode: lokiv1.CredentialModeToken, }, + { + name: "endpoint is just hostname", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Data: map[string][]byte{ + "endpoint": []byte("hostname.example.com"), + "region": []byte("region"), + "bucketnames": []byte("this,that"), + "access_key_id": []byte("id"), + "access_key_secret": []byte("secret"), + }, + }, + wantError: "endpoint for S3 must be an HTTP or HTTPS URL", + }, + { + name: "endpoint unsupported scheme", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Data: map[string][]byte{ + "endpoint": []byte("invalid://hostname"), + "region": []byte("region"), + "bucketnames": []byte("this,that"), + "access_key_id": []byte("id"), + "access_key_secret": []byte("secret"), + }, + }, + wantError: "scheme of S3 endpoint URL is unsupported: invalid", + }, + { + name: "s3 region used in endpoint URL is incorrect", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Data: map[string][]byte{ + "endpoint": []byte("https://s3.wrong.amazonaws.com"), + "region": []byte("region"), + "bucketnames": []byte("this,that"), + "access_key_id": []byte("id"), + "access_key_secret": []byte("secret"), + }, + }, + wantError: "endpoint for AWS S3 must include correct region: https://s3.region.amazonaws.com", + }, + { + name: "s3 endpoint format is not a valid s3 URL", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Data: map[string][]byte{ + "endpoint": []byte("http://region.amazonaws.com"), + "region": []byte("region"), + "bucketnames": []byte("this,that"), + "access_key_id": []byte("id"), + "access_key_secret": []byte("secret"), + }, + }, + wantError: "endpoint for AWS S3 must include correct region: https://s3.region.amazonaws.com", + }, } for _, tst := range table { tst := tst diff --git a/operator/internal/handlers/internal/storage/storage_test.go b/operator/internal/handlers/internal/storage/storage_test.go index 3e805c60a5be2..857d0be4de4a3 100644 --- a/operator/internal/handlers/internal/storage/storage_test.go +++ b/operator/internal/handlers/internal/storage/storage_test.go @@ -39,7 +39,7 @@ var ( Namespace: "some-ns", }, Data: map[string][]byte{ - "endpoint": []byte("s3://your-endpoint"), + "endpoint": []byte("https://s3.a-region.amazonaws.com"), "region": []byte("a-region"), "bucketnames": []byte("bucket1,bucket2"), "access_key_id": []byte("a-secret-id"), diff --git a/operator/internal/handlers/lokistack_create_or_update_test.go b/operator/internal/handlers/lokistack_create_or_update_test.go index bef5ffc9efb70..c7677e49a05a4 100644 --- a/operator/internal/handlers/lokistack_create_or_update_test.go +++ b/operator/internal/handlers/lokistack_create_or_update_test.go @@ -53,7 +53,7 @@ var ( Namespace: "some-ns", }, Data: map[string][]byte{ - "endpoint": []byte("s3://your-endpoint"), + "endpoint": []byte("https://s3.a-region.amazonaws.com"), "region": []byte("a-region"), "bucketnames": []byte("bucket1,bucket2"), "access_key_id": []byte("a-secret-id"),
fix
Improve validation of provided S3 storage configuration (#12181)
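The hunk above shows only the tail of the operator's new endpoint check, so here is a minimal, self-contained sketch of the rule the added test cases exercise: the endpoint must parse as an http(s) URL, and an AWS-hosted endpoint must match the canonical https://s3.<region>.amazonaws.com form. The helper name validateEndpoint and the error variables are illustrative assumptions; only the error wording is lifted from the wantError strings in the tests.

package main

import (
	"errors"
	"fmt"
	"net/url"
	"strings"
)

const awsEndpointSuffix = ".amazonaws.com"

var (
	errEndpointNotURL       = errors.New("endpoint for S3 must be an HTTP or HTTPS URL")
	errSchemeUnsupported    = errors.New("scheme of S3 endpoint URL is unsupported")
	errRegionMissing        = errors.New("missing secret field: region")
	errS3EndpointAWSInvalid = errors.New("endpoint for AWS S3 must include correct region")
)

// validateEndpoint applies the three checks in order: URL shape, scheme,
// and (for *.amazonaws.com hosts) the region-qualified canonical form.
func validateEndpoint(endpoint, region string) error {
	u, err := url.Parse(endpoint)
	if err != nil || u.Host == "" {
		// a bare hostname parses as a path, so it is rejected here
		return errEndpointNotURL
	}
	switch u.Scheme {
	case "http", "https":
	default:
		return fmt.Errorf("%w: %s", errSchemeUnsupported, u.Scheme)
	}
	if strings.HasSuffix(endpoint, awsEndpointSuffix) {
		if region == "" {
			return errRegionMissing
		}
		valid := fmt.Sprintf("https://s3.%s%s", region, awsEndpointSuffix)
		if endpoint != valid {
			return fmt.Errorf("%w: %s", errS3EndpointAWSInvalid, valid)
		}
	}
	return nil
}

func main() {
	// passes the scheme check but fails the canonical AWS form, matching the
	// "s3 endpoint format is not a valid s3 URL" test case above
	fmt.Println(validateEndpoint("http://region.amazonaws.com", "region"))
	fmt.Println(validateEndpoint("https://s3.region.amazonaws.com", "region")) // <nil>
}

Note the ordering: http://region.amazonaws.com clears the scheme check and only fails the region match, which is why the test expects the "must include correct region" error rather than the scheme error.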
69089eff41a6dda2d4ab22aa5524bcfbbc44b00d
2025-02-17 22:24:08
Dylan Guedes
feat: Add support for blocking a policy to be ingested (#16203)
false
diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md index e065282ea722c..d75d6a4307ae8 100644 --- a/docs/sources/shared/configuration.md +++ b/docs/sources/shared/configuration.md @@ -3641,6 +3641,11 @@ otlp_config: # drop them altogether [log_attributes: <list of attributes_configs>] +# Block ingestion for policy until the configured date. The time should be in +# RFC3339 format. The policy is based on the policy_stream_mapping +# configuration. +[block_ingestion_policy_until: <map of string to Time>] + # Block ingestion until the configured date. The time should be in RFC3339 # format. # CLI flag: -limits.block-ingestion-until diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index d48625843ad35..27a963424fe45 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -550,9 +550,24 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log err := fmt.Errorf(validation.MissingEnforcedLabelsErrorMsg, strings.Join(lbsMissing, ","), tenantID) d.writeFailuresManager.Log(tenantID, err) validationErrors.Add(err) - validation.DiscardedSamples.WithLabelValues(validation.MissingEnforcedLabels, tenantID, retentionHours, policy).Add(float64(len(stream.Entries))) discardedBytes := util.EntriesTotalSize(stream.Entries) - validation.DiscardedBytes.WithLabelValues(validation.MissingEnforcedLabels, tenantID, retentionHours, policy).Add(float64(discardedBytes)) + d.validator.reportDiscardedData(validation.MissingEnforcedLabels, validationContext, retentionHours, policy, discardedBytes, len(stream.Entries)) + continue + } + + if block, statusCode, reason, err := d.validator.ShouldBlockIngestion(validationContext, now, policy); block { + d.writeFailuresManager.Log(tenantID, err) + discardedBytes := util.EntriesTotalSize(stream.Entries) + d.validator.reportDiscardedData(reason, validationContext, retentionHours, policy, discardedBytes, len(stream.Entries)) + + // If the status code is 200, return success. + // Note that we still log the error and increment the metrics. + if statusCode == http.StatusOK { + // do not add error to validationErrors. + continue + } + + validationErrors.Add(err) continue } @@ -639,21 +654,6 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log return &logproto.PushResponse{}, validationErr } - if block, until, retStatusCode := d.validator.ShouldBlockIngestion(validationContext, now); block { - d.trackDiscardedData(ctx, req, validationContext, tenantID, validationContext.validationMetrics, validation.BlockedIngestion) - - err = fmt.Errorf(validation.BlockedIngestionErrorMsg, tenantID, until.Format(time.RFC3339), retStatusCode) - d.writeFailuresManager.Log(tenantID, err) - - // If the status code is 200, return success. - // Note that we still log the error and increment the metrics. 
- if retStatusCode == http.StatusOK { - return &logproto.PushResponse{}, nil - } - - return nil, httpgrpc.Errorf(retStatusCode, "%s", err.Error()) - } - if !d.ingestionRateLimiter.AllowN(now, tenantID, validationContext.validationMetrics.aggregatedPushStats.lineSize) { d.trackDiscardedData(ctx, req, validationContext, tenantID, validationContext.validationMetrics, validation.RateLimited) diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 12fe12dbfa455..331fe687249fe 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -17,11 +17,10 @@ import ( otlptranslate "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" - "github.com/grafana/loki/pkg/push" - "github.com/c2h5oh/datasize" "github.com/go-kit/log" "github.com/grafana/dskit/flagext" + dskit_flagext "github.com/grafana/dskit/flagext" "github.com/grafana/dskit/httpgrpc" "github.com/grafana/dskit/kv" "github.com/grafana/dskit/kv/consul" @@ -51,6 +50,8 @@ import ( loki_net "github.com/grafana/loki/v3/pkg/util/net" "github.com/grafana/loki/v3/pkg/util/test" "github.com/grafana/loki/v3/pkg/validation" + + "github.com/grafana/loki/pkg/push" ) const ( @@ -441,6 +442,7 @@ func Test_MissingEnforcedLabels(t *testing.T) { // request with all required labels. lbs := labels.FromMap(map[string]string{"app": "foo", "env": "prod", "cluster": "cluster1", "namespace": "ns1"}) missing, missingLabels := distributors[0].missingEnforcedLabels(lbs, "test", "policy1") + assert.False(t, missing) assert.Empty(t, missingLabels) @@ -462,25 +464,42 @@ func Test_PushWithEnforcedLabels(t *testing.T) { flagext.DefaultValues(limits) // makeWriteRequest only contains a `{foo="bar"}` label. - req := makeWriteRequest(100, 100) + req := makeWriteRequest(100, 100) // 100 lines of 100 bytes each limits.EnforcedLabels = []string{"app", "env"} distributors, _ := prepare(t, 1, 3, limits, nil) + + // reset metrics in case they were set from a previous test. + validation.DiscardedBytes.Reset() + validation.DiscardedSamples.Reset() + // enforced labels configured, but all labels are missing. _, err := distributors[0].Push(ctx, req) require.Error(t, err) expectedErr := httpgrpc.Errorf(http.StatusBadRequest, validation.MissingEnforcedLabelsErrorMsg, "app,env", "test") require.EqualError(t, err, expectedErr.Error()) + // Verify metrics for discarded samples due to missing enforced labels + assert.Equal(t, float64(10000), testutil.ToFloat64(validation.DiscardedBytes)) // 100 lines * 100 bytes + assert.Equal(t, float64(100), testutil.ToFloat64(validation.DiscardedSamples)) // 100 lines + // enforced labels, but all labels are present. req = makeWriteRequestWithLabels(100, 100, []string{`{app="foo", env="prod"}`}, false, false, false) _, err = distributors[0].Push(ctx, req) require.NoError(t, err) + // Metrics should not have increased since this push was successful + assert.Equal(t, float64(10000), testutil.ToFloat64(validation.DiscardedBytes)) + assert.Equal(t, float64(100), testutil.ToFloat64(validation.DiscardedSamples)) + // no enforced labels, so no errors. 
limits.EnforcedLabels = []string{} distributors, _ = prepare(t, 1, 3, limits, nil) _, err = distributors[0].Push(ctx, req) require.NoError(t, err) + + // Metrics should remain unchanged + assert.Equal(t, float64(10000), testutil.ToFloat64(validation.DiscardedBytes)) + assert.Equal(t, float64(100), testutil.ToFloat64(validation.DiscardedSamples)) } func TestDistributorPushConcurrently(t *testing.T) { @@ -1672,7 +1691,105 @@ func TestDistributor_PushIngestionBlocked(t *testing.T) { if tc.expectError { expectedErr := fmt.Sprintf(validation.BlockedIngestionErrorMsg, "test", tc.blockUntil.Format(time.RFC3339), tc.blockStatusCode) require.ErrorContains(t, err, expectedErr) - require.Nil(t, response) + } else { + require.NoError(t, err) + require.Equal(t, success, response) + } + }) + } +} + +func TestDistributor_PushIngestionBlockedByPolicy(t *testing.T) { + now := time.Now() + defaultErrCode := 260 + + for _, tc := range []struct { + name string + blockUntil map[string]time.Time + policy string + labels string + expectError bool + expectedErrorMsg string + }{ + { + name: "not blocked - no policy block configured", + policy: "test-policy", + labels: `{foo="bar"}`, + expectError: false, + }, + { + name: "not blocked - policy block expired", + blockUntil: map[string]time.Time{ + "test-policy": now.Add(-1 * time.Hour), + }, + policy: "test-policy", + labels: `{foo="bar"}`, + expectError: false, + }, + { + name: "blocked - policy block active", + blockUntil: map[string]time.Time{ + "test-policy": now.Add(1 * time.Hour), + }, + policy: "test-policy", + labels: `{foo="bar"}`, + expectError: true, + expectedErrorMsg: fmt.Sprintf(validation.BlockedIngestionPolicyErrorMsg, "test", now.Add(1*time.Hour).Format(time.RFC3339), defaultErrCode), + }, + { + name: "not blocked - different policy", + blockUntil: map[string]time.Time{ + "blocked-policy": now.Add(1 * time.Hour), + }, + policy: "test-policy", + labels: `{foo="bar"}`, + expectError: false, + }, + { + name: "blocked - custom status code", + blockUntil: map[string]time.Time{ + "test-policy": now.Add(1 * time.Hour), + }, + policy: "test-policy", + labels: `{foo="bar"}`, + expectError: true, + expectedErrorMsg: fmt.Sprintf(validation.BlockedIngestionPolicyErrorMsg, "test", now.Add(1*time.Hour).Format(time.RFC3339), defaultErrCode), + }, + } { + t.Run(tc.name, func(t *testing.T) { + limits := &validation.Limits{} + flagext.DefaultValues(limits) + + // Configure policy mapping + limits.PolicyStreamMapping = validation.PolicyStreamMapping{ + tc.policy: []*validation.PriorityStream{ + { + Selector: tc.labels, + Priority: 1, + }, + }, + } + + // Configure policy blocks + if tc.blockUntil != nil { + limits.BlockIngestionPolicyUntil = make(map[string]dskit_flagext.Time) + for policy, until := range tc.blockUntil { + limits.BlockIngestionPolicyUntil[policy] = dskit_flagext.Time(until) + } + } + + distributors, _ := prepare(t, 1, 3, limits, nil) + request := makeWriteRequestWithLabels(1, 1024, []string{tc.labels}, false, false, false) + response, err := distributors[0].Push(ctx, request) + + if tc.expectError { + require.Error(t, err) + require.Contains(t, err.Error(), tc.expectedErrorMsg) } else { require.NoError(t, err) require.Equal(t, success, response) diff --git a/pkg/distributor/limits.go index 90a35ce0d1e25..127aa97b80b2f 100644 --- a/pkg/distributor/limits.go +++ b/pkg/distributor/limits.go @@ -39,6 +39,7 @@ type Limits interface { BlockIngestionUntil(userID string) time.Time
BlockIngestionStatusCode(userID string) int + BlockIngestionPolicyUntil(userID string, policy string) time.Time EnforcedLabels(userID string) []string PolicyEnforcedLabels(userID string, policy string) []string diff --git a/pkg/distributor/validator.go b/pkg/distributor/validator.go index 4f99feffb9ae5..083473a5a9b4c 100644 --- a/pkg/distributor/validator.go +++ b/pkg/distributor/validator.go @@ -104,21 +104,13 @@ func (v Validator) ValidateEntry(ctx context.Context, vCtx validationContext, la // Makes time string on the error message formatted consistently. formatedEntryTime := entry.Timestamp.Format(timeFormat) formatedRejectMaxAgeTime := time.Unix(0, vCtx.rejectOldSampleMaxAge).Format(timeFormat) - validation.DiscardedSamples.WithLabelValues(validation.GreaterThanMaxSampleAge, vCtx.userID, retentionHours, policy).Inc() - validation.DiscardedBytes.WithLabelValues(validation.GreaterThanMaxSampleAge, vCtx.userID, retentionHours, policy).Add(entrySize) - if v.usageTracker != nil { - v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.GreaterThanMaxSampleAge, labels, entrySize) - } + v.reportDiscardedDataWithTracker(ctx, validation.GreaterThanMaxSampleAge, vCtx, labels, retentionHours, policy, int(entrySize), 1) return fmt.Errorf(validation.GreaterThanMaxSampleAgeErrorMsg, labels, formatedEntryTime, formatedRejectMaxAgeTime) } if ts > vCtx.creationGracePeriod { formatedEntryTime := entry.Timestamp.Format(timeFormat) - validation.DiscardedSamples.WithLabelValues(validation.TooFarInFuture, vCtx.userID, retentionHours, policy).Inc() - validation.DiscardedBytes.WithLabelValues(validation.TooFarInFuture, vCtx.userID, retentionHours, policy).Add(entrySize) - if v.usageTracker != nil { - v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.TooFarInFuture, labels, entrySize) - } + v.reportDiscardedDataWithTracker(ctx, validation.TooFarInFuture, vCtx, labels, retentionHours, policy, int(entrySize), 1) return fmt.Errorf(validation.TooFarInFutureErrorMsg, labels, formatedEntryTime) } @@ -127,39 +119,23 @@ func (v Validator) ValidateEntry(ctx context.Context, vCtx validationContext, la // an orthogonal concept (we need not use ValidateLabels in this context) // but the upstream cortex_validation pkg uses it, so we keep this // for parity. 
- validation.DiscardedSamples.WithLabelValues(validation.LineTooLong, vCtx.userID, retentionHours, policy).Inc() - validation.DiscardedBytes.WithLabelValues(validation.LineTooLong, vCtx.userID, retentionHours, policy).Add(entrySize) - if v.usageTracker != nil { - v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.LineTooLong, labels, entrySize) - } + v.reportDiscardedDataWithTracker(ctx, validation.LineTooLong, vCtx, labels, retentionHours, policy, int(entrySize), 1) return fmt.Errorf(validation.LineTooLongErrorMsg, maxSize, labels, len(entry.Line)) } if structuredMetadataCount > 0 { if !vCtx.allowStructuredMetadata { - validation.DiscardedSamples.WithLabelValues(validation.DisallowedStructuredMetadata, vCtx.userID, retentionHours, policy).Inc() - validation.DiscardedBytes.WithLabelValues(validation.DisallowedStructuredMetadata, vCtx.userID, retentionHours, policy).Add(entrySize) - if v.usageTracker != nil { - v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.DisallowedStructuredMetadata, labels, entrySize) - } + v.reportDiscardedDataWithTracker(ctx, validation.DisallowedStructuredMetadata, vCtx, labels, retentionHours, policy, int(entrySize), 1) return fmt.Errorf(validation.DisallowedStructuredMetadataErrorMsg, labels) } if maxSize := vCtx.maxStructuredMetadataSize; maxSize != 0 && structuredMetadataSizeBytes > maxSize { - validation.DiscardedSamples.WithLabelValues(validation.StructuredMetadataTooLarge, vCtx.userID, retentionHours, policy).Inc() - validation.DiscardedBytes.WithLabelValues(validation.StructuredMetadataTooLarge, vCtx.userID, retentionHours, policy).Add(entrySize) - if v.usageTracker != nil { - v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.StructuredMetadataTooLarge, labels, entrySize) - } + v.reportDiscardedDataWithTracker(ctx, validation.StructuredMetadataTooLarge, vCtx, labels, retentionHours, policy, int(entrySize), 1) return fmt.Errorf(validation.StructuredMetadataTooLargeErrorMsg, labels, structuredMetadataSizeBytes, vCtx.maxStructuredMetadataSize) } if maxCount := vCtx.maxStructuredMetadataCount; maxCount != 0 && structuredMetadataCount > maxCount { - validation.DiscardedSamples.WithLabelValues(validation.StructuredMetadataTooMany, vCtx.userID, retentionHours, policy).Inc() - validation.DiscardedBytes.WithLabelValues(validation.StructuredMetadataTooMany, vCtx.userID, retentionHours, policy).Add(entrySize) - if v.usageTracker != nil { - v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.StructuredMetadataTooMany, labels, entrySize) - } + v.reportDiscardedDataWithTracker(ctx, validation.StructuredMetadataTooMany, vCtx, labels, retentionHours, policy, int(entrySize), 1) return fmt.Errorf(validation.StructuredMetadataTooManyErrorMsg, labels, structuredMetadataCount, vCtx.maxStructuredMetadataCount) } } @@ -168,9 +144,10 @@ func (v Validator) ValidateEntry(ctx context.Context, vCtx validationContext, la } // Validate labels returns an error if the labels are invalid -func (v Validator) ValidateLabels(ctx validationContext, ls labels.Labels, stream logproto.Stream, retentionHours, policy string) error { +func (v Validator) ValidateLabels(vCtx validationContext, ls labels.Labels, stream logproto.Stream, retentionHours, policy string) error { if len(ls) == 0 { - validation.DiscardedSamples.WithLabelValues(validation.MissingLabels, ctx.userID, retentionHours, policy).Inc() + // TODO: is this one correct? 
+ validation.DiscardedSamples.WithLabelValues(validation.MissingLabels, vCtx.userID, retentionHours, policy).Inc() return fmt.Errorf(validation.MissingLabelsErrorMsg) } @@ -186,21 +163,23 @@ func (v Validator) ValidateLabels(ctx validationContext, ls labels.Labels, strea numLabelNames-- } - if numLabelNames > ctx.maxLabelNamesPerSeries { - updateMetrics(validation.MaxLabelNamesPerSeries, ctx.userID, stream, retentionHours, policy) - return fmt.Errorf(validation.MaxLabelNamesPerSeriesErrorMsg, stream.Labels, numLabelNames, ctx.maxLabelNamesPerSeries) + entriesSize := util.EntriesTotalSize(stream.Entries) + + if numLabelNames > vCtx.maxLabelNamesPerSeries { + v.reportDiscardedData(validation.MaxLabelNamesPerSeries, vCtx, retentionHours, policy, entriesSize, len(stream.Entries)) + return fmt.Errorf(validation.MaxLabelNamesPerSeriesErrorMsg, stream.Labels, numLabelNames, vCtx.maxLabelNamesPerSeries) } lastLabelName := "" for _, l := range ls { - if len(l.Name) > ctx.maxLabelNameLength { - updateMetrics(validation.LabelNameTooLong, ctx.userID, stream, retentionHours, policy) + if len(l.Name) > vCtx.maxLabelNameLength { + v.reportDiscardedData(validation.LabelNameTooLong, vCtx, retentionHours, policy, entriesSize, len(stream.Entries)) return fmt.Errorf(validation.LabelNameTooLongErrorMsg, stream.Labels, l.Name) - } else if len(l.Value) > ctx.maxLabelValueLength { - updateMetrics(validation.LabelValueTooLong, ctx.userID, stream, retentionHours, policy) + } else if len(l.Value) > vCtx.maxLabelValueLength { + v.reportDiscardedData(validation.LabelValueTooLong, vCtx, retentionHours, policy, entriesSize, len(stream.Entries)) return fmt.Errorf(validation.LabelValueTooLongErrorMsg, stream.Labels, l.Value) } else if cmp := strings.Compare(lastLabelName, l.Name); cmp == 0 { - updateMetrics(validation.DuplicateLabelNames, ctx.userID, stream, retentionHours, policy) + v.reportDiscardedData(validation.DuplicateLabelNames, vCtx, retentionHours, policy, entriesSize, len(stream.Entries)) return fmt.Errorf(validation.DuplicateLabelNamesErrorMsg, stream.Labels, l.Name) } lastLabelName = l.Name @@ -208,17 +187,62 @@ func (v Validator) ValidateLabels(ctx validationContext, ls labels.Labels, strea return nil } +func (v Validator) reportDiscardedData(reason string, vCtx validationContext, retentionHours string, policy string, entrySize, entryCount int) { + validation.DiscardedSamples.WithLabelValues(reason, vCtx.userID, retentionHours, policy).Add(float64(entryCount)) + validation.DiscardedBytes.WithLabelValues(reason, vCtx.userID, retentionHours, policy).Add(float64(entrySize)) +} + +func (v Validator) reportDiscardedDataWithTracker(ctx context.Context, reason string, vCtx validationContext, labels labels.Labels, retentionHours string, policy string, entrySize, entryCount int) { + v.reportDiscardedData(reason, vCtx, retentionHours, policy, entrySize, entryCount) + if v.usageTracker != nil { + v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, reason, labels, float64(entrySize)) + } +} + // ShouldBlockIngestion returns whether ingestion should be blocked, until when and the status code. 
-func (v Validator) ShouldBlockIngestion(ctx validationContext, now time.Time) (bool, time.Time, int) { +func (v Validator) ShouldBlockIngestion(ctx validationContext, now time.Time, policy string) (bool, int, string, error) { + if block, code, reason, err := v.shouldBlockGlobalPolicy(ctx, now); block { + return block, code, reason, err + } + + if block, until, code := v.shouldBlockPolicy(ctx, policy, now); block { + err := fmt.Errorf(validation.BlockedIngestionPolicyErrorMsg, ctx.userID, until.Format(time.RFC3339), code) + return true, code, validation.BlockedIngestionPolicy, err + } + + return false, 0, "", nil +} + +func (v Validator) shouldBlockGlobalPolicy(ctx validationContext, now time.Time) (bool, int, string, error) { if ctx.blockIngestionUntil.IsZero() { - return false, time.Time{}, 0 + return false, 0, "", nil } - return now.Before(ctx.blockIngestionUntil), ctx.blockIngestionUntil, ctx.blockIngestionStatusCode + if now.Before(ctx.blockIngestionUntil) { + err := fmt.Errorf(validation.BlockedIngestionErrorMsg, ctx.userID, ctx.blockIngestionUntil.Format(time.RFC3339), ctx.blockIngestionStatusCode) + return true, ctx.blockIngestionStatusCode, validation.BlockedIngestion, err + } + + return false, 0, "", nil } -func updateMetrics(reason, userID string, stream logproto.Stream, retentionHours, policy string) { - validation.DiscardedSamples.WithLabelValues(reason, userID, retentionHours, policy).Add(float64(len(stream.Entries))) - bytes := util.EntriesTotalSize(stream.Entries) - validation.DiscardedBytes.WithLabelValues(reason, userID, retentionHours, policy).Add(float64(bytes)) +// ShouldBlockPolicy checks if ingestion should be blocked for the given policy. +// It returns true if ingestion should be blocked, along with the block until time and status code. +func (v *Validator) shouldBlockPolicy(ctx validationContext, policy string, now time.Time) (bool, time.Time, int) { + // No policy provided, don't block + if policy == "" { + return false, time.Time{}, 0 + } + + // Check if this policy is blocked in tenant configs + blockUntil := v.Limits.BlockIngestionPolicyUntil(ctx.userID, policy) + if blockUntil.IsZero() { + return false, time.Time{}, 0 + } + + if now.Before(blockUntil) { + return true, blockUntil, ctx.blockIngestionStatusCode + } + + return false, time.Time{}, 0 } diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go index 468fde3920efe..bb2f5c4b245c0 100644 --- a/pkg/validation/limits.go +++ b/pkg/validation/limits.go @@ -227,11 +227,12 @@ type Limits struct { OTLPConfig push.OTLPConfig `yaml:"otlp_config" json:"otlp_config" doc:"description=OTLP log ingestion configurations"` GlobalOTLPConfig push.GlobalOTLPConfig `yaml:"-" json:"-"` - BlockIngestionUntil dskit_flagext.Time `yaml:"block_ingestion_until" json:"block_ingestion_until"` - BlockIngestionStatusCode int `yaml:"block_ingestion_status_code" json:"block_ingestion_status_code"` - EnforcedLabels []string `yaml:"enforced_labels" json:"enforced_labels" category:"experimental"` - PolicyEnforcedLabels map[string][]string `yaml:"policy_enforced_labels" json:"policy_enforced_labels" category:"experimental" doc:"description=Map of policies to enforced labels. Example:\n policy_enforced_labels: \n policy1: \n - label1 \n - label2 \n policy2: \n - label3 \n - label4"` - PolicyStreamMapping PolicyStreamMapping `yaml:"policy_stream_mapping" json:"policy_stream_mapping" category:"experimental" doc:"description=Map of policies to stream selectors with a priority. Experimental. 
Example:\n policy_stream_mapping: \n finance: \n - selector: '{namespace=\"prod\", container=\"billing\"}' \n priority: 2 \n ops: \n - selector: '{namespace=\"prod\", container=\"ops\"}' \n priority: 1 \n staging: \n - selector: '{namespace=\"staging\"}' \n priority: 1"` + BlockIngestionPolicyUntil map[string]dskit_flagext.Time `yaml:"block_ingestion_policy_until" json:"block_ingestion_policy_until" category:"experimental" doc:"description=Block ingestion for policy until the configured date. The time should be in RFC3339 format. The policy is based on the policy_stream_mapping configuration."` + BlockIngestionUntil dskit_flagext.Time `yaml:"block_ingestion_until" json:"block_ingestion_until" category:"experimental"` + BlockIngestionStatusCode int `yaml:"block_ingestion_status_code" json:"block_ingestion_status_code"` + EnforcedLabels []string `yaml:"enforced_labels" json:"enforced_labels" category:"experimental"` + PolicyEnforcedLabels map[string][]string `yaml:"policy_enforced_labels" json:"policy_enforced_labels" category:"experimental" doc:"description=Map of policies to enforced labels. Example:\n policy_enforced_labels: \n policy1: \n - label1 \n - label2 \n policy2: \n - label3 \n - label4"` + PolicyStreamMapping PolicyStreamMapping `yaml:"policy_stream_mapping" json:"policy_stream_mapping" category:"experimental" doc:"description=Map of policies to stream selectors with a priority. Experimental. Example:\n policy_stream_mapping: \n finance: \n - selector: '{namespace=\"prod\", container=\"billing\"}' \n priority: 2 \n ops: \n - selector: '{namespace=\"prod\", container=\"ops\"}' \n priority: 1 \n staging: \n - selector: '{namespace=\"staging\"}' \n priority: 1"` IngestionPartitionsTenantShardSize int `yaml:"ingestion_partitions_tenant_shard_size" json:"ingestion_partitions_tenant_shard_size" category:"experimental"` @@ -1122,6 +1123,18 @@ func (o *Overrides) BlockIngestionStatusCode(userID string) int { return o.getOverridesForUser(userID).BlockIngestionStatusCode } +func (o *Overrides) BlockIngestionPolicyUntil(userID string, policy string) time.Time { + limits := o.getOverridesForUser(userID) + if limits == nil || limits.BlockIngestionPolicyUntil == nil { + return time.Time{} // Zero time means no blocking + } + + if blockUntil, ok := limits.BlockIngestionPolicyUntil[policy]; ok { + return time.Time(blockUntil) + } + return time.Time{} // Zero time means no blocking +} + func (o *Overrides) EnforcedLabels(userID string) []string { return o.getOverridesForUser(userID).EnforcedLabels } diff --git a/pkg/validation/limits_test.go b/pkg/validation/limits_test.go index 9ecc8937e818b..7dcabe65c7867 100644 --- a/pkg/validation/limits_test.go +++ b/pkg/validation/limits_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + dskit_flagext "github.com/grafana/dskit/flagext" "github.com/prometheus/common/model" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -226,10 +227,11 @@ ruler_remote_write_headers: Selector: `{a="b"}`, }, }, - OTLPConfig: defaultOTLPConfig, - EnforcedLabels: []string{}, - PolicyEnforcedLabels: map[string][]string{}, - PolicyStreamMapping: PolicyStreamMapping{}, + OTLPConfig: defaultOTLPConfig, + EnforcedLabels: []string{}, + PolicyEnforcedLabels: map[string][]string{}, + PolicyStreamMapping: PolicyStreamMapping{}, + BlockIngestionPolicyUntil: map[string]dskit_flagext.Time{}, }, }, { @@ -248,10 +250,11 @@ ruler_remote_write_headers: Selector: `{a="b"}`, }, }, - OTLPConfig: defaultOTLPConfig, - EnforcedLabels: []string{}, - 
PolicyEnforcedLabels: map[string][]string{}, - PolicyStreamMapping: PolicyStreamMapping{}, + OTLPConfig: defaultOTLPConfig, + EnforcedLabels: []string{}, + PolicyEnforcedLabels: map[string][]string{}, + PolicyStreamMapping: PolicyStreamMapping{}, + BlockIngestionPolicyUntil: map[string]dskit_flagext.Time{}, }, }, { @@ -273,11 +276,12 @@ retention_stream: }, // Rest from new defaults - RulerRemoteWriteHeaders: OverwriteMarshalingStringMap{map[string]string{"a": "b"}}, - OTLPConfig: defaultOTLPConfig, - EnforcedLabels: []string{}, - PolicyEnforcedLabels: map[string][]string{}, - PolicyStreamMapping: PolicyStreamMapping{}, + RulerRemoteWriteHeaders: OverwriteMarshalingStringMap{map[string]string{"a": "b"}}, + OTLPConfig: defaultOTLPConfig, + EnforcedLabels: []string{}, + PolicyEnforcedLabels: map[string][]string{}, + PolicyStreamMapping: PolicyStreamMapping{}, + BlockIngestionPolicyUntil: map[string]dskit_flagext.Time{}, }, }, { @@ -299,10 +303,11 @@ reject_old_samples: true Selector: `{a="b"}`, }, }, - OTLPConfig: defaultOTLPConfig, - EnforcedLabels: []string{}, - PolicyEnforcedLabels: map[string][]string{}, - PolicyStreamMapping: PolicyStreamMapping{}, + OTLPConfig: defaultOTLPConfig, + EnforcedLabels: []string{}, + PolicyEnforcedLabels: map[string][]string{}, + PolicyStreamMapping: PolicyStreamMapping{}, + BlockIngestionPolicyUntil: map[string]dskit_flagext.Time{}, }, }, { @@ -325,10 +330,11 @@ query_timeout: 5m Selector: `{a="b"}`, }, }, - OTLPConfig: defaultOTLPConfig, - EnforcedLabels: []string{}, - PolicyEnforcedLabels: map[string][]string{}, - PolicyStreamMapping: PolicyStreamMapping{}, + OTLPConfig: defaultOTLPConfig, + EnforcedLabels: []string{}, + PolicyEnforcedLabels: map[string][]string{}, + PolicyStreamMapping: PolicyStreamMapping{}, + BlockIngestionPolicyUntil: map[string]dskit_flagext.Time{}, }, }, } { diff --git a/pkg/validation/validate.go b/pkg/validation/validate.go index e6af919adceff..c7c71d285c4ae 100644 --- a/pkg/validation/validate.go +++ b/pkg/validation/validate.go @@ -71,6 +71,8 @@ const ( StructuredMetadataTooManyErrorMsg = "stream '%s' has too many structured metadata labels: '%d', limit: '%d'. Please see `limits_config.max_structured_metadata_entries_count` or contact your Loki administrator to increase it." BlockedIngestion = "blocked_ingestion" BlockedIngestionErrorMsg = "ingestion blocked for user %s until '%s' with status code '%d'" + BlockedIngestionPolicy = "blocked_ingestion_policy" + BlockedIngestionPolicyErrorMsg = "ingestion blocked for user %s until '%s' with status code '%d'" MissingEnforcedLabels = "missing_enforced_labels" MissingEnforcedLabelsErrorMsg = "missing required labels %s for user %s" )
feat
Add support for blocking a policy to be ingested (#16203)
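The commit layers a per-policy block on top of the existing tenant-wide one: the global window is consulted first, then the policy resolved via policy_stream_mapping is looked up in block_ingestion_policy_until, and both reuse the tenant's block_ingestion_status_code (a code of 200 means the push is discarded and counted but still acknowledged as success). A compressed sketch of that two-level decision with simplified stand-in types — the real code threads a validationContext and returns a reason string plus a wrapped error, as in the diff:

package main

import (
	"fmt"
	"time"
)

// limits stands in for the tenant overrides: a global block window plus the
// optional per-policy map added by this commit.
type limits struct {
	blockUntil       time.Time            // block_ingestion_until
	blockStatusCode  int                  // block_ingestion_status_code
	blockPolicyUntil map[string]time.Time // block_ingestion_policy_until
}

// shouldBlock reports whether a push for the given policy must be rejected
// right now, and with which status code. The global window wins; the
// per-policy window is only consulted afterwards.
func shouldBlock(l limits, policy string, now time.Time) (bool, int, string) {
	if !l.blockUntil.IsZero() && now.Before(l.blockUntil) {
		return true, l.blockStatusCode, "blocked_ingestion"
	}
	if policy != "" {
		if until, ok := l.blockPolicyUntil[policy]; ok && now.Before(until) {
			return true, l.blockStatusCode, "blocked_ingestion_policy"
		}
	}
	return false, 0, ""
}

func main() {
	now := time.Now()
	l := limits{
		blockStatusCode:  260, // mirrors defaultErrCode in the tests above
		blockPolicyUntil: map[string]time.Time{"finance": now.Add(time.Hour)},
	}
	blocked, code, reason := shouldBlock(l, "finance", now)
	fmt.Println(blocked, code, reason) // true 260 blocked_ingestion_policy
	blocked, _, _ = shouldBlock(l, "ops", now)
	fmt.Println(blocked) // false: only the named policy is blocked
}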
d4fcef5229d088c8ba61a2665c163c6e7330c682
2024-06-10 17:22:29
Christian Haudum
chore(blooms): Some bloom gateway cleanups (#13165)
false
diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go index 165e2d652473b..5747f6e7993e8 100644 --- a/pkg/bloomgateway/bloomgateway.go +++ b/pkg/bloomgateway/bloomgateway.go @@ -1,43 +1,7 @@ /* -Bloom Gateway package - The bloom gateway is a component that can be run as a standalone microservice target and provides capabilities for filtering ChunkRefs based on a given list of line filter expressions. - - Querier Query Frontend - | | - ................................... service boundary - | | - +----+------+ - | - indexgateway.Gateway - | - bloomgateway.BloomQuerier - | - bloomgateway.GatewayClient - | - logproto.BloomGatewayClient - | - ................................... service boundary - | - bloomgateway.Gateway - | - queue.RequestQueue - | - bloomgateway.Worker - | - bloomgateway.Processor - | - bloomshipper.Store - | - bloomshipper.Client - | - ObjectClient - | - ................................... service boundary - | - object storage */ package bloomgateway @@ -63,13 +27,10 @@ import ( "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" "github.com/grafana/loki/v3/pkg/util" "github.com/grafana/loki/v3/pkg/util/constants" - util_log "github.com/grafana/loki/v3/pkg/util/log" utillog "github.com/grafana/loki/v3/pkg/util/log" "github.com/grafana/loki/v3/pkg/util/spanlogger" ) -var errGatewayUnhealthy = errors.New("bloom-gateway is unhealthy in the ring") - const ( metricsSubsystem = "bloom_gateway" querierMetricsSubsystem = "bloom_gateway_querier" @@ -209,7 +170,7 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk stats, ctx := ContextWithEmptyStats(ctx) logger := spanlogger.FromContextWithFallback( ctx, - util_log.WithContext(ctx, g.logger), + utillog.WithContext(ctx, g.logger), ) defer func() { @@ -261,9 +222,6 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk }, nil } - // TODO(chaudum): I intentionally keep the logic for handling multiple tasks, - // so that the PR does not explode in size. This should be cleaned up at some point. - seriesByDay := partitionRequest(req) stats.NumTasks = len(seriesByDay) @@ -279,14 +237,13 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk return nil, errors.New("request time range must span exactly one day") } - tasks := make([]Task, 0, len(seriesByDay)) - responses := make([][]v1.Output, 0, len(seriesByDay)) - for _, seriesForDay := range seriesByDay { - task := newTask(ctx, tenantID, seriesForDay, filters, blocks) - // TODO(owen-d): include capacity in constructor? - task.responses = responsesPool.Get(len(seriesForDay.series)) - tasks = append(tasks, task) - } + series := seriesByDay[0] + task := newTask(ctx, tenantID, series, filters, blocks) + + // TODO(owen-d): include capacity in constructor? + task.responses = responsesPool.Get(len(series.series)) + // free up the responses + defer responsesPool.Put(task.responses) g.activeUsers.UpdateUserTimestamp(tenantID, time.Now()) @@ -297,62 +254,41 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk preFilterChunks += len(series.Refs) } - // Ideally we could use an unbuffered channel here, but since we return the - // request on the first error, there can be cases where the request context - // is not done yet and the consumeTask() function wants to send to the - // tasksCh, but nobody reads from it any more.
- queueStart := time.Now() - tasksCh := make(chan Task, len(tasks)) - for _, task := range tasks { - task := task - task.enqueueTime = time.Now() - - // TODO(owen-d): gracefully handle full queues - if err := g.queue.Enqueue(tenantID, nil, task, func() { - // When enqueuing, we also add the task to the pending tasks - _ = g.pendingTasks.Inc() - }); err != nil { - stats.Status = labelFailure - return nil, errors.Wrap(err, "failed to enqueue task") - } - // TODO(owen-d): use `concurrency` lib, bound parallelism - go g.consumeTask(ctx, task, tasksCh) - } - - sp.LogKV("msg", "enqueued tasks", "duration", time.Since(queueStart).String()) + tasksCh := make(chan Task, 1) - remaining := len(tasks) + // TODO(owen-d): gracefully handle full queues + task.enqueueTime = time.Now() + if err := g.queue.Enqueue(tenantID, nil, task, func() { + // When enqueuing, we also add the task to the pending tasks + _ = g.pendingTasks.Inc() + }); err != nil { + stats.Status = labelFailure + return nil, errors.Wrap(err, "failed to enqueue task") + } + // TODO(owen-d): use `concurrency` lib, bound parallelism + go g.consumeTask(ctx, task, tasksCh) combinedRecorder := v1.NewBloomRecorder(ctx, "combined") - for remaining > 0 { - select { - case <-ctx.Done(): - stats.Status = "cancel" - return nil, errors.Wrap(ctx.Err(), "request failed") - case task := <-tasksCh: - if task.Err() != nil { - stats.Status = labelFailure - return nil, errors.Wrap(task.Err(), "request failed") - } - responses = append(responses, task.responses) - combinedRecorder.Merge(task.recorder) - remaining-- + + select { + case <-ctx.Done(): + stats.Status = "cancel" + return nil, errors.Wrap(ctx.Err(), "request failed") + case task = <-tasksCh: + if task.Err() != nil { + stats.Status = labelFailure + return nil, errors.Wrap(task.Err(), "request failed") } + combinedRecorder.Merge(task.recorder) } - combinedRecorder.Report(util_log.WithContext(ctx, g.logger), g.bloomStore.BloomMetrics()) - sp.LogKV("msg", "received all responses") + combinedRecorder.Report(utillog.WithContext(ctx, g.logger), g.bloomStore.BloomMetrics()) start := time.Now() - filtered := filterChunkRefs(req, responses) + filtered := filterChunkRefs(req, task.responses) duration := time.Since(start) stats.AddPostProcessingTime(duration) - // free up the responses - for _, resp := range responses { - responsesPool.Put(resp) - } - var postFilterSeries, postFilterChunks int postFilterSeries = len(filtered) for _, group := range filtered { @@ -404,35 +340,13 @@ func (g *Gateway) consumeTask(ctx context.Context, task Task, tasksCh chan<- Tas } } -// merges a list of responses via a heap. The same fingerprints and chunks can be present in multiple responses. -// Individual responses do not need to be be ordered beforehand. -func orderedResponsesByFP(responses [][]v1.Output) v1.Iterator[v1.Output] { - if len(responses) == 0 { - return v1.NewEmptyIter[v1.Output]() - } - if len(responses) == 1 { - sort.Slice(responses[0], func(i, j int) bool { return responses[0][i].Fp < responses[0][j].Fp }) - return v1.NewSliceIter(responses[0]) - } - - itrs := make([]v1.PeekingIterator[v1.Output], 0, len(responses)) - for _, r := range responses { - sort.Slice(r, func(i, j int) bool { return r[i].Fp < r[j].Fp }) - itrs = append(itrs, v1.NewPeekingIter(v1.NewSliceIter(r))) - } - return v1.NewHeapIterator[v1.Output]( - func(o1, o2 v1.Output) bool { return o1.Fp < o2.Fp }, - itrs..., - ) -} - // TODO(owen-d): improve perf. 
This can be faster with a more specialized impl // NB(owen-d): `req` is mutated in place for performance, but `responses` is not // Removals of the outputs must be sorted. -func filterChunkRefs( - req *logproto.FilterChunkRefRequest, - responses [][]v1.Output, -) []*logproto.GroupedChunkRefs { +func filterChunkRefs(req *logproto.FilterChunkRefRequest, responses []v1.Output) []*logproto.GroupedChunkRefs { + // sort responses by fingerprint + sort.Slice(responses, func(i, j int) bool { return responses[i].Fp < responses[j].Fp }) + res := make([]*logproto.GroupedChunkRefs, 0, len(req.Refs)) // dedupe outputs, merging the same series. @@ -481,7 +395,7 @@ func filterChunkRefs( res.Removals = chks return res }, - v1.NewPeekingIter(orderedResponsesByFP(responses)), + v1.NewPeekingIter(v1.NewSliceIter(responses)), ) // Iterate through the requested and filtered series/chunks, diff --git a/pkg/bloomgateway/bloomgateway_test.go b/pkg/bloomgateway/bloomgateway_test.go index fdcd7df117f3f..d16e833fc437a 100644 --- a/pkg/bloomgateway/bloomgateway_test.go +++ b/pkg/bloomgateway/bloomgateway_test.go @@ -517,18 +517,15 @@ func TestFilterChunkRefs(t *testing.T) { fp uint64 checksums []uint32 } - mkRemovals := func(xs [][]instruction) [][]v1.Output { - out := make([][]v1.Output, len(xs)) + mkRemovals := func(xs []instruction) []v1.Output { + out := make([]v1.Output, len(xs)) for i, x := range xs { - out[i] = make([]v1.Output, len(x)) - for j, c := range x { - out[i][j] = v1.Output{ - Fp: model.Fingerprint(c.fp), - Removals: make(v1.ChunkRefs, len(c.checksums)), - } - for k, chk := range c.checksums { - out[i][j].Removals[k] = v1.ChunkRef{Checksum: chk} - } + out[i] = v1.Output{ + Fp: model.Fingerprint(x.fp), + Removals: make(v1.ChunkRefs, len(x.checksums)), + } + for k, chk := range x.checksums { + out[i].Removals[k] = v1.ChunkRef{Checksum: chk} } } return out @@ -551,7 +548,7 @@ func TestFilterChunkRefs(t *testing.T) { for _, tc := range []struct { desc string input *logproto.FilterChunkRefRequest - removals [][]instruction + removals []instruction expected *logproto.FilterChunkRefRequest }{ { @@ -562,22 +559,18 @@ func TestFilterChunkRefs(t *testing.T) { { desc: "remove all", input: mkInput(2, 2), - removals: [][]instruction{ - { - {fp: 0, checksums: []uint32{0, 1}}, - {fp: 1, checksums: []uint32{0, 1}}, - }, + removals: []instruction{ + {fp: 0, checksums: []uint32{0, 1}}, + {fp: 1, checksums: []uint32{0, 1}}, }, expected: mkInput(0, 0), }, { desc: "remove every other series", input: mkInput(4, 2), - removals: [][]instruction{ - { - {fp: 0, checksums: []uint32{0, 1}}, - {fp: 2, checksums: []uint32{0, 1}}, - }, + removals: []instruction{ + {fp: 0, checksums: []uint32{0, 1}}, + {fp: 2, checksums: []uint32{0, 1}}, }, expected: mkResult([]instruction{ {fp: 1, checksums: []uint32{0, 1}}, @@ -587,13 +580,11 @@ func TestFilterChunkRefs(t *testing.T) { { desc: "remove the last chunk for each series", input: mkInput(4, 2), - removals: [][]instruction{ - { - {fp: 0, checksums: []uint32{1}}, - {fp: 1, checksums: []uint32{1}}, - {fp: 2, checksums: []uint32{1}}, - {fp: 3, checksums: []uint32{1}}, - }, + removals: []instruction{ + {fp: 0, checksums: []uint32{1}}, + {fp: 1, checksums: []uint32{1}}, + {fp: 2, checksums: []uint32{1}}, + {fp: 3, checksums: []uint32{1}}, }, expected: mkResult([]instruction{ {fp: 0, checksums: []uint32{0}}, @@ -605,11 +596,9 @@ func TestFilterChunkRefs(t *testing.T) { { desc: "remove the middle chunk for every other series", input: mkInput(4, 3), - removals: [][]instruction{ - { - {fp: 0, 
checksums: []uint32{1}}, - {fp: 2, checksums: []uint32{1}}, - }, + removals: []instruction{ + {fp: 0, checksums: []uint32{1}}, + {fp: 2, checksums: []uint32{1}}, }, expected: mkResult([]instruction{ {fp: 0, checksums: []uint32{0, 2}}, @@ -621,10 +610,8 @@ func TestFilterChunkRefs(t *testing.T) { { desc: "remove the first chunk of the last series", input: mkInput(4, 3), - removals: [][]instruction{ - { - {fp: 3, checksums: []uint32{0}}, - }, + removals: []instruction{ + {fp: 3, checksums: []uint32{0}}, }, expected: mkResult([]instruction{ {fp: 0, checksums: []uint32{0, 1, 2}}, @@ -636,13 +623,11 @@ func TestFilterChunkRefs(t *testing.T) { { desc: "duplicate removals", input: mkInput(4, 3), - removals: [][]instruction{ - { - {fp: 0, checksums: []uint32{0, 1}}, - {fp: 0, checksums: []uint32{0, 1, 2}}, - {fp: 1, checksums: []uint32{0, 2}}, - {fp: 2, checksums: []uint32{1}}, - }, + removals: []instruction{ + {fp: 0, checksums: []uint32{0, 1}}, + {fp: 0, checksums: []uint32{0, 1, 2}}, + {fp: 1, checksums: []uint32{0, 2}}, + {fp: 2, checksums: []uint32{1}}, }, expected: mkResult([]instruction{ {fp: 1, checksums: []uint32{1}}, @@ -650,45 +635,19 @@ func TestFilterChunkRefs(t *testing.T) { {fp: 3, checksums: []uint32{0, 1, 2}}, }), }, - { - desc: "middle duplicates across 2 days", - input: mkInput(4, 3), - removals: [][]instruction{ - { - {fp: 0, checksums: []uint32{1}}, - {fp: 2, checksums: []uint32{1}}, - }, - { - {fp: 0, checksums: []uint32{1}}, - {fp: 2, checksums: []uint32{1}}, - }, - }, - expected: mkResult([]instruction{ - {fp: 0, checksums: []uint32{0, 2}}, - {fp: 1, checksums: []uint32{0, 1, 2}}, - {fp: 2, checksums: []uint32{0, 2}}, - {fp: 3, checksums: []uint32{0, 1, 2}}, - }), - }, { desc: "unordered fingerprints", input: mkInput(4, 3), - removals: [][]instruction{ - { - {fp: 3, checksums: []uint32{2}}, - {fp: 0, checksums: []uint32{1, 2}}, - {fp: 2, checksums: []uint32{1, 2}}, - }, - { - {fp: 1, checksums: []uint32{1}}, - {fp: 2, checksums: []uint32{0, 1}}, - {fp: 3, checksums: []uint32{0}}, - }, + removals: []instruction{ + {fp: 3, checksums: []uint32{2}}, + {fp: 0, checksums: []uint32{1, 2}}, + {fp: 2, checksums: []uint32{1, 2}}, }, expected: mkResult([]instruction{ {fp: 0, checksums: []uint32{0}}, - {fp: 1, checksums: []uint32{0, 2}}, - {fp: 3, checksums: []uint32{1}}, + {fp: 1, checksums: []uint32{0, 1, 2}}, + {fp: 2, checksums: []uint32{0}}, + {fp: 3, checksums: []uint32{0, 1}}, }), }, } { @@ -752,7 +711,7 @@ func BenchmarkFilterChunkRefs(b *testing.B) { { desc: "filterChunkRefs", f: func(req *logproto.FilterChunkRefRequest, responses []v1.Output) { - filterChunkRefs(req, [][]v1.Output{responses}) + filterChunkRefs(req, responses) }, }, } { diff --git a/pkg/bloomgateway/util.go b/pkg/bloomgateway/util.go index df3a93fcafeda..5f115ba75cfc8 100644 --- a/pkg/bloomgateway/util.go +++ b/pkg/bloomgateway/util.go @@ -28,23 +28,17 @@ func getFromThrough(refs []*logproto.ShortRef) (model.Time, model.Time) { } maxItem := slices.MaxFunc(refs, func(a, b *logproto.ShortRef) int { - if a.Through > b.Through { - return 1 - } else if a.Through < b.Through { - return -1 - } - return 0 + return int(a.Through) - int(b.Through) }) return refs[0].From, maxItem.Through } // convertToChunkRefs converts a []*logproto.ShortRef into v1.ChunkRefs -// TODO(chaudum): Avoid conversion by transferring v1.ChunkRefs in gRPC request. 
func convertToChunkRefs(refs []*logproto.ShortRef) v1.ChunkRefs { result := make(v1.ChunkRefs, 0, len(refs)) - for _, ref := range refs { - result = append(result, v1.ChunkRef{From: ref.From, Through: ref.Through, Checksum: ref.Checksum}) + for i := range refs { + result = append(result, v1.ChunkRef(*refs[i])) } return result }
chore
Some bloom gateway cleanups (#13165)
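The heart of this cleanup: FilterChunkRefs now builds exactly one day-partitioned task per request, so filterChunkRefs receives a single []v1.Output and the deleted orderedResponsesByFP k-way heap merge collapses into an in-place sort by fingerprint. A toy illustration of that simplification, using a pared-down stand-in for v1.Output rather than the gateway's real types:

package main

import (
	"fmt"
	"sort"
)

// output is a stand-in for v1.Output: a series fingerprint plus the chunk
// checksums the blooms ruled out.
type output struct {
	fp       uint64
	removals []uint32
}

// sortResponses replaces the heap merge: with one response slice per request,
// sorting in place is enough, and downstream code can consume it with a plain
// slice iterator, merging consecutive entries that share a fingerprint.
func sortResponses(responses []output) []output {
	sort.Slice(responses, func(i, j int) bool { return responses[i].fp < responses[j].fp })
	return responses
}

func main() {
	for _, o := range sortResponses([]output{
		{fp: 3, removals: []uint32{2}},
		{fp: 0, removals: []uint32{1, 2}},
		{fp: 2, removals: []uint32{1, 2}},
	}) {
		fmt.Println(o.fp, o.removals) // fingerprints emerge in order: 0, 2, 3
	}
}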
9e13abd454536646b8334a31af4cd600222a569e
2025-01-09 05:36:50
renovate[bot]
fix(deps): update module github.com/ibm/sarama to v1.45.0 (#15636)
false
diff --git a/go.mod b/go.mod index 071ee3704f32d..f13f5b9f6be1d 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/Azure/azure-storage-blob-go v0.15.0 github.com/Azure/go-autorest/autorest/adal v0.9.24 github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 - github.com/IBM/sarama v1.44.0 + github.com/IBM/sarama v1.45.0 github.com/Masterminds/sprig/v3 v3.3.0 github.com/NYTimes/gziphandler v1.1.1 github.com/Workiva/go-datastructures v1.1.5 diff --git a/go.sum b/go.sum index 695b88caaca38..15cb39121db77 100644 --- a/go.sum +++ b/go.sum @@ -134,8 +134,8 @@ github.com/IBM/go-sdk-core/v5 v5.18.3 h1:q6IDU3N2bHGwijK9pMnzKC5gqdaRII56NzB4ZNd github.com/IBM/go-sdk-core/v5 v5.18.3/go.mod h1:5kILxqEWOrwMhoD2b7J6Xv9Z2M6YIdT/6Oy+XRSsCGQ= github.com/IBM/ibm-cos-sdk-go v1.12.0 h1:Wrk3ve4JS3euhl7XjNFd3RlvPT56199G2/rKaPWpRKU= github.com/IBM/ibm-cos-sdk-go v1.12.0/go.mod h1:v/VBvFuysZMIX9HcaIrz6a+FLVw9px8fq6XabFwD+E4= -github.com/IBM/sarama v1.44.0 h1:puNKqcScjSAgVLramjsuovZrS0nJZFVsrvuUymkWqhE= -github.com/IBM/sarama v1.44.0/go.mod h1:MxQ9SvGfvKIorbk077Ff6DUnBlGpidiQOtU2vuBaxVw= +github.com/IBM/sarama v1.45.0 h1:IzeBevTn809IJ/dhNKhP5mpxEXTmELuezO2tgHD9G5E= +github.com/IBM/sarama v1.45.0/go.mod h1:EEay63m8EZkeumco9TDXf2JT3uDnZsZqFgV46n4yZdY= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/MasslessParticle/azure-storage-blob-go v0.14.1-0.20240322194317-344980fda573 h1:DCPjdUAi+jcGnL7iN+A7uNY8xG584oMRuisYh/VE21E= github.com/MasslessParticle/azure-storage-blob-go v0.14.1-0.20240322194317-344980fda573/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= diff --git a/vendor/github.com/IBM/sarama/Dockerfile.kafka b/vendor/github.com/IBM/sarama/Dockerfile.kafka index b4d5c6acbbcef..d2234e3918f2d 100644 --- a/vendor/github.com/IBM/sarama/Dockerfile.kafka +++ b/vendor/github.com/IBM/sarama/Dockerfile.kafka @@ -1,17 +1,17 @@ -FROM registry.access.redhat.com/ubi8/ubi-minimal:8.10@sha256:cf095e5668919ba1b4ace3888107684ad9d587b1830d3eb56973e6a54f456e67 +FROM registry.access.redhat.com/ubi9/ubi-minimal:9.5@sha256:daa61d6103e98bccf40d7a69a0d4f8786ec390e2204fd94f7cc49053e9949360 USER root RUN microdnf update -y \ - && microdnf install -y curl gzip java-11-openjdk-headless tar tzdata-java \ + && microdnf install -y git gzip java-17-openjdk-headless tar tzdata-java \ && microdnf reinstall -y tzdata \ && microdnf clean all -ENV JAVA_HOME=/usr/lib/jvm/jre-11 +ENV JAVA_HOME=/usr/lib/jvm/jre-17 # https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html # Ensure Java doesn't cache any dns results -RUN cd /etc/java/java-11-openjdk/*/conf/security \ +RUN cd /etc/java/java-17-openjdk/*/conf/security \ && sed -e '/networkaddress.cache.ttl/d' -e '/networkaddress.cache.negative.ttl/d' -i java.security \ && echo 'networkaddress.cache.ttl=0' >> java.security \ && echo 'networkaddress.cache.negative.ttl=0' >> java.security @@ -19,24 +19,46 @@ RUN cd /etc/java/java-11-openjdk/*/conf/security \ ARG SCALA_VERSION="2.13" ARG KAFKA_VERSION="3.6.2" +WORKDIR /tmp + # https://github.com/apache/kafka/blob/2e2b0a58eda3e677763af974a44a6aaa3c280214/tests/docker/Dockerfile#L77-L105 ARG KAFKA_MIRROR="https://s3-us-west-2.amazonaws.com/kafka-packages" SHELL ["/bin/bash", "-o", "pipefail", "-c"] -RUN mkdir -p "/opt/kafka-${KAFKA_VERSION}" \ +RUN --mount=type=bind,target=.,rw=true \ + mkdir -p "/opt/kafka-${KAFKA_VERSION}" \ && chmod a+rw "/opt/kafka-${KAFKA_VERSION}" \ - && curl -s 
"$KAFKA_MIRROR/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz" | tar xz --strip-components=1 -C "/opt/kafka-${KAFKA_VERSION}" + && if [ "$KAFKA_VERSION" = "4.0.0" ]; then \ + microdnf install -y java-17-openjdk-devel \ + && git clone --depth=50 --single-branch -b 4.0 https://github.com/apache/kafka /usr/src/kafka \ + && cd /usr/src/kafka \ + && : PIN TO COMMIT BEFORE KAFKA-17616 ZOOKEEPER REMOVAL STARTED \ + && git reset --hard d1504649fb \ + && export JAVA_TOOL_OPTIONS=-XX:MaxRAMPercentage=80 \ + && sed -e '/version=/s/-SNAPSHOT//' -e '/org.gradle.jvmargs/d' -e '/org.gradle.parallel/s/true/false/' -i gradle.properties && ./gradlew -PmaxParallelForks=1 -PmaxScalacThreads=1 --no-daemon releaseTarGz -x siteDocsTar -x javadoc \ + && tar xzf core/build/distributions/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz --strip-components=1 -C "/opt/kafka-${KAFKA_VERSION}" \ + && cp /tmp/server.properties "/opt/kafka-${KAFKA_VERSION}/config/" \ + && microdnf remove -y java-17-openjdk-devel \ + && rm -rf /usr/src/kafka ; \ + else \ + curl -s "$KAFKA_MIRROR/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz" | tar xz --strip-components=1 -C "/opt/kafka-${KAFKA_VERSION}" ; \ + fi # older kafka versions depend upon jaxb-api being bundled with the JDK, but it # was removed from Java 11 so work around that by including it in the kafka # libs dir regardless -WORKDIR /tmp RUN curl -sLO "https://repo1.maven.org/maven2/javax/xml/bind/jaxb-api/2.3.0/jaxb-api-2.3.0.jar" \ && for DIR in /opt/kafka-*; do cp -v jaxb-api-2.3.0.jar $DIR/libs/ ; done \ && rm -f jaxb-api-2.3.0.jar +# older kafka versions with the zookeeper 3.4.13 client aren't compatible with Java 17 so quietly bump them to 3.5.9 +RUN [ -f "/opt/kafka-${KAFKA_VERSION}/libs/zookeeper-3.4.13.jar" ] || exit 0 ; \ + rm -f "/opt/kafka-${KAFKA_VERSION}/libs/zookeeper-3.4.13.jar" \ + && curl --fail -sSL -o "/opt/kafka-${KAFKA_VERSION}/libs/zookeeper-3.5.9.jar" "https://repo1.maven.org/maven2/org/apache/zookeeper/zookeeper/3.5.9/zookeeper-3.5.9.jar" \ + && curl --fail -sSL -o "/opt/kafka-${KAFKA_VERSION}/libs/zookeeper-jute-3.5.9.jar" "https://repo1.maven.org/maven2/org/apache/zookeeper/zookeeper-jute/3.5.9/zookeeper-jute-3.5.9.jar" + WORKDIR /opt/kafka-${KAFKA_VERSION} -ENV JAVA_MAJOR_VERSION=11 +ENV JAVA_MAJOR_VERSION=17 RUN sed -e "s/JAVA_MAJOR_VERSION=.*/JAVA_MAJOR_VERSION=${JAVA_MAJOR_VERSION}/" -i"" ./bin/kafka-run-class.sh diff --git a/vendor/github.com/IBM/sarama/Makefile b/vendor/github.com/IBM/sarama/Makefile index 7cefc2a2c36bf..ba6f46e41a5ba 100644 --- a/vendor/github.com/IBM/sarama/Makefile +++ b/vendor/github.com/IBM/sarama/Makefile @@ -9,7 +9,7 @@ FILES := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name TESTS := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -name '*_test.go') $(GOBIN)/tparse: - GOBIN=$(GOBIN) go install github.com/mfridman/[email protected] + GOBIN=$(GOBIN) go install github.com/mfridman/[email protected] get: $(GO) get ./... $(GO) mod verify diff --git a/vendor/github.com/IBM/sarama/admin.go b/vendor/github.com/IBM/sarama/admin.go index 6549c7e6fb07f..8aa1f374e4d63 100644 --- a/vendor/github.com/IBM/sarama/admin.go +++ b/vendor/github.com/IBM/sarama/admin.go @@ -3,6 +3,7 @@ package sarama import ( "errors" "fmt" + "io" "math/rand" "strconv" "sync" @@ -144,6 +145,10 @@ type ClusterAdmin interface { // locally cached value if it's available. Controller() (*Broker, error) + // Coordinator returns the coordinating broker for a consumer group. 
It will + // return a locally cached value if it's available. + Coordinator(group string) (*Broker, error) + // Remove members from the consumer group by given member identities. // This operation is supported by brokers with version 2.3 or higher // This is for static membership feature. KIP-345 @@ -195,14 +200,25 @@ func (ca *clusterAdmin) Controller() (*Broker, error) { return ca.client.Controller() } +func (ca *clusterAdmin) Coordinator(group string) (*Broker, error) { + return ca.client.Coordinator(group) +} + func (ca *clusterAdmin) refreshController() (*Broker, error) { return ca.client.RefreshController() } -// isErrNotController returns `true` if the given error type unwraps to an -// `ErrNotController` response from Kafka -func isErrNotController(err error) bool { - return errors.Is(err, ErrNotController) +// isRetriableControllerError returns `true` if the given error type unwraps to +// an `ErrNotController` or `EOF` response from Kafka +func isRetriableControllerError(err error) bool { + return errors.Is(err, ErrNotController) || errors.Is(err, io.EOF) +} + +// isRetriableGroupCoordinatorError returns `true` if the given error type +// unwraps to an `ErrNotCoordinatorForConsumer`, +// `ErrConsumerCoordinatorNotAvailable` or `EOF` response from Kafka +func isRetriableGroupCoordinatorError(err error) bool { + return errors.Is(err, ErrNotCoordinatorForConsumer) || errors.Is(err, ErrConsumerCoordinatorNotAvailable) || errors.Is(err, io.EOF) } // retryOnError will repeatedly call the given (error-returning) func in the @@ -252,7 +268,7 @@ func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateO request.Version = 1 } - return ca.retryOnError(isErrNotController, func() error { + return ca.retryOnError(isRetriableControllerError, func() error { b, err := ca.Controller() if err != nil { return err @@ -269,7 +285,7 @@ func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateO } if !errors.Is(topicErr.Err, ErrNoError) { - if errors.Is(topicErr.Err, ErrNotController) { + if isRetriableControllerError(topicErr.Err) { _, _ = ca.refreshController() } return topicErr @@ -281,14 +297,14 @@ func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateO func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) { var response *MetadataResponse - err = ca.retryOnError(isErrNotController, func() error { + err = ca.retryOnError(isRetriableControllerError, func() error { controller, err := ca.Controller() if err != nil { return err } request := NewMetadataRequest(ca.conf.Version, topics) response, err = controller.GetMetadata(request) - if isErrNotController(err) { + if isRetriableControllerError(err) { _, _ = ca.refreshController() } return err @@ -301,7 +317,7 @@ func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetada func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) { var response *MetadataResponse - err = ca.retryOnError(isErrNotController, func() error { + err = ca.retryOnError(isRetriableControllerError, func() error { controller, err := ca.Controller() if err != nil { return err @@ -309,7 +325,7 @@ func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32 request := NewMetadataRequest(ca.conf.Version, nil) response, err = controller.GetMetadata(request) - if isErrNotController(err) { + if isRetriableControllerError(err) { _, _ = ca.refreshController() } return err @@ -441,7 +457,7 @@ func (ca 
*clusterAdmin) DeleteTopic(topic string) error { request.Version = 1 } - return ca.retryOnError(isErrNotController, func() error { + return ca.retryOnError(isRetriableControllerError, func() error { b, err := ca.Controller() if err != nil { return err @@ -485,7 +501,7 @@ func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [ request.Version = 1 } - return ca.retryOnError(isErrNotController, func() error { + return ca.retryOnError(isRetriableControllerError, func() error { b, err := ca.Controller() if err != nil { return err @@ -526,7 +542,7 @@ func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][ request.AddBlock(topic, int32(i), assignment[i]) } - return ca.retryOnError(isErrNotController, func() error { + return ca.retryOnError(isRetriableControllerError, func() error { b, err := ca.Controller() if err != nil { return err @@ -573,7 +589,7 @@ func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []in request.AddBlock(topic, partitions) var rsp *ListPartitionReassignmentsResponse - err = ca.retryOnError(isErrNotController, func() error { + err = ca.retryOnError(isRetriableControllerError, func() error { b, err := ca.Controller() if err != nil { return err @@ -581,7 +597,7 @@ func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []in _ = b.Open(ca.client.Config()) rsp, err = b.ListPartitionReassignments(request) - if isErrNotController(err) { + if isRetriableControllerError(err) { _, _ = ca.refreshController() } return err @@ -924,7 +940,7 @@ func (ca *clusterAdmin) ElectLeaders(electionType ElectionType, partitions map[s } var res *ElectLeadersResponse - err := ca.retryOnError(isErrNotController, func() error { + if err := ca.retryOnError(isRetriableControllerError, func() error { b, err := ca.Controller() if err != nil { return err @@ -932,12 +948,17 @@ func (ca *clusterAdmin) ElectLeaders(electionType ElectionType, partitions map[s _ = b.Open(ca.client.Config()) res, err = b.ElectLeaders(request) - if isErrNotController(err) { - _, _ = ca.refreshController() + if err != nil { + return err } - return err - }) - if err != nil { + if !errors.Is(res.ErrorCode, ErrNoError) { + if isRetriableControllerError(res.ErrorCode) { + _, _ = ca.refreshController() + } + return res.ErrorCode + } + return nil + }); err != nil { return nil, err } return res.ReplicaElectionResults, nil @@ -947,11 +968,11 @@ func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*Group groupsPerBroker := make(map[*Broker][]string) for _, group := range groups { - controller, err := ca.client.Coordinator(group) + coordinator, err := ca.client.Coordinator(group) if err != nil { return nil, err } - groupsPerBroker[controller] = append(groupsPerBroker[controller], group) + groupsPerBroker[coordinator] = append(groupsPerBroker[coordinator], group) } for broker, brokerGroups := range groupsPerBroker { @@ -1043,22 +1064,36 @@ func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err e } func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) { - coordinator, err := ca.client.Coordinator(group) - if err != nil { - return nil, err - } - + var response *OffsetFetchResponse request := NewOffsetFetchRequest(ca.conf.Version, group, topicPartitions) + err := ca.retryOnError(isRetriableGroupCoordinatorError, func() (err error) { + defer func() { + if err != nil && isRetriableGroupCoordinatorError(err) { + _ = 
ca.client.RefreshCoordinator(group) + } + }() + + coordinator, err := ca.client.Coordinator(group) + if err != nil { + return err + } - return coordinator.FetchOffset(request) + response, err = coordinator.FetchOffset(request) + if err != nil { + return err + } + if !errors.Is(response.Err, ErrNoError) { + return response.Err + } + + return nil + }) + + return response, err } func (ca *clusterAdmin) DeleteConsumerGroupOffset(group string, topic string, partition int32) error { - coordinator, err := ca.client.Coordinator(group) - if err != nil { - return err - } - + var response *DeleteOffsetsResponse request := &DeleteOffsetsRequest{ Group: group, partitions: map[string][]int32{ @@ -1066,27 +1101,35 @@ func (ca *clusterAdmin) DeleteConsumerGroupOffset(group string, topic string, pa }, } - resp, err := coordinator.DeleteOffsets(request) - if err != nil { - return err - } + return ca.retryOnError(isRetriableGroupCoordinatorError, func() (err error) { + defer func() { + if err != nil && isRetriableGroupCoordinatorError(err) { + _ = ca.client.RefreshCoordinator(group) + } + }() - if !errors.Is(resp.ErrorCode, ErrNoError) { - return resp.ErrorCode - } + coordinator, err := ca.client.Coordinator(group) + if err != nil { + return err + } - if !errors.Is(resp.Errors[topic][partition], ErrNoError) { - return resp.Errors[topic][partition] - } - return nil + response, err = coordinator.DeleteOffsets(request) + if err != nil { + return err + } + if !errors.Is(response.ErrorCode, ErrNoError) { + return response.ErrorCode + } + if !errors.Is(response.Errors[topic][partition], ErrNoError) { + return response.Errors[topic][partition] + } + + return nil + }) } func (ca *clusterAdmin) DeleteConsumerGroup(group string) error { - coordinator, err := ca.client.Coordinator(group) - if err != nil { - return err - } - + var response *DeleteGroupsResponse request := &DeleteGroupsRequest{ Groups: []string{group}, } @@ -1094,21 +1137,34 @@ func (ca *clusterAdmin) DeleteConsumerGroup(group string) error { request.Version = 1 } - resp, err := coordinator.DeleteGroups(request) - if err != nil { - return err - } + return ca.retryOnError(isRetriableGroupCoordinatorError, func() (err error) { + defer func() { + if err != nil && isRetriableGroupCoordinatorError(err) { + _ = ca.client.RefreshCoordinator(group) + } + }() - groupErr, ok := resp.GroupErrorCodes[group] - if !ok { - return ErrIncompleteResponse - } + coordinator, err := ca.client.Coordinator(group) + if err != nil { + return err + } - if !errors.Is(groupErr, ErrNoError) { - return groupErr - } + response, err = coordinator.DeleteGroups(request) + if err != nil { + return err + } - return nil + groupErr, ok := response.GroupErrorCodes[group] + if !ok { + return ErrIncompleteResponse + } + + if !errors.Is(groupErr, ErrNoError) { + return groupErr + } + + return nil + }) } func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32][]DescribeLogDirsResponseDirMetadata, err error) { @@ -1206,7 +1262,7 @@ func (ca *clusterAdmin) AlterUserScramCredentials(u []AlterUserScramCredentialsU } var rsp *AlterUserScramCredentialsResponse - err := ca.retryOnError(isErrNotController, func() error { + err := ca.retryOnError(isRetriableControllerError, func() error { b, err := ca.Controller() if err != nil { return err @@ -1284,18 +1340,14 @@ func (ca *clusterAdmin) AlterClientQuotas(entity []QuotaEntityComponent, op Clie return nil } -func (ca *clusterAdmin) RemoveMemberFromConsumerGroup(groupId string, groupInstanceIds []string) (*LeaveGroupResponse, error) 
{ +func (ca *clusterAdmin) RemoveMemberFromConsumerGroup(group string, groupInstanceIds []string) (*LeaveGroupResponse, error) { if !ca.conf.Version.IsAtLeast(V2_4_0_0) { return nil, ConfigurationError("Removing members from a consumer group headers requires Kafka version of at least v2.4.0") } - - controller, err := ca.client.Coordinator(groupId) - if err != nil { - return nil, err - } + var response *LeaveGroupResponse request := &LeaveGroupRequest{ Version: 3, - GroupId: groupId, + GroupId: group, } for _, instanceId := range groupInstanceIds { groupInstanceId := instanceId @@ -1303,5 +1355,28 @@ func (ca *clusterAdmin) RemoveMemberFromConsumerGroup(groupId string, groupInsta GroupInstanceId: &groupInstanceId, }) } - return controller.LeaveGroup(request) + err := ca.retryOnError(isRetriableGroupCoordinatorError, func() (err error) { + defer func() { + if err != nil && isRetriableGroupCoordinatorError(err) { + _ = ca.client.RefreshCoordinator(group) + } + }() + + coordinator, err := ca.client.Coordinator(group) + if err != nil { + return err + } + + response, err = coordinator.LeaveGroup(request) + if err != nil { + return err + } + if !errors.Is(response.Err, ErrNoError) { + return response.Err + } + + return nil + }) + + return response, err } diff --git a/vendor/github.com/IBM/sarama/balance_strategy.go b/vendor/github.com/IBM/sarama/balance_strategy.go index 30d41779c1e9b..b5bc30a13bdfc 100644 --- a/vendor/github.com/IBM/sarama/balance_strategy.go +++ b/vendor/github.com/IBM/sarama/balance_strategy.go @@ -989,6 +989,7 @@ func (p *partitionMovements) getTheActualPartitionToBeMoved(partition topicParti return reversePairPartition } +//nolint:unused // this is used but only in unittests as a helper (which are excluded by the integration build tag) func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, currentPath []string) ([]string, bool) { if src == dst { return currentPath, false @@ -1023,6 +1024,7 @@ func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, cur return currentPath, false } +//nolint:unused // this is used but only in unittests as a helper (which are excluded by the integration build tag) func (p *partitionMovements) in(cycle []string, cycles [][]string) bool { superCycle := make([]string, len(cycle)-1) for i := 0; i < len(cycle)-1; i++ { @@ -1037,6 +1039,7 @@ func (p *partitionMovements) in(cycle []string, cycles [][]string) bool { return false } +//nolint:unused // this is used but only in unittests as a helper (which are excluded by the integration build tag) func (p *partitionMovements) hasCycles(pairs []consumerPair) bool { cycles := make([][]string, 0) for _, pair := range pairs { @@ -1068,6 +1071,7 @@ func (p *partitionMovements) hasCycles(pairs []consumerPair) bool { return false } +//nolint:unused // this is used but only in unittests as a helper (which are excluded by the integration build tag) func (p *partitionMovements) isSticky() bool { for topic, movements := range p.PartitionMovementsByTopic { movementPairs := make([]consumerPair, len(movements)) @@ -1085,6 +1089,7 @@ func (p *partitionMovements) isSticky() bool { return true } +//nolint:unused // this is used but only in unittests as a helper (which are excluded by the integration build tag) func indexOfSubList(source []string, target []string) int { targetSize := len(target) maxCandidate := len(source) - targetSize diff --git a/vendor/github.com/IBM/sarama/create_topics_request.go b/vendor/github.com/IBM/sarama/create_topics_request.go index 
8382d17c20a74..e8c0f01472c35 100644 --- a/vendor/github.com/IBM/sarama/create_topics_request.go +++ b/vendor/github.com/IBM/sarama/create_topics_request.go @@ -16,6 +16,21 @@ type CreateTopicsRequest struct { ValidateOnly bool } +func NewCreateTopicsRequest(version KafkaVersion, topicDetails map[string]*TopicDetail, timeout time.Duration) *CreateTopicsRequest { + r := &CreateTopicsRequest{ + TopicDetails: topicDetails, + Timeout: timeout, + } + if version.IsAtLeast(V2_0_0_0) { + r.Version = 3 + } else if version.IsAtLeast(V0_11_0_0) { + r.Version = 2 + } else if version.IsAtLeast(V0_10_2_0) { + r.Version = 1 + } + return r +} + func (c *CreateTopicsRequest) encode(pe packetEncoder) error { if err := pe.putArrayLength(len(c.TopicDetails)); err != nil { return err diff --git a/vendor/github.com/IBM/sarama/delete_topics_request.go b/vendor/github.com/IBM/sarama/delete_topics_request.go index 252c0d0259461..f38f32770be7b 100644 --- a/vendor/github.com/IBM/sarama/delete_topics_request.go +++ b/vendor/github.com/IBM/sarama/delete_topics_request.go @@ -8,6 +8,21 @@ type DeleteTopicsRequest struct { Timeout time.Duration } +func NewDeleteTopicsRequest(version KafkaVersion, topics []string, timeout time.Duration) *DeleteTopicsRequest { + d := &DeleteTopicsRequest{ + Topics: topics, + Timeout: timeout, + } + if version.IsAtLeast(V2_1_0_0) { + d.Version = 3 + } else if version.IsAtLeast(V2_0_0_0) { + d.Version = 2 + } else if version.IsAtLeast(V0_11_0_0) { + d.Version = 1 + } + return d +} + func (d *DeleteTopicsRequest) encode(pe packetEncoder) error { if err := pe.putStringArray(d.Topics); err != nil { return err diff --git a/vendor/github.com/IBM/sarama/docker-compose.yml b/vendor/github.com/IBM/sarama/docker-compose.yml index a0e3d2e21e925..1e66cca0ce5d4 100644 --- a/vendor/github.com/IBM/sarama/docker-compose.yml +++ b/vendor/github.com/IBM/sarama/docker-compose.yml @@ -1,6 +1,6 @@ services: zookeeper-1: - hostname: 'zookeeper-1' + container_name: 'zookeeper-1' image: 'docker.io/library/zookeeper:3.7.2' init: true restart: always @@ -13,7 +13,7 @@ services: ZOO_MAX_CLIENT_CNXNS: '0' ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok' zookeeper-2: - hostname: 'zookeeper-2' + container_name: 'zookeeper-2' image: 'docker.io/library/zookeeper:3.7.2' init: true restart: always @@ -26,7 +26,7 @@ services: ZOO_MAX_CLIENT_CNXNS: '0' ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok' zookeeper-3: - hostname: 'zookeeper-3' + container_name: 'zookeeper-3' image: 'docker.io/library/zookeeper:3.7.2' init: true restart: always @@ -39,7 +39,7 @@ services: ZOO_MAX_CLIENT_CNXNS: '0' ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok' kafka-1: - hostname: 'kafka-1' + container_name: 'kafka-1' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.2}' init: true build: @@ -74,6 +74,7 @@ services: KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR: '2' KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' KAFKA_CFG_BROKER_ID: '1' KAFKA_CFG_BROKER_RACK: '1' @@ -85,7 +86,7 @@ services: KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" kafka-2: - hostname: 'kafka-2' + container_name: 'kafka-2' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.2}' init: true build: @@ -120,6 +121,7 @@ services: KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 
'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR: '2' KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' KAFKA_CFG_BROKER_ID: '2' KAFKA_CFG_BROKER_RACK: '2' @@ -131,7 +133,7 @@ services: KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" kafka-3: - hostname: 'kafka-3' + container_name: 'kafka-3' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.2}' init: true build: @@ -166,6 +168,7 @@ services: KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR: '2' KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' KAFKA_CFG_BROKER_ID: '3' KAFKA_CFG_BROKER_RACK: '3' @@ -177,7 +180,7 @@ services: KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" kafka-4: - hostname: 'kafka-4' + container_name: 'kafka-4' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.2}' init: true build: @@ -212,6 +215,7 @@ services: KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR: '2' KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' KAFKA_CFG_BROKER_ID: '4' KAFKA_CFG_BROKER_RACK: '4' @@ -223,7 +227,7 @@ services: KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" kafka-5: - hostname: 'kafka-5' + container_name: 'kafka-5' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.2}' init: true build: @@ -258,6 +262,7 @@ services: KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR: '2' KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' KAFKA_CFG_BROKER_ID: '5' KAFKA_CFG_BROKER_RACK: '5' @@ -269,7 +274,7 @@ services: KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" toxiproxy: - hostname: 'toxiproxy' + container_name: 'toxiproxy' image: 'ghcr.io/shopify/toxiproxy:2.4.0' init: true healthcheck: diff --git a/vendor/github.com/IBM/sarama/errors.go b/vendor/github.com/IBM/sarama/errors.go index 2c431aecb05f0..842d302571ae7 100644 --- a/vendor/github.com/IBM/sarama/errors.go +++ b/vendor/github.com/IBM/sarama/errors.go @@ -304,7 +304,7 @@ func (err KError) Error() string { case ErrOffsetsLoadInProgress: return "kafka server: The coordinator is still loading offsets and cannot currently process requests" case ErrConsumerCoordinatorNotAvailable: - return "kafka server: Offset's topic has not yet been created" + return "kafka server: The coordinator is not available" case ErrNotCoordinatorForConsumer: return "kafka server: Request was for a consumer group that is not coordinated by this broker" case ErrInvalidTopic: diff --git a/vendor/github.com/IBM/sarama/server.properties b/vendor/github.com/IBM/sarama/server.properties new file mode 100644 index 0000000000000..21ba1c7d9c61b --- /dev/null +++ b/vendor/github.com/IBM/sarama/server.properties @@ -0,0 +1,138 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license 
agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# This configuration file is intended for use in ZK-based mode, where Apache ZooKeeper is required. +# See kafka.server.KafkaConfig for additional details and defaults +# + +############################# Server Basics ############################# + +# The id of the broker. This must be set to a unique integer for each broker. +broker.id=0 + +############################# Socket Server Settings ############################# + +# The address the socket server listens on. If not configured, the host name will be equal to the value of +# java.net.InetAddress.getCanonicalHostName(), with PLAINTEXT listener name, and port 9092. +# FORMAT: +# listeners = listener_name://host_name:port +# EXAMPLE: +# listeners = PLAINTEXT://your.host.name:9092 +#listeners=PLAINTEXT://:9092 + +# Listener name, hostname and port the broker will advertise to clients. +# If not set, it uses the value for "listeners". +#advertised.listeners=PLAINTEXT://your.host.name:9092 + +# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details +#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL + +# The number of threads that the server uses for receiving requests from the network and sending responses to the network +num.network.threads=3 + +# The number of threads that the server uses for processing requests, which may include disk I/O +num.io.threads=8 + +# The send buffer (SO_SNDBUF) used by the socket server +socket.send.buffer.bytes=102400 + +# The receive buffer (SO_RCVBUF) used by the socket server +socket.receive.buffer.bytes=102400 + +# The maximum size of a request that the socket server will accept (protection against OOM) +socket.request.max.bytes=104857600 + + +############################# Log Basics ############################# + +# A comma separated list of directories under which to store log files +log.dirs=/tmp/kafka-logs + +# The default number of log partitions per topic. More partitions allow greater +# parallelism for consumption, but this will also result in more files across +# the brokers. +num.partitions=1 + +# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +# This value is recommended to be increased for installations with data dirs located in RAID array. +num.recovery.threads.per.data.dir=1 + +############################# Internal Topic Settings ############################# +# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" +# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. 
+offsets.topic.replication.factor=1 +transaction.state.log.replication.factor=1 +transaction.state.log.min.isr=1 + +############################# Log Flush Policy ############################# + +# Messages are immediately written to the filesystem but by default we only fsync() to sync +# the OS cache lazily. The following configurations control the flush of data to disk. +# There are a few important trade-offs here: +# 1. Durability: Unflushed data may be lost if you are not using replication. +# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. +# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. +# The settings below allow one to configure the flush policy to flush data after a period of time or +# every N messages (or both). This can be done globally and overridden on a per-topic basis. + +# The number of messages to accept before forcing a flush of data to disk +#log.flush.interval.messages=10000 + +# The maximum amount of time a message can sit in a log before we force a flush +#log.flush.interval.ms=1000 + +############################# Log Retention Policy ############################# + +# The following configurations control the disposal of log segments. The policy can +# be set to delete segments after a period of time, or after a given size has accumulated. +# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens +# from the end of the log. + +# The minimum age of a log file to be eligible for deletion due to age +log.retention.hours=168 + +# A size-based retention policy for logs. Segments are pruned from the log unless the remaining +# segments drop below log.retention.bytes. Functions independently of log.retention.hours. +#log.retention.bytes=1073741824 + +# The maximum size of a log segment file. When this size is reached a new log segment will be created. +#log.segment.bytes=1073741824 + +# The interval at which log segments are checked to see if they can be deleted according +# to the retention policies +log.retention.check.interval.ms=300000 + +############################# Zookeeper ############################# + +# Zookeeper connection string (see zookeeper docs for details). +# This is a comma separated host:port pairs, each corresponding to a zk +# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". +# You can also append an optional chroot string to the urls to specify the +# root directory for all kafka znodes. +zookeeper.connect=localhost:2181 + +# Timeout in ms for connecting to zookeeper +zookeeper.connection.timeout.ms=18000 + + +############################# Group Coordinator Settings ############################# + +# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance. +# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms. +# The default value for this is 3 seconds. +# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing. +# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup. 
+group.initial.rebalance.delay.ms=0 diff --git a/vendor/github.com/IBM/sarama/utils.go b/vendor/github.com/IBM/sarama/utils.go index d5b77e0d92b36..b0e1aceff14d4 100644 --- a/vendor/github.com/IBM/sarama/utils.go +++ b/vendor/github.com/IBM/sarama/utils.go @@ -206,6 +206,7 @@ var ( V3_8_0_0 = newKafkaVersion(3, 8, 0, 0) V3_8_1_0 = newKafkaVersion(3, 8, 1, 0) V3_9_0_0 = newKafkaVersion(3, 9, 0, 0) + V4_0_0_0 = newKafkaVersion(4, 0, 0, 0) SupportedVersions = []KafkaVersion{ V0_8_2_0, @@ -277,9 +278,10 @@ var ( V3_8_0_0, V3_8_1_0, V3_9_0_0, + V4_0_0_0, } MinVersion = V0_8_2_0 - MaxVersion = V3_9_0_0 + MaxVersion = V4_0_0_0 DefaultVersion = V2_1_0_0 // reduced set of protocol versions to matrix test diff --git a/vendor/modules.txt b/vendor/modules.txt index 5ff5397c6ec11..5040e9ea62e0e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -263,8 +263,8 @@ github.com/IBM/ibm-cos-sdk-go/private/protocol/restxml github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil github.com/IBM/ibm-cos-sdk-go/service/s3 github.com/IBM/ibm-cos-sdk-go/service/s3/s3iface -# github.com/IBM/sarama v1.44.0 -## explicit; go 1.20 +# github.com/IBM/sarama v1.45.0 +## explicit; go 1.21 github.com/IBM/sarama # github.com/Masterminds/goutils v1.1.1 ## explicit
fix
update module github.com/ibm/sarama to v1.45.0 (#15636)
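The heart of this sarama change is a retry wrapper around group-coordinator calls: each admin operation retries while the error unwraps to `ErrNotCoordinatorForConsumer`, `ErrConsumerCoordinatorNotAvailable`, or `io.EOF`, and a deferred `RefreshCoordinator` invalidates the cached coordinator so the next attempt re-discovers it. A minimal standalone sketch of that shape, with a stand-in error value in place of sarama's `KError` codes:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// errNotCoordinator stands in for sarama's ErrNotCoordinatorForConsumer.
var errNotCoordinator = errors.New("not coordinator for group")

// isRetriable mirrors isRetriableGroupCoordinatorError from the diff:
// coordinator moved, coordinator unavailable, or the connection hit EOF.
func isRetriable(err error) bool {
	return errors.Is(err, errNotCoordinator) || errors.Is(err, io.EOF)
}

// retryOnError mirrors the sarama helper's shape: call fn up to attempts
// times, retrying only while shouldRetry reports the error as transient.
func retryOnError(attempts int, shouldRetry func(error) bool, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil || !shouldRetry(err) {
			return err
		}
	}
	return err
}

func main() {
	calls := 0
	err := retryOnError(3, isRetriable, func() error {
		calls++
		if calls < 3 {
			return errNotCoordinator // transient: coordinator moved
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}
```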
86d943d8f43f3c7f6e7c210eb273708d10a6ee79
2023-06-23 14:39:06
Hans Kristian Flaatten
helm: add updateStrategy for canary pods (#9760)
false
diff --git a/docs/sources/installation/helm/reference.md b/docs/sources/installation/helm/reference.md index 6c50c40dbeec0..05f7b6a10de49 100644 --- a/docs/sources/installation/helm/reference.md +++ b/docs/sources/installation/helm/reference.md @@ -2455,6 +2455,20 @@ null <td><pre lang="json"> [] </pre> +</td> + </tr> + <tr> + <td>monitoring.lokiCanary.updateStrategy</td> + <td>object</td> + <td>Update strategy for the `loki-canary` Daemonset pods</td> + <td><pre lang="json"> +{ + "rollingUpdate": { + "maxUnavailable": 1 + }, + "type": "RollingUpdate" +} +</pre> </td> </tr> <tr> diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md index 954661f242ee2..0ddc8730c2ea4 100644 --- a/production/helm/loki/CHANGELOG.md +++ b/production/helm/loki/CHANGELOG.md @@ -12,6 +12,9 @@ Entries should be ordered as follows: Entries should include a reference to the pull request that introduced the change. [//]: # (<AUTOMATED_UPDATES_LOCATOR> : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.) +## 5.8.4 + +- [ENHANCEMENT] Add loki.lokiCanary.updateStrategy configuration ## 5.8.3 diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml index 5006a51d0a3f6..4ca317af91cd1 100644 --- a/production/helm/loki/Chart.yaml +++ b/production/helm/loki/Chart.yaml @@ -3,7 +3,7 @@ name: loki description: Helm chart for Grafana Loki in simple, scalable mode type: application appVersion: 2.8.2 -version: 5.8.3 +version: 5.8.4 home: https://grafana.github.io/helm-charts sources: - https://github.com/grafana/loki diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md index ff04bd0f916dd..3cf0f1819edca 100644 --- a/production/helm/loki/README.md +++ b/production/helm/loki/README.md @@ -1,6 +1,6 @@ # loki -![Version: 5.8.3](https://img.shields.io/badge/Version-5.8.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.8.2](https://img.shields.io/badge/AppVersion-2.8.2-informational?style=flat-square) +![Version: 5.8.4](https://img.shields.io/badge/Version-5.8.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.8.2](https://img.shields.io/badge/AppVersion-2.8.2-informational?style=flat-square) Helm chart for Grafana Loki in simple, scalable mode diff --git a/production/helm/loki/templates/loki-canary/daemonset.yaml b/production/helm/loki/templates/loki-canary/daemonset.yaml index f2dc06f8bdb6f..c127ced279433 100644 --- a/production/helm/loki/templates/loki-canary/daemonset.yaml +++ b/production/helm/loki/templates/loki-canary/daemonset.yaml @@ -12,6 +12,10 @@ spec: selector: matchLabels: {{- include "loki-canary.selectorLabels" $ | nindent 6 }} + {{- with .updateStrategy }} + updateStrategy: + {{- toYaml . 
| nindent 4 }} + {{- end }} template: metadata: annotations: diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml index a20351fe0f111..24679f158fbee 100644 --- a/production/helm/loki/values.yaml +++ b/production/helm/loki/values.yaml @@ -661,6 +661,12 @@ monitoring: digest: null # -- Docker image pull policy pullPolicy: IfNotPresent + # -- Update strategy for the `loki-canary` Daemonset pods + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + # Configuration for the write pod(s) write: # -- Number of replicas for the write
helm
add updateStrategy for canary pods (#9760)
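For reference, the chart's new `monitoring.lokiCanary.updateStrategy` default corresponds to the standard Kubernetes DaemonSet rolling-update strategy. A sketch of the equivalent API object in Go, assuming the chart renders the value verbatim into the DaemonSet spec:

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// The object the new Helm value renders into: a RollingUpdate
	// DaemonSet strategy with at most one canary pod unavailable at a
	// time (the chart's documented default).
	maxUnavailable := intstr.FromInt(1)
	strategy := appsv1.DaemonSetUpdateStrategy{
		Type: appsv1.RollingUpdateDaemonSetStrategyType,
		RollingUpdate: &appsv1.RollingUpdateDaemonSet{
			MaxUnavailable: &maxUnavailable,
		},
	}
	fmt.Printf("%+v\n", strategy)
}
```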
03f18acdffa1804d6149a9811d7939383befc484
2025-01-31 02:13:02
renovate[bot]
fix(deps): update module github.com/aws/aws-sdk-go-v2/config to v1.29.3 (main) (#16022)
false
diff --git a/tools/lambda-promtail/go.mod b/tools/lambda-promtail/go.mod index 7ab4750057389..8869c9e6e6965 100644 --- a/tools/lambda-promtail/go.mod +++ b/tools/lambda-promtail/go.mod @@ -6,8 +6,8 @@ toolchain go1.23.5 require ( github.com/aws/aws-lambda-go v1.47.0 - github.com/aws/aws-sdk-go-v2 v1.34.0 - github.com/aws/aws-sdk-go-v2/config v1.29.2 + github.com/aws/aws-sdk-go-v2 v1.35.0 + github.com/aws/aws-sdk-go-v2/config v1.29.3 github.com/aws/aws-sdk-go-v2/service/s3 v1.75.0 github.com/go-kit/log v0.2.1 github.com/gogo/protobuf v1.3.2 @@ -27,19 +27,19 @@ require ( github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.55 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.25 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.29 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.29 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.56 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.26 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.30 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.30 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.29 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.10 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.11 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.10 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.24.12 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.11 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.33.10 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.13 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.12 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.11 // indirect github.com/aws/smithy-go v1.22.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500 // indirect diff --git a/tools/lambda-promtail/go.sum b/tools/lambda-promtail/go.sum index 0bf157a9d2683..5aaaf8b6141df 100644 --- a/tools/lambda-promtail/go.sum +++ b/tools/lambda-promtail/go.sum @@ -48,20 +48,20 @@ github.com/aws/aws-lambda-go v1.47.0 h1:0H8s0vumYx/YKs4sE7YM0ktwL2eWse+kfopsRI1s github.com/aws/aws-lambda-go v1.47.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7RfgJv23DymV8A= github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= -github.com/aws/aws-sdk-go-v2 v1.34.0 h1:9iyL+cjifckRGEVpRKZP3eIxVlL06Qk1Tk13vreaVQU= -github.com/aws/aws-sdk-go-v2 v1.34.0/go.mod h1:JgstGg0JjWU1KpVJjD5H0y0yyAIpSdKEq556EI6yOOM= +github.com/aws/aws-sdk-go-v2 v1.35.0 h1:jTPxEJyzjSuuz0wB+302hr8Eu9KUI+Zv8zlujMGJpVI= +github.com/aws/aws-sdk-go-v2 v1.35.0/go.mod h1:JgstGg0JjWU1KpVJjD5H0y0yyAIpSdKEq556EI6yOOM= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg= 
-github.com/aws/aws-sdk-go-v2/config v1.29.2 h1:JuIxOEPcSKpMB0J+khMjznG9LIhIBdmqNiEcPclnwqc= -github.com/aws/aws-sdk-go-v2/config v1.29.2/go.mod h1:HktTHregOZwNSM/e7WTfVSu9RCX+3eOv+6ij27PtaYs= -github.com/aws/aws-sdk-go-v2/credentials v1.17.55 h1:CDhKnDEaGkLA5ZszV/qw5uwN5M8rbv9Cl0JRN+PRsaM= -github.com/aws/aws-sdk-go-v2/credentials v1.17.55/go.mod h1:kPD/vj+RB5MREDUky376+zdnjZpR+WgdBBvwrmnlmKE= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.25 h1:kU7tmXNaJ07LsyN3BUgGqAmVmQtq0w6duVIHAKfp0/w= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.25/go.mod h1:OiC8+OiqrURb1wrwmr/UbOVLFSWEGxjinj5C299VQdo= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.29 h1:Ej0Rf3GMv50Qh4G4852j2djtoDb7AzQ7MuQeFHa3D70= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.29/go.mod h1:oeNTC7PwJNoM5AznVr23wxhLnuJv0ZDe5v7w0wqIs9M= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.29 h1:6e8a71X+9GfghragVevC5bZqvATtc3mAMgxpSNbgzF0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.29/go.mod h1:c4jkZiQ+BWpNqq7VtrxjwISrLrt/VvPq3XiopkUIolI= +github.com/aws/aws-sdk-go-v2/config v1.29.3 h1:a5Ucjxe6iV+LHEBmYA9w40rT5aGxWybx/4l/O/fvJlE= +github.com/aws/aws-sdk-go-v2/config v1.29.3/go.mod h1:pt9z1x12zDiDb4iFLrxoeAKLVCU/Gp9DL/5BnwlY77o= +github.com/aws/aws-sdk-go-v2/credentials v1.17.56 h1:JKMBreKudV+ozx6rZJLvEtiexv48aEdhdC7mXUw9MLs= +github.com/aws/aws-sdk-go-v2/credentials v1.17.56/go.mod h1:S3xRjIHD8HHFgMTz4L56q/7IldfNtGL9JjH/vP3U6DA= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.26 h1:XMBqBEuZLf8yxtH+mU/uUDyQbN4iD/xv9h6he2+lzhw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.26/go.mod h1:d0+wQ/3CYGPuHEfBTPpQdfUX7gjk0/Lxs5Q6KzdEGY8= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.30 h1:+7AzSGNhHoY53di13lvztf9Dyd/9ofzoYGBllkWp3a0= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.30/go.mod h1:Jxd/FrCny99yURiQiMywgXvBhd7tmgdv6KdlUTNzMSo= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.30 h1:Ex06eY6I5rO7IX0HalGfa5nGjpBoOsS1Qm3xfjkuszs= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.30/go.mod h1:AvyEMA9QcX59kFhVizBpIBpEMThUTXssuJe+emBdcGM= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.29 h1:g9OUETuxA8i/Www5Cby0R3WSTe7ppFTZXHVLNskNS4w= @@ -70,18 +70,18 @@ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/C github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.3 h1:EP1ITDgYVPM2dL1bBBntJ7AW5yTjuWGz9XO+CZwpALU= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.3/go.mod h1:5lWNWeAgWenJ/BZ/CP9k9DjLbC0pjnM045WjXRPPi14= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.10 h1:hN4yJBGswmFTOVYqmbz1GBs9ZMtQe8SrYxPwrkrlRv8= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.10/go.mod h1:TsxON4fEZXyrKY+D+3d2gSTyJkGORexIYab9PTf56DA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.11 h1:5JKQ2J3BBW4ovy6A/5Lwx9SpA6IzgH8jB3bquGZ1NUw= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.11/go.mod h1:VShCk7rfCzK/b9U1aSkzLwcOoaDlYna16482QqEavis= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.10 h1:fXoWC2gi7tdJYNTPnnlSGzEVwewUchOi8xVq/dkg8Qs= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.10/go.mod 
h1:cvzBApD5dVazHU8C2rbBQzzzsKc8m5+wNJ9mCRZLKPc= github.com/aws/aws-sdk-go-v2/service/s3 v1.75.0 h1:UPQJDyqUXICUt60X4PwbiEf+2QQ4VfXUhDk8OEiGtik= github.com/aws/aws-sdk-go-v2/service/s3 v1.75.0/go.mod h1:hHnELVnIHltd8EOF3YzahVX6F6y2C6dNqpRj1IMkS5I= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.12 h1:kznaW4f81mNMlREkU9w3jUuJvU5g/KsqDV43ab7Rp6s= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.12/go.mod h1:bZy9r8e0/s0P7BSDHgMLXK2KvdyRRBIQ2blKlvLt0IU= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.11 h1:mUwIpAvILeKFnRx4h1dEgGEFGuV8KJ3pEScZWVFYuZA= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.11/go.mod h1:JDJtD+b8HNVv71axz8+S5492KM8wTzHRFpMKQbPlYxw= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.10 h1:g9d+TOsu3ac7SgmY2dUf1qMgu/uJVTlQ4VCbH6hRxSw= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.10/go.mod h1:WZfNmntu92HO44MVZAubQaz3qCuIdeOdog2sADfU6hU= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.13 h1:q4pOAKxypbFoUJzOpgo939bF50qb4DgYshiDfcsdN0M= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.13/go.mod h1:G/0PTg7+vQT42ictQGjJhixzTcVZtHFvrN/OeTXrRfQ= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.12 h1:4sGSGshSSfO1vrcXruPick3ioSf8nhhD6nuB2ni37P4= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.12/go.mod h1:NHpu/pLOelViA4qxkAFH10VLqh+XeLhZfXDaFyMVgSs= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.11 h1:RIXOjp7Dp4siCYJRwBHUcBdVgOWflSJGlq4ZhMI5Ta0= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.11/go.mod h1:ZR17k9bPKPR8u0IkyA6xVsjr56doNQ4ZB1fs7abYBfE= github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
fix
update module github.com/aws/aws-sdk-go-v2/config to v1.29.3 (main) (#16022)
183fe85f1cc27ad59e8c092f50b3fc531b97271f
2023-03-22 14:53:35
W.T. Chang
loki: Add route_randomly to Redis options (#8852)
false
diff --git a/CHANGELOG.md b/CHANGELOG.md index 5a15520f955d9..1728a7b2e9cbc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ ##### Enhancements +* [8852](https://github.com/grafana/loki/pull/8852) **wtchangdm**: Loki: Add `route_randomly` to Redis options. * [8848](https://github.com/grafana/loki/pull/8848) **dannykopping**: Ruler: add configurable rule evaluation jitter. * [8752](https://github.com/grafana/loki/pull/8752) **chaudum**: Add query fairness control across actors within a tenant to scheduler, which can be enabled by passing the `X-Loki-Actor-Path` header to the HTTP request of the query. * [8786](https://github.com/grafana/loki/pull/8786) **DylanGuedes**: Ingester: add new /ingester/prepare_shutdown endpoint. @@ -57,7 +58,7 @@ * [8315](https://github.com/grafana/loki/pull/8315) **thepalbi** Relicense and export `pkg/ingester` WAL code to be used in Promtail's WAL. * [8761](https://github.com/grafana/loki/pull/8761) **slim-bean** Remove "subqueries" from the metrics.go log line and instead provide `splits` and `shards` - + ##### Build #### Promtail diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md index 60c1c74d600f5..87dd3331e2731 100644 --- a/docs/sources/configuration/_index.md +++ b/docs/sources/configuration/_index.md @@ -3532,6 +3532,12 @@ redis: # CLI flag: -<prefix>.redis.max-connection-age [max_connection_age: <duration> | default = 0s] + # By default, the Redis client only reads from the master node. Enabling this + # option can lower pressure on the master node by randomly routing read-only + # commands to the master and any available replicas. + # CLI flag: -<prefix>.redis.route-randomly + [route_randomly: <boolean> | default = false] + embedded_cache: # Whether embedded cache is enabled. # CLI flag: -<prefix>.embedded-cache.enabled diff --git a/pkg/storage/chunk/cache/redis_client.go b/pkg/storage/chunk/cache/redis_client.go index 1fe568a549b76..b7a352594e07e 100644 --- a/pkg/storage/chunk/cache/redis_client.go +++ b/pkg/storage/chunk/cache/redis_client.go @@ -29,6 +29,7 @@ type RedisConfig struct { InsecureSkipVerify bool `yaml:"tls_insecure_skip_verify"` IdleTimeout time.Duration `yaml:"idle_timeout"` MaxConnAge time.Duration `yaml:"max_connection_age"` + RouteRandomly bool `yaml:"route_randomly"` } // RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet @@ -45,6 +46,7 @@ func (cfg *RedisConfig) RegisterFlagsWithPrefix(prefix, description string, f *f f.BoolVar(&cfg.InsecureSkipVerify, prefix+"redis.tls-insecure-skip-verify", false, description+"Skip validating server certificate.") f.DurationVar(&cfg.IdleTimeout, prefix+"redis.idle-timeout", 0, description+"Close connections after remaining idle for this duration. If the value is zero, then idle connections are not closed.") f.DurationVar(&cfg.MaxConnAge, prefix+"redis.max-connection-age", 0, description+"Close connections older than this duration. If the value is zero, then the pool does not close connections based on age.") + f.BoolVar(&cfg.RouteRandomly, prefix+"redis.route-randomly", false, description+"By default, the Redis client only reads from the master node. 
Enabling this option can lower pressure on the master node by randomly routing read-only commands to the master and any available replicas.") } type RedisClient struct { @@ -74,14 +76,15 @@ func NewRedisClient(cfg *RedisConfig) (*RedisClient, error) { } } opt := &redis.UniversalOptions{ - Addrs: endpoints, - MasterName: cfg.MasterName, - Username: cfg.Username, - Password: cfg.Password.String(), - DB: cfg.DB, - PoolSize: cfg.PoolSize, - IdleTimeout: cfg.IdleTimeout, - MaxConnAge: cfg.MaxConnAge, + Addrs: endpoints, + MasterName: cfg.MasterName, + Username: cfg.Username, + Password: cfg.Password.String(), + DB: cfg.DB, + PoolSize: cfg.PoolSize, + IdleTimeout: cfg.IdleTimeout, + MaxConnAge: cfg.MaxConnAge, + RouteRandomly: cfg.RouteRandomly, } if cfg.EnableTLS { opt.TLSConfig = &tls.Config{InsecureSkipVerify: cfg.InsecureSkipVerify}
loki
Add route_randomly to Redis options (#8852)
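As the diff shows, the new `route_randomly` option is passed straight through to go-redis's `UniversalOptions.RouteRandomly`. A minimal sketch of the resulting client construction, assuming the go-redis v8 API Loki vendored at the time; addresses and master name are placeholders:

```go
package main

import "github.com/go-redis/redis/v8"

func main() {
	// With RouteRandomly set, read-only commands may be routed to a
	// random node (master or replica) instead of always the master,
	// which is what the new -<prefix>.redis.route-randomly flag is
	// documented to do.
	client := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs:         []string{"redis-0:6379", "redis-1:6379"}, // placeholders
		MasterName:    "mymaster",                               // placeholder
		RouteRandomly: true,
	})
	defer client.Close()
}
```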
cce3d3bed986fd1bd6aad023d3143f2a60a6c7f0
2023-04-04 23:12:17
Kaviraj Kanagaraj
chore(format_query): Change response type to `application/json` (#9016)
false
diff --git a/CHANGELOG.md b/CHANGELOG.md index 89c8a65b98cde..46518b0d025e7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ * [8953](https://github.com/grafana/loki/pull/8953) **dannykopping**: Querier: block queries by hash. * [8851](https://github.com/grafana/loki/pull/8851) **jeschkies**: Introduce limit to require a set of labels for selecting streams. +* [9016](https://github.com/grafana/loki/pull/9016) **kavirajk**: Change response type of `format_query` handler to `application/json` ##### Fixes diff --git a/docs/sources/api/_index.md b/docs/sources/api/_index.md index 50b0e8d83c778..db8fedbec1b98 100644 --- a/docs/sources/api/_index.md +++ b/docs/sources/api/_index.md @@ -729,6 +729,15 @@ Params: The `/loki/api/v1/format_query` endpoint allows to format LogQL queries. It returns an error if the passed LogQL is invalid. It is exposed by all Loki components and helps to improve readability and the debugging experience of LogQL queries. +The following example formats the expression LogQL `{foo= "bar"}` into + +```json +{ + "status" : "success", + "data" : "{foo=\"bar\"}" +} +``` + ## List series The Series API is available under the following: diff --git a/pkg/loki/format_query_handler.go b/pkg/loki/format_query_handler.go index a37583bc3b790..4e65999ebbb38 100644 --- a/pkg/loki/format_query_handler.go +++ b/pkg/loki/format_query_handler.go @@ -1,24 +1,51 @@ package loki import ( - "fmt" + "encoding/json" "net/http" "github.com/grafana/loki/pkg/logql/syntax" - serverutil "github.com/grafana/loki/pkg/util/server" + "github.com/grafana/loki/pkg/util/server" ) func formatQueryHandler() http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { + var ( + statusCode = http.StatusOK + status = "success" + formatted string + errStr string + ) + expr, err := syntax.ParseExpr(r.FormValue("query")) if err != nil { - serverutil.WriteError(err, w) - return + statusCode = http.StatusBadRequest + status = "invalid-query" + errStr = err.Error() + } + + if err == nil { + formatted = syntax.Prettify(expr) + } + + resp := FormatQueryResponse{ + Status: status, + Data: formatted, + Err: errStr, } - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.WriteHeader(statusCode) + + if err := json.NewEncoder(w).Encode(resp); err != nil { + server.WriteError(err, w) + } - fmt.Fprintf(w, "%s", syntax.Prettify(expr)) } } + +type FormatQueryResponse struct { + Status string `json:"status"` + Data string `json:"data,omitempty"` + Err string `json:"error,omitempty"` +} diff --git a/pkg/loki/format_query_handler_test.go b/pkg/loki/format_query_handler_test.go new file mode 100644 index 0000000000000..b0b431175b945 --- /dev/null +++ b/pkg/loki/format_query_handler_test.go @@ -0,0 +1,55 @@ +package loki + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_formatQueryHandlerResponse(t *testing.T) { + cases := []struct { + name string + query string + expected FormatQueryResponse + }{ + { + name: "happy-path", + query: `{foo="bar"}`, + expected: FormatQueryResponse{ + Status: "success", + Data: `{foo="bar"}`, + }, + }, + { + name: "invalid-query", + query: `{foo="bar}`, + expected: FormatQueryResponse{ + Status: "invalid-query", + Err: "parse error at line 1, col 6: literal not terminated", + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t 
*testing.T) { + req, err := http.NewRequest("GET", fmt.Sprintf("http://localhost:808?query=%s", tc.query), nil) + require.NoError(t, err) + + w := httptest.NewRecorder() + + formatQueryHandler()(w, req) + + var got FormatQueryResponse + + err = json.NewDecoder(w.Body).Decode(&got) + require.NoError(t, err) + + assert.Equal(t, tc.expected, got) + }) + } +}
chore
Change response type to `application/json` (#9016)
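Since the handler now returns JSON rather than plain text, a client can decode the body into the same shape. A small sketch of calling the endpoint; the struct mirrors `FormatQueryResponse` from the diff, and the localhost address assumes Loki's default HTTP port:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// FormatQueryResponse mirrors the struct added in the diff.
type FormatQueryResponse struct {
	Status string `json:"status"`
	Data   string `json:"data,omitempty"`
	Err    string `json:"error,omitempty"`
}

func main() {
	base := "http://localhost:3100/loki/api/v1/format_query"
	resp, err := http.Get(base + "?query=" + url.QueryEscape(`{foo= "bar"}`))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out FormatQueryResponse
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%s: %s%s\n", out.Status, out.Data, out.Err)
	// Expected on success: success: {foo="bar"}
}
```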
52312cac78a207b29c260f187f0d55357437e39e
2022-03-01 15:53:21
Danny Kopping
ci: moving autodeploy configuration secret to new location (#5491)
false
diff --git a/.drone/drone.jsonnet b/.drone/drone.jsonnet index f50f904ae480e..3e4d5f3c7142c 100644 --- a/.drone/drone.jsonnet +++ b/.drone/drone.jsonnet @@ -44,7 +44,7 @@ local pull_secret = secret('dockerconfigjson', 'secret/data/common/gcr', '.docke local github_secret = secret('github_token', 'infra/data/ci/github/grafanabot', 'pat'); // Injected in a secret because this is a public repository and having the config here would leak our environment names -local deploy_configuration = secret('deploy_config', 'infra/data/ci/loki/deploy', 'config.json'); +local deploy_configuration = secret('deploy_config', 'common/loki/ci/autodeploy', 'config.json'); local run(name, commands) = { diff --git a/.drone/drone.yml b/.drone/drone.yml index 32bc69ce2efa2..0c95d49f80948 100644 --- a/.drone/drone.yml +++ b/.drone/drone.yml @@ -1060,11 +1060,11 @@ name: ecr_secret_key --- get: name: config.json - path: infra/data/ci/loki/deploy + path: common/loki/ci/autodeploy kind: secret name: deploy_config --- kind: signature -hmac: 6d010031b5c18947ac4710106d06f119e77d715ddc687983227700254b27e6d8 +hmac: 6fdb7d9dd0a3d62a7e1a8eaec15ec5e7b940fafb7362261d86730d5523e16e43 ...
ci
moving autodeploy configuration secret to new location (#5491)
411752368cf8ae50646089305be5c6a51a64269f
2024-04-05 00:27:51
Ed Welch
refactor: do not force chunk_retain_period value for TSDB index type (#12475)
false
diff --git a/pkg/loki/config_wrapper.go b/pkg/loki/config_wrapper.go index e10618e88c4ff..a0faed6312960 100644 --- a/pkg/loki/config_wrapper.go +++ b/pkg/loki/config_wrapper.go @@ -411,7 +411,6 @@ func applyPathPrefixDefaults(r, defaults *ConfigWrapper) { } if len(r.StorageConfig.BloomShipperConfig.WorkingDirectory) == 1 && len(r.StorageConfig.BloomShipperConfig.WorkingDirectory) == len(defaults.StorageConfig.BloomShipperConfig.WorkingDirectory) && - r.StorageConfig.BloomShipperConfig.WorkingDirectory[0] == defaults.StorageConfig.BloomShipperConfig.WorkingDirectory[0] { _ = r.StorageConfig.BloomShipperConfig.WorkingDirectory.Set(fmt.Sprintf("%s/blooms", prefix)) } @@ -676,8 +675,12 @@ func applyIngesterReplicationFactor(cfg *ConfigWrapper) { // for at least as long as the TTL on the index queries cache. func applyChunkRetain(cfg, defaults *ConfigWrapper) { if !reflect.DeepEqual(cfg.StorageConfig.IndexQueriesCacheConfig, defaults.StorageConfig.IndexQueriesCacheConfig) { - // Set the retain period to the cache validity plus one minute. One minute is arbitrary but leaves some - // buffer to make sure the chunks are there until the index entries expire. - cfg.Ingester.RetainPeriod = cfg.StorageConfig.IndexCacheValidity + 1*time.Minute + // Only apply this change if the active index period is for boltdb-shipper + p := config.ActivePeriodConfig(cfg.SchemaConfig.Configs) + if cfg.SchemaConfig.Configs[p].IndexType == config.BoltDBShipperType { + // Set the retain period to the cache validity plus one minute. One minute is arbitrary but leaves some + // buffer to make sure the chunks are there until the index entries expire. + cfg.Ingester.RetainPeriod = cfg.StorageConfig.IndexCacheValidity + 1*time.Minute + } } } diff --git a/pkg/loki/config_wrapper_test.go b/pkg/loki/config_wrapper_test.go index 1852846aa2998..bda2b8fa2596f 100644 --- a/pkg/loki/config_wrapper_test.go +++ b/pkg/loki/config_wrapper_test.go @@ -874,6 +874,15 @@ chunk_store_config: t.Run("for the index queries cache config", func(t *testing.T) { t.Run("no embedded cache enabled by default if Redis is set", func(t *testing.T) { configFileString := `--- +schema_config: + configs: + - from: 2020-10-24 + store: boltdb-shipper + object_store: filesystem + schema: v12 + index: + prefix: index_ + period: 24h storage_config: index_queries_cache_config: redis: @@ -886,6 +895,15 @@ storage_config: t.Run("no embedded cache enabled by default if Memcache is set", func(t *testing.T) { configFileString := `--- +schema_config: + configs: + - from: 2020-10-24 + store: boltdb-shipper + object_store: filesystem + schema: v12 + index: + prefix: index_ + period: 24h storage_config: index_queries_cache_config: memcached_client: @@ -1581,6 +1599,15 @@ func Test_applyChunkRetain(t *testing.T) { t.Run("chunk retain is set to IndexCacheValidity + 1 minute", func(t *testing.T) { yamlContent := ` +schema_config: + configs: + - from: 2020-10-24 + store: boltdb-shipper + object_store: filesystem + schema: v12 + index: + prefix: index_ + period: 24h storage_config: index_cache_validity: 10m index_queries_cache_config: @@ -1596,6 +1623,33 @@ storage_config: assert.NoError(t, err) assert.Equal(t, 11*time.Minute, config.Ingester.RetainPeriod) }) + + t.Run("chunk retain is not changed for tsdb index type", func(t *testing.T) { + yamlContent := ` +schema_config: + configs: + - from: 2020-10-24 + store: tsdb + object_store: filesystem + schema: v12 + index: + prefix: index_ + period: 24h +storage_config: + index_cache_validity: 10m + index_queries_cache_config: + 
memcached: + batch_size: 256 + parallelism: 10 + memcached_client: + consistent_hash: true + host: memcached-index-queries.loki-bigtable.svc.cluster.local + service: memcached-client +` + config, _, err := configWrapperFromYAML(t, yamlContent, nil) + assert.NoError(t, err) + assert.Equal(t, time.Duration(0), config.Ingester.RetainPeriod) + }) } func Test_replicationFactor(t *testing.T) { diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go index c070cbe1025be..dc4a2bb08c4c4 100644 --- a/pkg/loki/loki.go +++ b/pkg/loki/loki.go @@ -54,6 +54,7 @@ import ( "github.com/grafana/loki/v3/pkg/scheduler" internalserver "github.com/grafana/loki/v3/pkg/server" "github.com/grafana/loki/v3/pkg/storage" + "github.com/grafana/loki/v3/pkg/storage/chunk/cache" "github.com/grafana/loki/v3/pkg/storage/config" "github.com/grafana/loki/v3/pkg/storage/stores/series/index" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" @@ -276,8 +277,19 @@ func (c *Config) Validate() error { var errs []error - // Schema version 13 is required to use structured metadata p := config.ActivePeriodConfig(c.SchemaConfig.Configs) + + // If the active index type is not TSDB (which does not use an index cache) + // and the index queries cache is configured + // and the chunk retain period is less than the validity period of the index cache + // throw an error. + if c.SchemaConfig.Configs[p].IndexType != config.TSDBType && + cache.IsCacheConfigured(c.StorageConfig.IndexQueriesCacheConfig) && + c.Ingester.RetainPeriod < c.StorageConfig.IndexCacheValidity { + errs = append(errs, fmt.Errorf("CONFIG ERROR: the active index is %s which is configured to use an `index_cache_validty` (TTL) of %s, however the chunk_retain_period is %s which is LESS than the `index_cache_validity`. This can lead to query gaps, please configure the `chunk_retain_period` to be greater than the `index_cache_validity`", c.SchemaConfig.Configs[p].IndexType, c.StorageConfig.IndexCacheValidity, c.Ingester.RetainPeriod)) + } + + // Schema version 13 is required to use structured metadata version, err := c.SchemaConfig.Configs[p].VersionAsInt() if err != nil { return err diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 599218a5bf513..2f8e977be7fae 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -733,10 +733,6 @@ func (t *Loki) updateConfigForShipperStore() { TTL: t.Cfg.StorageConfig.IndexCacheValidity - 1*time.Minute, }, } - // Force the retain period to be longer than the IndexCacheValidity used in the store, this guarantees we don't - // have query gaps on chunks flushed after an index entry is cached by keeping them retained in the ingester - // and queried as part of live data until the cache TTL expires on the index entry. - t.Cfg.Ingester.RetainPeriod = t.Cfg.StorageConfig.IndexCacheValidity + 1*time.Minute // We do not want ingester to unnecessarily keep downloading files t.Cfg.StorageConfig.BoltDBShipperConfig.Mode = indexshipper.ModeWriteOnly
refactor
do not force chunk_retain_period value for TSDB index type (#12475)
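Both halves of this change (stop forcing `chunk_retain_period` for TSDB, but fail validation when a non-TSDB index cache could outlive retained chunks) enforce one invariant. A standalone sketch of that check, under the assumption that only the three inputs shown matter:

```go
package main

import (
	"fmt"
	"time"
)

// validateRetain sketches the invariant the new Validate() check enforces:
// for a non-TSDB index with an index-queries cache configured, chunks must
// stay in the ingester at least as long as cached index entries remain
// valid, or queries can miss recently flushed chunks.
func validateRetain(indexType string, cacheConfigured bool, retain, cacheValidity time.Duration) error {
	if indexType != "tsdb" && cacheConfigured && retain < cacheValidity {
		return fmt.Errorf("chunk_retain_period (%s) must be >= index_cache_validity (%s) for index type %s",
			retain, cacheValidity, indexType)
	}
	return nil
}

func main() {
	fmt.Println(validateRetain("boltdb-shipper", true, 5*time.Minute, 10*time.Minute)) // error
	fmt.Println(validateRetain("tsdb", true, 0, 10*time.Minute))                       // <nil>
}
```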
08b3b459899c67b6c865bcde452a22c62704ce7e
2020-04-01 18:52:39
Ed Welch
docs: update promtail docs for backoff (#1879)
false
diff --git a/docs/clients/promtail/configuration.md b/docs/clients/promtail/configuration.md index fd28b561dea86..83420401c066f 100644 --- a/docs/clients/promtail/configuration.md +++ b/docs/clients/promtail/configuration.md @@ -202,12 +202,15 @@ tls_config: # Configures how to retry requests to Loki when a request # fails. +# Default backoff schedule: +# 0.5s, 1s, 2s, 4s, 8s, 16s, 32s, 64s, 128s, 256s(4.267m) +# For a total time of 511.5s(8.5m) before logs are lost backoff_config: # Initial backoff time between retries - [minbackoff: <duration> | default = 100ms] + [minbackoff: <duration> | default = 500ms] # Maximum backoff time between retries - [maxbackoff: <duration> | default = 10s] + [maxbackoff: <duration> | default = 5m] # Maximum number of retries to do [maxretries: <int> | default = 10]
docs
update promtail docs for backoff (#1879)
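The schedule quoted in the docs is plain exponential doubling from the new 500ms minimum, capped at the 5m maximum, over 10 retries. A quick Go check of the arithmetic; note the real client also applies jitter, which this sketch omits:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Defaults from the updated docs: minbackoff 500ms, maxbackoff 5m,
	// maxretries 10. Doubling never reaches the 5m cap within 10 tries,
	// so the last wait is 256s (4.267m).
	min, max, retries := 500*time.Millisecond, 5*time.Minute, 10

	total := time.Duration(0)
	backoff := min
	for i := 0; i < retries; i++ {
		fmt.Println(backoff) // 500ms, 1s, 2s, 4s, ..., 4m16s
		total += backoff
		backoff *= 2
		if backoff > max {
			backoff = max
		}
	}
	fmt.Println("total:", total) // 8m31.5s, i.e. the 511.5s in the docs
}
```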
8fd8697b9320ee63663ec340480010bd75312d63
2025-02-17 18:50:59
renovate[bot]
fix(deps): update dependency @radix-ui/react-toggle to v1.1.2 (main) (#16323)
false
diff --git a/pkg/ui/frontend/package-lock.json b/pkg/ui/frontend/package-lock.json index 960cde29df752..e2bbdba788b82 100644 --- a/pkg/ui/frontend/package-lock.json +++ b/pkg/ui/frontend/package-lock.json @@ -3063,13 +3063,13 @@ } }, "node_modules/@radix-ui/react-toggle": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-toggle/-/react-toggle-1.1.1.tgz", - "integrity": "sha512-i77tcgObYr743IonC1hrsnnPmszDRn8p+EGUsUt+5a/JFn28fxaM88Py6V2mc8J5kELMWishI0rLnuGLFD/nnQ==", + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-toggle/-/react-toggle-1.1.2.tgz", + "integrity": "sha512-lntKchNWx3aCHuWKiDY+8WudiegQvBpDRAYL8dKLRvKEH8VOpl0XX6SSU/bUBqIRJbcTy4+MW06Wv8vgp10rzQ==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.1", - "@radix-ui/react-primitive": "2.0.1", + "@radix-ui/react-primitive": "2.0.2", "@radix-ui/react-use-controllable-state": "1.1.0" }, "peerDependencies": { @@ -3116,6 +3116,54 @@ } } }, + "node_modules/@radix-ui/react-toggle-group/node_modules/@radix-ui/react-toggle": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-toggle/-/react-toggle-1.1.1.tgz", + "integrity": "sha512-i77tcgObYr743IonC1hrsnnPmszDRn8p+EGUsUt+5a/JFn28fxaM88Py6V2mc8J5kELMWishI0rLnuGLFD/nnQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.1", + "@radix-ui/react-primitive": "2.0.1", + "@radix-ui/react-use-controllable-state": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-toggle/node_modules/@radix-ui/react-primitive": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.0.2.tgz", + "integrity": "sha512-Ec/0d38EIuvDF+GZjcMU/Ze6MxntVJYO/fRlCPhCaVUyPY9WTalHJw54tp9sXeJo3tlShWpy41vQRgLRGOuz+w==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-tooltip": { "version": "1.1.7", "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.1.7.tgz",
fix
update dependency @radix-ui/react-toggle to v1.1.2 (main) (#16323)
a064f5d67009b314fac346818b14b27ecebd27ba
2024-01-26 00:32:40
Trevor Whitney
feat: add release please config (#11333)
false
diff --git a/.github/workflows/minor-release-pr.yml b/.github/workflows/minor-release-pr.yml new file mode 100644 index 0000000000000..59581fa976d33 --- /dev/null +++ b/.github/workflows/minor-release-pr.yml @@ -0,0 +1,21 @@ +--- +name: 'create release PR for minor releases' +on: + push: + branches: + - 'k[0-9]*' + workflow_dispatch: {} +permissions: + contents: 'write' + issues: 'write' + pull-requests: 'write' +jobs: + create-release-pr: + uses: github/loki-release/.github/workflows/release-pr.yml@main + with: + release_repo: grafana/loki + skip_validation: false + versioning_strategy: always-bump-minor + secrets: + GCS_SERVICE_ACCOUNT_KEY: '${{ secrets.BACKEND_ENTERPRISE_DRONE }}' + GH_TOKEN: '${{ secrets.GITHUB_TOKEN }}' diff --git a/.github/workflows/patch-release-pr.yml b/.github/workflows/patch-release-pr.yml new file mode 100644 index 0000000000000..001b00d93b663 --- /dev/null +++ b/.github/workflows/patch-release-pr.yml @@ -0,0 +1,21 @@ +--- +name: 'create release PR for patch releases' +on: + push: + branches: + - 'release-[0-9].[0-9].x' + workflow_dispatch: {} +permissions: + contents: 'write' + issues: 'write' + pull-requests: 'write' +jobs: + create-release-pr: + uses: github/loki-release/.github/workflows/release-pr.yml@main + with: + release_repo: grafana/loki + skip_validation: false + versioning_strategy: always-bump-patch + secrets: + GCS_SERVICE_ACCOUNT_KEY: '${{ secrets.BACKEND_ENTERPRISE_DRONE }}' + GH_TOKEN: '${{ secrets.GITHUB_TOKEN }}' diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000000000..cacdacf773a82 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,19 @@ +--- +name: 'create release' +on: + push: + branches: + - 'release-[0-9].[0-9].x' + - 'k[0-9]*' + workflow_dispatch: {} +permissions: + contents: write + pull-requests: write +jobs: + release: + uses: github/loki-release/.github/workflows/release.yml@main + with: + release_repo: grafana/loki + secrets: + GCS_SERVICE_ACCOUNT_KEY: '${{ secrets.BACKEND_ENTERPRISE_DRONE }}' + GH_TOKEN: '${{ secrets.GH_TOKEN }}' diff --git a/.release-please-manifest.json b/.release-please-manifest.json new file mode 100644 index 0000000000000..0e134950eab83 --- /dev/null +++ b/.release-please-manifest.json @@ -0,0 +1,6 @@ +{ + "cmd/loki": "2.9.4", + "cmd/loki-canary": "2.9.4", + "cmd/logcli": "2.9.4", + "clients/cmd/promtail": "2.9.4" +} diff --git a/Makefile b/Makefile index d311ed1c4f3c6..2acf8b4285047 100644 --- a/Makefile +++ b/Makefile @@ -281,7 +281,16 @@ cmd/migrate/migrate: ############# GOX = gox $(GO_FLAGS) -output="dist/{{.Dir}}-{{.OS}}-{{.Arch}}" CGO_GOX = gox $(DYN_GO_FLAGS) -cgo -output="dist/{{.Dir}}-{{.OS}}-{{.Arch}}" + +SKIP_ARM ?= false dist: clean +ifeq ($(SKIP_ARM),true) + CGO_ENABLED=0 $(GOX) -osarch="linux/amd64 darwin/amd64 windows/amd64 freebsd/amd64" ./cmd/loki + CGO_ENABLED=0 $(GOX) -osarch="linux/amd64 darwin/amd64 windows/amd64 freebsd/amd64" ./cmd/logcli + CGO_ENABLED=0 $(GOX) -osarch="linux/amd64 darwin/amd64 windows/amd64 freebsd/amd64" ./cmd/loki-canary + CGO_ENABLED=0 $(GOX) -osarch="darwin/amd64 windows/amd64 windows/386 freebsd/amd64" ./clients/cmd/promtail + CGO_ENABLED=1 $(CGO_GOX) -tags promtail_journal_enabled -osarch="linux/amd64" ./clients/cmd/promtail +else CGO_ENABLED=0 $(GOX) -osarch="linux/amd64 linux/arm64 linux/arm darwin/amd64 darwin/arm64 windows/amd64 freebsd/amd64" ./cmd/loki CGO_ENABLED=0 $(GOX) -osarch="linux/amd64 linux/arm64 linux/arm darwin/amd64 darwin/arm64 windows/amd64 freebsd/amd64" ./cmd/logcli 
CGO_ENABLED=0 $(GOX) -osarch="linux/amd64 linux/arm64 linux/arm darwin/amd64 darwin/arm64 windows/amd64 freebsd/amd64" ./cmd/loki-canary @@ -289,6 +298,7 @@ dist: clean PKG_CONFIG_PATH="/usr/lib/aarch64-linux-gnu/pkgconfig" CC="aarch64-linux-gnu-gcc" $(CGO_GOX) -tags promtail_journal_enabled -osarch="linux/arm64" ./clients/cmd/promtail PKG_CONFIG_PATH="/usr/lib/arm-linux-gnueabihf/pkgconfig" CC="arm-linux-gnueabihf-gcc" $(CGO_GOX) -tags promtail_journal_enabled -osarch="linux/arm" ./clients/cmd/promtail CGO_ENABLED=1 $(CGO_GOX) -tags promtail_journal_enabled -osarch="linux/amd64" ./clients/cmd/promtail +endif for i in dist/*; do zip -j -m $$i.zip $$i; done pushd dist && sha256sum * > SHA256SUMS && popd @@ -307,7 +317,7 @@ publish: packages lint: ## run linters go version golangci-lint version - GO111MODULE=on golangci-lint run -v + GO111MODULE=on golangci-lint run -v --timeout 15m faillint -paths "sync/atomic=go.uber.org/atomic" ./... ######## diff --git a/pkg/querier/queryrange/limits_test.go b/pkg/querier/queryrange/limits_test.go index a80cf96dde805..61e310faee4f6 100644 --- a/pkg/querier/queryrange/limits_test.go +++ b/pkg/querier/queryrange/limits_test.go @@ -821,7 +821,6 @@ func TestAcquireWithTiming(t *testing.T) { } } - // Check that the waiting time for the third request is larger than 0 milliseconds and less than or equal to 10-5=5 milliseconds - require.Greater(t, waiting3, 0*time.Millisecond) + require.GreaterOrEqual(t, waiting3, 0*time.Millisecond) require.LessOrEqual(t, waiting3, 5*time.Millisecond) }
feat
add release please config (#11333)
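The workflow diff above also adds `.release-please-manifest.json`, which pins one current version per releasable path; release-please reads it to decide what the next tag should be. As a hedged illustration of the shape that file takes, a few lines of Go can parse it (the manifest contents are copied from the diff; everything else here is an ad-hoc example, not part of the release tooling):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Verbatim contents of the .release-please-manifest.json added by the commit.
	manifest := []byte(`{
	  "cmd/loki": "2.9.4",
	  "cmd/loki-canary": "2.9.4",
	  "cmd/logcli": "2.9.4",
	  "clients/cmd/promtail": "2.9.4"
	}`)

	// The file is a flat map from releasable path to its current version.
	versions := map[string]string{}
	if err := json.Unmarshal(manifest, &versions); err != nil {
		panic(err)
	}
	for path, v := range versions {
		fmt.Printf("%s is at %s\n", path, v)
	}
}
```

The two new PR workflows differ only in their `versioning_strategy` input (`always-bump-minor` on `k[0-9]*` branches, `always-bump-patch` on `release-[0-9].[0-9].x`), which is what determines how these manifest versions are bumped.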
92092ecc322514100ab0f55c334755f63661d763
2024-03-22 23:15:31
Joao Marcal
chore(operator): fix ocp community release (#12323)
false
diff --git a/.github/workflows/operator-reusable-hub-release.yml b/.github/workflows/operator-reusable-hub-release.yml index ba901d4f99c31..862d072401dd3 100644 --- a/.github/workflows/operator-reusable-hub-release.yml +++ b/.github/workflows/operator-reusable-hub-release.yml @@ -63,21 +63,19 @@ jobs: VERSION: ${{ env.version }} OCP_DIR: ${{ env.ocpDir || ''}} run: | - mkdir operators/loki-operator/${VERSION} + mkdir operators/loki-operator/${VERSION} cp -R ./tmp/operator/bundle/community${OCP_DIR}/* operators/loki-operator/${VERSION} rm -f "operators/loki-operator/${VERSION}/bundle.Dockerfile" rm -rf ./tmp - name: Add OpenShift Supported versions to redhat catalog if: ${{ inputs.org == 'redhat-openshift-ecosystem' }} - env: - VERSION: ${{ env.version }} - OCP_SUPPORTED_VERSIONS: ${{ env.ocpSupportedVersions || ''}} uses: fjogeleit/yaml-update-action@main with: - valueFile: "operators/loki-operator/${VERSION}/metadata/annotations.yaml" + valueFile: "operators/loki-operator/${{ env.version }}/metadata/annotations.yaml" propertyPath: "annotations['com.redhat.openshift.versions']" - value: ${OCP_SUPPORTED_VERSIONS} + value: ${{ env.ocpSupportedVersions }} + commitChange: false - name: Use CLA approved github bot run: |
chore
fix ocp community release (#12323)
9b47b19f1822736d93bce760bcffa6812bfe6def
2025-02-17 16:45:04
renovate[bot]
fix(deps): update dependency @radix-ui/react-toast to v1.2.6 (main) (#16320)
false
diff --git a/pkg/ui/frontend/package-lock.json b/pkg/ui/frontend/package-lock.json index da60c332c91a9..960cde29df752 100644 --- a/pkg/ui/frontend/package-lock.json +++ b/pkg/ui/frontend/package-lock.json @@ -2906,23 +2906,146 @@ } }, "node_modules/@radix-ui/react-toast": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/@radix-ui/react-toast/-/react-toast-1.2.5.tgz", - "integrity": "sha512-ZzUsAaOx8NdXZZKcFNDhbSlbsCUy8qQWmzTdgrlrhhZAOx2ofLtKrBDW9fkqhFvXgmtv560Uj16pkLkqML7SHA==", + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/@radix-ui/react-toast/-/react-toast-1.2.6.tgz", + "integrity": "sha512-gN4dpuIVKEgpLn1z5FhzT9mYRUitbfZq9XqN/7kkBMUgFTzTG8x/KszWJugJXHcwxckY8xcKDZPz7kG3o6DsUA==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.1", - "@radix-ui/react-collection": "1.1.1", + "@radix-ui/react-collection": "1.1.2", "@radix-ui/react-compose-refs": "1.1.1", "@radix-ui/react-context": "1.1.1", - "@radix-ui/react-dismissable-layer": "1.1.4", - "@radix-ui/react-portal": "1.1.3", + "@radix-ui/react-dismissable-layer": "1.1.5", + "@radix-ui/react-portal": "1.1.4", "@radix-ui/react-presence": "1.1.2", - "@radix-ui/react-primitive": "2.0.1", + "@radix-ui/react-primitive": "2.0.2", "@radix-ui/react-use-callback-ref": "1.1.0", "@radix-ui/react-use-controllable-state": "1.1.0", "@radix-ui/react-use-layout-effect": "1.1.0", - "@radix-ui/react-visually-hidden": "1.1.1" + "@radix-ui/react-visually-hidden": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-toast/node_modules/@radix-ui/react-collection": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.1.2.tgz", + "integrity": "sha512-9z54IEKRxIa9VityapoEYMuByaG42iSy1ZXlY2KcuLSEtq8x4987/N6m15ppoMffgZX72gER2uHe1D9Y6Unlcw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.1", + "@radix-ui/react-context": "1.1.1", + "@radix-ui/react-primitive": "2.0.2", + "@radix-ui/react-slot": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-toast/node_modules/@radix-ui/react-dismissable-layer": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.5.tgz", + "integrity": "sha512-E4TywXY6UsXNRhFrECa5HAvE5/4BFcGyfTyK36gP+pAW1ed7UTK4vKwdr53gAJYwqbfCWC6ATvJa3J3R/9+Qrg==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.1", + "@radix-ui/react-compose-refs": "1.1.1", + "@radix-ui/react-primitive": "2.0.2", + "@radix-ui/react-use-callback-ref": "1.1.0", + "@radix-ui/react-use-escape-keydown": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": 
{ + "optional": true + } + } + }, + "node_modules/@radix-ui/react-toast/node_modules/@radix-ui/react-portal": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.4.tgz", + "integrity": "sha512-sn2O9k1rPFYVyKd5LAJfo96JlSGVFpa1fS6UuBJfrZadudiw5tAmru+n1x7aMRQ84qDM71Zh1+SzK5QwU0tJfA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.0.2", + "@radix-ui/react-use-layout-effect": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-toast/node_modules/@radix-ui/react-primitive": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.0.2.tgz", + "integrity": "sha512-Ec/0d38EIuvDF+GZjcMU/Ze6MxntVJYO/fRlCPhCaVUyPY9WTalHJw54tp9sXeJo3tlShWpy41vQRgLRGOuz+w==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-toast/node_modules/@radix-ui/react-visually-hidden": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.1.2.tgz", + "integrity": "sha512-1SzA4ns2M1aRlvxErqhLHsBHoS5eI5UUcI2awAMgGUp4LoaoWOKYmvqDY2s/tltuPkh3Yk77YF/r3IRj+Amx4Q==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.0.2" }, "peerDependencies": { "@types/react": "*",
fix
update dependency @radix-ui/react-toast to v1.2.6 (main) (#16320)
ef1df0e66fc8e2fe9327a66aea31279ca5c7307a
2024-09-02 11:26:26
Andre Ziviani
feat(helm): Add persistence option to memcached on Helm chart (#13619)
false
diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md index b3c2bba85351e..b616845c7d13a 100644 --- a/docs/sources/setup/install/helm/reference.md +++ b/docs/sources/setup/install/helm/reference.md @@ -1623,6 +1623,56 @@ true <td><pre lang="json"> 5 </pre> +</td> + </tr> + <tr> + <td>chunksCache.persistence</td> + <td>object</td> + <td>Persistence settings for the chunks-cache</td> + <td><pre lang="json"> +{ + "enabled": false, + "mountPath": "/data", + "storageClass": null, + "storageSize": "10G" +} +</pre> +</td> + </tr> + <tr> + <td>chunksCache.persistence.enabled</td> + <td>bool</td> + <td>Enable creating PVCs for the chunks-cache</td> + <td><pre lang="json"> +false +</pre> +</td> + </tr> + <tr> + <td>chunksCache.persistence.mountPath</td> + <td>string</td> + <td>Volume mount path</td> + <td><pre lang="json"> +"/data" +</pre> +</td> + </tr> + <tr> + <td>chunksCache.persistence.storageClass</td> + <td>string</td> + <td>Storage class to be used. If defined, storageClassName: <storageClass>. If set to "-", storageClassName: "", which disables dynamic provisioning. If empty or set to null, no storageClassName spec is set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack).</td> + <td><pre lang="json"> +null +</pre> +</td> + </tr> + <tr> + <td>chunksCache.persistence.storageSize</td> + <td>string</td> + <td>Size of persistent disk</td> + <td><pre lang="json"> +"10G" +</pre> </td> </tr> <tr> @@ -9035,6 +9085,56 @@ true <td><pre lang="json"> {} </pre> +</td> + </tr> + <tr> + <td>resultsCache.persistence</td> + <td>object</td> + <td>Persistence settings for the results-cache</td> + <td><pre lang="json"> +{ + "enabled": false, + "mountPath": "/data", + "storageClass": null, + "storageSize": "10G" +} +</pre> +</td> + </tr> + <tr> + <td>resultsCache.persistence.enabled</td> + <td>bool</td> + <td>Enable creating PVCs for the results-cache</td> + <td><pre lang="json"> +false +</pre> +</td> + </tr> + <tr> + <td>resultsCache.persistence.mountPath</td> + <td>string</td> + <td>Volume mount path</td> + <td><pre lang="json"> +"/data" +</pre> +</td> + </tr> + <tr> + <td>resultsCache.persistence.storageClass</td> + <td>string</td> + <td>Storage class to be used. If defined, storageClassName: <storageClass>. If set to "-", storageClassName: "", which disables dynamic provisioning. If empty or set to null, no storageClassName spec is set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack).</td> + <td><pre lang="json"> +null +</pre> +</td> + </tr> + <tr> + <td>resultsCache.persistence.storageSize</td> + <td>string</td> + <td>Size of persistent disk</td> + <td><pre lang="json"> +"10G" +</pre> </td> </tr> <tr> diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md index ba47de6f8442f..4efe3289a67e8 100644 --- a/production/helm/loki/CHANGELOG.md +++ b/production/helm/loki/CHANGELOG.md @@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang [//]: # (<AUTOMATED_UPDATES_LOCATOR> : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.) +## 6.11.0 + +- [FEATURE] Add support for configuring persistence for memcached. 
+ ## 6.10.2 - [CHANGE] Bumped version of `nginxinc/nginx-unprivileged` to 1.27-alpine; this remediates several CVE @@ -27,7 +31,6 @@ Entries should include a reference to the pull request that introduced the chang - [CHANGE] Changed version of Grafana Loki to 3.1.1 - [ENHANCEMENT] Added ability to disable AWS S3 dualstack endpoint usage. - ## 6.9.0 - [BUGFIX] Fixed how we set imagePullSecrets for the memcached and provisioner. diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml index 9c6acfe2bdd28..c6fcd38c5c540 100644 --- a/production/helm/loki/Chart.yaml +++ b/production/helm/loki/Chart.yaml @@ -3,7 +3,7 @@ name: loki description: Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes. type: application appVersion: 3.1.1 -version: 6.10.2 +version: 6.11.0 home: https://grafana.github.io/helm-charts sources: - https://github.com/grafana/loki diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md index 59208a9c6413c..c29dbfe95c4f6 100644 --- a/production/helm/loki/README.md +++ b/production/helm/loki/README.md @@ -1,6 +1,6 @@ # loki -![Version: 6.10.2](https://img.shields.io/badge/Version-6.10.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.1.1](https://img.shields.io/badge/AppVersion-3.1.1-informational?style=flat-square) +![Version: 6.11.0](https://img.shields.io/badge/Version-6.11.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.1.1](https://img.shields.io/badge/AppVersion-3.1.1-informational?style=flat-square) Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes. diff --git a/production/helm/loki/templates/memcached/_memcached-statefulset.tpl b/production/helm/loki/templates/memcached/_memcached-statefulset.tpl index 8e2479af8a05f..ce490ee6cd713 100644 --- a/production/helm/loki/templates/memcached/_memcached-statefulset.tpl +++ b/production/helm/loki/templates/memcached/_memcached-statefulset.tpl @@ -104,7 +104,7 @@ spec: name: client args: - -m {{ .allocatedMemory }} - - --extended=modern,track_sizes{{ with .extraExtendedOptions }},{{ . }}{{ end }} + - --extended=modern,track_sizes{{ if .persistence.enabled }},ext_path={{ .persistence.mountPath }}/file:{{ .persistence.storageSize }}{{ end }}{{ with .extraExtendedOptions }},{{ . }}{{ end }} - -I {{ .maxItemMemory }}m - -c {{ .connectionLimit }} - -v @@ -122,10 +122,16 @@ spec: {{- end }} securityContext: {{- toYaml $.ctx.Values.memcached.containerSecurityContext | nindent 12 }} - {{- if .extraVolumeMounts }} + {{- if or .persistence.enabled .extraVolumeMounts }} volumeMounts: + {{- if .persistence.enabled }} + - name: data + mountPath: {{ .persistence.mountPath }} + {{- end }} + {{- if .extraVolumeMounts }} {{- toYaml .extraVolumeMounts | nindent 12 }} {{- end }} + {{- end }} {{- if $.ctx.Values.memcachedExporter.enabled }} - name: exporter @@ -151,6 +157,19 @@ spec: {{- toYaml .extraVolumeMounts | nindent 12 }} {{- end }} {{- end }} + {{- if .persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + {{- with .persistence.storageClass }} + storageClassName: {{ if (eq "-" .) }}""{{ else }}{{ . 
}}{{ end }} + {{- end }} + resources: + requests: + storage: {{ .persistence.storageSize | quote }} + {{- end }} {{- end -}} {{- end -}} {{- end -}} diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml index ef0c506f585a4..dd6df92348ca0 100644 --- a/production/helm/loki/values.yaml +++ b/production/helm/loki/values.yaml @@ -2967,6 +2967,20 @@ resultsCache: service: annotations: {} labels: {} + # -- Persistence settings for the results-cache + persistence: + # -- Enable creating PVCs for the results-cache + enabled: false + # -- Size of persistent disk + storageSize: 10G + # -- Storage class to be used. + # If defined, storageClassName: <storageClass>. + # If set to "-", storageClassName: "", which disables dynamic provisioning. + # If empty or set to null, no storageClassName spec is + # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). + storageClass: null + # -- Volume mount path + mountPath: /data chunksCache: # -- Specifies whether memcached based chunks-cache should be enabled enabled: true @@ -3055,6 +3069,20 @@ chunksCache: service: annotations: {} labels: {} + # -- Persistence settings for the chunks-cache + persistence: + # -- Enable creating PVCs for the chunks-cache + enabled: false + # -- Size of persistent disk + storageSize: 10G + # -- Storage class to be used. + # If defined, storageClassName: <storageClass>. + # If set to "-", storageClassName: "", which disables dynamic provisioning. + # If empty or set to null, no storageClassName spec is + # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). + storageClass: null + # -- Volume mount path + mountPath: /data ###################################################################################################################### # # Subchart configurations
feat
Add persistence option to memcached on Helm chart (#13619)
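The template change above activates memcached's extstore when persistence is enabled, by appending `ext_path=<mountPath>/file:<storageSize>` to the `--extended` flag. Below is a minimal Go sketch of that string assembly, mirroring the Helm template's logic; the `persistence` struct and `extendedFlag` function are illustrative names for this example, not chart code:

```go
package main

import (
	"fmt"
	"strings"
)

// persistence mirrors the chart's new chunksCache/resultsCache persistence values.
type persistence struct {
	Enabled     bool
	MountPath   string
	StorageSize string
}

// extendedFlag reproduces what the statefulset template renders for memcached:
// base options, an optional extstore path, then any extra extended options.
func extendedFlag(p persistence, extra string) string {
	opts := []string{"modern", "track_sizes"}
	if p.Enabled {
		opts = append(opts, fmt.Sprintf("ext_path=%s/file:%s", p.MountPath, p.StorageSize))
	}
	if extra != "" {
		opts = append(opts, extra)
	}
	return "--extended=" + strings.Join(opts, ",")
}

func main() {
	// Chart defaults from values.yaml: mountPath /data, storageSize 10G.
	p := persistence{Enabled: true, MountPath: "/data", StorageSize: "10G"}
	fmt.Println(extendedFlag(p, ""))
	// Output: --extended=modern,track_sizes,ext_path=/data/file:10G
}
```

Note that the same `persistence.storageSize` value sizes both the extstore file and the PVC requested by the new `volumeClaimTemplates` block, so the volume and the cache file stay in agreement.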
1ab642f9703c19d25f1937ac50a751053464a542
2022-06-14 21:57:58
Karen Miller
docs: add unpopulated v2.6 release notes section (#6354)
false
diff --git a/docs/sources/release-notes/_index.md b/docs/sources/release-notes/_index.md index 99663f220a92a..96207dad64724 100644 --- a/docs/sources/release-notes/_index.md +++ b/docs/sources/release-notes/_index.md @@ -7,6 +7,7 @@ weight: 100 Release notes for Loki are in the CHANGELOG for the release and listed here by version number. +- [V2.6 release notes](../release-notes/v2-6/) - [V2.5 release notes](../release-notes/v2-5/) - [V2.4 release notes](../release-notes/v2-4/) - [V2.3 release notes](../release-notes/v2-3/) diff --git a/docs/sources/release-notes/v2-6.md b/docs/sources/release-notes/v2-6.md new file mode 100644 index 0000000000000..217dddec22515 --- /dev/null +++ b/docs/sources/release-notes/v2-6.md @@ -0,0 +1,30 @@ +--- +title: V2.6 +weight: 66 +--- + +# Version 2.6 release notes + +Intro to this 2.6 release goes here. Here's a summary of new enhancements and important fixes. + +## Features and enhancements + +- **First feature** description goes here. + +For a full list of all changes please look at the [CHANGELOG](https://github.com/grafana/loki/blob/main/CHANGELOG.md). + +## Upgrade Considerations + +As always, please read the [upgrade guide](../../upgrading/#250) before upgrading Loki. + +- **Upgrade issue** description goes here. + +## Bug fixes + +### 2.6.0 bug fixes + +V2.6.0 fixes numerous bugs. The [CHANGELOG](https://github.com/grafana/loki/blob/main/CHANGELOG.md) has the complete list. + +A summary of some of the more important fixes: + +- [PR xxxx](link to PR goes here) **github handle**: Description of the bug fix goes here.
docs
add unpopulated v2.6 release notes section (#6354)
bfe97d724f34277baa4cd9f9b25764e718997c46
2024-07-11 21:26:25
George Robinson
feat: Add metrics to WAL Manager (#13490)
false
diff --git a/pkg/ingester-rf1/ingester.go b/pkg/ingester-rf1/ingester.go index 8d1b63f74e89f..581da7c8a4384 100644 --- a/pkg/ingester-rf1/ingester.go +++ b/pkg/ingester-rf1/ingester.go @@ -257,7 +257,7 @@ func New(cfg Config, clientConfig client.Config, MaxAge: wal.DefaultMaxAge, MaxSegments: wal.DefaultMaxSegments, MaxSegmentSize: wal.DefaultMaxSegmentSize, - }) + }, wal.NewMetrics(registerer)) if err != nil { return nil, err } diff --git a/pkg/storage/wal/manager.go b/pkg/storage/wal/manager.go index e1a21f504b34b..d8cc04b2d9602 100644 --- a/pkg/storage/wal/manager.go +++ b/pkg/storage/wal/manager.go @@ -6,6 +6,8 @@ import ( "sync" "time" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/model/labels" "github.com/grafana/loki/v3/pkg/logproto" @@ -81,6 +83,29 @@ type Config struct { MaxSegmentSize int64 } +type Metrics struct { + NumAvailable prometheus.Gauge + NumPending prometheus.Gauge + NumFlushing prometheus.Gauge +} + +func NewMetrics(r prometheus.Registerer) *Metrics { + return &Metrics{ + NumAvailable: promauto.With(r).NewGauge(prometheus.GaugeOpts{ + Name: "wal_segments_available", + Help: "The number of WAL segments accepting writes.", + }), + NumPending: promauto.With(r).NewGauge(prometheus.GaugeOpts{ + Name: "wal_segments_pending", + Help: "The number of WAL segments waiting to be flushed.", + }), + NumFlushing: promauto.With(r).NewGauge(prometheus.GaugeOpts{ + Name: "wal_segments_flushing", + Help: "The number of WAL segments being flushed.", + }), + } +} + // Manager buffers segments in memory, and keeps track of which segments are // available and which are waiting to be flushed. The maximum number of // segments that can be buffered in memory, and their maximum age and maximum @@ -97,7 +122,8 @@ type Config struct { // and returned to the available list. This allows the manager to apply back-pressure // and avoid congestion collapse due to excessive timeouts and retries. type Manager struct { - cfg Config + cfg Config + metrics *Metrics // available is a list of segments that are available and accepting data. // All segments other than the segment at the front of the list are empty, @@ -135,13 +161,16 @@ type PendingItem struct { Writer *SegmentWriter } -func NewManager(cfg Config) (*Manager, error) { +func NewManager(cfg Config, metrics *Metrics) (*Manager, error) { m := Manager{ cfg: cfg, + metrics: metrics, available: list.New(), pending: list.New(), shutdown: make(chan struct{}), } + m.metrics.NumPending.Set(0) + m.metrics.NumFlushing.Set(0) for i := int64(0); i < cfg.MaxSegments; i++ { w, err := NewWalSegmentWriter() if err != nil { @@ -151,6 +180,7 @@ func NewManager(cfg Config) (*Manager, error) { r: &AppendResult{done: make(chan struct{})}, w: w, }) + m.metrics.NumAvailable.Inc() } return &m, nil } @@ -171,7 +201,9 @@ func (m *Manager) Append(r AppendRequest) (*AppendResult, error) { // the closed list to be flushed. if time.Since(it.firstAppendedAt) >= m.cfg.MaxAge || it.w.InputSize() >= m.cfg.MaxSegmentSize { m.pending.PushBack(it) + m.metrics.NumPending.Inc() m.available.Remove(el) + m.metrics.NumAvailable.Dec() } return it.r, nil } @@ -189,7 +221,9 @@ func (m *Manager) NextPending() (*PendingItem, error) { it := el.Value.(*item) if !it.firstAppendedAt.IsZero() && time.Since(it.firstAppendedAt) >= m.cfg.MaxAge { m.pending.PushBack(it) + m.metrics.NumPending.Inc() m.available.Remove(el) + m.metrics.NumAvailable.Dec() } } // If the pending list is still empty return nil. 
@@ -200,6 +234,8 @@ func (m *Manager) NextPending() (*PendingItem, error) { el := m.pending.Front() it := el.Value.(*item) m.pending.Remove(el) + m.metrics.NumPending.Dec() + m.metrics.NumFlushing.Inc() return &PendingItem{Result: it.r, Writer: it.w}, nil } @@ -209,6 +245,8 @@ func (m *Manager) Put(it *PendingItem) error { m.mu.Lock() defer m.mu.Unlock() it.Writer.Reset() + m.metrics.NumFlushing.Dec() + m.metrics.NumAvailable.Inc() m.available.PushBack(&item{ r: &AppendResult{done: make(chan struct{})}, w: it.Writer, diff --git a/pkg/storage/wal/manager_test.go b/pkg/storage/wal/manager_test.go index 25e2e2cdd9889..461cc05f12435 100644 --- a/pkg/storage/wal/manager_test.go +++ b/pkg/storage/wal/manager_test.go @@ -6,17 +6,19 @@ import ( "testing" "time" - "github.com/grafana/loki/v3/pkg/logproto" - + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/require" + + "github.com/grafana/loki/v3/pkg/logproto" ) func TestManager_Append(t *testing.T) { m, err := NewManager(Config{ MaxSegments: 1, MaxSegmentSize: 1024, // 1KB - }) + }, NewMetrics(nil)) require.NoError(t, err) // Append some data. @@ -96,7 +98,7 @@ func TestManager_Append_ErrFull(t *testing.T) { m, err := NewManager(Config{ MaxSegments: 10, MaxSegmentSize: 1024, // 1KB - }) + }, NewMetrics(nil)) require.NoError(t, err) // Should be able to write to all 10 segments of 1KB each. @@ -140,7 +142,7 @@ func TestManager_NextPending(t *testing.T) { MaxAge: DefaultMaxAge, MaxSegments: 1, MaxSegmentSize: 1024, // 1KB - }) + }, NewMetrics(nil)) require.NoError(t, err) // There should be no items as no data has been written. @@ -195,7 +197,7 @@ func TestManager_Put(t *testing.T) { m, err := NewManager(Config{ MaxSegments: 10, MaxSegmentSize: 1024, // 1KB - }) + }, NewMetrics(nil)) require.NoError(t, err) // There should be 10 available segments, and 0 pending. @@ -242,3 +244,88 @@ func TestManager_Put(t *testing.T) { // The segment should be reset. require.Equal(t, int64(0), it.Writer.InputSize()) } + +func TestManager_Metrics(t *testing.T) { + r := prometheus.NewRegistry() + m, err := NewManager(Config{ + MaxSegments: 1, + MaxSegmentSize: 1024, // 1KB + }, NewMetrics(r)) + require.NoError(t, err) + + metricNames := []string{ + "wal_segments_available", + "wal_segments_flushing", + "wal_segments_pending", + } + expected := ` +# HELP wal_segments_available The number of WAL segments accepting writes. +# TYPE wal_segments_available gauge +wal_segments_available 1 +# HELP wal_segments_flushing The number of WAL segments being flushed. +# TYPE wal_segments_flushing gauge +wal_segments_flushing 0 +# HELP wal_segments_pending The number of WAL segments waiting to be flushed. +# TYPE wal_segments_pending gauge +wal_segments_pending 0 +` + require.NoError(t, testutil.CollectAndCompare(r, strings.NewReader(expected), metricNames...)) + + // Appending 1KB of data. + lbs := labels.Labels{{Name: "foo", Value: "bar"}} + entries := []*logproto.Entry{{Timestamp: time.Now(), Line: strings.Repeat("b", 1024)}} + _, err = m.Append(AppendRequest{ + TenantID: "1", + Labels: lbs, + LabelsStr: lbs.String(), + Entries: entries, + }) + require.NoError(t, err) + + // This should move the segment from the available to the pending list. + expected = ` +# HELP wal_segments_available The number of WAL segments accepting writes. 
+# TYPE wal_segments_available gauge +wal_segments_available 0 +# HELP wal_segments_flushing The number of WAL segments being flushed. +# TYPE wal_segments_flushing gauge +wal_segments_flushing 0 +# HELP wal_segments_pending The number of WAL segments waiting to be flushed. +# TYPE wal_segments_pending gauge +wal_segments_pending 1 +` + require.NoError(t, testutil.CollectAndCompare(r, strings.NewReader(expected), metricNames...)) + + // Get the segment from the pending list. + it, err := m.NextPending() + require.NoError(t, err) + require.NotNil(t, it) + expected = ` +# HELP wal_segments_available The number of WAL segments accepting writes. +# TYPE wal_segments_available gauge +wal_segments_available 0 +# HELP wal_segments_flushing The number of WAL segments being flushed. +# TYPE wal_segments_flushing gauge +wal_segments_flushing 1 +# HELP wal_segments_pending The number of WAL segments waiting to be flushed. +# TYPE wal_segments_pending gauge +wal_segments_pending 0 +` + require.NoError(t, testutil.CollectAndCompare(r, strings.NewReader(expected), metricNames...)) + + // Reset the segment and put it back in the available list. + require.NoError(t, m.Put(it)) + expected = ` +# HELP wal_segments_available The number of WAL segments accepting writes. +# TYPE wal_segments_available gauge +wal_segments_available 1 +# HELP wal_segments_flushing The number of WAL segments being flushed. +# TYPE wal_segments_flushing gauge +wal_segments_flushing 0 +# HELP wal_segments_pending The number of WAL segments waiting to be flushed. +# TYPE wal_segments_pending gauge +wal_segments_pending 0 +` + require.NoError(t, testutil.CollectAndCompare(r, strings.NewReader(expected), metricNames...)) + +}
feat
Add metrics to WAL Manager (#13490)
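The manager diff above pairs each segment lifecycle state with a gauge and moves the counts in lockstep with the list operations: `Append` shifts a full segment from available to pending, `NextPending` from pending to flushing, and `Put` back to available. Here is a minimal self-contained sketch of that accounting, assuming only the metric names and transitions visible in the diff (`segmentMetrics` and its constructor are illustrative names, not Loki's types):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

// segmentMetrics mirrors the three gauges the commit adds:
// one per lifecycle state of a WAL segment.
type segmentMetrics struct {
	available prometheus.Gauge
	pending   prometheus.Gauge
	flushing  prometheus.Gauge
}

func newSegmentMetrics(r prometheus.Registerer) *segmentMetrics {
	return &segmentMetrics{
		available: promauto.With(r).NewGauge(prometheus.GaugeOpts{
			Name: "wal_segments_available",
			Help: "The number of WAL segments accepting writes.",
		}),
		pending: promauto.With(r).NewGauge(prometheus.GaugeOpts{
			Name: "wal_segments_pending",
			Help: "The number of WAL segments waiting to be flushed.",
		}),
		flushing: promauto.With(r).NewGauge(prometheus.GaugeOpts{
			Name: "wal_segments_flushing",
			Help: "The number of WAL segments being flushed.",
		}),
	}
}

func main() {
	m := newSegmentMetrics(prometheus.NewRegistry())

	// NewManager pre-allocates MaxSegments writable segments up front.
	m.available.Inc()

	// Append fills the segment past MaxSegmentSize (or MaxAge elapses):
	// available -> pending.
	m.available.Dec()
	m.pending.Inc()

	// NextPending hands the segment to a flusher: pending -> flushing.
	m.pending.Dec()
	m.flushing.Inc()

	// Put resets the segment after the flush: flushing -> available.
	m.flushing.Dec()
	m.available.Inc()

	// At rest, every segment is available again.
	fmt.Println(testutil.ToFloat64(m.available)) // 1
}
```

Because each transition decrements one gauge and increments another under the manager's lock, the three gauges always sum to `MaxSegments`, which is exactly what the commit's `TestManager_Metrics` asserts with `testutil.CollectAndCompare` at each step.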
ee3c7b3d9d9b78b3f4f50949967466a273cc05b0
2025-02-28 23:05:48
renovate[bot]
chore(deps): update dependency @types/react to v19 (main) (#16365)
false
diff --git a/pkg/ui/frontend/package-lock.json b/pkg/ui/frontend/package-lock.json index 99e22329a4f32..c6c359afc1050 100644 --- a/pkg/ui/frontend/package-lock.json +++ b/pkg/ui/frontend/package-lock.json @@ -8,7 +8,7 @@ "name": "@grafana/loki-ui", "version": "0.0.0", "dependencies": { - "@hookform/resolvers": "^3.10.0", + "@hookform/resolvers": "^4.0.0", "@radix-ui/react-checkbox": "^1.1.3", "@radix-ui/react-collapsible": "^1.1.2", "@radix-ui/react-dialog": "^1.1.5", @@ -27,8 +27,8 @@ "@radix-ui/react-toggle": "^1.1.1", "@radix-ui/react-toggle-group": "^1.1.1", "@radix-ui/react-tooltip": "^1.1.7", - "@tanstack/react-query": "^5.66.0", - "@tanstack/react-query-devtools": "^5.66.0", + "@tanstack/react-query": "^5.66.11", + "@tanstack/react-query-devtools": "^5.66.11", "@types/lodash": "^4.17.15", "@types/react-datepicker": "^7.0.0", "class-variance-authority": "^0.7.1", @@ -45,7 +45,7 @@ "react-dom": "^18.2.0", "react-hook-form": "^7.54.2", "react-icons": "^5.4.0", - "react-router-dom": "^6.22.0", + "react-router-dom": "^6.30.0", "recharts": "^2.15.1", "tailwind-merge": "^2.6.0", "tailwindcss-animate": "^1.0.7", @@ -56,13 +56,13 @@ "@types/node": "^22.12.0", "@types/react": "^18.2.0", "@types/react-dom": "^18.2.0", - "@typescript-eslint/eslint-plugin": "^5.57.1", - "@typescript-eslint/parser": "^5.57.1", + "@typescript-eslint/eslint-plugin": "^8.0.0", + "@typescript-eslint/parser": "^8.0.0", "@vitejs/plugin-react": "^4.2.1", "autoprefixer": "^10.4.20", "depcheck": "^1.4.7", - "eslint": "^8.38.0", - "eslint-plugin-react-hooks": "^5.0.0", + "eslint": "^9.0.0", + "eslint-plugin-react-hooks": "^5.2.0", "eslint-plugin-react-refresh": "^0.4.5", "postcss": "^8.5.1", "tailwindcss": "^3.4.1", @@ -341,9 +341,9 @@ } }, "node_modules/@babel/runtime": { - "version": "7.26.7", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.26.7.tgz", - "integrity": "sha512-AOPI3D+a8dXnja+iwsUqGRjr1BbZIe771sXdapOtYI531gSqpi92vXivKcq2asu/DFpdl1ceFAKZyRzK2PCVcQ==", + "version": "7.26.9", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.26.9.tgz", + "integrity": "sha512-aA63XwOkcl4xxQa3HjPMqOP6LiK0ZDv3mUPYEFXkpHbaFjtGggE1A61FjFzJnB+p7/oy2gA8E+rcBNl/zC1tMg==", "license": "MIT", "dependencies": { "regenerator-runtime": "^0.14.0" @@ -854,17 +854,45 @@ "node": "^12.0.0 || ^14.0.0 || >=16.0.0" } }, + "node_modules/@eslint/config-array": { + "version": "0.19.2", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.19.2.tgz", + "integrity": "sha512-GNKqxfHG2ySmJOBSHg7LxeUx4xpuCoFjacmlCoYWEbaPXLwvfIjixRI12xCQZeULksQb23uiA8F40w5TojpV7w==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/object-schema": "^2.1.6", + "debug": "^4.3.1", + "minimatch": "^3.1.2" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/core": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.12.0.tgz", + "integrity": "sha512-cmrR6pytBuSMTaBweKoGMwu3EiHiEC+DoyupPmlZ0HxBJBtIxwe+j/E4XPIKNx+Q74c8lXKPwYawBf5glsTkHg==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@types/json-schema": "^7.0.15" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, "node_modules/@eslint/eslintrc": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", - "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", + "version": "3.3.0", + "resolved": 
"https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.0.tgz", + "integrity": "sha512-yaVPAiNAalnCZedKLdR21GOGILMLKPyqSLWaAjQFvYA2i/ciDi8ArYVr69Anohb6cH2Ukhqti4aFnYyPm8wdwQ==", "dev": true, "license": "MIT", "dependencies": { "ajv": "^6.12.4", "debug": "^4.3.2", - "espree": "^9.6.0", - "globals": "^13.19.0", + "espree": "^10.0.1", + "globals": "^14.0.0", "ignore": "^5.2.0", "import-fresh": "^3.2.1", "js-yaml": "^4.1.0", @@ -872,36 +900,57 @@ "strip-json-comments": "^3.1.1" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { "url": "https://opencollective.com/eslint" } }, "node_modules/@eslint/eslintrc/node_modules/globals": { - "version": "13.24.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", - "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", + "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", "dev": true, "license": "MIT", - "dependencies": { - "type-fest": "^0.20.2" - }, "engines": { - "node": ">=8" + "node": ">=18" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/@eslint/js": { - "version": "8.57.1", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.1.tgz", - "integrity": "sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==", + "version": "9.21.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.21.0.tgz", + "integrity": "sha512-BqStZ3HX8Yz6LvsF5ByXYrtigrV5AXADWLAGc7PH/1SxOb7/FIYYMszZZWiUou/GB9P2lXWk2SV4d+Z8h0nknw==", "dev": true, "license": "MIT", "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/object-schema": { + "version": "2.1.6", + "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.6.tgz", + "integrity": "sha512-RBMg5FRL0I0gs51M/guSAj5/e14VQ4tpZnQNWwuDT66P14I43ItmPfIZRhO9fUVIPOAQXU47atlywZ/czoqFPA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/plugin-kit": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.2.7.tgz", + "integrity": "sha512-JubJ5B2pJ4k4yGxaNLdbjrnk9d/iDz6/q8wOilpIowd6PJPgaxCuHBnBszq7Ce2TyMrywm5r4PnKm6V3iiZF+g==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^0.12.0", + "levn": "^0.4.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, "node_modules/@floating-ui/core": { @@ -923,6 +972,21 @@ "@floating-ui/utils": "^0.2.9" } }, + "node_modules/@floating-ui/react": { + "version": "0.27.5", + "resolved": "https://registry.npmjs.org/@floating-ui/react/-/react-0.27.5.tgz", + "integrity": "sha512-BX3jKxo39Ba05pflcQmqPPwc0qdNsdNi/eweAFtoIdrJWNen2sVEWMEac3i6jU55Qfx+lOcdMNKYn2CtWmlnOQ==", + "license": "MIT", + "dependencies": { + "@floating-ui/react-dom": "^2.1.2", + "@floating-ui/utils": "^0.2.9", + "tabbable": "^6.0.0" + }, + "peerDependencies": { + "react": ">=17.0.0", + "react-dom": ">=17.0.0" + } + }, "node_modules/@floating-ui/react-dom": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.2.tgz", @@ -943,28 +1007,53 @@ "license": "MIT" }, "node_modules/@hookform/resolvers": { - "version": 
"3.10.0", - "resolved": "https://registry.npmjs.org/@hookform/resolvers/-/resolvers-3.10.0.tgz", - "integrity": "sha512-79Dv+3mDF7i+2ajj7SkypSKHhl1cbln1OGavqrsF7p6mbUv11xpqpacPsGDCTRvCSjEEIez2ef1NveSVL3b0Ag==", + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/@hookform/resolvers/-/resolvers-4.1.2.tgz", + "integrity": "sha512-wl6H9c9wLOZMJAqGLEVKzbCkxJuV+BYuLFZFCQtCwMe0b3qQk4kUBd/ZAj13SwcSqcx86rCgSCyngQfmA6DOWg==", "license": "MIT", + "dependencies": { + "@standard-schema/utils": "^0.3.0" + }, "peerDependencies": { "react-hook-form": "^7.0.0" } }, - "node_modules/@humanwhocodes/config-array": { - "version": "0.13.0", - "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.13.0.tgz", - "integrity": "sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==", - "deprecated": "Use @eslint/config-array instead", + "node_modules/@humanfs/core": { + "version": "0.19.1", + "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", + "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanfs/node": { + "version": "0.16.6", + "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.6.tgz", + "integrity": "sha512-YuI2ZHQL78Q5HbhDiBA1X4LmYdXCKCMQIfw0pw7piHJwyREFebJUvrQN4cMssyES6x+vfUbx1CIpaQUKYdQZOw==", "dev": true, "license": "Apache-2.0", "dependencies": { - "@humanwhocodes/object-schema": "^2.0.3", - "debug": "^4.3.1", - "minimatch": "^3.0.5" + "@humanfs/core": "^0.19.1", + "@humanwhocodes/retry": "^0.3.0" }, "engines": { - "node": ">=10.10.0" + "node": ">=18.18.0" + } + }, + "node_modules/@humanfs/node/node_modules/@humanwhocodes/retry": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.3.1.tgz", + "integrity": "sha512-JBxkERygn7Bv/GbN5Rv8Ul6LVknS+5Bp6RgDC/O8gEBU/yeH5Ui5C/OlWrTb6qct7LjjfT6Re2NxB0ln0yYybA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" } }, "node_modules/@humanwhocodes/module-importer": { @@ -981,13 +1070,19 @@ "url": "https://github.com/sponsors/nzakas" } }, - "node_modules/@humanwhocodes/object-schema": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz", - "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", - "deprecated": "Use @eslint/object-schema instead", + "node_modules/@humanwhocodes/retry": { + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.2.tgz", + "integrity": "sha512-xeO57FpIu4p1Ri3Jq/EXq4ClRm86dVF2z/+kvFnyqVYRavTZmaFaUBbWCOuuTh0o/g7DSsk6kc2vrS4Vl5oPOQ==", "dev": true, - "license": "BSD-3-Clause" + "license": "Apache-2.0", + "engines": { + "node": ">=18.18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } }, "node_modules/@isaacs/cliui": { "version": "8.0.2", @@ -2174,9 +2269,9 @@ "license": "MIT" }, "node_modules/@remix-run/router": { - "version": "1.22.0", - "resolved": "https://registry.npmjs.org/@remix-run/router/-/router-1.22.0.tgz", - "integrity": "sha512-MBOl8MeOzpK0HQQQshKB7pABXbmyHizdTpqnrIseTbsv0nAepwC2ENZa1aaBExNQcpLoXmWthhak8SABLzvGPw==", + "version": "1.23.0", + "resolved": 
"https://registry.npmjs.org/@remix-run/router/-/router-1.23.0.tgz", + "integrity": "sha512-O3rHJzAQKamUz1fvE0Qaw0xSFqsA/yafi2iqeE0pvdFtCO1viYx8QL6f3Ln/aCCTLxs68SLf0KPM9eSeM8yBnA==", "license": "MIT", "engines": { "node": ">=14.0.0" @@ -2448,10 +2543,16 @@ "win32" ] }, + "node_modules/@standard-schema/utils": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/@standard-schema/utils/-/utils-0.3.0.tgz", + "integrity": "sha512-e7Mew686owMaPJVNNLs55PUvgz371nKgwsc4vxE49zsODpJEnxgxRo2y/OKrqueavXgZNMDVj3DdHFlaSAeU8g==", + "license": "MIT" + }, "node_modules/@tanstack/query-core": { - "version": "5.66.4", - "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.66.4.tgz", - "integrity": "sha512-skM/gzNX4shPkqmdTCSoHtJAPMTtmIJNS0hE+xwTTUVYwezArCT34NMermABmBVUg5Ls5aiUXEDXfqwR1oVkcA==", + "version": "5.66.11", + "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.66.11.tgz", + "integrity": "sha512-ZEYxgHUcohj3sHkbRaw0gYwFxjY5O6M3IXOYXEun7E1rqNhsP8fOtqjJTKPZpVHcdIdrmX4lzZctT4+pts0OgA==", "license": "MIT", "funding": { "type": "github", @@ -2469,12 +2570,12 @@ } }, "node_modules/@tanstack/react-query": { - "version": "5.66.9", - "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.66.9.tgz", - "integrity": "sha512-NRI02PHJsP5y2gAuWKP+awamTIBFBSKMnO6UVzi03GTclmHHHInH5UzVgzi5tpu4+FmGfsdT7Umqegobtsp23A==", + "version": "5.66.11", + "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.66.11.tgz", + "integrity": "sha512-uPDiQbZScWkAeihmZ9gAm3wOBA1TmLB1KCB1fJ1hIiEKq3dTT+ja/aYM7wGUD+XiEsY4sDSE7p8VIz/21L2Dow==", "license": "MIT", "dependencies": { - "@tanstack/query-core": "5.66.4" + "@tanstack/query-core": "5.66.11" }, "funding": { "type": "github", @@ -2485,9 +2586,9 @@ } }, "node_modules/@tanstack/react-query-devtools": { - "version": "5.66.9", - "resolved": "https://registry.npmjs.org/@tanstack/react-query-devtools/-/react-query-devtools-5.66.9.tgz", - "integrity": "sha512-70G6AR35he53SYUcUK6EdqNR18zejCv1rM6900gjZP408EAex56YLwVSeijzk9lWeU2J42G9Fjh0i1WngUTsgw==", + "version": "5.66.11", + "resolved": "https://registry.npmjs.org/@tanstack/react-query-devtools/-/react-query-devtools-5.66.11.tgz", + "integrity": "sha512-a+zr2TN4dKpxVlJ9YBOC5YmpGWp2Ez2ZfIzsorVbrs/u2R+bVkLrU1u5e8WHzLdf6tXYueATqgeXWLHrvi4Dig==", "license": "MIT", "dependencies": { "@tanstack/query-devtools": "5.65.0" @@ -2497,7 +2598,7 @@ "url": "https://github.com/sponsors/tannerlinsley" }, "peerDependencies": { - "@tanstack/react-query": "^5.66.9", + "@tanstack/react-query": "^5.66.11", "react": "^18 || ^19" } }, @@ -2687,16 +2788,6 @@ "react-datepicker": "*" } }, - "node_modules/@types/react-datepicker/node_modules/date-fns": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-3.6.0.tgz", - "integrity": "sha512-fRHTG8g/Gif+kSh50gaGEdToemgfj74aRX3swtiouboip5JDLAyDE9F11nHMIcvOaXeOC6D7SpNhi7uFyB7Uww==", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/kossnocorp" - } - }, "node_modules/@types/react-dom": { "version": "18.3.5", "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.3.5.tgz", @@ -2707,88 +2798,73 @@ "@types/react": "^18.0.0" } }, - "node_modules/@types/semver": { - "version": "7.5.8", - "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.8.tgz", - "integrity": "sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ==", - "dev": true, - "license": "MIT" - }, 
"node_modules/@typescript-eslint/eslint-plugin": { - "version": "5.62.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.62.0.tgz", - "integrity": "sha512-TiZzBSJja/LbhNPvk6yc0JrX9XqhQ0hdh6M2svYfsHGejaKFIAGd9MQ+ERIMzLGlN/kZoYIgdxFV0PuljTKXag==", + "version": "8.25.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.25.0.tgz", + "integrity": "sha512-VM7bpzAe7JO/BFf40pIT1lJqS/z1F8OaSsUB3rpFJucQA4cOSuH2RVVVkFULN+En0Djgr29/jb4EQnedUo95KA==", "dev": true, "license": "MIT", "dependencies": { - "@eslint-community/regexpp": "^4.4.0", - "@typescript-eslint/scope-manager": "5.62.0", - "@typescript-eslint/type-utils": "5.62.0", - "@typescript-eslint/utils": "5.62.0", - "debug": "^4.3.4", + "@eslint-community/regexpp": "^4.10.0", + "@typescript-eslint/scope-manager": "8.25.0", + "@typescript-eslint/type-utils": "8.25.0", + "@typescript-eslint/utils": "8.25.0", + "@typescript-eslint/visitor-keys": "8.25.0", "graphemer": "^1.4.0", - "ignore": "^5.2.0", - "natural-compare-lite": "^1.4.0", - "semver": "^7.3.7", - "tsutils": "^3.21.0" + "ignore": "^5.3.1", + "natural-compare": "^1.4.0", + "ts-api-utils": "^2.0.1" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/typescript-eslint" }, "peerDependencies": { - "@typescript-eslint/parser": "^5.0.0", - "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } + "@typescript-eslint/parser": "^8.0.0 || ^8.0.0-alpha.0", + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <5.8.0" } }, "node_modules/@typescript-eslint/parser": { - "version": "5.62.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.62.0.tgz", - "integrity": "sha512-VlJEV0fOQ7BExOsHYAGrgbEiZoi8D+Bl2+f6V2RrXerRSylnp+ZBHmPvaIa8cz0Ajx7WO7Z5RqfgYg7ED1nRhA==", + "version": "8.25.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.25.0.tgz", + "integrity": "sha512-4gbs64bnbSzu4FpgMiQ1A+D+urxkoJk/kqlDJ2W//5SygaEiAP2B4GoS7TEdxgwol2el03gckFV9lJ4QOMiiHg==", "dev": true, - "license": "BSD-2-Clause", + "license": "MIT", "dependencies": { - "@typescript-eslint/scope-manager": "5.62.0", - "@typescript-eslint/types": "5.62.0", - "@typescript-eslint/typescript-estree": "5.62.0", + "@typescript-eslint/scope-manager": "8.25.0", + "@typescript-eslint/types": "8.25.0", + "@typescript-eslint/typescript-estree": "8.25.0", + "@typescript-eslint/visitor-keys": "8.25.0", "debug": "^4.3.4" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/typescript-eslint" }, "peerDependencies": { - "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <5.8.0" } }, "node_modules/@typescript-eslint/scope-manager": { - "version": "5.62.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.62.0.tgz", - "integrity": "sha512-VXuvVvZeQCQb5Zgf4HAxc04q5j+WrNAtNh9OwCsCgpKqESMTu3tF/jhZ3xG6T4NZwWl65Bg8KuS2uEvhSfLl0w==", + "version": "8.25.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.25.0.tgz", + "integrity": 
"sha512-6PPeiKIGbgStEyt4NNXa2ru5pMzQ8OYKO1hX1z53HMomrmiSB+R5FmChgQAP1ro8jMtNawz+TRQo/cSXrauTpg==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "5.62.0", - "@typescript-eslint/visitor-keys": "5.62.0" + "@typescript-eslint/types": "8.25.0", + "@typescript-eslint/visitor-keys": "8.25.0" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { "type": "opencollective", @@ -2796,41 +2872,37 @@ } }, "node_modules/@typescript-eslint/type-utils": { - "version": "5.62.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-5.62.0.tgz", - "integrity": "sha512-xsSQreu+VnfbqQpW5vnCJdq1Z3Q0U31qiWmRhr98ONQmcp/yhiPJFPq8MXiJVLiksmOKSjIldZzkebzHuCGzew==", + "version": "8.25.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.25.0.tgz", + "integrity": "sha512-d77dHgHWnxmXOPJuDWO4FDWADmGQkN5+tt6SFRZz/RtCWl4pHgFl3+WdYCn16+3teG09DY6XtEpf3gGD0a186g==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/typescript-estree": "5.62.0", - "@typescript-eslint/utils": "5.62.0", + "@typescript-eslint/typescript-estree": "8.25.0", + "@typescript-eslint/utils": "8.25.0", "debug": "^4.3.4", - "tsutils": "^3.21.0" + "ts-api-utils": "^2.0.1" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/typescript-eslint" }, "peerDependencies": { - "eslint": "*" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <5.8.0" } }, "node_modules/@typescript-eslint/types": { - "version": "5.62.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.62.0.tgz", - "integrity": "sha512-87NVngcbVXUahrRTqIK27gD2t5Cu1yuCXxbLcFtCzZGlfyVWWh8mLHkoxzjsB6DDNnvdL+fW8MiwPEJyGJQDgQ==", + "version": "8.25.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.25.0.tgz", + "integrity": "sha512-+vUe0Zb4tkNgznQwicsvLUJgZIRs6ITeWSCclX1q85pR1iOiaj+4uZJIUp//Z27QWu5Cseiw3O3AR8hVpax7Aw==", "dev": true, "license": "MIT", "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { "type": "opencollective", @@ -2838,84 +2910,112 @@ } }, "node_modules/@typescript-eslint/typescript-estree": { - "version": "5.62.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.62.0.tgz", - "integrity": "sha512-CmcQ6uY7b9y694lKdRB8FEel7JbU/40iSAPomu++SjLMntB+2Leay2LO6i8VnJk58MtE9/nQSFIH6jpyRWyYzA==", + "version": "8.25.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.25.0.tgz", + "integrity": "sha512-ZPaiAKEZ6Blt/TPAx5Ot0EIB/yGtLI2EsGoY6F7XKklfMxYQyvtL+gT/UCqkMzO0BVFHLDlzvFqQzurYahxv9Q==", "dev": true, - "license": "BSD-2-Clause", + "license": "MIT", "dependencies": { - "@typescript-eslint/types": "5.62.0", - "@typescript-eslint/visitor-keys": "5.62.0", + "@typescript-eslint/types": "8.25.0", + "@typescript-eslint/visitor-keys": "8.25.0", "debug": "^4.3.4", - "globby": "^11.1.0", + "fast-glob": "^3.3.2", "is-glob": "^4.0.3", - "semver": "^7.3.7", - "tsutils": "^3.21.0" + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^2.0.1" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { "type": 
"opencollective", "url": "https://opencollective.com/typescript-eslint" }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } + "peerDependencies": { + "typescript": ">=4.8.4 <5.8.0" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" } }, "node_modules/@typescript-eslint/utils": { - "version": "5.62.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.62.0.tgz", - "integrity": "sha512-n8oxjeb5aIbPFEtmQxQYOLI0i9n5ySBEY/ZEHHZqKQSFnxio1rv6dthascc9dLuwrL0RC5mPCxB7vnAVGAYWAQ==", + "version": "8.25.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.25.0.tgz", + "integrity": "sha512-syqRbrEv0J1wywiLsK60XzHnQe/kRViI3zwFALrNEgnntn1l24Ra2KvOAWwWbWZ1lBZxZljPDGOq967dsl6fkA==", "dev": true, "license": "MIT", "dependencies": { - "@eslint-community/eslint-utils": "^4.2.0", - "@types/json-schema": "^7.0.9", - "@types/semver": "^7.3.12", - "@typescript-eslint/scope-manager": "5.62.0", - "@typescript-eslint/types": "5.62.0", - "@typescript-eslint/typescript-estree": "5.62.0", - "eslint-scope": "^5.1.1", - "semver": "^7.3.7" + "@eslint-community/eslint-utils": "^4.4.0", + "@typescript-eslint/scope-manager": "8.25.0", + "@typescript-eslint/types": "8.25.0", + "@typescript-eslint/typescript-estree": "8.25.0" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/typescript-eslint" }, "peerDependencies": { - "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <5.8.0" } }, "node_modules/@typescript-eslint/visitor-keys": { - "version": "5.62.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.62.0.tgz", - "integrity": "sha512-07ny+LHRzQXepkGg6w0mFY41fVUNBrL2Roj/++7V1txKugfjm/Ci/qSND03r2RhlJhJYMcTn9AhhSSqQp0Ysyw==", + "version": "8.25.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.25.0.tgz", + "integrity": "sha512-kCYXKAum9CecGVHGij7muybDfTS2sD3t0L4bJsEZLkyrXUImiCTq1M3LG2SRtOhiHFwMR9wAFplpT6XHYjTkwQ==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "5.62.0", - "eslint-visitor-keys": "^3.3.0" + "@typescript-eslint/types": "8.25.0", + "eslint-visitor-keys": "^4.2.0" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/typescript-eslint" } }, - "node_modules/@ungap/structured-clone": { - "version": "1.3.0", - "resolved": 
"https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz", - "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==", + "node_modules/@typescript-eslint/visitor-keys/node_modules/eslint-visitor-keys": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.0.tgz", + "integrity": "sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw==", "dev": true, - "license": "ISC" + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } }, "node_modules/@vitejs/plugin-react": { "version": "4.3.4", @@ -3841,38 +3941,12 @@ "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==", "license": "Apache-2.0" }, - "node_modules/dir-glob": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", - "dev": true, - "license": "MIT", - "dependencies": { - "path-type": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/dlv": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==", "license": "MIT" }, - "node_modules/doctrine": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", - "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "esutils": "^2.0.2" - }, - "engines": { - "node": ">=6.0.0" - } - }, "node_modules/dom-helpers": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/dom-helpers/-/dom-helpers-5.2.1.tgz", @@ -3990,66 +4064,69 @@ } }, "node_modules/eslint": { - "version": "8.57.1", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.1.tgz", - "integrity": "sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA==", - "deprecated": "This version is no longer supported. 
Please see https://eslint.org/version-support for other options.", + "version": "9.21.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.21.0.tgz", + "integrity": "sha512-KjeihdFqTPhOMXTt7StsDxriV4n66ueuF/jfPNC3j/lduHwr/ijDwJMsF+wyMJethgiKi5wniIE243vi07d3pg==", "dev": true, "license": "MIT", "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", - "@eslint-community/regexpp": "^4.6.1", - "@eslint/eslintrc": "^2.1.4", - "@eslint/js": "8.57.1", - "@humanwhocodes/config-array": "^0.13.0", + "@eslint-community/regexpp": "^4.12.1", + "@eslint/config-array": "^0.19.2", + "@eslint/core": "^0.12.0", + "@eslint/eslintrc": "^3.3.0", + "@eslint/js": "9.21.0", + "@eslint/plugin-kit": "^0.2.7", + "@humanfs/node": "^0.16.6", "@humanwhocodes/module-importer": "^1.0.1", - "@nodelib/fs.walk": "^1.2.8", - "@ungap/structured-clone": "^1.2.0", + "@humanwhocodes/retry": "^0.4.2", + "@types/estree": "^1.0.6", + "@types/json-schema": "^7.0.15", "ajv": "^6.12.4", "chalk": "^4.0.0", - "cross-spawn": "^7.0.2", + "cross-spawn": "^7.0.6", "debug": "^4.3.2", - "doctrine": "^3.0.0", "escape-string-regexp": "^4.0.0", - "eslint-scope": "^7.2.2", - "eslint-visitor-keys": "^3.4.3", - "espree": "^9.6.1", - "esquery": "^1.4.2", + "eslint-scope": "^8.2.0", + "eslint-visitor-keys": "^4.2.0", + "espree": "^10.3.0", + "esquery": "^1.5.0", "esutils": "^2.0.2", "fast-deep-equal": "^3.1.3", - "file-entry-cache": "^6.0.1", + "file-entry-cache": "^8.0.0", "find-up": "^5.0.0", "glob-parent": "^6.0.2", - "globals": "^13.19.0", - "graphemer": "^1.4.0", "ignore": "^5.2.0", "imurmurhash": "^0.1.4", "is-glob": "^4.0.0", - "is-path-inside": "^3.0.3", - "js-yaml": "^4.1.0", "json-stable-stringify-without-jsonify": "^1.0.1", - "levn": "^0.4.1", "lodash.merge": "^4.6.2", "minimatch": "^3.1.2", "natural-compare": "^1.4.0", - "optionator": "^0.9.3", - "strip-ansi": "^6.0.1", - "text-table": "^0.2.0" + "optionator": "^0.9.3" }, "bin": { "eslint": "bin/eslint.js" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { - "url": "https://opencollective.com/eslint" + "url": "https://eslint.org/donate" + }, + "peerDependencies": { + "jiti": "*" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + } } }, "node_modules/eslint-plugin-react-hooks": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-5.1.0.tgz", - "integrity": "sha512-mpJRtPgHN2tNAvZ35AMfqeB3Xqeo273QxrHJsbBEPWODRM4r0yB6jfoROqKEYrOn27UtRPpcpHc2UqyBSuUNTw==", + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-5.2.0.tgz", + "integrity": "sha512-+f15FfK64YQwZdJNELETdn5ibXEUQmW1DZL6KXhNnc2heoy/sg9VJJeT7n8TlMWouzWqSWavFkIhHyIbIAEapg==", "dev": true, "license": "MIT", "engines": { @@ -4070,17 +4147,20 @@ } }, "node_modules/eslint-scope": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", - "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.2.0.tgz", + "integrity": "sha512-PHlWUfG6lvPc3yvP5A4PNyBL1W8fkDUccmI21JUu/+GKZBoH/W5u6usENXUrWFRsyoW5ACUjFGgAFQp5gUlb/A==", "dev": true, "license": "BSD-2-Clause", "dependencies": { "esrecurse": "^4.3.0", - "estraverse": "^4.1.1" + "estraverse": "^5.2.0" }, "engines": { - "node": ">=8.0.0" + "node": "^18.18.0 || ^20.9.0 || 
>=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" } }, "node_modules/eslint-visitor-keys": { @@ -4096,62 +4176,45 @@ "url": "https://opencollective.com/eslint" } }, - "node_modules/eslint/node_modules/eslint-scope": { - "version": "7.2.2", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", - "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", + "node_modules/eslint/node_modules/eslint-visitor-keys": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.0.tgz", + "integrity": "sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw==", "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "esrecurse": "^4.3.0", - "estraverse": "^5.2.0" - }, + "license": "Apache-2.0", "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { "url": "https://opencollective.com/eslint" } }, - "node_modules/eslint/node_modules/estraverse": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", - "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "node_modules/espree": { + "version": "10.3.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-10.3.0.tgz", + "integrity": "sha512-0QYC8b24HWY8zjRnDTL6RiHfDbAWn63qb4LMj1Z4b076A4une81+z03Kg7l7mn/48PUTqoLptSXez8oknU8Clg==", "dev": true, "license": "BSD-2-Clause", - "engines": { - "node": ">=4.0" - } - }, - "node_modules/eslint/node_modules/globals": { - "version": "13.24.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", - "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", - "dev": true, - "license": "MIT", "dependencies": { - "type-fest": "^0.20.2" + "acorn": "^8.14.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^4.2.0" }, "engines": { - "node": ">=8" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "url": "https://opencollective.com/eslint" } }, - "node_modules/espree": { - "version": "9.6.1", - "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", - "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", + "node_modules/espree/node_modules/eslint-visitor-keys": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.0.tgz", + "integrity": "sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw==", "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "acorn": "^8.9.0", - "acorn-jsx": "^5.3.2", - "eslint-visitor-keys": "^3.4.1" - }, + "license": "Apache-2.0", "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { "url": "https://opencollective.com/eslint" @@ -4184,16 +4247,6 @@ "node": ">=0.10" } }, - "node_modules/esquery/node_modules/estraverse": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", - "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", - "dev": true, - "license": "BSD-2-Clause", - "engines": { - "node": ">=4.0" - } - }, "node_modules/esrecurse": { "version": 
"4.3.0", "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", @@ -4207,7 +4260,7 @@ "node": ">=4.0" } }, - "node_modules/esrecurse/node_modules/estraverse": { + "node_modules/estraverse": { "version": "5.3.0", "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", @@ -4217,16 +4270,6 @@ "node": ">=4.0" } }, - "node_modules/estraverse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", - "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", - "dev": true, - "license": "BSD-2-Clause", - "engines": { - "node": ">=4.0" - } - }, "node_modules/estree-walker": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", @@ -4331,16 +4374,16 @@ } }, "node_modules/file-entry-cache": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", - "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", + "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", "dev": true, "license": "MIT", "dependencies": { - "flat-cache": "^3.0.4" + "flat-cache": "^4.0.0" }, "engines": { - "node": "^10.12.0 || >=12.0.0" + "node": ">=16.0.0" } }, "node_modules/fill-range": { @@ -4389,24 +4432,23 @@ } }, "node_modules/flat-cache": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", - "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz", + "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", "dev": true, "license": "MIT", "dependencies": { "flatted": "^3.2.9", - "keyv": "^4.5.3", - "rimraf": "^3.0.2" + "keyv": "^4.5.4" }, "engines": { - "node": "^10.12.0 || >=12.0.0" + "node": ">=16" } }, "node_modules/flatted": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.2.tgz", - "integrity": "sha512-AiwGJM8YcNOaobumgtng+6NHuOqC3A7MixFeDafM3X9cIUM+xUXoS5Vfgf+OihAYe20fxqNM9yPBXJzRtZ/4eA==", + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", + "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", "dev": true, "license": "ISC" }, @@ -4440,13 +4482,6 @@ "url": "https://github.com/sponsors/rawify" } }, - "node_modules/fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", - "dev": true, - "license": "ISC" - }, "node_modules/fsevents": { "version": "2.3.3", "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", @@ -4499,28 +4534,6 @@ "node": ">=6" } }, - "node_modules/glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": 
"sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "deprecated": "Glob versions prior to v9 are no longer supported", - "dev": true, - "license": "ISC", - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - }, - "engines": { - "node": "*" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, "node_modules/glob-parent": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", @@ -4588,27 +4601,6 @@ "node": ">=4" } }, - "node_modules/globby": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", - "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", - "dev": true, - "license": "MIT", - "dependencies": { - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.9", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/graphemer": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", @@ -4688,25 +4680,6 @@ "node": ">=0.8.19" } }, - "node_modules/inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", - "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", - "dev": true, - "license": "ISC", - "dependencies": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "dev": true, - "license": "ISC" - }, "node_modules/ini": { "version": "1.3.8", "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", @@ -4796,16 +4769,6 @@ "node": ">=0.12.0" } }, - "node_modules/is-path-inside": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", - "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, "node_modules/is-windows": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", @@ -5138,13 +5101,6 @@ "dev": true, "license": "MIT" }, - "node_modules/natural-compare-lite": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/natural-compare-lite/-/natural-compare-lite-1.4.0.tgz", - "integrity": "sha512-Tj+HTDSJJKaZnfiuw+iaF9skdPpTo2GtEly5JHnWV/hfv2Qj/9RKsGISQtLh2ox3l5EAGw487hnBee0sIJ6v2g==", - "dev": true, - "license": "MIT" - }, "node_modules/next-themes": { "version": "0.4.4", "resolved": "https://registry.npmjs.org/next-themes/-/next-themes-0.4.4.tgz", @@ -5199,16 +5155,6 @@ "node": ">= 6" } }, - "node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": 
"sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", - "dev": true, - "license": "ISC", - "dependencies": { - "wrappy": "1" - } - }, "node_modules/optionator": { "version": "0.9.4", "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", @@ -5317,16 +5263,6 @@ "node": ">=8" } }, - "node_modules/path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/path-key": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", @@ -5671,21 +5607,6 @@ "react-dom": "^16.9.0 || ^17 || ^18 || ^19 || ^19.0.0-rc" } }, - "node_modules/react-datepicker/node_modules/@floating-ui/react": { - "version": "0.27.4", - "resolved": "https://registry.npmjs.org/@floating-ui/react/-/react-0.27.4.tgz", - "integrity": "sha512-05mXdkUiVh8NCEcYKQ2C9SV9IkZ9k/dFtYmaEIN2riLv80UHoXylgBM76cgPJYfLJM3dJz7UE5MOVH0FypMd2Q==", - "license": "MIT", - "dependencies": { - "@floating-ui/react-dom": "^2.1.2", - "@floating-ui/utils": "^0.2.9", - "tabbable": "^6.0.0" - }, - "peerDependencies": { - "react": ">=17.0.0", - "react-dom": ">=17.0.0" - } - }, "node_modules/react-dom": { "version": "18.3.1", "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", @@ -5788,12 +5709,12 @@ } }, "node_modules/react-router": { - "version": "6.29.0", - "resolved": "https://registry.npmjs.org/react-router/-/react-router-6.29.0.tgz", - "integrity": "sha512-DXZJoE0q+KyeVw75Ck6GkPxFak63C4fGqZGNijnWgzB/HzSP1ZfTlBj5COaGWwhrMQ/R8bXiq5Ooy4KG+ReyjQ==", + "version": "6.30.0", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-6.30.0.tgz", + "integrity": "sha512-D3X8FyH9nBcTSHGdEKurK7r8OYE1kKFn3d/CF+CoxbSHkxU7o37+Uh7eAHRXr6k2tSExXYO++07PeXJtA/dEhQ==", "license": "MIT", "dependencies": { - "@remix-run/router": "1.22.0" + "@remix-run/router": "1.23.0" }, "engines": { "node": ">=14.0.0" @@ -5803,13 +5724,13 @@ } }, "node_modules/react-router-dom": { - "version": "6.29.0", - "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-6.29.0.tgz", - "integrity": "sha512-pkEbJPATRJ2iotK+wUwHfy0xs2T59YPEN8BQxVCPeBZvK7kfPESRc/nyxzdcxR17hXgUPYx2whMwl+eo9cUdnQ==", + "version": "6.30.0", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-6.30.0.tgz", + "integrity": "sha512-x30B78HV5tFk8ex0ITwzC9TTZMua4jGyA9IUlH1JLQYQTFyxr/ZxwOJq7evg1JX1qGVUcvhsmQSKdPncQrjTgA==", "license": "MIT", "dependencies": { - "@remix-run/router": "1.22.0", - "react-router": "6.29.0" + "@remix-run/router": "1.23.0", + "react-router": "6.30.0" }, "engines": { "node": ">=14.0.0" @@ -6002,23 +5923,6 @@ "node": ">=0.10.0" } }, - "node_modules/rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "deprecated": "Rimraf versions prior to v4 are no longer supported", - "dev": true, - "license": "ISC", - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, "node_modules/rollup": { "version": "4.32.1", "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.32.1.tgz", @@ -6143,16 +6047,6 @@ 
"url": "https://github.com/sponsors/isaacs" } }, - "node_modules/slash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", - "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, "node_modules/source-map-js": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", @@ -6425,13 +6319,6 @@ "tailwindcss": ">=3.0.0 || insiders" } }, - "node_modules/text-table": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", - "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", - "dev": true, - "license": "MIT" - }, "node_modules/thenify": { "version": "3.3.1", "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", @@ -6471,6 +6358,19 @@ "node": ">=8.0" } }, + "node_modules/ts-api-utils": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.0.1.tgz", + "integrity": "sha512-dnlgjFSVetynI8nzgJ+qF62efpglpWRk8isUEWZGWlJYySCTD6aKvbUDu+zbPeDakk3bg5H4XpitHukgfL1m9w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18.12" + }, + "peerDependencies": { + "typescript": ">=4.8.4" + } + }, "node_modules/ts-interface-checker": { "version": "0.1.13", "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", @@ -6483,29 +6383,6 @@ "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", "license": "0BSD" }, - "node_modules/tsutils": { - "version": "3.21.0", - "resolved": "https://registry.npmjs.org/tsutils/-/tsutils-3.21.0.tgz", - "integrity": "sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==", - "dev": true, - "license": "MIT", - "dependencies": { - "tslib": "^1.8.1" - }, - "engines": { - "node": ">= 6" - }, - "peerDependencies": { - "typescript": ">=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta" - } - }, - "node_modules/tsutils/node_modules/tslib": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", - "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==", - "dev": true, - "license": "0BSD" - }, "node_modules/type-check": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", @@ -6519,19 +6396,6 @@ "node": ">= 0.8.0" } }, - "node_modules/type-fest": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", - "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", - "dev": true, - "license": "(MIT OR CC0-1.0)", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/typescript": { "version": "5.7.3", "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.7.3.tgz", @@ -6875,13 +6739,6 @@ "url": "https://github.com/chalk/strip-ansi?sponsor=1" } }, - "node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", - 
"dev": true, - "license": "ISC" - }, "node_modules/y18n": { "version": "5.0.8", "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", diff --git a/pkg/ui/frontend/package.json b/pkg/ui/frontend/package.json index 16426fde64921..572d60fc9bdfa 100644 --- a/pkg/ui/frontend/package.json +++ b/pkg/ui/frontend/package.json @@ -10,7 +10,7 @@ "preview": "vite preview" }, "dependencies": { - "@hookform/resolvers": "^3.10.0", + "@hookform/resolvers": "^4.0.0", "@radix-ui/react-checkbox": "^1.1.3", "@radix-ui/react-collapsible": "^1.1.2", "@radix-ui/react-dialog": "^1.1.5", @@ -29,8 +29,8 @@ "@radix-ui/react-toggle": "^1.1.1", "@radix-ui/react-toggle-group": "^1.1.1", "@radix-ui/react-tooltip": "^1.1.7", - "@tanstack/react-query": "^5.66.0", - "@tanstack/react-query-devtools": "^5.66.0", + "@tanstack/react-query": "^5.66.11", + "@tanstack/react-query-devtools": "^5.66.11", "@types/lodash": "^4.17.15", "@types/react-datepicker": "^7.0.0", "class-variance-authority": "^0.7.1", @@ -47,7 +47,7 @@ "react-dom": "^18.2.0", "react-hook-form": "^7.54.2", "react-icons": "^5.4.0", - "react-router-dom": "^6.22.0", + "react-router-dom": "^6.30.0", "recharts": "^2.15.1", "tailwind-merge": "^2.6.0", "tailwindcss-animate": "^1.0.7", @@ -58,13 +58,13 @@ "@types/node": "^22.12.0", "@types/react": "^18.2.0", "@types/react-dom": "^18.2.0", - "@typescript-eslint/eslint-plugin": "^5.57.1", - "@typescript-eslint/parser": "^5.57.1", + "@typescript-eslint/eslint-plugin": "^8.0.0", + "@typescript-eslint/parser": "^8.0.0", "@vitejs/plugin-react": "^4.2.1", "autoprefixer": "^10.4.20", "depcheck": "^1.4.7", - "eslint": "^8.38.0", - "eslint-plugin-react-hooks": "^5.0.0", + "eslint": "^9.0.0", + "eslint-plugin-react-hooks": "^5.2.0", "eslint-plugin-react-refresh": "^0.4.5", "postcss": "^8.5.1", "tailwindcss": "^3.4.1",
chore
update dependency @types/react to v19 (main) (#16365)
f406f528ee305a68f165360ad947200435cf90f3
2023-12-12 21:27:32
sossickd
helm: Added topologySpreadConstraints to read deployment component (#11434)
false
diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md index 41c95ca7b5947..78571b3de600d 100644 --- a/production/helm/loki/CHANGELOG.md +++ b/production/helm/loki/CHANGELOG.md @@ -13,11 +13,14 @@ Entries should include a reference to the pull request that introduced the chang [//]: # (<AUTOMATED_UPDATES_LOCATOR> : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.) +## 5.41.1 + +- [FEATURE] Allow topology spread constraints for Loki read deployment component. + ## 5.41.0 - [CHANGE] Changed version of Loki to 2.9.3 - ## 5.40.1 - [BUGFIX] Remove ruler enabled condition in networkpolicies. diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml index e82ba8845009f..d9cf011e4f23e 100644 --- a/production/helm/loki/Chart.yaml +++ b/production/helm/loki/Chart.yaml @@ -3,7 +3,7 @@ name: loki description: Helm chart for Grafana Loki in simple, scalable mode type: application appVersion: 2.9.3 -version: 5.41.0 +version: 5.41.1 home: https://grafana.github.io/helm-charts sources: - https://github.com/grafana/loki diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md index 0d84230fe1b87..3caad398ada44 100644 --- a/production/helm/loki/README.md +++ b/production/helm/loki/README.md @@ -1,6 +1,6 @@ # loki -![Version: 5.41.0](https://img.shields.io/badge/Version-5.41.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.3](https://img.shields.io/badge/AppVersion-2.9.3-informational?style=flat-square) +![Version: 5.41.1](https://img.shields.io/badge/Version-5.41.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.3](https://img.shields.io/badge/AppVersion-2.9.3-informational?style=flat-square) Helm chart for Grafana Loki in simple, scalable mode diff --git a/production/helm/loki/templates/read/deployment-read.yaml b/production/helm/loki/templates/read/deployment-read.yaml index e468752d5723f..a5e7524f2a05f 100644 --- a/production/helm/loki/templates/read/deployment-read.yaml +++ b/production/helm/loki/templates/read/deployment-read.yaml @@ -127,6 +127,10 @@ spec: nodeSelector: {{- toYaml . | nindent 8 }} {{- end }} + {{- with .Values.read.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml . | nindent 8 }} + {{- end }} {{- with .Values.read.tolerations }} tolerations: {{- toYaml . | nindent 8 }}
helm
Added topologySpreadConstraints to read deployment component (#11434)
4bfa3807cfdf906321f9b9fdcbb5f1f278910ccc
2024-11-08 14:51:23
Salva Corts
revert: "perf(bloom): Compute chunkrefs for series right before sending task to builder" (#14839)
false
diff --git a/pkg/bloombuild/common/tsdb.go b/pkg/bloombuild/common/tsdb.go index 868828e72a337..a58b7cd6130f9 100644 --- a/pkg/bloombuild/common/tsdb.go +++ b/pkg/bloombuild/common/tsdb.go @@ -29,10 +29,8 @@ const ( gzipExtension = ".gz" ) -type ForSeries = sharding.ForSeries - type ClosableForSeries interface { - ForSeries + sharding.ForSeries Close() error } @@ -126,21 +124,33 @@ func (b *BloomTSDBStore) LoadTSDB( return idx, nil } -func NewTSDBSeriesIter(ctx context.Context, user string, f sharding.ForSeries, bounds v1.FingerprintBounds) (iter.Iterator[model.Fingerprint], error) { +func NewTSDBSeriesIter(ctx context.Context, user string, f sharding.ForSeries, bounds v1.FingerprintBounds) (iter.Iterator[*v1.Series], error) { // TODO(salvacorts): Create a pool - series := make([]model.Fingerprint, 0, 100) + series := make([]*v1.Series, 0, 100) if err := f.ForSeries( ctx, user, bounds, 0, math.MaxInt64, - func(_ labels.Labels, fp model.Fingerprint, _ []index.ChunkMeta) (stop bool) { + func(_ labels.Labels, fp model.Fingerprint, chks []index.ChunkMeta) (stop bool) { select { case <-ctx.Done(): return true default: - series = append(series, fp) + res := &v1.Series{ + Fingerprint: fp, + Chunks: make(v1.ChunkRefs, 0, len(chks)), + } + for _, chk := range chks { + res.Chunks = append(res.Chunks, v1.ChunkRef{ + From: model.Time(chk.MinTime), + Through: model.Time(chk.MaxTime), + Checksum: chk.Checksum, + }) + } + + series = append(series, res) return false } }, @@ -151,7 +161,7 @@ func NewTSDBSeriesIter(ctx context.Context, user string, f sharding.ForSeries, b select { case <-ctx.Done(): - return iter.NewEmptyIter[model.Fingerprint](), ctx.Err() + return iter.NewEmptyIter[*v1.Series](), ctx.Err() default: return iter.NewCancelableIter(ctx, iter.NewSliceIter(series)), nil } diff --git a/pkg/bloombuild/common/tsdb_test.go b/pkg/bloombuild/common/tsdb_test.go index 13e83c14719f9..b2df7982f4382 100644 --- a/pkg/bloombuild/common/tsdb_test.go +++ b/pkg/bloombuild/common/tsdb_test.go @@ -66,10 +66,10 @@ func TestTSDBSeriesIter(t *testing.T) { itr, err := NewTSDBSeriesIter(context.Background(), "", forSeriesTestImpl(input), v1.NewBounds(0, math.MaxUint64)) require.NoError(t, err) - v1.CompareIterators( + v1.EqualIterators( t, - func(t *testing.T, a model.Fingerprint, b *v1.Series) { - require.Equal(t, a, b.Fingerprint) + func(a, b *v1.Series) { + require.Equal(t, a, b) }, itr, srcItr, diff --git a/pkg/bloombuild/planner/planner.go b/pkg/bloombuild/planner/planner.go index 82c83d594b1e4..7c13dedb50452 100644 --- a/pkg/bloombuild/planner/planner.go +++ b/pkg/bloombuild/planner/planner.go @@ -227,10 +227,9 @@ func (p *Planner) runOne(ctx context.Context) error { } var ( - wg sync.WaitGroup - start = time.Now() - status = statusFailure - openTSDBs strategies.TSDBSet + wg sync.WaitGroup + start = time.Now() + status = statusFailure ) defer func() { p.metrics.buildCompleted.WithLabelValues(status).Inc() @@ -239,15 +238,6 @@ func (p *Planner) runOne(ctx context.Context) error { if status == statusSuccess { p.metrics.buildLastSuccess.SetToCurrentTime() } - - // Close all open TSDBs. - // These are used to get the chunkrefs for the series in the gaps. - // We populate the chunkrefs when we send the task to the builder. 
- for idx, reader := range openTSDBs { - if err := reader.Close(); err != nil { - level.Error(p.logger).Log("msg", "failed to close tsdb", "tsdb", idx.Name(), "err", err) - } - } }() p.metrics.buildStarted.Inc() @@ -285,19 +275,7 @@ func (p *Planner) runOne(ctx context.Context) error { table: table, } - tsdbs, err := p.tsdbStore.ResolveTSDBs(ctx, table, tenant) - if err != nil { - level.Error(logger).Log("msg", "failed to resolve tsdbs", "err", err) - continue - } - - openTSDBs, err = openAllTSDBs(ctx, table, tenant, p.tsdbStore, tsdbs, openTSDBs) - if err != nil { - level.Error(logger).Log("msg", "failed to open all tsdbs", "err", err) - continue - } - - tasks, existingMetas, err := p.computeTasks(ctx, table, tenant, openTSDBs) + tasks, existingMetas, err := p.computeTasks(ctx, table, tenant) if err != nil { level.Error(logger).Log("msg", "failed to compute tasks", "err", err) continue @@ -308,7 +286,7 @@ func (p *Planner) runOne(ctx context.Context) error { now := time.Now() for _, task := range tasks { - queueTask := NewQueueTask(ctx, now, task, openTSDBs[task.TSDB], resultsCh) + queueTask := NewQueueTask(ctx, now, task, resultsCh) if err := p.enqueueTask(queueTask); err != nil { level.Error(logger).Log("msg", "error enqueuing task", "err", err) continue @@ -396,8 +374,7 @@ func (p *Planner) computeTasks( ctx context.Context, table config.DayTable, tenant string, - tsdbs strategies.TSDBSet, -) ([]*strategies.Task, []bloomshipper.Meta, error) { +) ([]*protos.Task, []bloomshipper.Meta, error) { strategy, err := strategies.NewStrategy(tenant, p.limits, p.logger) if err != nil { return nil, nil, fmt.Errorf("error creating strategy: %w", err) @@ -425,11 +402,29 @@ func (p *Planner) computeTasks( return nil, nil, fmt.Errorf("failed to delete outdated metas during planning: %w", err) } + // Resolve TSDBs + tsdbs, err := p.tsdbStore.ResolveTSDBs(ctx, table, tenant) + if err != nil { + return nil, nil, fmt.Errorf("failed to resolve tsdbs: %w", err) + } + if len(tsdbs) == 0 { return nil, metas, nil } - tasks, err := strategy.Plan(ctx, table, tenant, tsdbs, metas) + openTSDBs, err := openAllTSDBs(ctx, table, tenant, p.tsdbStore, tsdbs) + if err != nil { + return nil, nil, fmt.Errorf("failed to open all tsdbs: %w", err) + } + defer func() { + for idx, reader := range openTSDBs { + if err := reader.Close(); err != nil { + level.Error(logger).Log("msg", "failed to close index", "err", err, "tsdb", idx.Name()) + } + } + }() + + tasks, err := strategy.Plan(ctx, table, tenant, openTSDBs, metas) if err != nil { return nil, nil, fmt.Errorf("failed to plan tasks: %w", err) } @@ -511,26 +506,18 @@ func openAllTSDBs( tenant string, store common.TSDBStore, tsdbs []tsdb.SingleTenantTSDBIdentifier, - alreadyOpen strategies.TSDBSet, -) (strategies.TSDBSet, error) { - if len(alreadyOpen) == 0 { - alreadyOpen = make(strategies.TSDBSet, len(tsdbs)) - } - +) (map[tsdb.SingleTenantTSDBIdentifier]common.ClosableForSeries, error) { + openTSDBs := make(map[tsdb.SingleTenantTSDBIdentifier]common.ClosableForSeries, len(tsdbs)) for _, idx := range tsdbs { - if _, ok := alreadyOpen[idx]; ok { - continue - } - - reader, err := store.LoadTSDB(ctx, table, tenant, idx) + tsdb, err := store.LoadTSDB(ctx, table, tenant, idx) if err != nil { return nil, fmt.Errorf("failed to load tsdb: %w", err) } - alreadyOpen[idx] = reader + openTSDBs[idx] = tsdb } - return alreadyOpen, nil + return openTSDBs, nil } // deleteOutdatedMetasAndBlocks filters out the outdated metas from the `metas` argument and deletes them from the store. 
@@ -860,13 +847,8 @@ func (p *Planner) forwardTaskToBuilder( builderID string, task *QueueTask, ) (*protos.TaskResult, error) { - protoTask, err := task.ToProtoTask(builder.Context()) - if err != nil { - return nil, fmt.Errorf("error converting task to proto task: %w", err) - } - msg := &protos.PlannerToBuilder{ - Task: protoTask, + Task: task.ToProtoTask(), } if err := builder.Send(msg); err != nil { diff --git a/pkg/bloombuild/planner/planner_test.go b/pkg/bloombuild/planner/planner_test.go index 9b0082f15e08c..6b1b1e0beba16 100644 --- a/pkg/bloombuild/planner/planner_test.go +++ b/pkg/bloombuild/planner/planner_test.go @@ -713,21 +713,12 @@ func (f *fakeBuilder) Recv() (*protos.BuilderToPlanner, error) { } func createTasks(n int, resultsCh chan *protos.TaskResult) []*QueueTask { - forSeries := plannertest.NewFakeForSeries(plannertest.GenV1Series(v1.NewBounds(0, 100))) - tasks := make([]*QueueTask, 0, n) // Enqueue tasks for i := 0; i < n; i++ { task := NewQueueTask( context.Background(), time.Now(), - strategies.NewTask( - config.NewDayTable(plannertest.TestDay, "fake"), - "fakeTenant", - v1.NewBounds(0, 10), - plannertest.TsdbID(1), - nil, - ), - forSeries, + protos.NewTask(config.NewDayTable(plannertest.TestDay, "fake"), "fakeTenant", v1.NewBounds(0, 10), plannertest.TsdbID(1), nil), resultsCh, ) tasks = append(tasks, task) diff --git a/pkg/bloombuild/planner/plannertest/utils.go b/pkg/bloombuild/planner/plannertest/utils.go index 8938966ff83bb..706e0abdf00a7 100644 --- a/pkg/bloombuild/planner/plannertest/utils.go +++ b/pkg/bloombuild/planner/plannertest/utils.go @@ -6,7 +6,6 @@ import ( "time" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" "github.com/grafana/loki/v3/pkg/compression" v2 "github.com/grafana/loki/v3/pkg/iter/v2" @@ -14,7 +13,6 @@ import ( "github.com/grafana/loki/v3/pkg/storage/config" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb" - "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index" ) var TestDay = ParseDayTime("2023-09-01") @@ -89,23 +87,11 @@ func GenBlock(ref bloomshipper.BlockRef) (bloomshipper.Block, error) { }, nil } -func GenSeries(bounds v1.FingerprintBounds) []model.Fingerprint { +func GenSeries(bounds v1.FingerprintBounds) []*v1.Series { return GenSeriesWithStep(bounds, 1) } -func GenSeriesWithStep(bounds v1.FingerprintBounds, step int) []model.Fingerprint { - series := make([]model.Fingerprint, 0, int(bounds.Max-bounds.Min+1)/step) - for i := bounds.Min; i <= bounds.Max; i += model.Fingerprint(step) { - series = append(series, i) - } - return series -} - -func GenV1Series(bounds v1.FingerprintBounds) []*v1.Series { - return GenV1SeriesWithStep(bounds, 1) -} - -func GenV1SeriesWithStep(bounds v1.FingerprintBounds, step int) []*v1.Series { +func GenSeriesWithStep(bounds v1.FingerprintBounds, step int) []*v1.Series { series := make([]*v1.Series, 0, int(bounds.Max-bounds.Min+1)/step) for i := bounds.Min; i <= bounds.Max; i += model.Fingerprint(step) { series = append(series, &v1.Series{ @@ -153,43 +139,3 @@ func ParseDayTime(s string) config.DayTime { Time: model.TimeFromUnix(t.Unix()), } } - -type FakeForSeries struct { - series []*v1.Series -} - -func NewFakeForSeries(series []*v1.Series) *FakeForSeries { - return &FakeForSeries{ - series: series, - } -} - -func (f FakeForSeries) ForSeries(_ context.Context, _ string, ff index.FingerprintFilter, _ model.Time, _ model.Time, fn func(labels.Labels, 
model.Fingerprint, []index.ChunkMeta) (stop bool), _ ...*labels.Matcher) error { - overlapping := make([]*v1.Series, 0, len(f.series)) - for _, s := range f.series { - if ff.Match(s.Fingerprint) { - overlapping = append(overlapping, s) - } - } - - for _, s := range overlapping { - chunks := make([]index.ChunkMeta, 0, len(s.Chunks)) - for _, c := range s.Chunks { - chunks = append(chunks, index.ChunkMeta{ - MinTime: int64(c.From), - MaxTime: int64(c.Through), - Checksum: c.Checksum, - KB: 100, - }) - } - - if fn(labels.EmptyLabels(), s.Fingerprint, chunks) { - break - } - } - return nil -} - -func (f FakeForSeries) Close() error { - return nil -} diff --git a/pkg/bloombuild/planner/strategies/chunksize.go b/pkg/bloombuild/planner/strategies/chunksize.go index 3d59f40fb56ab..21f473908dd99 100644 --- a/pkg/bloombuild/planner/strategies/chunksize.go +++ b/pkg/bloombuild/planner/strategies/chunksize.go @@ -12,6 +12,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" + "github.com/grafana/loki/v3/pkg/bloombuild/protos" iter "github.com/grafana/loki/v3/pkg/iter/v2" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" "github.com/grafana/loki/v3/pkg/storage/config" @@ -49,7 +50,7 @@ func (s *ChunkSizeStrategy) Plan( tenant string, tsdbs TSDBSet, metas []bloomshipper.Meta, -) ([]*Task, error) { +) ([]*protos.Task, error) { targetTaskSize := s.limits.BloomTaskTargetSeriesChunksSizeBytes(tenant) logger := log.With(s.logger, "table", table.Addr(), "tenant", tenant) @@ -72,29 +73,29 @@ func (s *ChunkSizeStrategy) Plan( return nil, fmt.Errorf("failed to get sized series iter: %w", err) } - tasks := make([]*Task, 0, iterSize) + tasks := make([]*protos.Task, 0, iterSize) for sizedIter.Next() { - batch := sizedIter.At() - if batch.Len() == 0 { + series := sizedIter.At() + if series.Len() == 0 { // This should never happen, but just in case. - level.Warn(logger).Log("msg", "got empty series batch", "tsdb", batch.TSDB().Name()) + level.Warn(logger).Log("msg", "got empty series batch", "tsdb", series.TSDB().Name()) continue } - bounds := batch.Bounds() + bounds := series.Bounds() blocks, err := getBlocksMatchingBounds(metas, bounds) if err != nil { return nil, fmt.Errorf("failed to get blocks matching bounds: %w", err) } - planGap := Gap{ + planGap := protos.Gap{ Bounds: bounds, - Series: batch.series, + Series: series.V1Series(), Blocks: blocks, } - tasks = append(tasks, NewTask(table, tenant, bounds, batch.TSDB(), []Gap{planGap})) + tasks = append(tasks, protos.NewTask(table, tenant, bounds, series.TSDB(), []protos.Gap{planGap})) } if err := sizedIter.Err(); err != nil { return nil, fmt.Errorf("failed to iterate over sized series: %w", err) @@ -154,16 +155,20 @@ func getBlocksMatchingBounds(metas []bloomshipper.Meta, bounds v1.FingerprintBou return deduped, nil } -type seriesBatch struct { +type seriesWithChunks struct { tsdb tsdb.SingleTenantTSDBIdentifier - series []model.Fingerprint + fp model.Fingerprint + chunks []index.ChunkMeta +} + +type seriesBatch struct { + series []seriesWithChunks size uint64 } -func newSeriesBatch(tsdb tsdb.SingleTenantTSDBIdentifier) seriesBatch { +func newSeriesBatch() seriesBatch { return seriesBatch{ - tsdb: tsdb, - series: make([]model.Fingerprint, 0, 100), + series: make([]seriesWithChunks, 0, 100), } } @@ -174,11 +179,32 @@ func (b *seriesBatch) Bounds() v1.FingerprintBounds { // We assume that the series are sorted by fingerprint. // This is guaranteed since series are iterated in order by the TSDB. 
- return v1.NewBounds(b.series[0], b.series[len(b.series)-1]) + return v1.NewBounds(b.series[0].fp, b.series[len(b.series)-1].fp) +} + +func (b *seriesBatch) V1Series() []*v1.Series { + series := make([]*v1.Series, 0, len(b.series)) + for _, s := range b.series { + res := &v1.Series{ + Fingerprint: s.fp, + Chunks: make(v1.ChunkRefs, 0, len(s.chunks)), + } + for _, chk := range s.chunks { + res.Chunks = append(res.Chunks, v1.ChunkRef{ + From: model.Time(chk.MinTime), + Through: model.Time(chk.MaxTime), + Checksum: chk.Checksum, + }) + } + + series = append(series, res) + } + + return series } -func (b *seriesBatch) Append(series model.Fingerprint, size uint64) { - b.series = append(b.series, series) +func (b *seriesBatch) Append(s seriesWithChunks, size uint64) { + b.series = append(b.series, s) b.size += size } @@ -191,7 +217,10 @@ func (b *seriesBatch) Size() uint64 { } func (b *seriesBatch) TSDB() tsdb.SingleTenantTSDBIdentifier { - return b.tsdb + if len(b.series) == 0 { + return tsdb.SingleTenantTSDBIdentifier{} + } + return b.series[0].tsdb } func (s *ChunkSizeStrategy) sizedSeriesIter( @@ -201,12 +230,9 @@ func (s *ChunkSizeStrategy) sizedSeriesIter( targetTaskSizeBytes uint64, ) (iter.Iterator[seriesBatch], int, error) { batches := make([]seriesBatch, 0, 100) - var currentBatch seriesBatch + currentBatch := newSeriesBatch() for _, idx := range tsdbsWithGaps { - // We cut a new batch for each TSDB. - currentBatch = newSeriesBatch(idx.tsdbIdentifier) - for _, gap := range idx.gaps { if err := idx.tsdb.ForSeries( ctx, @@ -227,10 +253,14 @@ func (s *ChunkSizeStrategy) sizedSeriesIter( // AND Adding this series to the batch would exceed the target task size. if currentBatch.Len() > 0 && currentBatch.Size()+seriesSize > targetTaskSizeBytes { batches = append(batches, currentBatch) - currentBatch = newSeriesBatch(idx.tsdbIdentifier) + currentBatch = newSeriesBatch() } - currentBatch.Append(fp, seriesSize) + currentBatch.Append(seriesWithChunks{ + tsdb: idx.tsdbIdentifier, + fp: fp, + chunks: chks, + }, seriesSize) return false } }, @@ -239,10 +269,10 @@ func (s *ChunkSizeStrategy) sizedSeriesIter( return nil, 0, err } - // Add the last batch for this gap if it's not empty. + // Add the last batch for this TSDB if it's not empty. 
if currentBatch.Len() > 0 { batches = append(batches, currentBatch) - currentBatch = newSeriesBatch(idx.tsdbIdentifier) + currentBatch = newSeriesBatch() } } } diff --git a/pkg/bloombuild/planner/strategies/chunksize_test.go b/pkg/bloombuild/planner/strategies/chunksize_test.go index 3c0f88dc39006..951d033e5c100 100644 --- a/pkg/bloombuild/planner/strategies/chunksize_test.go +++ b/pkg/bloombuild/planner/strategies/chunksize_test.go @@ -8,13 +8,14 @@ import ( "github.com/stretchr/testify/require" "github.com/grafana/loki/v3/pkg/bloombuild/planner/plannertest" + "github.com/grafana/loki/v3/pkg/bloombuild/protos" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb" ) -func taskForGap(tsdb tsdb.SingleTenantTSDBIdentifier, bounds v1.FingerprintBounds, blocks []bloomshipper.BlockRef) *Task { - return NewTask(plannertest.TestTable, "fake", bounds, tsdb, []Gap{ +func taskForGap(tsdb tsdb.SingleTenantTSDBIdentifier, bounds v1.FingerprintBounds, blocks []bloomshipper.BlockRef) *protos.Task { + return protos.NewTask(plannertest.TestTable, "fake", bounds, tsdb, []protos.Gap{ { Bounds: bounds, Series: plannertest.GenSeriesWithStep(bounds, 10), @@ -24,14 +25,12 @@ func taskForGap(tsdb tsdb.SingleTenantTSDBIdentifier, bounds v1.FingerprintBound } func Test_ChunkSizeStrategy_Plan(t *testing.T) { - forSeries := plannertest.NewFakeForSeries(plannertest.GenV1SeriesWithStep(v1.NewBounds(0, 100), 10)) - for _, tc := range []struct { name string limits ChunkSizeStrategyLimits originalMetas []bloomshipper.Meta tsdbs TSDBSet - expectedTasks []*Task + expectedTasks []*protos.Task }{ { name: "no previous blocks and metas", @@ -39,11 +38,11 @@ func Test_ChunkSizeStrategy_Plan(t *testing.T) { // Each series will have 1 chunk of 100KB each tsdbs: TSDBSet{ - plannertest.TsdbID(0): forSeries, // 10 series + plannertest.TsdbID(0): newFakeForSeries(plannertest.GenSeriesWithStep(v1.NewBounds(0, 100), 10)), // 10 series }, // We expect 5 tasks, each with 2 series each - expectedTasks: []*Task{ + expectedTasks: []*protos.Task{ taskForGap(plannertest.TsdbID(0), v1.NewBounds(0, 10), nil), taskForGap(plannertest.TsdbID(0), v1.NewBounds(20, 30), nil), taskForGap(plannertest.TsdbID(0), v1.NewBounds(40, 50), nil), @@ -85,11 +84,11 @@ func Test_ChunkSizeStrategy_Plan(t *testing.T) { }, tsdbs: TSDBSet{ - plannertest.TsdbID(0): forSeries, // 10 series + plannertest.TsdbID(0): newFakeForSeries(plannertest.GenSeriesWithStep(v1.NewBounds(0, 100), 10)), // 10 series }, // We expect no tasks - expectedTasks: []*Task{}, + expectedTasks: []*protos.Task{}, }, { name: "Original metas do not cover the entire range", @@ -122,11 +121,11 @@ func Test_ChunkSizeStrategy_Plan(t *testing.T) { }, tsdbs: TSDBSet{ - plannertest.TsdbID(0): forSeries, // 10 series + plannertest.TsdbID(0): newFakeForSeries(plannertest.GenSeriesWithStep(v1.NewBounds(0, 100), 10)), // 10 series }, // We expect 1 tasks for the missing series - expectedTasks: []*Task{ + expectedTasks: []*protos.Task{ taskForGap(plannertest.TsdbID(0), v1.NewBounds(20, 30), nil), }, }, @@ -151,11 +150,11 @@ func Test_ChunkSizeStrategy_Plan(t *testing.T) { }, tsdbs: TSDBSet{ - plannertest.TsdbID(1): forSeries, // 10 series + plannertest.TsdbID(1): newFakeForSeries(plannertest.GenSeriesWithStep(v1.NewBounds(0, 100), 10)), // 10 series }, // We expect 5 tasks, each with 2 series each - expectedTasks: []*Task{ + expectedTasks: []*protos.Task{ 
taskForGap(plannertest.TsdbID(1), v1.NewBounds(0, 10), []bloomshipper.BlockRef{ plannertest.GenBlockRef(0, 0), plannertest.GenBlockRef(10, 10), @@ -206,11 +205,11 @@ func Test_ChunkSizeStrategy_Plan(t *testing.T) { }, tsdbs: TSDBSet{ - plannertest.TsdbID(1): forSeries, // 10 series + plannertest.TsdbID(1): newFakeForSeries(plannertest.GenSeriesWithStep(v1.NewBounds(0, 100), 10)), // 10 series }, // We expect 5 tasks, each with 2 series each - expectedTasks: []*Task{ + expectedTasks: []*protos.Task{ taskForGap(plannertest.TsdbID(1), v1.NewBounds(0, 10), []bloomshipper.BlockRef{ plannertest.GenBlockRef(0, 0), plannertest.GenBlockRef(10, 10), diff --git a/pkg/bloombuild/planner/strategies/factory.go b/pkg/bloombuild/planner/strategies/factory.go index a3ca12f57c3bd..f58f91e51708d 100644 --- a/pkg/bloombuild/planner/strategies/factory.go +++ b/pkg/bloombuild/planner/strategies/factory.go @@ -7,6 +7,7 @@ import ( "github.com/go-kit/log" "github.com/grafana/loki/v3/pkg/bloombuild/common" + "github.com/grafana/loki/v3/pkg/bloombuild/protos" "github.com/grafana/loki/v3/pkg/storage/config" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb" @@ -28,7 +29,7 @@ type TSDBSet = map[tsdb.SingleTenantTSDBIdentifier]common.ClosableForSeries type PlanningStrategy interface { Name() string // Plan returns a set of tasks for a given tenant-table tuple and TSDBs. - Plan(ctx context.Context, table config.DayTable, tenant string, tsdbs TSDBSet, metas []bloomshipper.Meta) ([]*Task, error) + Plan(ctx context.Context, table config.DayTable, tenant string, tsdbs TSDBSet, metas []bloomshipper.Meta) ([]*protos.Task, error) } func NewStrategy( diff --git a/pkg/bloombuild/planner/strategies/splitkeyspace.go b/pkg/bloombuild/planner/strategies/splitkeyspace.go index ccb2c0141ca22..2e799d1ed4903 100644 --- a/pkg/bloombuild/planner/strategies/splitkeyspace.go +++ b/pkg/bloombuild/planner/strategies/splitkeyspace.go @@ -8,6 +8,7 @@ import ( "github.com/go-kit/log/level" "github.com/grafana/loki/v3/pkg/bloombuild/common" + "github.com/grafana/loki/v3/pkg/bloombuild/protos" iter "github.com/grafana/loki/v3/pkg/iter/v2" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" "github.com/grafana/loki/v3/pkg/storage/config" @@ -44,14 +45,14 @@ func (s *SplitKeyspaceStrategy) Plan( tenant string, tsdbs TSDBSet, metas []bloomshipper.Meta, -) ([]*Task, error) { +) ([]*protos.Task, error) { splitFactor := s.limits.BloomSplitSeriesKeyspaceBy(tenant) ownershipRanges := SplitFingerprintKeyspaceByFactor(splitFactor) logger := log.With(s.logger, "table", table.Addr(), "tenant", tenant) level.Debug(s.logger).Log("msg", "loading work for tenant", "splitFactor", splitFactor) - var tasks []*Task + var tasks []*protos.Task for _, ownershipRange := range ownershipRanges { logger := log.With(logger, "ownership", ownershipRange.String()) @@ -66,7 +67,7 @@ func (s *SplitKeyspaceStrategy) Plan( } for _, gap := range gaps { - tasks = append(tasks, NewTask(table, tenant, ownershipRange, gap.tsdb, gap.gaps)) + tasks = append(tasks, protos.NewTask(table, tenant, ownershipRange, gap.tsdb, gap.gaps)) } } @@ -84,7 +85,7 @@ func (s *SplitKeyspaceStrategy) Plan( // This is a performance optimization to avoid expensive re-reindexing type blockPlan struct { tsdb tsdb.SingleTenantTSDBIdentifier - gaps []Gap + gaps []protos.Gap } func (s *SplitKeyspaceStrategy) findOutdatedGaps( @@ -174,11 +175,11 @@ func blockPlansForGaps( for _, idx := range tsdbs { plan := blockPlan{ 
tsdb: idx.tsdbIdentifier, - gaps: make([]Gap, 0, len(idx.gaps)), + gaps: make([]protos.Gap, 0, len(idx.gaps)), } for _, gap := range idx.gaps { - planGap := Gap{ + planGap := protos.Gap{ Bounds: gap, } diff --git a/pkg/bloombuild/planner/strategies/splitkeyspace_test.go b/pkg/bloombuild/planner/strategies/splitkeyspace_test.go index e934205199c2b..18480d74c98fc 100644 --- a/pkg/bloombuild/planner/strategies/splitkeyspace_test.go +++ b/pkg/bloombuild/planner/strategies/splitkeyspace_test.go @@ -4,13 +4,17 @@ import ( "context" "testing" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/require" "github.com/grafana/loki/v3/pkg/bloombuild/common" "github.com/grafana/loki/v3/pkg/bloombuild/planner/plannertest" + "github.com/grafana/loki/v3/pkg/bloombuild/protos" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb" + "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index" ) func Test_gapsBetweenTSDBsAndMetas(t *testing.T) { @@ -135,7 +139,7 @@ func Test_blockPlansForGaps(t *testing.T) { exp: []blockPlan{ { tsdb: plannertest.TsdbID(0), - gaps: []Gap{ + gaps: []protos.Gap{ { Bounds: v1.NewBounds(0, 10), Series: plannertest.GenSeries(v1.NewBounds(0, 10)), @@ -154,7 +158,7 @@ func Test_blockPlansForGaps(t *testing.T) { exp: []blockPlan{ { tsdb: plannertest.TsdbID(0), - gaps: []Gap{ + gaps: []protos.Gap{ { Bounds: v1.NewBounds(0, 10), Series: plannertest.GenSeries(v1.NewBounds(0, 10)), @@ -178,7 +182,7 @@ func Test_blockPlansForGaps(t *testing.T) { exp: []blockPlan{ { tsdb: plannertest.TsdbID(0), - gaps: []Gap{ + gaps: []protos.Gap{ { Bounds: v1.NewBounds(0, 8), Series: plannertest.GenSeries(v1.NewBounds(0, 8)), @@ -198,7 +202,7 @@ func Test_blockPlansForGaps(t *testing.T) { exp: []blockPlan{ { tsdb: plannertest.TsdbID(0), - gaps: []Gap{ + gaps: []protos.Gap{ { Bounds: v1.NewBounds(0, 8), Series: plannertest.GenSeries(v1.NewBounds(0, 8)), @@ -225,7 +229,7 @@ func Test_blockPlansForGaps(t *testing.T) { exp: []blockPlan{ { tsdb: plannertest.TsdbID(0), - gaps: []Gap{ + gaps: []protos.Gap{ // tsdb (id=0) can source chunks from the blocks built from tsdb (id=1) { Bounds: v1.NewBounds(3, 5), @@ -242,7 +246,7 @@ func Test_blockPlansForGaps(t *testing.T) { // tsdb (id=1) can source chunks from the blocks built from tsdb (id=0) { tsdb: plannertest.TsdbID(1), - gaps: []Gap{ + gaps: []protos.Gap{ { Bounds: v1.NewBounds(0, 2), Series: plannertest.GenSeries(v1.NewBounds(0, 2)), @@ -277,7 +281,7 @@ func Test_blockPlansForGaps(t *testing.T) { exp: []blockPlan{ { tsdb: plannertest.TsdbID(0), - gaps: []Gap{ + gaps: []protos.Gap{ { Bounds: v1.NewBounds(0, 10), Series: plannertest.GenSeries(v1.NewBounds(0, 10)), @@ -296,7 +300,7 @@ func Test_blockPlansForGaps(t *testing.T) { // We add series spanning the whole FP ownership range tsdbs := make(map[tsdb.SingleTenantTSDBIdentifier]common.ClosableForSeries) for _, id := range tc.tsdbs { - tsdbs[id] = plannertest.NewFakeForSeries(plannertest.GenV1Series(tc.ownershipRange)) + tsdbs[id] = newFakeForSeries(plannertest.GenSeries(tc.ownershipRange)) } // we reuse the gapsBetweenTSDBsAndMetas function to generate the gaps as this function is tested @@ -318,3 +322,43 @@ func Test_blockPlansForGaps(t *testing.T) { }) } } + +type fakeForSeries struct { + series []*v1.Series +} + +func newFakeForSeries(series []*v1.Series) *fakeForSeries { + return 
&fakeForSeries{ + series: series, + } +} + +func (f fakeForSeries) ForSeries(_ context.Context, _ string, ff index.FingerprintFilter, _ model.Time, _ model.Time, fn func(labels.Labels, model.Fingerprint, []index.ChunkMeta) (stop bool), _ ...*labels.Matcher) error { + overlapping := make([]*v1.Series, 0, len(f.series)) + for _, s := range f.series { + if ff.Match(s.Fingerprint) { + overlapping = append(overlapping, s) + } + } + + for _, s := range overlapping { + chunks := make([]index.ChunkMeta, 0, len(s.Chunks)) + for _, c := range s.Chunks { + chunks = append(chunks, index.ChunkMeta{ + MinTime: int64(c.From), + MaxTime: int64(c.Through), + Checksum: c.Checksum, + KB: 100, + }) + } + + if fn(labels.EmptyLabels(), s.Fingerprint, chunks) { + break + } + } + return nil +} + +func (f fakeForSeries) Close() error { + return nil +} diff --git a/pkg/bloombuild/planner/strategies/task.go b/pkg/bloombuild/planner/strategies/task.go deleted file mode 100644 index 660c85f449016..0000000000000 --- a/pkg/bloombuild/planner/strategies/task.go +++ /dev/null @@ -1,106 +0,0 @@ -package strategies - -import ( - "context" - "fmt" - "math" - "slices" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - - "github.com/grafana/loki/v3/pkg/bloombuild/common" - "github.com/grafana/loki/v3/pkg/bloombuild/protos" - v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" - "github.com/grafana/loki/v3/pkg/storage/config" - "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" - "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb" - "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index" -) - -type Gap struct { - Bounds v1.FingerprintBounds - Series []model.Fingerprint - Blocks []bloomshipper.BlockRef -} - -// Task represents a task that is enqueued in the planner. -type Task struct { - *protos.Task - // Override the protos.Task.Gaps field with gaps that use model.Fingerprint instead of v1.Series. - Gaps []Gap -} - -func NewTask( - table config.DayTable, - tenant string, - bounds v1.FingerprintBounds, - tsdb tsdb.SingleTenantTSDBIdentifier, - gaps []Gap, -) *Task { - return &Task{ - Task: protos.NewTask(table, tenant, bounds, tsdb, nil), - Gaps: gaps, - } -} - -// ToProtoTask converts a Task to a ProtoTask. -// It will use the opened TSDB to get the chunks for the series in the gaps. -func (t *Task) ToProtoTask(ctx context.Context, forSeries common.ForSeries) (*protos.ProtoTask, error) { - // Populate the gaps with the series and chunks. - protoGaps := make([]protos.Gap, 0, len(t.Gaps)) - for _, gap := range t.Gaps { - if !slices.IsSorted(gap.Series) { - slices.Sort(gap.Series) - } - - series := make([]*v1.Series, 0, len(gap.Series)) - if err := forSeries.ForSeries( - ctx, - t.Tenant, - gap.Bounds, - 0, math.MaxInt64, - func(_ labels.Labels, fp model.Fingerprint, chks []index.ChunkMeta) (stop bool) { - select { - case <-ctx.Done(): - return true - default: - // Skip this series if it's not in the gap. - // Series are sorted, so we can break early. 
- if _, found := slices.BinarySearch(gap.Series, fp); !found { - return false - } - - chunks := make(v1.ChunkRefs, 0, len(chks)) - for _, chk := range chks { - chunks = append(chunks, v1.ChunkRef{ - From: model.Time(chk.MinTime), - Through: model.Time(chk.MaxTime), - Checksum: chk.Checksum, - }) - } - - series = append(series, &v1.Series{ - Fingerprint: fp, - Chunks: chunks, - }) - return false - } - }, - labels.MustNewMatcher(labels.MatchEqual, "", ""), - ); err != nil { - return nil, fmt.Errorf("failed to load series from TSDB for gap (%s): %w", gap.Bounds.String(), err) - } - - protoGaps = append(protoGaps, protos.Gap{ - Bounds: gap.Bounds, - Series: series, - Blocks: gap.Blocks, - }) - } - - // Copy inner task and set gaps - task := *t.Task - task.Gaps = protoGaps - return task.ToProtoTask(), nil -} diff --git a/pkg/bloombuild/planner/task.go b/pkg/bloombuild/planner/task.go index a20ed806788f5..3080ec47a171c 100644 --- a/pkg/bloombuild/planner/task.go +++ b/pkg/bloombuild/planner/task.go @@ -6,16 +6,11 @@ import ( "go.uber.org/atomic" - "github.com/grafana/loki/v3/pkg/bloombuild/common" - "github.com/grafana/loki/v3/pkg/bloombuild/planner/strategies" "github.com/grafana/loki/v3/pkg/bloombuild/protos" ) type QueueTask struct { - *strategies.Task - - // We use forSeries in ToProtoTask to get the chunks for the series in the gaps. - forSeries common.ForSeries + *protos.Task resultsChannel chan *protos.TaskResult @@ -28,8 +23,7 @@ type QueueTask struct { func NewQueueTask( ctx context.Context, queueTime time.Time, - task *strategies.Task, - forSeries common.ForSeries, + task *protos.Task, resultsChannel chan *protos.TaskResult, ) *QueueTask { return &QueueTask{ @@ -37,12 +31,5 @@ func NewQueueTask( resultsChannel: resultsChannel, ctx: ctx, queueTime: queueTime, - forSeries: forSeries, } } - -// ToProtoTask converts a Task to a ProtoTask. -// It will use the opened TSDB to get the chunks for the series in the gaps. -func (t *QueueTask) ToProtoTask(ctx context.Context) (*protos.ProtoTask, error) { - return t.Task.ToProtoTask(ctx, t.forSeries) -} diff --git a/pkg/bloombuild/protos/compat.go b/pkg/bloombuild/protos/compat.go index a2fc221728760..7c910d405ad9b 100644 --- a/pkg/bloombuild/protos/compat.go +++ b/pkg/bloombuild/protos/compat.go @@ -20,7 +20,6 @@ type Gap struct { Blocks []bloomshipper.BlockRef } -// Task is a convenience struct equivalent to the protobuf ProtoTask message but with Loki types. type Task struct { ID string @@ -119,7 +118,6 @@ func (t *Task) ToProtoTask() *ProtoTask { blockRefs = append(blockRefs, block.String()) } - // TODO(salvacorts): Cast []*v1.Series to []*ProtoSeries right away series := make([]*ProtoSeries, 0, len(gap.Series)) for _, s := range gap.Series { chunks := make([]*logproto.ShortRef, 0, len(s.Chunks))
revert
"perf(bloom): Compute chunkrefs for series right before sending task to builder" (#14839)
dca859afafc2fea3de8e0b68ac2725a4fe7c19ac
2023-03-22 00:28:45
Gerard Vanloo
operator: Update Dockerfile to go1.20 images (#8857)
false
diff --git a/operator/Dockerfile b/operator/Dockerfile index 7115296313dd1..01971ae8f1f55 100644 --- a/operator/Dockerfile +++ b/operator/Dockerfile @@ -1,5 +1,5 @@ # Build the manager binary -FROM golang:1.19.1 as builder +FROM golang:1.20.1 as builder WORKDIR /workspace # Copy the Go Modules manifests diff --git a/operator/Dockerfile.cross b/operator/Dockerfile.cross index a06cb5f835992..c7776827eddd2 100644 --- a/operator/Dockerfile.cross +++ b/operator/Dockerfile.cross @@ -1,6 +1,6 @@ ARG BUILD_IMAGE=grafana/loki-build-image:0.28.1 -FROM golang:1.19.1-alpine as goenv +FROM golang:1.20.1-alpine as goenv RUN go env GOARCH > /goarch && \ go env GOARM > /goarm
operator
Update Dockerfile to go1.20 images (#8857)
b8168a87f5ad052408b5c5619ffae038b7bbe7d5
2024-12-16 15:37:09
Ned Andreev
fix: data race in distributor tests (#15423)
false
diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 310aa52df7708..ae14f6110d058 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -583,6 +583,8 @@ func TestDistributorPushToKafka(t *testing.T) { require.Equal(t, 1, len(ingesters[0].pushed)) require.Equal(t, 1, len(ingesters[1].pushed)) require.Eventually(t, func() bool { + ingesters[2].mu.Lock() + defer ingesters[2].mu.Unlock() return len(ingesters[2].pushed) == 1 }, time.Second, 10*time.Millisecond) })
fix
data race in distributor tests (#15423)
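The one-hunk fix above closes a race: `require.Eventually` polls `len(ingesters[2].pushed)` from the test goroutine while the distributor's push path appends to that slice concurrently, so the read now takes the same mutex as the writes. A self-contained sketch of the pattern, with a hypothetical `eventually` helper standing in for testify's `require.Eventually`:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// mockIngester sketches the test double from the diff: pushes arrive on
// other goroutines, so both writes and reads of pushed must hold mu.
type mockIngester struct {
	mu     sync.Mutex
	pushed []string
}

func (m *mockIngester) push(req string) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.pushed = append(m.pushed, req)
}

// pushedLen is the race-free read used by the polling assertion.
func (m *mockIngester) pushedLen() int {
	m.mu.Lock()
	defer m.mu.Unlock()
	return len(m.pushed)
}

// eventually polls until cond holds or the deadline passes, mimicking the
// timeout/tick shape of require.Eventually(t, cond, timeout, tick).
func eventually(cond func() bool, timeout, tick time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if cond() {
			return true
		}
		time.Sleep(tick)
	}
	return false
}

func main() {
	ing := &mockIngester{}
	go ing.push("entry") // concurrent writer, as in the distributor test

	ok := eventually(func() bool { return ing.pushedLen() == 1 }, time.Second, 10*time.Millisecond)
	fmt.Println("push observed:", ok)
}
```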
ffe684c330bcd65f9b07a02d6f93bb475106becc
2024-04-19 23:41:26
Sheikh-Abubaker
feat: area/promtail: Added support to install wget on promtail docker image to support docker healthcheck (#11711)
false
diff --git a/clients/cmd/promtail/Dockerfile b/clients/cmd/promtail/Dockerfile index bb951765411ab..a7e4f1d16ed9e 100644 --- a/clients/cmd/promtail/Dockerfile +++ b/clients/cmd/promtail/Dockerfile @@ -9,7 +9,7 @@ RUN make clean && make BUILD_IN_CONTAINER=false PROMTAIL_JOURNAL_ENABLED=true pr FROM debian:12.5-slim # tzdata required for the timestamp stage to work RUN apt-get update && \ - apt-get install -qy tzdata ca-certificates libsystemd-dev && \ + apt-get install -qy tzdata ca-certificates wget libsystemd-dev && \ rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* COPY --from=build /src/loki/clients/cmd/promtail/promtail /usr/bin/promtail COPY clients/cmd/promtail/promtail-docker-config.yaml /etc/promtail/config.yml diff --git a/clients/cmd/promtail/Dockerfile.arm32 b/clients/cmd/promtail/Dockerfile.arm32 index bb0019332b234..0e745ddb6f265 100644 --- a/clients/cmd/promtail/Dockerfile.arm32 +++ b/clients/cmd/promtail/Dockerfile.arm32 @@ -9,7 +9,7 @@ RUN make clean && make BUILD_IN_CONTAINER=false PROMTAIL_JOURNAL_ENABLED=true pr FROM debian:12.5-slim # tzdata required for the timestamp stage to work RUN apt-get update && \ - apt-get install -qy tzdata ca-certificates libsystemd-dev && \ + apt-get install -qy tzdata ca-certificates wget libsystemd-dev && \ rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* COPY --from=build /src/loki/clients/cmd/promtail/promtail /usr/bin/promtail COPY clients/cmd/promtail/promtail-local-config.yaml /etc/promtail/local-config.yaml diff --git a/clients/cmd/promtail/Dockerfile.cross b/clients/cmd/promtail/Dockerfile.cross index c19c7f4d3f490..0c63c6a6e6e73 100644 --- a/clients/cmd/promtail/Dockerfile.cross +++ b/clients/cmd/promtail/Dockerfile.cross @@ -16,7 +16,7 @@ RUN make clean && GOARCH=$(cat /goarch) GOARM=$(cat /goarm) make BUILD_IN_CONTAI FROM debian:12.5-slim # tzdata required for the timestamp stage to work RUN apt-get update && \ - apt-get install -qy tzdata ca-certificates libsystemd-dev && \ + apt-get install -qy tzdata ca-certificates wget libsystemd-dev && \ rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* COPY --from=build /src/loki/clients/cmd/promtail/promtail /usr/bin/promtail COPY clients/cmd/promtail/promtail-local-config.yaml /etc/promtail/local-config.yaml
feat
area/promtail: Added support to install wget on promtail docker image to support docker healthcheck (#11711)
34383d9263a6ebb6fccb5d8a6a2acb94e01aab4c
2024-12-12 18:58:36
Trevor Whitney
ci: disable renovate toolchain updates (#15279)
false
diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 5551b1d154fd7..218dc45856db6 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -24,7 +24,7 @@ { // Disable Go version updates "matchManagers": ["gomod"], - "matchPackageNames": ["go"], + "matchPackageNames": ["go", "toolchain"], "enabled": false }, {
ci
disable renovate toolchain updates (#15279)
5885aa592b2d447e9576ab4ce91c5dd5f0c15715
2025-01-08 00:31:02
renovate[bot]
fix(deps): update module github.com/baidubce/bce-sdk-go to v0.9.212 (#15633)
false
diff --git a/go.mod b/go.mod index 91bf15461829e..dfe9867a114de 100644 --- a/go.mod +++ b/go.mod @@ -21,7 +21,7 @@ require ( github.com/alicebob/miniredis/v2 v2.34.0 github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible github.com/aws/aws-sdk-go v1.55.5 - github.com/baidubce/bce-sdk-go v0.9.211 + github.com/baidubce/bce-sdk-go v0.9.212 github.com/bmatcuk/doublestar/v4 v4.7.1 github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500 github.com/cespare/xxhash/v2 v2.3.0 diff --git a/go.sum b/go.sum index c1dd0ca10dd92..9782095814d87 100644 --- a/go.sum +++ b/go.sum @@ -224,8 +224,8 @@ github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/axiomhq/hyperloglog v0.2.2 h1:9X9rOdYx82zXKgd1aMsDZNUw3d7DKAHhd2J305HZPA8= github.com/axiomhq/hyperloglog v0.2.2/go.mod h1:DLUK9yIzpU5B6YFLjxTIcbHu1g4Y1WQb1m5RH3radaM= -github.com/baidubce/bce-sdk-go v0.9.211 h1:+IH5z1efcVGiLNuki0HMbXeUwiRnzqFsuXNNqKNuwW4= -github.com/baidubce/bce-sdk-go v0.9.211/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg= +github.com/baidubce/bce-sdk-go v0.9.212 h1:B3PUoaFi4m13wP7gWObznjPLZ5umQ1BHjO/UoSsj3x4= +github.com/baidubce/bce-sdk-go v0.9.212/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= diff --git a/vendor/github.com/baidubce/bce-sdk-go/bce/config.go b/vendor/github.com/baidubce/bce-sdk-go/bce/config.go index 1c3b1a6984442..6ecf3aa8f2fbf 100644 --- a/vendor/github.com/baidubce/bce-sdk-go/bce/config.go +++ b/vendor/github.com/baidubce/bce-sdk-go/bce/config.go @@ -26,7 +26,7 @@ import ( // Constants and default values for the package bce const ( - SDK_VERSION = "0.9.211" + SDK_VERSION = "0.9.212" URI_PREFIX = "/" // now support uri without prefix "v1" so just set root path DEFAULT_DOMAIN = "baidubce.com" DEFAULT_PROTOCOL = "http" diff --git a/vendor/modules.txt b/vendor/modules.txt index dbb32c47b5e80..8d4e1270445f4 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -490,7 +490,7 @@ github.com/aws/smithy-go/transport/http/internal/io # github.com/axiomhq/hyperloglog v0.2.2 ## explicit; go 1.23 github.com/axiomhq/hyperloglog -# github.com/baidubce/bce-sdk-go v0.9.211 +# github.com/baidubce/bce-sdk-go v0.9.212 ## explicit; go 1.11 github.com/baidubce/bce-sdk-go/auth github.com/baidubce/bce-sdk-go/bce
fix
update module github.com/baidubce/bce-sdk-go to v0.9.212 (#15633)
635e469e9a8018ff6d47431605be77de62038af3
2020-07-10 01:26:08
Ed Welch
loki: Use a new context to update the ring state after a failed chunk transfer (#2330)
false
diff --git a/pkg/ingester/transfer.go b/pkg/ingester/transfer.go index ac30baf5dfe66..fc67087d2fc66 100644 --- a/pkg/ingester/transfer.go +++ b/pkg/ingester/transfer.go @@ -59,10 +59,18 @@ func (i *Ingester) TransferChunks(stream logproto.Ingester_TransferChunksServer) // Enter PENDING state (only valid from JOINING) if i.lifecycler.GetState() == ring.JOINING { - if err := i.lifecycler.ChangeState(stream.Context(), ring.PENDING); err != nil { - level.Error(logger).Log("msg", "error rolling back failed TransferChunks", "err", err) + // Create a new context here to attempt to update the state back to pending to allow + // a failed transfer to try again. If we fail to set the state back to PENDING then + // exit Loki as we will effectively be hung anyway stuck in a JOINING state and will + // never join. + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + if err := i.lifecycler.ChangeState(ctx, ring.PENDING); err != nil { + level.Error(logger).Log("msg", "failed to update the ring state back to PENDING after "+ + "a chunk transfer failure, there is nothing more Loki can do from this state "+ + "so the process will exit...", "err", err) os.Exit(1) } + cancel() } }()
loki
Use a new context to update the ring state after a failed chunk transfer (#2330)
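The pattern in this diff is worth naming: when cleanup must happen after a request-scoped context may already be canceled, derive a fresh context from `context.Background()` with its own deadline. A sketch under that assumption, where `changeState` is a hypothetical stand-in for `lifecycler.ChangeState`:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// changeState fails if its context is already done, like a real
// ring-state change would.
func changeState(ctx context.Context, state string) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	fmt.Println("state changed to", state)
	return nil
}

func rollbackToPending(streamCtx context.Context) {
	// streamCtx belongs to the failed transfer and may already be
	// canceled, so the rollback gets a detached context with its own
	// one-minute budget.
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	if err := changeState(ctx, "PENDING"); err != nil {
		fmt.Println("rollback failed:", err)
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate the already-canceled stream context of a failed transfer
	rollbackToPending(ctx)
}
```

The original calls `cancel()` explicitly on the success path because its failure path ends in `os.Exit(1)`, which bypasses deferred functions; the sketch returns instead, so `defer` is safe here.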
5fd5e06cfc66bcaa35ce9f7009ec585efda759b7
2024-03-08 00:14:58
Robert Jacob
fix(operator): Update Go builder for size-calculator (#12161)
false
diff --git a/operator/calculator.Dockerfile b/operator/calculator.Dockerfile index e12d7ad490088..be7324ce33df3 100644 --- a/operator/calculator.Dockerfile +++ b/operator/calculator.Dockerfile @@ -1,5 +1,5 @@ # Build the calculator binary -FROM golang:1.20.6 as builder +FROM golang:1.21.7 as builder WORKDIR /workspace # Copy the Go Modules manifests
fix
Update Go builder for size-calculator (#12161)
3025090c5871e77b056aceac27332bb88a817b9c
2025-03-05 22:05:43
Paul Rogers
chore(build): Update to Go 1.24 (#16527)
false
diff --git a/.github/release-workflows.jsonnet b/.github/release-workflows.jsonnet index 5ae2b34fa2665..8b7f6318fcffc 100644 --- a/.github/release-workflows.jsonnet +++ b/.github/release-workflows.jsonnet @@ -7,7 +7,7 @@ local checkTemplate = 'grafana/loki-release/.github/workflows/check.yml@%s' % re local buildImageVersion = std.extVar('BUILD_IMAGE_VERSION'); local goVersion = std.extVar('GO_VERSION'); local buildImage = 'grafana/loki-build-image:%s' % buildImageVersion; -local golangCiLintVersion = 'v1.60.3'; +local golangCiLintVersion = 'v1.64.5'; local imageBuildTimeoutMin = 60; local imagePrefix = 'grafana'; local dockerPluginDir = 'clients/cmd/docker-driver'; diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index dffe7a56b341a..c50fb2368d005 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -2,8 +2,8 @@ "check": "uses": "grafana/loki-release/.github/workflows/check.yml@main" "with": - "build_image": "grafana/loki-build-image:0.34.5" - "golang_ci_lint_version": "v1.60.3" + "build_image": "grafana/loki-build-image:0.34.6" + "golang_ci_lint_version": "v1.64.5" "release_lib_ref": "main" "skip_validation": false "use_github_app_token": true diff --git a/.github/workflows/images.yml b/.github/workflows/images.yml index 9dcecc67a0a50..686cd6762c3cf 100644 --- a/.github/workflows/images.yml +++ b/.github/workflows/images.yml @@ -2,15 +2,15 @@ "check": "uses": "grafana/loki-release/.github/workflows/check.yml@main" "with": - "build_image": "grafana/loki-build-image:0.34.5" - "golang_ci_lint_version": "v1.60.3" + "build_image": "grafana/loki-build-image:0.34.6" + "golang_ci_lint_version": "v1.64.5" "release_lib_ref": "main" "skip_validation": false "use_github_app_token": true "lambda-promtail-image": "env": "BUILD_TIMEOUT": 60 - "GO_VERSION": "1.23.6" + "GO_VERSION": "1.24.1" "IMAGE_PREFIX": "public.ecr.aws/grafana" "RELEASE_LIB_REF": "main" "RELEASE_REPO": "grafana/loki" @@ -98,7 +98,7 @@ "loki-canary-boringcrypto-image": "env": "BUILD_TIMEOUT": 60 - "GO_VERSION": "1.23.6" + "GO_VERSION": "1.24.1" "IMAGE_PREFIX": "grafana" "RELEASE_LIB_REF": "main" "RELEASE_REPO": "grafana/loki" @@ -206,7 +206,7 @@ "loki-canary-image": "env": "BUILD_TIMEOUT": 60 - "GO_VERSION": "1.23.6" + "GO_VERSION": "1.24.1" "IMAGE_PREFIX": "grafana" "RELEASE_LIB_REF": "main" "RELEASE_REPO": "grafana/loki" @@ -314,7 +314,7 @@ "loki-image": "env": "BUILD_TIMEOUT": 60 - "GO_VERSION": "1.23.6" + "GO_VERSION": "1.24.1" "IMAGE_PREFIX": "grafana" "RELEASE_LIB_REF": "main" "RELEASE_REPO": "grafana/loki" @@ -422,7 +422,7 @@ "promtail-image": "env": "BUILD_TIMEOUT": 60 - "GO_VERSION": "1.23.6" + "GO_VERSION": "1.24.1" "IMAGE_PREFIX": "grafana" "RELEASE_LIB_REF": "main" "RELEASE_REPO": "grafana/loki" diff --git a/.github/workflows/minor-release-pr.yml b/.github/workflows/minor-release-pr.yml index 7a15d1367f51b..dbe5d9798f125 100644 --- a/.github/workflows/minor-release-pr.yml +++ b/.github/workflows/minor-release-pr.yml @@ -16,8 +16,8 @@ jobs: check: uses: "grafana/loki-release/.github/workflows/check.yml@main" with: - build_image: "grafana/loki-build-image:0.34.5" - golang_ci_lint_version: "v1.60.3" + build_image: "grafana/loki-build-image:0.34.6" + golang_ci_lint_version: "v1.64.5" release_lib_ref: "main" skip_validation: false use_github_app_token: true @@ -144,7 +144,7 @@ jobs: --env SKIP_ARM \ --volume .:/src/loki \ --workdir /src/loki \ - --entrypoint /bin/sh "grafana/loki-build-image:0.34.5" + --entrypoint /bin/sh "grafana/loki-build-image:0.34.6" git config --global --add 
safe.directory /src/loki echo "${NFPM_SIGNING_KEY}" > $NFPM_SIGNING_KEY_FILE make dist packages @@ -666,7 +666,7 @@ jobs: build-args: | IMAGE_TAG=${{ needs.version.outputs.version }} GOARCH=${{ steps.platform.outputs.platform_short }} - BUILD_IMAGE=grafana/loki-build-image:0.34.5 + BUILD_IMAGE=grafana/loki-build-image:0.34.6 context: "release" file: "release/clients/cmd/docker-driver/Dockerfile" outputs: "type=local,dest=release/plugins/loki-docker-driver-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}" diff --git a/.github/workflows/patch-release-pr.yml b/.github/workflows/patch-release-pr.yml index a3026c86cc3b0..e99d4e1c41114 100644 --- a/.github/workflows/patch-release-pr.yml +++ b/.github/workflows/patch-release-pr.yml @@ -16,8 +16,8 @@ jobs: check: uses: "grafana/loki-release/.github/workflows/check.yml@main" with: - build_image: "grafana/loki-build-image:0.34.5" - golang_ci_lint_version: "v1.60.3" + build_image: "grafana/loki-build-image:0.34.6" + golang_ci_lint_version: "v1.64.5" release_lib_ref: "main" skip_validation: false use_github_app_token: true @@ -144,7 +144,7 @@ jobs: --env SKIP_ARM \ --volume .:/src/loki \ --workdir /src/loki \ - --entrypoint /bin/sh "grafana/loki-build-image:0.34.5" + --entrypoint /bin/sh "grafana/loki-build-image:0.34.6" git config --global --add safe.directory /src/loki echo "${NFPM_SIGNING_KEY}" > $NFPM_SIGNING_KEY_FILE make dist packages @@ -666,7 +666,7 @@ jobs: build-args: | IMAGE_TAG=${{ needs.version.outputs.version }} GOARCH=${{ steps.platform.outputs.platform_short }} - BUILD_IMAGE=grafana/loki-build-image:0.34.5 + BUILD_IMAGE=grafana/loki-build-image:0.34.6 context: "release" file: "release/clients/cmd/docker-driver/Dockerfile" outputs: "type=local,dest=release/plugins/loki-docker-driver-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}" diff --git a/.github/workflows/promtail-windows-test.yml b/.github/workflows/promtail-windows-test.yml index 0aa131d769abb..b954178b43648 100644 --- a/.github/workflows/promtail-windows-test.yml +++ b/.github/workflows/promtail-windows-test.yml @@ -10,7 +10,7 @@ jobs: runs-on: windows-latest strategy: matrix: - go-version: [ '1.22.2', '1.23.1' ] + go-version: [ '1.22.12', '1.23.6', '1.24.0' ] steps: - uses: actions/checkout@v4 - name: Setup Go ${{ matrix.go-version }} diff --git a/Makefile b/Makefile index 5c1e037220134..5465096cb58e2 100644 --- a/Makefile +++ b/Makefile @@ -18,9 +18,9 @@ BUILD_IN_CONTAINER ?= true CI ?= false # Ensure you run `make release-workflows` after changing this -GO_VERSION := 1.23.6 +GO_VERSION := 1.24.1 # Ensure you run `make IMAGE_TAG=<updated-tag> build-image-push` after changing this -BUILD_IMAGE_TAG := 0.34.5 +BUILD_IMAGE_TAG := 0.34.6 IMAGE_TAG ?= $(shell ./tools/image-tag) GIT_REVISION := $(shell git rev-parse --short HEAD) diff --git a/loki-build-image/Dockerfile b/loki-build-image/Dockerfile index 0d37d9facbec1..295c3935bd484 100644 --- a/loki-build-image/Dockerfile +++ b/loki-build-image/Dockerfile @@ -29,7 +29,7 @@ RUN apk add --no-cache curl && \ FROM alpine:3.21.3 AS golangci RUN apk add --no-cache curl && \ cd / && \ - curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.60.3 + curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.64.5 FROM alpine:3.21.3 AS buf ARG TARGETOS @@ -51,8 +51,8 @@ RUN curl -L "https://github.com/drone/drone-cli/releases/download/v1.7.0/drone_l # github.com/fatih/[email protected] requires 
golang.org/x/[email protected] # (not golang.org/x/[email protected] from golang.org/x/tools/cmd/goyacc@58d531046acdc757f177387bc1725bfa79895d69) FROM ${GOLANG_BASE_IMAGE} AS faillint -RUN GO111MODULE=on go install github.com/fatih/[email protected] -RUN GO111MODULE=on go install golang.org/x/tools/cmd/[email protected] +RUN GO111MODULE=on go install github.com/fatih/[email protected] +RUN GO111MODULE=on go install golang.org/x/tools/cmd/[email protected] FROM ${GOLANG_BASE_IMAGE} AS delve RUN GO111MODULE=on go install github.com/go-delve/delve/cmd/dlv@latest diff --git a/loki-build-image/README.md b/loki-build-image/README.md index 95cb72b3846ef..2d8c9ca65b649 100644 --- a/loki-build-image/README.md +++ b/loki-build-image/README.md @@ -2,6 +2,10 @@ ## Versions +### 0.34.6 + +- Update to Go 1.24.1 + ### 0.34.5 - Update to Go 1.23.6
chore
Update to Go 1.24 (#16527)
72a88fa721cbb9e57a9be0a7331dfe32560c8ad6
2024-10-01 20:54:20
J Stickler
docs: Reorganize Query Templates page (#14138)
false
diff --git a/docs/sources/query/template_functions.md b/docs/sources/query/template_functions.md index 68f42965cfa7e..499220c5e377c 100644 --- a/docs/sources/query/template_functions.md +++ b/docs/sources/query/template_functions.md @@ -1,31 +1,31 @@ --- -title: Template functions -menuTItle: -description: Describes functions that are supported by the Go text template. -aliases: +title: LogQL template functions +menuTItle: Template functions +description: Describes query functions that are supported by the Go text template. +aliases: - ../logql/template_functions/ weight: 30 --- -# Template functions +# LogQL template functions +The Go templating language is embedded in the Loki query language, LogQL. The [text template](https://golang.org/pkg/text/template) format used in `| line_format` and `| label_format` support the usage of functions. -{{% admonition type="note" %}} +{{< admonition type="note" >}} In the examples below we use backticks to quote the template strings. This is because some template strings contain double quotes, and using backticks lets us avoid escaping the double quotes. If you are using a different quoting style, you may need to escape the double quotes. -{{% /admonition %}} +{{< /admonition >}} -All labels are added as variables in the template engine. They can be referenced using they label name prefixed by a `.`(e.g `.label_name`). For example the following template will output the value of the path label: +For more information, refer to the [Go template documentation](https://pkg.go.dev/text/template). -```template -`{{ .path }}` -``` +Additionally you can also access the log line using the `__line__` function and the timestamp using the `__timestamp__` function. + +## Template pipeline syntax -Additionally you can also access the log line using the [`__line__`](#__line__) function and the timestamp using the [`__timestamp__`](#__timestamp__) function. +A pipeline is a possibly chained sequence of "commands". A command is a simple value (argument) or a function or method call, possibly with multiple arguments. A pipeline may be "chained" by separating a sequence of commands with pipeline characters '|'. In a chained pipeline, the result of each command is passed as the last argument of the following command. The output of the final command in the pipeline is the value of the pipeline. You can take advantage of [pipeline](https://golang.org/pkg/text/template/#hdr-Pipelines) to join together multiple functions. -In a chained pipeline, the result of each command is passed as the last argument of the following command. Example: @@ -40,12 +40,24 @@ Example: ```template `{{ if and (contains "he" "hello") (contains "llo" "hello") }} yes {{end}}` `{{ if or (contains "he" "hello") (contains("llo" "hello") }} yes {{end}}` -`{{ if contains "ErrTimeout" .err }} timeout {{else if contains "he" "hello"}} yes {{else}} no {{end}}` +`{{ if contains .err "ErrTimeout" }} timeout {{else if contains "he" "hello"}} yes {{else}} no {{end}}` +``` + +## Built-in variables for log line properties + +These variables provide a way of referencing something from the log line when writing a template expression. + +### .label_name + +All labels from the Log line are added as variables in the template engine. They can be referenced using the label name prefixed by a `.`(for example,`.label_name`). For example the following template will output the value of the `path` label: + +```template +`{{ .path }}` ``` -## __line__ +### __line__ -This function returns the current log line. 
+The `__line__` function returns the original log line without any modifications. Signature: `line() string` @@ -56,9 +68,9 @@ Examples: `{{ __line__ }}` ``` -## __timestamp__ +### __timestamp__ -This function returns the current log lines timestamp. +The `__timestamp__` function returns the current log line's timestamp. Signature: `timestamp() time.Time` @@ -68,177 +80,158 @@ Signature: `timestamp() time.Time` `{{ __timestamp__ | unixEpoch }}` ``` -See the blog: [Parsing and formatting date/time in Go](https://www.pauladamsmith.com/blog/2011/05/go_time.html) for more information. +For more information, refer to the blog [Parsing and formatting date/time in Go](https://www.pauladamsmith.com/blog/2011/05/go_time.html). -## regexReplaceAll and regexReplaceAllLiteral +## Date and time -`regexReplaceAll` returns a copy of the input string, replacing matches of the Regexp with the replacement string replacement. Inside string replacement, $ signs are interpreted as in Expand, so for instance $1 represents the text of the first sub-match. See the golang [Regexp.replaceAll documentation](https://golang.org/pkg/regexp/#Regexp.ReplaceAll) for more examples. +You can use the following functions to manipulate dates and times when building LogQL queries. -Example: +### date -```template -`{{ regexReplaceAll "(a*)bc" .some_label "${1}a" }}` -``` +Returns a textual representation of the time value formatted according to the provided [golang datetime layout](https://pkg.go.dev/time#pkg-constants). -`regexReplaceAllLiteral` function returns a copy of the input string and replaces matches of the Regexp with the replacement string replacement. The replacement string is substituted directly, without using Expand. +Signature: `date(fmt string, date interface{}) string` Example: ```template -`{{ regexReplaceAllLiteral "(ts=)" .timestamp "timestamp=" }}` +`{{ date "2006-01-02" now }}` ``` -## lower - -Use this function to convert to lower case. +### duration -Signature: `lower(string) string` +An alias for `duration_seconds` Examples: ```template -`{{ .request_method | lower }}` -`{{ lower "HELLO"}}` +`{{ .foo | duration }}` +`{{ duration .foo }}` ``` -The last example will return `hello`. - -## upper +### duration_seconds -Use this function to convert to upper case. +Convert a humanized time duration to seconds using [time.ParseDuration](https://pkg.go.dev/time#ParseDuration) -Signature: `upper(string) string` +Signature: `duration_seconds(string) float64` Examples: ```template -`{ .request_method | upper }}` -`{{ upper "hello"}}` +`{{ .foo | duration_seconds }}` +`{{ duration_seconds .foo }}` ``` -This results in `HELLO`. - -## title +### now -Convert to title case. +Returns the current time in the local timezone of the Loki server. -Signature: `title(string) string` +Signature: `Now() time.Time` -Examples: +Example: ```template -`{{.request_method | title}}` -`{{ title "hello world"}}` +`{{ now }}` ``` -The last example will return `Hello World`. +### toDate -## trunc +Parses a formatted string and returns the time value it represents using the local timezone of the server running Loki. -Truncate a string and add no suffix. 
+For more consistency between Loki installations, it's recommended to use `toDateInZone` -Signature: `trunc(count int,value string) string` +The format string must use the exact date as defined in the [golang datetime layout](https://pkg.go.dev/time#pkg-constants) + +Signature: `toDate(fmt, str string) time.Time` Examples: ```template -`{{ .path | trunc 2 }}` -`{{ trunc 5 "hello world"}}` // output: hello -`{{ trunc -5 "hello world"}}` // output: world +`{{ toDate "2006-01-02" "2021-11-02" }}` +`{{ .foo | toDate "2006-01-02T15:04:05.999999999Z" }}` ``` -## substr +### toDateInZone -Get a substring from a string. +Parses a formatted string and returns the time value it represents in the provided timezone. -Signature: `substr(start int,end int,value string) string` +The format string must use the exact date as defined in the [golang datetime layout](https://pkg.go.dev/time#pkg-constants) -If start is < 0, this calls value[:end]. -If start is >= 0 and end < 0 or end bigger than s length, this calls value[start:] -Otherwise, this calls value[start, end]. +The timezone value can be `Local`, `UTC`, or any of the IANA Time Zone database values + +Signature: `toDateInZone(fmt, zone, str string) time.Time` Examples: ```template -`{{ .path | substr 2 5 }}` -`{{ substr 0 5 "hello world"}}` // output: hello -`{{ substr 6 11 "hello world"}}` // output: world +`{{ toDateInZone "2006-01-02" "UTC" "2021-11-02" }}` +`{{ .foo | toDateInZone "2006-01-02T15:04:05.999999999Z" "UTC" }}` ``` -## replace - -This function performs simple string replacement. - -Signature: `replace(old string, new string, src string) string` +### unixEpoch -It takes three arguments: +Returns the number of seconds elapsed since January 1, 1970 UTC. -- `old` string to replace -- `new` string to replace with -- `src` source string +Signature: `unixEpoch(date time.Time) string` Examples: ```template -`{{ .cluster | replace "-cluster" "" }}` -`{{ replace "hello" "world" "hello world" }}` +`{{ unixEpoch now }}` +`{{ .foo | toDateInZone "2006-01-02T15:04:05.999999999Z" "UTC" | unixEpoch }}` ``` -The last example will return `world world`. - -## trim - -The trim function removes space from either side of a string. - -Signature: `trim(string) string` - -Examples: +Example of a query to filter Loki querier jobs which create time is 1 day before: -```template -`{{ .ip | trim }}` -`{{ trim " hello " }}` // output: hello +```logql +{job="loki/querier"} | label_format nowEpoch=`{{(unixEpoch now)}}`,createDateEpoch=`{{unixEpoch (toDate "2006-01-02" .createDate)}}` | label_format dateTimeDiff=`{{sub .nowEpoch .createDateEpoch}}` | dateTimeDiff > 86400 ``` -## trimAll +### unixEpochMillis -Use this function to remove given characters from the front or back of a string. +Returns the number of milliseconds elapsed since January 1, 1970 UTC. -Signature: `trimAll(chars string,src string) string` +Signature: `unixEpochMillis(date time.Time) string` Examples: ```template -`{{ .path | trimAll "/" }}` -`{{ trimAll "$" "$5.00" }}` // output: 5.00 +`{{ unixEpochMillis now }}` +`{{ .foo | toDateInZone "2006-01-02T15:04:05.999999999Z" "UTC" | unixEpochMillis }}` ``` -## trimSuffix +### unixEpochNanos -Use this function to trim just the suffix from a string. +Returns the number of nanoseconds elapsed since January 1, 1970 UTC. 
-Signature: `trimSuffix(suffix string, src string) string` +Signature: `unixEpochNanos(date time.Time) string` Examples: ```template -`{{ .path | trimSuffix "/" }}` -`{{ trimSuffix "-" "hello-" }}` // output: hello +`{{ unixEpochNanos now }}` +`{{ .foo | toDateInZone "2006-01-02T15:04:05.999999999Z" "UTC" | unixEpochNanos }}` ``` -## trimPrefix +### unixToTime -Use this function to trim just the prefix from a string. +Converts the string epoch to the time value it represents. Epoch times in days, seconds, milliseconds, microseconds and nanoseconds are supported. -Signature: `trimPrefix(prefix string, src string) string` +Signature: `unixToTime(epoch string) time.Time` Examples: -```template -`{{ .path | trimPrefix "/" }}` -`{{ trimPrefix "-" "-hello" }}` // output: hello +Consider the following log line `{"from": "1679577215","to":"1679587215","message":"some message"}`. To print the `from` field as human readable add the following at the end of the LogQL query: + +```logql +... | json | line_format `from="{{date "2006-01-02" (unixToTime .from)}}"` ``` -## alignLeft +## String manipulation + +You can use the following templates to manipulate strings when building LogQL Queries. + +### alignLeft Use this function to format a string to a fixed with, aligning the content to the left. @@ -251,7 +244,7 @@ Examples: `{{ alignLeft 5 "hi"}}` // output: "hi " ``` -## alignRight +### alignRight Use this function to format a string to a fixed with, aligning the content to the right. @@ -264,568 +257,643 @@ Examples: `{{ alignRight 5 "hi"}}` // output: " hi" ``` -## indent +### b64enc -The indent function indents every line in a given string to the specified indent width. This is useful when aligning multi-line strings. +Base64 encode a string. -Signature: `indent(spaces int,src string) string` +Signature: `b64enc(string) string` -Example: +Examples: ```template -`{{ indent 4 .query }}` +`{{ .foo | b64enc }}` +`{{ b64enc .foo }}` ``` -This indents each line contained in the `.query` by four (4) spaces. - -## nindent +### b64dec -The nindent function is the same as the indent function, but prepends a new line to the beginning of the string. +Base64 decode a string. -Signature: `nindent(spaces int,src string) string` +Signature: `b64dec(string) string` -Example: +Examples: ```template -`{{ nindent 4 .query }}` +`{{ .foo | b64dec }}` +`{{ b64dec .foo }}` ``` -This will indent every line of text by 4 space characters and add a new line to the beginning. - -## repeat +### bytes -Use this function to repeat a string multiple times. +Convert a humanized byte string to bytes using go-humanize. Durations can be turned into strings such as "3 days ago", numbers +representing sizes like 82854982 into useful strings like, "83 MB" or +"79 MiB" -Signature: `repeat(c int,value string) string` +Signature: `bytes(string) string` -Example: +Examples: ```template -`{{ repeat 3 "hello" }}` // output: hellohellohello +`{{ .foo | bytes }}` +`{{ bytes .foo }}` ``` -## contains +### default -Use this function to test to see if one string is contained inside of another. +Enables outputting a default value if the source string is otherwise empty. If the 'src' parameter is not empty, this function returns the value of 'src'. 
Useful for JSON fields that can be missing, like HTTP headers in a log line that aren't required, as in the following example: -Signature: `contains(s string, src string,) bool` +```logql +{job="access_log"} | json | line_format `{{.http_request_headers_x_forwarded_for | default "-"}}` +``` + +Signature: `default(d string, src string) string` Examples: ```template -`{{ if contains "ErrTimeout" .err }} timeout {{end}}` -`{{ if contains "he" "hello" }} yes {{end}}` +`{{ default "-" "" }}` // output: - +`{{ default "-" "foo" }}` // output: foo ``` -## eq +Example of a query to print a `-` if the `http_request_headers_x_forwarded_for` label is empty: -Use this function to test to see if one string has exact matching inside of another. +```logql +{job="access_log"} | json | line_format `{{.http_request_headers_x_forwarded_for | default "-"}}` +``` -Signature: `eq(s string, src string) bool` +### fromJson -Examples: +Decodes a JSON document into a structure. If the input cannot be decoded as JSON the function will return an empty string. + +Signature: `fromJson(v string) interface{}` + +Example: ```template -`{{ if eq "ErrTimeout" .err }} timeout {{end}}` -`{{ if eq "hello" "hello" }} yes {{end}}` +`{{fromJson "{\"foo\": 55}"}}` ``` -## hasPrefix and hasSuffix +Example of a query to print a newline per queries stored as a json array in the log line: -The `hasPrefix` and `hasSuffix` functions test whether a string has a given prefix or suffix. +```logql +{job="loki/querier"} |= "finish in prometheus" | logfmt | line_format `{{ range $q := fromJson .queries }} {{ $q.query }} {{ end }}` +``` -Signatures: +### lower -- `hasPrefix(prefix string, src string) bool` -- `hasSuffix(suffix string, src string) bool` +Use this function to convert to lower case. + +Signature: `lower(string) string` Examples: ```template -`{{ if hasSuffix .err "Timeout" }} timeout {{end}}` -`{{ if hasPrefix "he" "hello" }} yes {{end}}` +`{{ .request_method | lower }}` +`{{ lower "HELLO"}}` ``` -## add +The last example will return `hello`. + +### indent -Sum numbers. Supports multiple numbers +The indent function indents every line in a given string to the specified indent width. This is useful when aligning multi-line strings. -Signature: `func(i ...interface{}) int64` +Signature: `indent(spaces int,src string) string` Example: ```template -`{{ add 3 2 5 }}` // output: 10 +`{{ indent 4 .query }}` ``` -## sub +This indents each line contained in the `.query` by four (4) spaces. + +### nindent -Subtract numbers. +The nindent function is the same as the indent function, but prepends a new line to the beginning of the string. -Signature: `func(a, b interface{}) int64` +Signature: `nindent(spaces int,src string) string` Example: ```template -`{{ sub 5 2 }}` // output: 3 +`{{ nindent 4 .query }}` ``` -## mul +This will indent every line of text by 4 space characters and add a new line to the beginning. -Multiply numbers. Supports multiple numbers. +### repeat -Signature: `func(a interface{}, v ...interface{}) int64` +Use this function to repeat a string multiple times. + +Signature: `repeat(c int,value string) string` Example: ```template -`{{ mul 5 2 3}}` // output: 30 +`{{ repeat 3 "hello" }}` // output: hellohellohello ``` -## div +### printf -Integer divide numbers. +Use this function to format a string in a custom way. For more information about the syntax, refer to the [Go documentation](https://pkg.go.dev/fmt). 
-Signature: `func(a, b interface{}) int64` +Signature: `printf(format string, a ...interface{})` -Example: +Examples: ```template -`{{ div 10 2}}` // output: 5 -``` - -## addf - -Sum numbers. Supports multiple numbers. +`{{ printf "The IP address was %s" .remote_addr }}` // output: The IP address was 129.168.1.1 -Signature: `func(i ...interface{}) float64` - -Example: +`{{ printf "%-40.40s" .request_uri}} {{printf "%-5.5s" .request_method}}` +// output: +// /a/509965767/alternative-to-my-mtg.html GET +// /id/609259548/hpr.html GET +``` ```template -`{{ addf 3.5 2 5 }}` // output: 10.5 +line_format "\"|\" {{printf \"%15.15s\" .ClientHost}} \"|\"" ``` -## subf +### replace -Subtract numbers. Supports multiple numbers. +This function performs simple string replacement. -Signature: `func(a interface{}, v ...interface{}) float64` +Signature: `replace(old string, new string, src string) string` -Example: +It takes three arguments: + +- `old` string to replace +- `new` string to replace with +- `src` source string + +Examples: ```template -`{{ subf 5.5 2 1.5 }}` // output: 2 +`{{ .cluster | replace "-cluster" "" }}` +`{{ replace "hello" "world" "hello world" }}` ``` -## mulf +The last example will return `world world`. -Multiply numbers. Supports multiple numbers +### substr -Signature: `func(a interface{}, v ...interface{}) float64` +Get a substring from a string. -Example: +Signature: `substr(start int,end int,value string) string` + +If start is < 0, this calls value[:end]. +If start is >= 0 and end < 0 or end bigger than s length, this calls value[start:] +Otherwise, this calls value[start, end]. + +Examples: ```template -`{{ mulf 5.5 2 2.5 }}` // output: 27.5 +`{{ .path | substr 2 5 }}` +`{{ substr 0 5 "hello world"}}` // output: hello +`{{ substr 6 11 "hello world"}}` // output: world ``` -## divf +### title -Divide numbers. Supports multiple numbers. +Convert to title case. -Signature: `func(a interface{}, v ...interface{}) float64` +Signature: `title(string) string` -Example: +Examples: ```template -`{{ divf 10 2 4}}` // output: 1.25 +`{{.request_method | title}}` +`{{ title "hello world"}}` ``` -## mod +The last example will return `Hello World`. -Modulo wit mod. +### trim -Signature: `func(a, b interface{}) int64` +The trim function removes space from either side of a string. -Example: +Signature: `trim(string) string` + +Examples: ```template -`{{ mod 10 3}}` // output: 1 +`{{ .ip | trim }}` +`{{ trim " hello " }}` // output: hello ``` -## max +### trimAll -Return the largest of a series of integers: +Use this function to remove given characters from the front or back of a string. -Signature: `max(a interface{}, i ...interface{}) int64` +Signature: `trimAll(chars string,src string) string` -Example: +Examples: ```template -`{{ max 1 2 3 }}` //output 3 +`{{ .path | trimAll "/" }}` +`{{ trimAll "$" "$5.00" }}` // output: 5.00 ``` -## min +### trimPrefix -Return the smallest of a series of integers. +Use this function to trim just the prefix from a string. -Signature: `min(a interface{}, i ...interface{}) int64` +Signature: `trimPrefix(prefix string, src string) string` -Example: +Examples: ```template -`{{ min 1 2 3 }}`//output 1 +`{{ .path | trimPrefix "/" }}` +`{{ trimPrefix "-" "-hello" }}` // output: hello ``` -## maxf +### trimSuffix -Return the largest of a series of floats: +Use this function to trim just the suffix from a string. 
-Signature: `maxf(a interface{}, i ...interface{}) float64` +Signature: `trimSuffix(suffix string, src string) string` -Example: +Examples: ```template -`{{ maxf 1 2.5 3 }}` //output 3 +`{{ .path | trimSuffix "/" }}` +`{{ trimSuffix "-" "hello-" }}` // output: hello ``` -## minf +### trunc -Return the smallest of a series of floats. +Truncate a string and add no suffix. -Signature: `minf(a interface{}, i ...interface{}) float64` +Signature: `trunc(count int,value string) string` -Example: +Examples: ```template -`{{ minf 1 2.5 3 }}` //output 1.5 +`{{ .path | trunc 2 }}` +`{{ trunc 5 "hello world"}}` // output: hello +`{{ trunc -5 "hello world"}}` // output: world ``` -## ceil +### upper -Returns the greatest float value greater than or equal to input value +Use this function to convert to upper case. -Signature: `ceil(a interface{}) float64` +Signature: `upper(string) string` -Example: +Examples: ```template -`{{ ceil 123.001 }}` //output 124.0 +`{ .request_method | upper }}` +`{{ upper "hello"}}` ``` -## floor +This results in `HELLO`. + +### urlencode -Returns the greatest float value less than or equal to input value +Use this function to [urlencode](https://en.wikipedia.org/wiki/URL_encoding) a string. -Signature: `floor(a interface{}) float64` +Signature: `urlencode(string) string` -Example: +Examples: ```template -`{{ floor 123.9999 }}` //output 123.0 +`{{ .request_url | urlencode }}` +`{{ urlencode .request_url}}` ``` -## round +### urldecode -Returns a float value with the remainder rounded to the given number of digits after the decimal point. +Use this function to [urldecode](https://en.wikipedia.org/wiki/URL_encoding) a string. -Signature: `round(a interface{}, p int, rOpt ...float64) float64` +Signature: `urldecode(string) string` -Example: +Examples: ```template -`{{ round 123.555555 3 }}` //output 123.556 +`{{ .request_url | urldecode }}` +`{{ urldecode .request_url}}` ``` -We can also provide a `roundOn` number as third parameter +## Logical functions -Example: +You can use the following logical functions to compare strings when building a template expression. + +### contains + +Use this function to test to see if one string is contained inside of another. + +Signature: `contains(s string, src string,) bool` + +Examples: ```template -`{{ round 123.88571428571 5 .2 }}` //output 123.88572 +`{{ if contains "ErrTimeout" .err }} timeout {{end}}` +`{{ if contains "he" "hello" }} yes {{end}}` ``` -With default `roundOn` of `.5` the above value would be `123.88571` - -## int +### eq -Convert value to an int. +Use this function to test to see if one string has exact matching inside of another. -Signature: `toInt(v interface{}) int` +Signature: `eq(s string, src string) bool` -Example: +Examples: ```template -`{{ "3" | int }}` //output 3 +`{{ if eq "ErrTimeout" .err }} timeout {{end}}` +`{{ if eq "hello" "hello" }} yes {{end}}` ``` -## float64 +### hasPrefix and hasSuffix -Convert to a float64. +The `hasPrefix` and `hasSuffix` functions test whether a string has a given prefix or suffix. -Signature: `toFloat64(v interface{}) float64` +Signatures: -Example: +- `hasPrefix(prefix string, src string) bool` +- `hasSuffix(suffix string, src string) bool` + +Examples: ```template -`{{ "3.5" | float64 }}` //output 3.5 +`{{ if hasSuffix .err "Timeout" }} timeout {{end}}` +`{{ if hasPrefix "he" "hello" }} yes {{end}}` ``` -## fromJson +## Mathematical functions -Decodes a JSON document into a structure. If the input cannot be decoded as JSON the function will return an empty string. 
+You can use the following mathematical functions when writing template expressions. -Signature: `fromJson(v string) interface{}` +### add + +Sum numbers. Supports multiple numbers + +Signature: `func(i ...interface{}) int64` Example: ```template -`{{fromJson "{\"foo\": 55}"}}` +`{{ add 3 2 5 }}` // output: 10 ``` -Example of a query to print a newline per queries stored as a json array in the log line: +### addf -```logql -{job="loki/querier"} |= "finish in prometheus" | logfmt | line_format `{{ range $q := fromJson .queries }} {{ $q.query }} {{ end }}` +Sum floating point numbers. Supports multiple numbers. + +Signature: `func(i ...interface{}) float64` + +Example: + +```template +`{{ addf 3.5 2 5 }}` // output: 10.5 ``` -## now +### ceil -Returns the current time in the local timezone of the Loki server. +Returns the greatest float value greater than or equal to input value -Signature: `Now() time.Time` +Signature: `ceil(a interface{}) float64` Example: ```template -`{{ now }}` +`{{ ceil 123.001 }}` //output 124.0 ``` -## toDate +### div -Parses a formatted string and returns the time value it represents using the local timezone of the server running Loki. +Divide two integers. -For more consistency between Loki installations, it's recommended to use `toDateInZone` - -The format string must use the exact date as defined in the [golang datetime layout](https://pkg.go.dev/time#pkg-constants) - -Signature: `toDate(fmt, str string) time.Time` +Signature: `func(a, b interface{}) int64` -Examples: +Example: ```template -`{{ toDate "2006-01-02" "2021-11-02" }}` -`{{ .foo | toDate "2006-01-02T15:04:05.999999999Z" }}` +`{{ div 10 2}}` // output: 5 ``` -## toDateInZone +### divf -Parses a formatted string and returns the time value it represents in the provided timezone. - -The format string must use the exact date as defined in the [golang datetime layout](https://pkg.go.dev/time#pkg-constants) +Divide floating point numbers. Supports multiple numbers. -The timezone value can be `Local`, `UTC`, or any of the IANA Time Zone database values - -Signature: `toDateInZone(fmt, zone, str string) time.Time` +Signature: `func(a interface{}, v ...interface{}) float64` -Examples: +Example: ```template -`{{ toDateInZone "2006-01-02" "UTC" "2021-11-02" }}` -`{{ .foo | toDateInZone "2006-01-02T15:04:05.999999999Z" "UTC" }}` +`{{ divf 10 2 4}}` // output: 1.25 ``` -## date +### float64 -Returns a textual representation of the time value formatted according to the provided [golang datetime layout](https://pkg.go.dev/time#pkg-constants). +Convert a string to a float64. -Signature: `date(fmt string, date interface{}) string` +Signature: `toFloat64(v interface{}) float64` Example: ```template -`{{ date "2006-01-02" now }}` +`{{ "3.5" | float64 }}` //output 3.5 ``` -## unixEpoch +### floor -Returns the number of seconds elapsed since January 1, 1970 UTC. +Returns the greatest float value less than or equal to input value. 
-Signature: `unixEpoch(date time.Time) string` +Signature: `floor(a interface{}) float64` -Examples: +Example: ```template -`{{ unixEpoch now }}` -`{{ .foo | toDateInZone "2006-01-02T15:04:05.999999999Z" "UTC" | unixEpoch }}` +`{{ floor 123.9999 }}` //output 123.0 ``` -Example of a query to filter Loki querier jobs which create time is 1 day before: -```logql -{job="loki/querier"} | label_format nowEpoch=`{{(unixEpoch now)}}`,createDateEpoch=`{{unixEpoch (toDate "2006-01-02" .createDate)}}` | label_format dateTimeDiff=`{{sub .nowEpoch .createDateEpoch}}` | dateTimeDiff > 86400 -``` +### int -## unixEpochMillis +Convert value to an integer. -Returns the number of milliseconds elapsed since January 1, 1970 UTC. - -Signature: `unixEpochMillis(date time.Time) string` +Signature: `toInt(v interface{}) int` -Examples: +Example: ```template -`{{ unixEpochMillis now }}` -`{{ .foo | toDateInZone "2006-01-02T15:04:05.999999999Z" "UTC" | unixEpochMillis }}` +`{{ "3" | int }}` //output 3 ``` -## unixEpochNanos +### max -Returns the number of nanoseconds elapsed since January 1, 1970 UTC. +Return the largest of a series of integers: -Signature: `unixEpochNanos(date time.Time) string` +Signature: `max(a interface{}, i ...interface{}) int64` -Examples: +Example: ```template -`{{ unixEpochNanos now }}` -`{{ .foo | toDateInZone "2006-01-02T15:04:05.999999999Z" "UTC" | unixEpochNanos }}` +`{{ max 1 2 3 }}` //output 3 ``` -## unixToTime +### maxf -Converts the string epoch to the time value it represents. Epoch times in days, seconds, milliseconds, microseconds and nanoseconds are supported. +Return the largest of a series of floats: -Signature: `unixToTime(epoch string) time.Time` +Signature: `maxf(a interface{}, i ...interface{}) float64` -Examples: +Example: -Consider the following log line `{"from": "1679577215","to":"1679587215","message":"some message"}`. To print the `from` field as human readable add the following at the end of the LogQL query: -```logql -... | json | line_format `from="{{date "2006-01-02" (unixToTime .from)}}"` +```template +`{{ maxf 1 2.5 3 }}` //output 3 ``` -## default +### min -Checks whether the string(`src`) is set, and returns default(`d`) if not set. +Return the smallest of a series of integers. -Signature: `default(d string, src string) string` +Signature: `min(a interface{}, i ...interface{}) int64` -Examples: +Example: ```template -`{{ default "-" "" }}` // output: - -`{{ default "-" "foo" }}` // output: foo +`{{ min 1 2 3 }}`//output 1 ``` -Example of a query to print a `-` if the `http_request_headers_x_forwarded_for` label is empty: -```logql -{job="access_log"} | json | line_format `{{.http_request_headers_x_forwarded_for | default "-"}}` +### minf + +Return the smallest of a series of floats. + +Signature: `minf(a interface{}, i ...interface{}) float64` + +Example: + +```template +`{{ minf 1 2.5 3 }}` //output 1.5 ``` -## count +### mul -Counts occurrences of the regex (`regex`) in (`src`). +Multiply numbers. Supports multiple numbers. -Signature: `count(regex string, src string) int` +Signature: `mul(a interface{}, v ...interface{}) int64` -Examples: +Example: ```template -`{{ count "a|b" "abab" }}` // output: 4 -`{{ count "o" "foo" }}` // output: 2 +`{{ mul 5 2 3}}` // output: 30 ``` -Example of a query to print how many times XYZ occurs in a line: -```logql -{job="xyzlog"} | line_format `{{ __line__ | count "XYZ"}}` +### mulf + +Multiply floating numbers. 
Supports multiple numbers + +Signature: `mulf(a interface{}, v ...interface{}) float64` + +Example: + +```template +`{{ mulf 5.5 2 2.5 }}` // output: 27.5 ``` -## urlencode +### mod -Use this function to [urlencode](https://en.wikipedia.org/wiki/URL_encoding) a string. +Returns the remainder when number 'a' is divided by number 'b'. -Signature: `urlencode(string) string` +Signature: `mod(a, b interface{}) int64` -Examples: +Example: ```template -`{{ .request_url | urlencode }}` -`{{ urlencode .request_url}}` +`{{ mod 10 3}}` // output: 1 ``` -## urldecode +### round -Use this function to [urldecode](https://en.wikipedia.org/wiki/URL_encoding) a string. +Returns a float value with the remainder rounded to the given number of digits after the decimal point. -Signature: `urldecode(string) string` +Signature: `round(a interface{}, p int, rOpt ...float64) float64` -Examples: +Example: ```template -`{{ .request_url | urldecode }}` -`{{ urldecode .request_url}}` +`{{ round 123.555555 3 }}` //output 123.556 ``` -## b64enc +We can also provide a `roundOn` number as third parameter -Base64 encode a string. +Example: -Signature: `b64enc(string) string` +```template +`{{ round 123.88571428571 5 .2 }}` //output 123.88572 +``` -Examples: +With default `roundOn` of `.5` the above value would be `123.88571` + +### sub + +Subtract one number from another. + +Signature: `func(a, b interface{}) int64` + +Example: ```template -`{{ .foo | b64enc }}` -`{{ b64enc .foo }}` +`{{ sub 5 2 }}` // output: 3 ``` -## b64dec +### subf -Base64 decode a string. +Subtract floating numbers. Supports multiple numbers. -Signature: `b64dec(string) string` +Signature: `func(a interface{}, v ...interface{}) float64` -Examples: +Example: ```template -`{{ .foo | b64dec }}` -`{{ b64dec .foo }}` +`{{ subf 5.5 2 1.5 }}` // output: 2 ``` -## bytes +## Regular expression functions -Convert a humanized byte string to bytes using [go-humanize](https://pkg.go.dev/github.com/dustin/go-humanize#ParseBytes) +You can use the following functions to perform regular expressions in a template expression. -Signature: `bytes(string) string` +### count + +Counts occurrences of the regex (`regex`) in (`src`). + +Signature: `count(regex string, src string) int` Examples: ```template -`{{ .foo | bytes }}` -`{{ bytes .foo }}` +`{{ count "a|b" "abab" }}` // output: 4 +`{{ count "o" "foo" }}` // output: 2 ``` -## duration +Example of a query to print how many times XYZ occurs in a line: -An alias for `duration_seconds` +```logql +{job="xyzlog"} | line_format `{{ __line__ | count "XYZ"}}` +``` -Examples: +### regexReplaceAll and regexReplaceAllLiteral + +`regexReplaceAll` returns a copy of the input string, replacing matches of the Regexp with the replacement string replacement. Inside string replacement, $ signs are interpreted as in Expand, so for instance $1 represents the text of the first sub-match. See the golang [Regexp.replaceAll documentation](https://golang.org/pkg/regexp/#Regexp.ReplaceAll) for more examples. + +Signature: regexReplaceAll(regex string, src string, replacement string) +(source) + +Example: ```template -`{{ .foo | duration }}` -`{{ duration .foo }}` +`{{ regexReplaceAll "(a*)bc" .some_label "${1}a" }}` ``` -## duration_seconds - -Convert a humanized time duration to seconds using [time.ParseDuration](https://pkg.go.dev/time#ParseDuration) +`regexReplaceAllLiteral` function returns a copy of the input string and replaces matches of the Regexp with the replacement string replacement. 
The replacement string is substituted directly, without using Expand. -Signature: `duration_seconds(string) float64` +Signature: regexReplaceAllLiteral(regex string, src string, replacement string) -Examples: +Example: ```template -`{{ .foo | duration_seconds }}` -`{{ duration_seconds .foo }}` +`{{ regexReplaceAllLiteral "(ts=)" .timestamp "timestamp=" }}` ```
docs
Reorganize Query Templates page (#14138)
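Since this record documents Go `text/template` functions embedded in LogQL, a small sketch may help show how such functions are wired up. The `FuncMap` below is a toy subset for illustration, not Loki's actual function table:

```go
package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	// Illustrative subset of LogQL-style template functions.
	funcs := template.FuncMap{
		"lower": strings.ToLower,
		"default": func(d, src string) string {
			if src == "" {
				return d
			}
			return src
		},
	}

	// Labels become dot-accessible values, as in `{{ .path }}`.
	labels := map[string]string{"request_method": "GET", "path": ""}

	tmpl := template.Must(template.New("line").Funcs(funcs).Parse(
		`{{ .request_method | lower }} {{ .path | default "-" }}`))
	_ = tmpl.Execute(os.Stdout, labels) // prints: get -
}
```

In a chained pipeline the piped value arrives as the last argument, which is why `default` places the fallback first, matching the signature documented above.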
03e9059ecb29f39b05dbba9cf278b2c95e64a612
2020-07-31 03:14:47
Ed Welch
loki: Improve error messages on query timeout or cancel (#2453)
false
diff --git a/pkg/util/server/error.go b/pkg/util/server/error.go index b2faac58f8af7..1c528669b2108 100644 --- a/pkg/util/server/error.go +++ b/pkg/util/server/error.go @@ -14,15 +14,20 @@ import ( // StatusClientClosedRequest is the status code for when a client request cancellation of an http request const StatusClientClosedRequest = 499 +const ( + ErrClientCanceled = "The request was cancelled by the client." + ErrDeadlineExceeded = "Request timed out, decrease the duration of the request or add more label matchers (prefer exact match over regex match) to reduce the amount of data processed." +) + // WriteError write a go error with the correct status code. func WriteError(err error, w http.ResponseWriter) { var queryErr chunk.QueryError switch { case errors.Is(err, context.Canceled): - http.Error(w, err.Error(), StatusClientClosedRequest) + http.Error(w, ErrClientCanceled, StatusClientClosedRequest) case errors.Is(err, context.DeadlineExceeded): - http.Error(w, err.Error(), http.StatusGatewayTimeout) + http.Error(w, ErrDeadlineExceeded, http.StatusGatewayTimeout) case errors.As(err, &queryErr): http.Error(w, err.Error(), http.StatusBadRequest) case logql.IsParseError(err): diff --git a/pkg/util/server/error_test.go b/pkg/util/server/error_test.go index 3df32dc6bd4ec..b557ffa309e53 100644 --- a/pkg/util/server/error_test.go +++ b/pkg/util/server/error_test.go @@ -24,9 +24,8 @@ func Test_writeError(t *testing.T) { msg string expectedStatus int }{ - {"cancelled", context.Canceled, context.Canceled.Error(), StatusClientClosedRequest}, - {"wrapped cancelled", fmt.Errorf("some context here: %w", context.Canceled), "some context here: " + context.Canceled.Error(), StatusClientClosedRequest}, - {"deadline", context.DeadlineExceeded, context.DeadlineExceeded.Error(), http.StatusGatewayTimeout}, + {"cancelled", context.Canceled, ErrClientCanceled, StatusClientClosedRequest}, + {"deadline", context.DeadlineExceeded, ErrDeadlineExceeded, http.StatusGatewayTimeout}, {"parse error", logql.ParseError{}, "parse error : ", http.StatusBadRequest}, {"httpgrpc", httpgrpc.Errorf(http.StatusBadRequest, errors.New("foo").Error()), "foo", http.StatusBadRequest}, {"internal", errors.New("foo"), "foo", http.StatusInternalServerError},
loki
Improve error messages on query timeout or cancel (#2453)
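The change above centralizes context-error mapping. A minimal sketch of that mapping, using `errors.Is` so wrapped errors still match (names mirror the diff's constants; 499 is the non-standard "client closed request" status popularized by nginx):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"
)

const statusClientClosedRequest = 499 // non-standard, nginx-style

// statusFor maps context errors to HTTP statuses; errors.Is unwraps
// fmt.Errorf("...: %w", err) chains, so wrapped cancellations match too.
func statusFor(err error) (int, string) {
	switch {
	case errors.Is(err, context.Canceled):
		return statusClientClosedRequest, "The request was cancelled by the client."
	case errors.Is(err, context.DeadlineExceeded):
		return http.StatusGatewayTimeout, "Request timed out, reduce the amount of data processed."
	default:
		return http.StatusInternalServerError, err.Error()
	}
}

func main() {
	wrapped := fmt.Errorf("some context here: %w", context.Canceled)
	code, msg := statusFor(wrapped)
	fmt.Println(code, msg) // 499 The request was cancelled by the client.
}
```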
a4aee4f4ff494b525f68c9c6c1ae3417a8e61ebe
2024-09-20 19:55:52
Cyril Tovena
fix: Wait for OwnedStreams service in Ingester startup (#14208)
false
diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 81c3c1a68350a..4e7ac11f515c0 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -573,7 +573,7 @@ func (i *Ingester) starting(ctx context.Context) error { return fmt.Errorf("can not start recalculate owned streams service: %w", err) } - err = i.lifecycler.AwaitRunning(ctx) + err = i.recalculateOwnedStreams.AwaitRunning(ctx) if err != nil { return fmt.Errorf("can not ensure recalculate owned streams service is running: %w", err) }
fix
Wait for OwnedStreams service in Ingester startup (#14208)
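The one-line fix swaps which dependency `starting()` awaits. The sketch below reduces the bug class to its essence: awaiting the wrong service can return before the one you actually depend on is running. The tiny `service` type is a stand-in for dskit's `services.Service`, not the real API:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type service struct{ running chan struct{} }

func newService(startupDelay time.Duration) *service {
	s := &service{running: make(chan struct{})}
	go func() {
		time.Sleep(startupDelay)
		close(s.running)
	}()
	return s
}

// AwaitRunning blocks until the service reports running or ctx is done.
func (s *service) AwaitRunning(ctx context.Context) error {
	select {
	case <-s.running:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	lifecycler := newService(10 * time.Millisecond)
	ownedStreams := newService(50 * time.Millisecond)

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// Awaiting only the lifecycler would return while ownedStreams is
	// still starting; the fix awaits the service the caller depends on.
	_ = lifecycler.AwaitRunning(ctx)
	if err := ownedStreams.AwaitRunning(ctx); err != nil {
		fmt.Println("startup failed:", err)
		return
	}
	fmt.Println("owned-streams service running")
}
```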
be8fd95fc9dd84dd066ae53c145d2d5e22cbe8b0
2025-01-23 01:13:01
Paul Rogers
chore(deps): Update renovate configuration for more automation (#15886)
false
diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 218dc45856db6..362b8537f8f23 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -8,18 +8,16 @@ ], "prHourlyLimit": 4, "baseBranches": [ - "main" + "main", + "release-3.3.x", // Update when a new release is out, 2 minors, 1 major. + "release-3.2.x", // Also ensure to update the 'packageRules' section to match + "release-2.9.x" ], "packageRules": [ { - "matchBaseBranches": [ - "release-2.9.x", - "release-2.8.x" - ], - "enabled": false, - "matchPackageNames": [ - "*" - ] + // Disable updates for all branches - we only want security updates + "matchBaseBranches": ["release-3.3.x", "release-3.2.x", "release-2.9.x"], + "enabled": false }, { // Disable Go version updates @@ -60,12 +58,20 @@ "automerge": false }, { - // Enable all other updates + // Enable all other updates, and auto-merge minor and patch updates "matchFileNames": ["!operator/go.mod", "!operator/api/loki/go.mod"], "groupName": "{{packageName}}", "enabled": true, - "matchUpdateTypes": ["major", "minor", "patch"], - // After we have tested the above configuration, we can enable the following + "matchUpdateTypes": ["minor", "patch"], + "automerge": true, + "autoApprove": true + }, + { + // Enable all other updates, don't auto-merge major updates + "matchFileNames": ["!operator/go.mod", "!operator/api/loki/go.mod"], + "groupName": "{{packageName}}", + "enabled": true, + "matchUpdateTypes": ["major"], "automerge": false, "autoApprove": false } @@ -77,7 +83,9 @@ "enabled": true, "addLabels": [ "area/security" - ] + ], + "automerge": true, + "autoApprove": true }, "osvVulnerabilityAlerts": true, "prConcurrentLimit": 10,
chore
Update renovate configuration for more automation (#15886)
653e37a164a687fc0884278ee5857925d4b77d78
2025-01-25 00:11:49
renovate[bot]
fix(deps): update module google.golang.org/grpc to v1.70.0 (main) (#15955)
false
diff --git a/go.mod b/go.mod index c6983f8ba6148..0c46a0aaba238 100644 --- a/go.mod +++ b/go.mod @@ -104,7 +104,7 @@ require ( golang.org/x/sys v0.29.0 golang.org/x/time v0.9.0 google.golang.org/api v0.218.0 - google.golang.org/grpc v1.69.4 + google.golang.org/grpc v1.70.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/klog/v2 v2.130.1 diff --git a/go.sum b/go.sum index bb6ba21689599..196e5be89f696 100644 --- a/go.sum +++ b/go.sum @@ -1638,8 +1638,8 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= -google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= +google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/pkg/push/go.mod b/pkg/push/go.mod index e203ecd749939..ad4284a93e60c 100644 --- a/pkg/push/go.mod +++ b/pkg/push/go.mod @@ -7,7 +7,7 @@ toolchain go1.23.3 require ( github.com/gogo/protobuf v1.3.2 github.com/stretchr/testify v1.10.0 - google.golang.org/grpc v1.69.4 + google.golang.org/grpc v1.70.0 ) require ( @@ -18,7 +18,7 @@ require ( golang.org/x/sys v0.28.0 // indirect golang.org/x/text v0.21.0 // indirect google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect - google.golang.org/protobuf v1.35.1 // indirect + google.golang.org/protobuf v1.35.2 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/push/go.sum b/pkg/push/go.sum index f10f9acddd930..97612efa542c2 100644 --- a/pkg/push/go.sum +++ b/pkg/push/go.sum @@ -27,16 +27,16 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= -go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= -go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= -go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= -go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= -go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= -go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= -go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= -go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= -go.opentelemetry.io/otel/trace v1.31.0/go.mod 
h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= +go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -70,10 +70,10 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= -google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= -google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= +google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go index 263c024a84c7e..9b59bfc1d9796 100644 --- a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go +++ b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go @@ -35,11 +35,9 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" - "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/gracefulswitch" - "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -48,11 +46,7 @@ import ( 
var PickFirstConfig string

func init() {
-	name := pickfirst.Name
-	if !envconfig.NewPickFirstEnabled {
-		name = pickfirstleaf.Name
-	}
-	PickFirstConfig = fmt.Sprintf("[{%q: {}}]", name)
+	PickFirstConfig = fmt.Sprintf("[{%q: {}}]", pickfirstleaf.Name)
}

// ChildState is the balancer state of a child along with the endpoint which
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go
index 3f274482c74bf..86d495bb624f8 100644
--- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go
+++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go
@@ -19,7 +19,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.35.2
 // protoc v5.27.1
 // source: grpc/lb/v1/load_balancer.proto
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
index 2fc0a71f9441e..76fa5fea95f2b 100644
--- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
@@ -54,9 +54,18 @@ func init() {
 	balancer.Register(pickfirstBuilder{})
 }

-// enableHealthListenerKeyType is a unique key type used in resolver attributes
-// to indicate whether the health listener usage is enabled.
-type enableHealthListenerKeyType struct{}
+type (
+	// enableHealthListenerKeyType is a unique key type used in resolver
+	// attributes to indicate whether the health listener usage is enabled.
+	enableHealthListenerKeyType struct{}
+	// managedByPickfirstKeyType is an attribute key type to inform Outlier
+	// Detection that the generic health listener is being used.
+	// TODO: https://github.com/grpc/grpc-go/issues/7915 - Remove this when
+	// implementing the dualstack design. This is a hack. Once Dualstack is
+	// completed, outlier detection will stop sending ejection updates through
+	// the connectivity listener.
+	managedByPickfirstKeyType struct{}
+)

 var (
 	logger = grpclog.Component("pick-first-leaf-lb")
@@ -140,6 +149,17 @@ func EnableHealthListener(state resolver.State) resolver.State {
 	return state
 }

+// IsManagedByPickfirst returns whether an address belongs to a SubConn
+// managed by the pickfirst LB policy.
+// TODO: https://github.com/grpc/grpc-go/issues/7915 - This is a hack to disable
+// outlier_detection via the connectivity listener when using pick_first.
+// Once Dualstack changes are complete, all SubConns will be created by
+// pick_first and outlier detection will only use the health listener for
+// ejection. This hack can then be removed.
+func IsManagedByPickfirst(addr resolver.Address) bool {
+	return addr.BalancerAttributes.Value(managedByPickfirstKeyType{}) != nil
+}
+
 type pfConfig struct {
 	serviceconfig.LoadBalancingConfig `json:"-"`

@@ -166,6 +186,7 @@ type scData struct {
 }

 func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
+	addr.BalancerAttributes = addr.BalancerAttributes.WithValue(managedByPickfirstKeyType{}, true)
 	sd := &scData{
 		rawConnectivityState: connectivity.Idle,
 		effectiveState:       connectivity.Idle,
diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go
index c9c5b576bb0c8..d7b9dc4666ee4 100644
--- a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go
@@ -29,6 +29,7 @@ import (

 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/balancer/endpointsharding"
+	"google.golang.org/grpc/balancer/pickfirst/pickfirstleaf"
 	"google.golang.org/grpc/balancer/weightedroundrobin/internal"
 	"google.golang.org/grpc/balancer/weightedtarget"
 	"google.golang.org/grpc/connectivity"
@@ -218,7 +219,9 @@ type wrrBalancer struct {
 }

 func (b *wrrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error {
-	b.logger.Infof("UpdateCCS: %v", ccs)
+	if b.logger.V(2) {
+		b.logger.Infof("UpdateCCS: %v", ccs)
+	}
 	cfg, ok := ccs.BalancerConfig.(*lbConfig)
 	if !ok {
 		return fmt.Errorf("wrr: received nil or illegal BalancerConfig (type %T): %v", ccs.BalancerConfig, ccs.BalancerConfig)
@@ -232,6 +235,9 @@ func (b *wrrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error
 	b.updateEndpointsLocked(ccs.ResolverState.Endpoints)
 	b.mu.Unlock()

+	// Make pickfirst children use health listeners for outlier detection to
+	// work.
+	ccs.ResolverState = pickfirstleaf.EnableHealthListener(ccs.ResolverState)
 	// This causes child to update picker inline and will thus cause inline
 	// picker update.
 	return b.child.UpdateClientConnState(balancer.ClientConnState{
diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go
index 8741fdad19dcd..258cdd5db280f 100644
--- a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go
+++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go
@@ -56,6 +56,13 @@ func SetAddrInfo(addr resolver.Address, addrInfo AddrInfo) resolver.Address {
 	return addr
 }

+// SetAddrInfoInEndpoint returns a copy of endpoint in which the Attributes
+// field is updated with addrInfo.
+func SetAddrInfoInEndpoint(endpoint resolver.Endpoint, addrInfo AddrInfo) resolver.Endpoint {
+	endpoint.Attributes = endpoint.Attributes.WithValue(attributeKey{}, addrInfo)
+	return endpoint
+}
+
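The pickfirstleaf and weightedroundrobin changes above share one mechanism: a resolver attribute keyed by an unexported type, written with WithValue and read back with Value. Below is a minimal, illustrative sketch of that pattern; it is not part of this commit, and demoKeyType, mark, and isMarked are invented names.

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

// demoKeyType is unexported, so no other package can construct a colliding
// key; the mere presence of the key carries the signal.
type demoKeyType struct{}

// mark stamps the attribute onto a copy of addr. resolver.Address is a value
// type and attributes are immutable, so the caller's copy is untouched.
func mark(addr resolver.Address) resolver.Address {
	addr.BalancerAttributes = addr.BalancerAttributes.WithValue(demoKeyType{}, true)
	return addr
}

// isMarked checks for key presence only, mirroring IsManagedByPickfirst above.
func isMarked(addr resolver.Address) bool {
	return addr.BalancerAttributes.Value(demoKeyType{}) != nil
}

func main() {
	a := mark(resolver.Address{Addr: "10.0.0.1:443"})
	fmt.Println(isMarked(a))                                      // true
	fmt.Println(isMarked(resolver.Address{Addr: "10.0.0.2:443"})) // false
}

 // GetAddrInfo returns the AddrInfo stored in the BalancerAttributes field of
 // addr.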
func GetAddrInfo(addr resolver.Address) AddrInfo { diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index 905817b5fc7b6..c2688376ae74d 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -34,7 +34,15 @@ import ( "google.golang.org/grpc/status" ) -var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) +var ( + setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) + // noOpRegisterHealthListenerFn is used when client side health checking is + // disabled. It sends a single READY update on the registered listener. + noOpRegisterHealthListenerFn = func(_ context.Context, listener func(balancer.SubConnState)) func() { + listener(balancer.SubConnState{ConnectivityState: connectivity.Ready}) + return func() {} + } +) // ccBalancerWrapper sits between the ClientConn and the Balancer. // @@ -277,10 +285,17 @@ type healthData struct { // to the LB policy. This is stored to avoid sending updates when the // SubConn has already exited connectivity state READY. connectivityState connectivity.State + // closeHealthProducer stores function to close the ref counted health + // producer. The health producer is automatically closed when the SubConn + // state changes. + closeHealthProducer func() } func newHealthData(s connectivity.State) *healthData { - return &healthData{connectivityState: s} + return &healthData{ + connectivityState: s, + closeHealthProducer: func() {}, + } } // updateState is invoked by grpc to push a subConn state update to the @@ -413,6 +428,37 @@ func (acbw *acBalancerWrapper) closeProducers() { } } +// healthProducerRegisterFn is a type alias for the health producer's function +// for registering listeners. +type healthProducerRegisterFn = func(context.Context, balancer.SubConn, string, func(balancer.SubConnState)) func() + +// healthListenerRegFn returns a function to register a listener for health +// updates. If client side health checks are disabled, the registered listener +// will get a single READY (raw connectivity state) update. +// +// Client side health checking is enabled when all the following +// conditions are satisfied: +// 1. Health checking is not disabled using the dial option. +// 2. The health package is imported. +// 3. The health check config is present in the service config. +func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func(balancer.SubConnState)) func() { + if acbw.ccb.cc.dopts.disableHealthCheck { + return noOpRegisterHealthListenerFn + } + regHealthLisFn := internal.RegisterClientHealthCheckListener + if regHealthLisFn == nil { + // The health package is not imported. + return noOpRegisterHealthListenerFn + } + cfg := acbw.ac.cc.healthCheckConfig() + if cfg == nil { + return noOpRegisterHealthListenerFn + } + return func(ctx context.Context, listener func(balancer.SubConnState)) func() { + return regHealthLisFn.(healthProducerRegisterFn)(ctx, acbw, cfg.ServiceName, listener) + } +} + // RegisterHealthListener accepts a health listener from the LB policy. It sends // updates to the health listener as long as the SubConn's connectivity state // doesn't change and a new health listener is not registered. 
To invalidate @@ -421,6 +467,7 @@ func (acbw *acBalancerWrapper) closeProducers() { func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.SubConnState)) { acbw.healthMu.Lock() defer acbw.healthMu.Unlock() + acbw.healthData.closeHealthProducer() // listeners should not be registered when the connectivity state // isn't Ready. This may happen when the balancer registers a listener // after the connectivityState is updated, but before it is notified @@ -436,6 +483,7 @@ func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.Sub return } + registerFn := acbw.healthListenerRegFn() acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || acbw.ccb.balancer == nil { return @@ -443,10 +491,25 @@ func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.Sub // Don't send updates if a new listener is registered. acbw.healthMu.Lock() defer acbw.healthMu.Unlock() - curHD := acbw.healthData - if curHD != hd { + if acbw.healthData != hd { return } - listener(balancer.SubConnState{ConnectivityState: connectivity.Ready}) + // Serialize the health updates from the health producer with + // other calls into the LB policy. + listenerWrapper := func(scs balancer.SubConnState) { + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + acbw.healthMu.Lock() + defer acbw.healthMu.Unlock() + if acbw.healthData != hd { + return + } + listener(scs) + }) + } + + hd.closeHealthProducer = registerFn(ctx, listenerWrapper) }) } diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 9e9d0806995c3..21dd72969aee3 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.1 +// protoc-gen-go v1.35.2 // protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index 83d23f65aa542..40e42b6ae5827 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.1 +// protoc-gen-go v1.35.2 // protoc v5.27.1 // source: grpc/gcp/altscontext.proto diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 915b36df82146..2993bbfab15c9 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.35.2
 // protoc v5.27.1
 // source: grpc/gcp/handshaker.proto
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go
index e9676db4b52a8..a8d5c4857b806 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go
@@ -17,7 +17,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.35.2
 // protoc v5.27.1
 // source: grpc/gcp/transport_security_common.proto
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
index e163a473df93e..bd5fe22b6af6a 100644
--- a/vendor/google.golang.org/grpc/credentials/tls.go
+++ b/vendor/google.golang.org/grpc/credentials/tls.go
@@ -32,6 +32,8 @@ import (
 	"google.golang.org/grpc/internal/envconfig"
 )

+const alpnFailureHelpMessage = "If you upgraded from a grpc-go version earlier than 1.67, your TLS connections may have stopped working due to ALPN enforcement. For more details, see: https://github.com/grpc/grpc-go/issues/434"
+
 var logger = grpclog.Component("credentials")

 // TLSInfo contains the auth information for a TLS authenticated connection.
@@ -128,7 +130,7 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon
 	if np == "" {
 		if envconfig.EnforceALPNEnabled {
 			conn.Close()
-			return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property")
+			return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage)
 		}
 		logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName)
 	}
@@ -158,7 +160,7 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
 	if cs.NegotiatedProtocol == "" {
 		if envconfig.EnforceALPNEnabled {
 			conn.Close()
-			return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property")
+			return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage)
 		} else if logger.V(2) {
 			logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases")
 		}
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index 7494ae591f164..f3a045296a46d 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -428,6 +428,11 @@ func WithTimeout(d time.Duration) DialOption {
 // returned by f, gRPC checks the error's Temporary() method to decide if it
 // should try to reconnect to the network address.
 //
+// Note that gRPC by default performs name resolution on the target passed to
+// NewClient. To bypass name resolution and cause the target string to be
+// passed directly to the dialer here instead, use the "passthrough" resolver
+// by specifying it in the target string, e.g. "passthrough:target".
+//
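The doc note just added to WithContextDialer is easiest to see with a concrete client. A hedged sketch, not part of this commit; the target host and port are invented:

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// The "passthrough" resolver skips name resolution, so the dialer below
	// receives "localhost:50051" exactly as written in the target string.
	cc, err := grpc.NewClient(
		"passthrough:localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
			var d net.Dialer
			return d.DialContext(ctx, "tcp", addr)
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
}

 // Note: All supported releases of Go (as of December 2023) override the OS
 // defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive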
 // with OS defaults for keepalive time and interval, use a net.Dialer that sets
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
index 26e16d91924f2..467de16bdbcdc 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
@@ -17,7 +17,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.35.2
 // protoc v5.27.1
 // source: grpc/health/v1/health.proto
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index 6e7dd6b772709..1e42b6fdc8722 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -49,7 +49,7 @@ var (
 	// XDSFallbackSupport is the env variable that controls whether support for
 	// xDS fallback is turned on. If this is unset or is false, only the first
 	// xDS server in the list of server configs will be used.
-	XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false)
+	XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true)
 	// NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
 	// instead of the existing pickfirst implementation. This can be enabled by
 	// setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
index 29f234acb1b9d..9afeb444d4536 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
@@ -53,4 +53,10 @@ var (

 	// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
 	C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
+
+	// XDSDualstackEndpointsEnabled is true if gRPC should read the
+	// "additional addresses" in the xDS endpoint resource.
+	// TODO: https://github.com/grpc/grpc-go/issues/7866 - Control this using
+	// an env variable when all LB policies handle endpoints.
+	XDSDualstackEndpointsEnabled = false
 )
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 3afc1813440e7..c17b98194b3c7 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -31,6 +31,10 @@ import (
 var (
 	// HealthCheckFunc is used to provide client-side LB channel health checking
 	HealthCheckFunc HealthChecker
+	// RegisterClientHealthCheckListener is used to provide a listener for
+	// updates from the client-side health checking service. It returns a
+	// function that can be called to stop the health producer.
+	RegisterClientHealthCheckListener any // func(ctx context.Context, sc balancer.SubConn, serviceName string, listener func(balancer.SubConnState)) func()
 	// BalancerUnregister is exported by package balancer to unregister a balancer.
 	BalancerUnregister func(name string)
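The RegisterClientHealthCheckListener hook added above is one leg of client-side health checking; the three enabling conditions are listed at healthListenerRegFn earlier in this diff. A hedged sketch of what they look like from the caller's side, not part of this commit; the target is invented, and the empty serviceName simply opts the whole channel in:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	// Condition 2: importing the health package registers the client-side
	// health checking machinery.
	_ "google.golang.org/grpc/health"
)

func main() {
	cc, err := grpc.NewClient(
		"dns:///backend.example.com:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Condition 3: a healthCheckConfig is present in the service config.
		// Condition 1 holds because grpc.WithDisableHealthCheck() is not used.
		grpc.WithDefaultServiceConfig(`{"healthCheckConfig": {"serviceName": ""}}`),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
}

 	// KeepaliveMinPingTime is the minimum ping interval.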
This must be 10s by diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go index 14185ca35a0ca..22731029f5f3c 100644 --- a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.1 +// protoc-gen-go v1.35.2 // protoc v5.27.1 // source: grpc/lookup/v1/rls.proto diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go index 1549a7aa13a37..73b70c25ea398 100644 --- a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.1 +// protoc-gen-go v1.35.2 // protoc v5.27.1 // source: grpc/lookup/v1/rls_config.proto diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index d9305a65d88f7..3dea235735188 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -498,5 +498,5 @@ func mapRecvMsgError(err error) error { if strings.Contains(err.Error(), "body closed by handler") { return status.Error(codes.Canceled, err.Error()) } - return connectionErrorf(true, err, err.Error()) + return connectionErrorf(true, err, "%s", err.Error()) } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 0055fddd7ecf2..997b0a59b586b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -564,7 +564,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade t.logger.Infof("Aborting the stream early: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ - httpStatus: 405, + httpStatus: http.StatusMethodNotAllowed, streamID: streamID, contentSubtype: s.contentSubtype, status: status.New(codes.Internal, errMsg), @@ -585,7 +585,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade stat = status.New(codes.PermissionDenied, err.Error()) } t.controlBuf.put(&earlyAbortStream{ - httpStatus: 200, + httpStatus: http.StatusOK, streamID: s.id, contentSubtype: s.contentSubtype, status: stat, diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 16065a027ae81..9d5b2884d14ee 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -1360,8 +1360,16 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt } return err } - defer d.Free() + freed := false + dataFree := func() { + if !freed { + d.Free() + freed = true + } + } + defer dataFree() df := func(v any) error { + defer dataFree() if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 
7e83027d1994c..8d451e07c7cc0 100644
--- a/vendor/google.golang.org/grpc/service_config.go
+++ b/vendor/google.golang.org/grpc/service_config.go
@@ -268,18 +268,21 @@ func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult {
 	return &serviceconfig.ParseResult{Config: &sc}
 }

+func isValidRetryPolicy(jrp *jsonRetryPolicy) bool {
+	return jrp.MaxAttempts > 1 &&
+		jrp.InitialBackoff > 0 &&
+		jrp.MaxBackoff > 0 &&
+		jrp.BackoffMultiplier > 0 &&
+		len(jrp.RetryableStatusCodes) > 0
+}
+
 func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) {
 	if jrp == nil {
 		return nil, nil
 	}

-	if jrp.MaxAttempts <= 1 ||
-		jrp.InitialBackoff <= 0 ||
-		jrp.MaxBackoff <= 0 ||
-		jrp.BackoffMultiplier <= 0 ||
-		len(jrp.RetryableStatusCodes) == 0 {
-		logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
-		return nil, nil
+	if !isValidRetryPolicy(jrp) {
+		return nil, fmt.Errorf("invalid retry policy (%+v): ", jrp)
 	}

 	if jrp.MaxAttempts < maxAttempts {
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index 17e2267b33201..54adbbced7a65 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -1766,7 +1766,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
 			return err
 		}
 		if err == io.ErrUnexpectedEOF {
-			err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
+			err = status.Error(codes.Internal, io.ErrUnexpectedEOF.Error())
 		}
 		return toRPCErr(err)
 	}
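With convertRetryPolicy now returning an error, an invalid retryPolicy fails loudly instead of being silently dropped. For reference, a policy that passes every check in isValidRetryPolicy above; this sketch is not part of the commit, and the service name and target are invented:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// retryServiceConfig satisfies isValidRetryPolicy: maxAttempts > 1, positive
// backoffs and multiplier, and a non-empty retryableStatusCodes list.
const retryServiceConfig = `{
  "methodConfig": [{
    "name": [{"service": "echo.Echo"}],
    "retryPolicy": {
      "maxAttempts": 4,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2.0,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

func main() {
	// An invalid default service config now surfaces here as a construction
	// error rather than being ignored with a warning log.
	cc, err := grpc.NewClient(
		"dns:///echo.example.com:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(retryServiceConfig),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
}

diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index d2bba7f3d9ecc..0e03fa4d4f7e8 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@ package grpc

 // Version is the current grpc version.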
-const Version = "1.69.4" +const Version = "1.70.0" diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go index dd4d39b3d398e..cd94182fa7175 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go @@ -129,7 +129,7 @@ func (d *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { if d.loadStore != nil { d.loadStore.CallDropped("") } - return balancer.PickResult{}, status.Errorf(codes.Unavailable, err.Error()) + return balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) } } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go index ae2c5fe957a26..f0a8905d374b2 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -234,7 +234,7 @@ func (b *clusterResolverBalancer) updateChildConfig() { b.child = newChildBalancer(b.priorityBuilder, b.cc, b.bOpts) } - childCfgBytes, addrs, err := buildPriorityConfigJSON(b.priorities, &b.config.xdsLBPolicy) + childCfgBytes, endpoints, err := buildPriorityConfigJSON(b.priorities, &b.config.xdsLBPolicy) if err != nil { b.logger.Warningf("Failed to build child policy config: %v", err) return @@ -248,15 +248,33 @@ func (b *clusterResolverBalancer) updateChildConfig() { b.logger.Infof("Built child policy config: %s", pretty.ToJSON(childCfg)) } - endpoints := make([]resolver.Endpoint, len(addrs)) - for i, a := range addrs { - endpoints[i].Attributes = a.BalancerAttributes - endpoints[i].Addresses = []resolver.Address{a} + flattenedAddrs := make([]resolver.Address, len(endpoints)) + for i := range endpoints { + for j := range endpoints[i].Addresses { + addr := endpoints[i].Addresses[j] + addr.BalancerAttributes = endpoints[i].Attributes + // If the endpoint has multiple addresses, only the first is added + // to the flattened address list. This ensures that LB policies + // that don't support endpoints create only one subchannel to a + // backend. + if j == 0 { + flattenedAddrs[i] = addr + } + // BalancerAttributes need to be present in endpoint addresses. This + // temporary workaround is required to make load reporting work + // with the old pickfirst policy which creates SubConns with multiple + // addresses. Since the addresses can be from different localities, + // an Address.BalancerAttribute is used to identify the locality of the + // address used by the transport. This workaround can be removed once + // the old pickfirst is removed. 
+ // See https://github.com/grpc/grpc-go/issues/7339 + endpoints[i].Addresses[j] = addr + } } if err := b.child.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ Endpoints: endpoints, - Addresses: addrs, + Addresses: flattenedAddrs, ServiceConfig: b.configRaw, Attributes: b.attrsWithClient, }, diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go index f62b8e6c8eb59..9a3a71c2e5cc1 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go @@ -48,8 +48,8 @@ type priorityConfig struct { mechanism DiscoveryMechanism // edsResp is set only if type is EDS. edsResp xdsresource.EndpointsUpdate - // addresses is set only if type is DNS. - addresses []string + // endpoints is set only if type is DNS. + endpoints []resolver.Endpoint // Each discovery mechanism has a name generator so that the child policies // can reuse names between updates (EDS updates for example). childNameGen *nameGenerator @@ -71,8 +71,8 @@ type priorityConfig struct { // ┌──────▼─────┐ ┌─────▼──────┐ // │xDSLBPolicy │ │xDSLBPolicy │ (Locality and Endpoint picking layer) // └────────────┘ └────────────┘ -func buildPriorityConfigJSON(priorities []priorityConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]byte, []resolver.Address, error) { - pc, addrs, err := buildPriorityConfig(priorities, xdsLBPolicy) +func buildPriorityConfigJSON(priorities []priorityConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]byte, []resolver.Endpoint, error) { + pc, endpoints, err := buildPriorityConfig(priorities, xdsLBPolicy) if err != nil { return nil, nil, fmt.Errorf("failed to build priority config: %v", err) } @@ -80,23 +80,23 @@ func buildPriorityConfigJSON(priorities []priorityConfig, xdsLBPolicy *internals if err != nil { return nil, nil, fmt.Errorf("failed to marshal built priority config struct into json: %v", err) } - return ret, addrs, nil + return ret, endpoints, nil } -func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*priority.LBConfig, []resolver.Address, error) { +func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*priority.LBConfig, []resolver.Endpoint, error) { var ( - retConfig = &priority.LBConfig{Children: make(map[string]*priority.Child)} - retAddrs []resolver.Address + retConfig = &priority.LBConfig{Children: make(map[string]*priority.Child)} + retEndpoints []resolver.Endpoint ) for _, p := range priorities { switch p.mechanism.Type { case DiscoveryMechanismTypeEDS: - names, configs, addrs, err := buildClusterImplConfigForEDS(p.childNameGen, p.edsResp, p.mechanism, xdsLBPolicy) + names, configs, endpoints, err := buildClusterImplConfigForEDS(p.childNameGen, p.edsResp, p.mechanism, xdsLBPolicy) if err != nil { return nil, nil, err } retConfig.Priorities = append(retConfig.Priorities, names...) - retAddrs = append(retAddrs, addrs...) + retEndpoints = append(retEndpoints, endpoints...) 
odCfgs := convertClusterImplMapToOutlierDetection(configs, p.mechanism.outlierDetection) for n, c := range odCfgs { retConfig.Children[n] = &priority.Child{ @@ -107,9 +107,9 @@ func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalservi } continue case DiscoveryMechanismTypeLogicalDNS: - name, config, addrs := buildClusterImplConfigForDNS(p.childNameGen, p.addresses, p.mechanism) + name, config, endpoints := buildClusterImplConfigForDNS(p.childNameGen, p.endpoints, p.mechanism) retConfig.Priorities = append(retConfig.Priorities, name) - retAddrs = append(retAddrs, addrs...) + retEndpoints = append(retEndpoints, endpoints...) odCfg := makeClusterImplOutlierDetectionChild(config, p.mechanism.outlierDetection) retConfig.Children[name] = &priority.Child{ Config: &internalserviceconfig.BalancerConfig{Name: outlierdetection.Name, Config: odCfg}, @@ -120,7 +120,7 @@ func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalservi continue } } - return retConfig, retAddrs, nil + return retConfig, retEndpoints, nil } func convertClusterImplMapToOutlierDetection(ciCfgs map[string]*clusterimpl.LBConfig, odCfg outlierdetection.LBConfig) map[string]*outlierdetection.LBConfig { @@ -137,19 +137,22 @@ func makeClusterImplOutlierDetectionChild(ciCfg *clusterimpl.LBConfig, odCfg out return &odCfgRet } -func buildClusterImplConfigForDNS(g *nameGenerator, addrStrs []string, mechanism DiscoveryMechanism) (string, *clusterimpl.LBConfig, []resolver.Address) { +func buildClusterImplConfigForDNS(g *nameGenerator, endpoints []resolver.Endpoint, mechanism DiscoveryMechanism) (string, *clusterimpl.LBConfig, []resolver.Endpoint) { // Endpoint picking policy for DNS is hardcoded to pick_first. const childPolicy = "pick_first" - retAddrs := make([]resolver.Address, 0, len(addrStrs)) + retEndpoints := make([]resolver.Endpoint, len(endpoints)) pName := fmt.Sprintf("priority-%v", g.prefix) - for _, addrStr := range addrStrs { - retAddrs = append(retAddrs, hierarchy.Set(resolver.Address{Addr: addrStr}, []string{pName})) + for i, e := range endpoints { + retEndpoints[i] = hierarchy.SetInEndpoint(e, []string{pName}) + // Copy the nested address field as slice fields are shared by the + // iteration variable and the original slice. + retEndpoints[i].Addresses = append([]resolver.Address{}, e.Addresses...) 
} return pName, &clusterimpl.LBConfig{ Cluster: mechanism.Cluster, TelemetryLabels: mechanism.TelemetryLabels, ChildPolicy: &internalserviceconfig.BalancerConfig{Name: childPolicy}, - }, retAddrs + }, retEndpoints } // buildClusterImplConfigForEDS returns a list of cluster_impl configs, one for @@ -161,7 +164,7 @@ func buildClusterImplConfigForDNS(g *nameGenerator, addrStrs []string, mechanism // - map{"p0":p0_config, "p1":p1_config} // - [p0_address_0, p0_address_1, p1_address_0, p1_address_1] // - p0 addresses' hierarchy attributes are set to p0 -func buildClusterImplConfigForEDS(g *nameGenerator, edsResp xdsresource.EndpointsUpdate, mechanism DiscoveryMechanism, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Address, error) { +func buildClusterImplConfigForEDS(g *nameGenerator, edsResp xdsresource.EndpointsUpdate, mechanism DiscoveryMechanism, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Endpoint, error) { drops := make([]clusterimpl.DropConfig, 0, len(edsResp.Drops)) for _, d := range edsResp.Drops { drops = append(drops, clusterimpl.DropConfig{ @@ -183,17 +186,17 @@ func buildClusterImplConfigForEDS(g *nameGenerator, edsResp xdsresource.Endpoint } retNames := g.generate(priorities) retConfigs := make(map[string]*clusterimpl.LBConfig, len(retNames)) - var retAddrs []resolver.Address + var retEndpoints []resolver.Endpoint for i, pName := range retNames { priorityLocalities := priorities[i] - cfg, addrs, err := priorityLocalitiesToClusterImpl(priorityLocalities, pName, mechanism, drops, xdsLBPolicy) + cfg, endpoints, err := priorityLocalitiesToClusterImpl(priorityLocalities, pName, mechanism, drops, xdsLBPolicy) if err != nil { return nil, nil, nil, err } retConfigs[pName] = cfg - retAddrs = append(retAddrs, addrs...) + retEndpoints = append(retEndpoints, endpoints...) } - return retNames, retConfigs, retAddrs, nil + return retNames, retConfigs, retEndpoints, nil } // groupLocalitiesByPriority returns the localities grouped by priority. @@ -244,8 +247,8 @@ func dedupSortedIntSlice(a []int) []int { // priority), and generates a cluster impl policy config, and a list of // addresses with their path hierarchy set to [priority-name, locality-name], so // priority and the xDS LB Policy know which child policy each address is for. 
-func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priorityName string, mechanism DiscoveryMechanism, drops []clusterimpl.DropConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*clusterimpl.LBConfig, []resolver.Address, error) { - var addrs []resolver.Address +func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priorityName string, mechanism DiscoveryMechanism, drops []clusterimpl.DropConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*clusterimpl.LBConfig, []resolver.Endpoint, error) { + var retEndpoints []resolver.Endpoint for _, locality := range localities { var lw uint32 = 1 if locality.Weight != 0 { @@ -262,21 +265,24 @@ func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priority if endpoint.HealthStatus != xdsresource.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsresource.EndpointHealthStatusUnknown { continue } - addr := resolver.Address{Addr: endpoint.Address} - addr = hierarchy.Set(addr, []string{priorityName, localityStr}) - addr = internal.SetLocalityID(addr, locality.ID) + resolverEndpoint := resolver.Endpoint{} + for _, as := range endpoint.Addresses { + resolverEndpoint.Addresses = append(resolverEndpoint.Addresses, resolver.Address{Addr: as}) + } + resolverEndpoint = hierarchy.SetInEndpoint(resolverEndpoint, []string{priorityName, localityStr}) + resolverEndpoint = internal.SetLocalityIDInEndpoint(resolverEndpoint, locality.ID) // "To provide the xds_wrr_locality load balancer information about // locality weights received from EDS, the cluster resolver will // populate a new locality weight attribute for each address The // attribute will have the weight (as an integer) of the locality // the address is part of." - A52 - addr = wrrlocality.SetAddrInfo(addr, wrrlocality.AddrInfo{LocalityWeight: lw}) + resolverEndpoint = wrrlocality.SetAddrInfoInEndpoint(resolverEndpoint, wrrlocality.AddrInfo{LocalityWeight: lw}) var ew uint32 = 1 if endpoint.Weight != 0 { ew = endpoint.Weight } - addr = weightedroundrobin.SetAddrInfo(addr, weightedroundrobin.AddrInfo{Weight: lw * ew}) - addrs = append(addrs, addr) + resolverEndpoint = weightedroundrobin.SetAddrInfoInEndpoint(resolverEndpoint, weightedroundrobin.AddrInfo{Weight: lw * ew}) + retEndpoints = append(retEndpoints, resolverEndpoint) } } return &clusterimpl.LBConfig{ @@ -287,5 +293,5 @@ func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priority TelemetryLabels: mechanism.TelemetryLabels, DropCategories: drops, ChildPolicy: xdsLBPolicy, - }, addrs, nil + }, retEndpoints, nil } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go index 3bcfba8732a30..d9315c3acef5d 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go @@ -24,6 +24,7 @@ import ( "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -294,8 +295,8 @@ func (rr *resourceResolver) generateLocked(onDone xdsresource.OnDoneFunc) { switch uu := u.(type) { case xdsresource.EndpointsUpdate: ret = append(ret, priorityConfig{mechanism: rDM.dm, edsResp: uu, childNameGen: rDM.childNameGen}) - case []string: - ret = append(ret, priorityConfig{mechanism: rDM.dm, 
addresses: uu, childNameGen: rDM.childNameGen}) + case []resolver.Endpoint: + ret = append(ret, priorityConfig{mechanism: rDM.dm, endpoints: uu, childNameGen: rDM.childNameGen}) } } select { diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go index cfc871d3b59d6..5f7a21153057e 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go @@ -47,7 +47,7 @@ type dnsDiscoveryMechanism struct { logger *grpclog.PrefixLogger mu sync.Mutex - addrs []string + endpoints []resolver.Endpoint updateReceived bool } @@ -103,7 +103,7 @@ func (dr *dnsDiscoveryMechanism) lastUpdate() (any, bool) { if !dr.updateReceived { return nil, false } - return dr.addrs, true + return dr.endpoints, true } func (dr *dnsDiscoveryMechanism) resolveNow() { @@ -133,23 +133,15 @@ func (dr *dnsDiscoveryMechanism) UpdateState(state resolver.State) error { } dr.mu.Lock() - var addrs []string - if len(state.Endpoints) > 0 { - // Assume 1 address per endpoint, which is how DNS is expected to - // behave. The slice will grow as needed, however. - addrs = make([]string, 0, len(state.Endpoints)) - for _, e := range state.Endpoints { - for _, a := range e.Addresses { - addrs = append(addrs, a.Addr) - } - } - } else { - addrs = make([]string, len(state.Addresses)) + var endpoints = state.Endpoints + if len(endpoints) == 0 { + endpoints = make([]resolver.Endpoint, len(state.Addresses)) for i, a := range state.Addresses { - addrs[i] = a.Addr + endpoints[i] = resolver.Endpoint{Addresses: []resolver.Address{a}} + endpoints[i].Attributes = a.BalancerAttributes } } - dr.addrs = addrs + dr.endpoints = endpoints dr.updateReceived = true dr.mu.Unlock() @@ -172,7 +164,7 @@ func (dr *dnsDiscoveryMechanism) ReportError(err error) { dr.mu.Unlock() return } - dr.addrs = nil + dr.endpoints = nil dr.updateReceived = true dr.mu.Unlock() diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go index c9d496ce09b9c..8f58c00303217 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go @@ -33,6 +33,7 @@ import ( "unsafe" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" @@ -72,7 +73,7 @@ func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba } b.logger = prefixLogger(b) b.logger.Infof("Created") - b.child = gracefulswitch.NewBalancer(b, bOpts) + b.child = synchronizingBalancerWrapper{lb: gracefulswitch.NewBalancer(b, bOpts)} go b.run() return b } @@ -152,6 +153,11 @@ type lbCfgUpdate struct { done chan struct{} } +type scHealthUpdate struct { + scw *subConnWrapper + state balancer.SubConnState +} + type outlierDetectionBalancer struct { // These fields are safe to be accessed without holding any mutex because // they are synchronized in run(), which makes these field accesses happen @@ -170,10 +176,7 @@ type outlierDetectionBalancer struct { logger *grpclog.PrefixLogger channelzParent channelz.Identifier - // childMu guards 
calls into child (to uphold the balancer.Balancer API - // guarantee of synchronous calls). - childMu sync.Mutex - child *gracefulswitch.Balancer + child synchronizingBalancerWrapper // mu guards access to the following fields. It also helps to synchronize // behaviors of the following events: config updates, firing of the interval @@ -190,8 +193,8 @@ type outlierDetectionBalancer struct { // which uses addrs. This balancer waits for the interval timer algorithm to // finish before making the update to the addrs map. // - // This mutex is never held at the same time as childMu (within the context - // of a single goroutine). + // This mutex is never held when calling methods on the child policy + // (within the context of a single goroutine). mu sync.Mutex addrs map[string]*addressInfo cfg *LBConfig @@ -276,13 +279,9 @@ func (b *outlierDetectionBalancer) UpdateClientConnState(s balancer.ClientConnSt // the balancer.Balancer API, so it is guaranteed to be called in a // synchronous manner, so it cannot race with this read. if b.cfg == nil || b.cfg.ChildPolicy.Name != lbCfg.ChildPolicy.Name { - b.childMu.Lock() - err := b.child.SwitchTo(bb) - if err != nil { - b.childMu.Unlock() + if err := b.child.switchTo(bb); err != nil { return fmt.Errorf("outlier detection: error switching to child of type %q: %v", lbCfg.ChildPolicy.Name, err) } - b.childMu.Unlock() } b.mu.Lock() @@ -319,12 +318,10 @@ func (b *outlierDetectionBalancer) UpdateClientConnState(s balancer.ClientConnSt } b.mu.Unlock() - b.childMu.Lock() - err := b.child.UpdateClientConnState(balancer.ClientConnState{ + err := b.child.updateClientConnState(balancer.ClientConnState{ ResolverState: s.ResolverState, BalancerConfig: b.cfg.ChildPolicy.Config, }) - b.childMu.Unlock() done := make(chan struct{}) b.pickerUpdateCh.Put(lbCfgUpdate{ @@ -337,9 +334,7 @@ func (b *outlierDetectionBalancer) UpdateClientConnState(s balancer.ClientConnSt } func (b *outlierDetectionBalancer) ResolverError(err error) { - b.childMu.Lock() - defer b.childMu.Unlock() - b.child.ResolverError(err) + b.child.resolverError(err) } func (b *outlierDetectionBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { @@ -355,6 +350,7 @@ func (b *outlierDetectionBalancer) updateSubConnState(sc balancer.SubConn, state if state.ConnectivityState == connectivity.Shutdown { delete(b.scWrappers, scw.SubConn) } + scw.setLatestConnectivityState(state.ConnectivityState) b.scUpdateCh.Put(&scUpdate{ scw: scw, state: state, @@ -368,9 +364,7 @@ func (b *outlierDetectionBalancer) UpdateSubConnState(sc balancer.SubConn, state func (b *outlierDetectionBalancer) Close() { b.closed.Fire() <-b.done.Done() - b.childMu.Lock() - b.child.Close() - b.childMu.Unlock() + b.child.closeLB() b.scUpdateCh.Close() b.pickerUpdateCh.Close() @@ -383,9 +377,7 @@ func (b *outlierDetectionBalancer) Close() { } func (b *outlierDetectionBalancer) ExitIdle() { - b.childMu.Lock() - defer b.childMu.Unlock() - b.child.ExitIdle() + b.child.exitIdle() } // wrappedPicker delegates to the child policy's picker, and when the request @@ -475,10 +467,13 @@ func (b *outlierDetectionBalancer) NewSubConn(addrs []resolver.Address, opts bal return nil, err } scw := &subConnWrapper{ - SubConn: sc, - addresses: addrs, - scUpdateCh: b.scUpdateCh, - listener: oldListener, + SubConn: sc, + addresses: addrs, + scUpdateCh: b.scUpdateCh, + listener: oldListener, + latestRawConnectivityState: balancer.SubConnState{ConnectivityState: connectivity.Idle}, + latestHealthState: balancer.SubConnState{ConnectivityState: 
connectivity.Connecting}, + healthListenerEnabled: len(addrs) == 1 && pickfirstleaf.IsManagedByPickfirst(addrs[0]), } b.mu.Lock() defer b.mu.Unlock() @@ -596,34 +591,18 @@ func (b *outlierDetectionBalancer) Target() string { // if the SubConn is not ejected. func (b *outlierDetectionBalancer) handleSubConnUpdate(u *scUpdate) { scw := u.scw - scw.latestState = u.state - if !scw.ejected { - if scw.listener != nil { - b.childMu.Lock() - scw.listener(u.state) - b.childMu.Unlock() - } - } + scw.clearHealthListener() + b.child.updateSubConnState(scw, u.state) +} + +func (b *outlierDetectionBalancer) handleSubConnHealthUpdate(u *scHealthUpdate) { + b.child.updateSubConnHealthState(u.scw, u.state) } // handleEjectedUpdate handles any SubConns that get ejected/unejected, and // forwards the appropriate corresponding subConnState to the child policy. func (b *outlierDetectionBalancer) handleEjectedUpdate(u *ejectionUpdate) { - scw := u.scw - scw.ejected = u.isEjected - // If scw.latestState has never been written to will default to connectivity - // IDLE, which is fine. - stateToUpdate := scw.latestState - if u.isEjected { - stateToUpdate = balancer.SubConnState{ - ConnectivityState: connectivity.TransientFailure, - } - } - if scw.listener != nil { - b.childMu.Lock() - scw.listener(stateToUpdate) - b.childMu.Unlock() - } + b.child.handleEjectionUpdate(u) } // handleChildStateUpdate forwards the picker update wrapped in a wrapped picker @@ -696,6 +675,8 @@ func (b *outlierDetectionBalancer) run() { b.handleSubConnUpdate(u) case *ejectionUpdate: b.handleEjectedUpdate(u) + case *scHealthUpdate: + b.handleSubConnHealthUpdate(u) } case update, ok := <-b.pickerUpdateCh.Get(): if !ok { @@ -880,6 +861,69 @@ func (b *outlierDetectionBalancer) unejectAddress(addrInfo *addressInfo) { } } +// synchronizingBalancerWrapper serializes calls into balancer (to uphold the +// balancer.Balancer API guarantee of synchronous calls). It also ensures a +// consistent order of locking mutexes when using SubConn listeners to avoid +// deadlocks. +type synchronizingBalancerWrapper struct { + // mu should not be used directly from outside this struct, instead use + // methods defined on the struct. 
+ mu sync.Mutex + lb *gracefulswitch.Balancer +} + +func (sbw *synchronizingBalancerWrapper) switchTo(builder balancer.Builder) error { + sbw.mu.Lock() + defer sbw.mu.Unlock() + return sbw.lb.SwitchTo(builder) +} + +func (sbw *synchronizingBalancerWrapper) updateClientConnState(state balancer.ClientConnState) error { + sbw.mu.Lock() + defer sbw.mu.Unlock() + return sbw.lb.UpdateClientConnState(state) +} + +func (sbw *synchronizingBalancerWrapper) resolverError(err error) { + sbw.mu.Lock() + defer sbw.mu.Unlock() + sbw.lb.ResolverError(err) +} + +func (sbw *synchronizingBalancerWrapper) closeLB() { + sbw.mu.Lock() + defer sbw.mu.Unlock() + sbw.lb.Close() +} + +func (sbw *synchronizingBalancerWrapper) exitIdle() { + sbw.mu.Lock() + defer sbw.mu.Unlock() + sbw.lb.ExitIdle() +} + +func (sbw *synchronizingBalancerWrapper) updateSubConnHealthState(scw *subConnWrapper, scs balancer.SubConnState) { + sbw.mu.Lock() + defer sbw.mu.Unlock() + scw.updateSubConnHealthState(scs) +} + +func (sbw *synchronizingBalancerWrapper) updateSubConnState(scw *subConnWrapper, scs balancer.SubConnState) { + sbw.mu.Lock() + defer sbw.mu.Unlock() + scw.updateSubConnConnectivityState(scs) +} + +func (sbw *synchronizingBalancerWrapper) handleEjectionUpdate(u *ejectionUpdate) { + sbw.mu.Lock() + defer sbw.mu.Unlock() + if u.isEjected { + u.scw.handleEjection() + } else { + u.scw.handleUnejection() + } +} + // addressInfo contains the runtime information about an address that pertains // to Outlier Detection. This struct and all of its fields is protected by // outlierDetectionBalancer.mu in the case where it is accessed through the diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go index 0fa422d8f262e..7d710fde1b2a9 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go @@ -19,9 +19,11 @@ package outlierdetection import ( "fmt" + "sync" "unsafe" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/resolver" ) @@ -31,23 +33,54 @@ import ( // whether or not this SubConn is ejected. type subConnWrapper struct { balancer.SubConn - listener func(balancer.SubConnState) - // addressInfo is a pointer to the subConnWrapper's corresponding address - // map entry, if the map entry exists. + // map entry, if the map entry exists. It is accessed atomically. addressInfo unsafe.Pointer // *addressInfo + // The following fields are set during object creation and read-only after + // that. + + listener func(balancer.SubConnState) + // healthListenerEnabled indicates whether the leaf LB policy is using a + // generic health listener. When enabled, ejection updates are sent via the + // health listener instead of the connectivity listener. Once Dualstack + // changes are complete, all SubConns will be created by pickfirst which + // uses the health listener. + // TODO: https://github.com/grpc/grpc-go/issues/7915 - Once Dualstack + // changes are complete, all SubConns will be created by pick_first and + // outlier detection will only use the health listener for ejection and + // this field can be removed. 
+	healthListenerEnabled bool
+
+	scUpdateCh *buffer.Unbounded
+
+	// The following fields are only referenced in the context of a work
+	// serializing buffer and don't need to be protected by a mutex.
+
 	// These two pieces of state will reach eventual consistency due to sync in
 	// run(), and child will always have the correctly updated SubConnState.
-	// latestState is the latest state update from the underlying SubConn. This
-	// is used whenever a SubConn gets unejected.
-	latestState balancer.SubConnState
-	ejected     bool
-	scUpdateCh  *buffer.Unbounded
+	ejected bool
 	// addresses is the list of address(es) this SubConn was created with to
 	// help support any change in address(es)
 	addresses []resolver.Address
+	// latestHealthState is tracked to update the child policy during
+	// unejection.
+	latestHealthState balancer.SubConnState
+	// latestRawConnectivityState is tracked to update the child policy during
+	// unejection.
+	latestRawConnectivityState balancer.SubConnState
+
+	// Access to the following fields is protected by a mutex. These fields
+	// should not be accessed from outside this file, instead use methods
+	// defined on the struct.
+	mu             sync.Mutex
+	healthListener func(balancer.SubConnState)
+	// latestReceivedConnectivityState is the SubConn's most recent connectivity
+	// state received. It may not be delivered to the child balancer yet. It is
+	// used to ensure a health listener is registered with the SubConn only when
+	// the SubConn is READY.
+	latestReceivedConnectivityState connectivity.State
 }

 // eject causes the wrapper to report a state update with the TRANSIENT_FAILURE
@@ -72,3 +105,108 @@ func (scw *subConnWrapper) uneject() {
 func (scw *subConnWrapper) String() string {
 	return fmt.Sprintf("%+v", scw.addresses)
 }
+
+func (scw *subConnWrapper) RegisterHealthListener(listener func(balancer.SubConnState)) {
+	// gRPC currently supports two mechanisms that provide a health signal for
+	// a connection: client-side health checking and outlier detection. Earlier
+	// both these mechanisms signaled unhealthiness by setting the subchannel
+	// state to TRANSIENT_FAILURE. As part of the dualstack changes to make
+	// pick_first the universal leaf policy (see A61), both these mechanisms
+	// started using the new health listener to make health signal visible to
+	// the petiole policies without affecting the underlying connectivity
+	// management of the pick_first policy.
+	if !scw.healthListenerEnabled {
+		logger.Errorf("Health listener unexpectedly registered on SubConn %v.", scw)
+		return
+	}
+
+	scw.mu.Lock()
+	defer scw.mu.Unlock()
+
+	if scw.latestReceivedConnectivityState != connectivity.Ready {
+		return
+	}
+	scw.healthListener = listener
+	if listener == nil {
+		scw.SubConn.RegisterHealthListener(nil)
+		return
+	}
+
+	scw.SubConn.RegisterHealthListener(func(scs balancer.SubConnState) {
+		scw.scUpdateCh.Put(&scHealthUpdate{
+			scw:   scw,
+			state: scs,
+		})
+	})
+}
+
+// updateSubConnHealthState stores the latest health state for unejection and
+// sends updates to the health listener.
+func (scw *subConnWrapper) updateSubConnHealthState(scs balancer.SubConnState) {
+	scw.latestHealthState = scs
+	if scw.ejected {
+		return
+	}
+	scw.mu.Lock()
+	defer scw.mu.Unlock()
+	if scw.healthListener != nil {
+		scw.healthListener(scs)
+	}
+}
+
+// updateSubConnConnectivityState stores the latest connectivity state for
+// unejection and updates the raw connectivity listener.
+func (scw *subConnWrapper) updateSubConnConnectivityState(scs balancer.SubConnState) { + scw.latestRawConnectivityState = scs + // If the raw connectivity listener is used for ejection, and the SubConn is + // ejected, don't send the update. + if scw.ejected && !scw.healthListenerEnabled { + return + } + if scw.listener != nil { + scw.listener(scs) + } +} + +func (scw *subConnWrapper) clearHealthListener() { + scw.mu.Lock() + defer scw.mu.Unlock() + scw.healthListener = nil +} + +func (scw *subConnWrapper) handleUnejection() { + scw.ejected = false + if !scw.healthListenerEnabled { + // If scw.latestRawConnectivityState has never been written to will + // default to connectivity IDLE, which is fine. + scw.updateSubConnConnectivityState(scw.latestRawConnectivityState) + return + } + // If scw.latestHealthState has never been written to will use the health + // state CONNECTING set during object creation. + scw.updateSubConnHealthState(scw.latestHealthState) +} + +func (scw *subConnWrapper) handleEjection() { + scw.ejected = true + stateToUpdate := balancer.SubConnState{ + ConnectivityState: connectivity.TransientFailure, + } + if !scw.healthListenerEnabled { + if scw.listener != nil { + scw.listener(stateToUpdate) + } + return + } + scw.mu.Lock() + defer scw.mu.Unlock() + if scw.healthListener != nil { + scw.healthListener(stateToUpdate) + } +} + +func (scw *subConnWrapper) setLatestConnectivityState(state connectivity.State) { + scw.mu.Lock() + defer scw.mu.Unlock() + scw.latestReceivedConnectivityState = state +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go index 943ee7806ba18..2b289a81143c9 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go @@ -120,6 +120,13 @@ func SetAddrInfo(addr resolver.Address, addrInfo AddrInfo) resolver.Address { return addr } +// SetAddrInfoInEndpoint returns a copy of endpoint in which the Attributes +// field is updated with AddrInfo. +func SetAddrInfoInEndpoint(endpoint resolver.Endpoint, addrInfo AddrInfo) resolver.Endpoint { + endpoint.Attributes = endpoint.Attributes.WithValue(attributeKey{}, addrInfo) + return endpoint +} + func (a AddrInfo) String() string { return fmt.Sprintf("Locality Weight: %d", a.LocalityWeight) } diff --git a/vendor/google.golang.org/grpc/xds/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/internal.go index 1d8a6b03f1b3b..74c9195215514 100644 --- a/vendor/google.golang.org/grpc/xds/internal/internal.go +++ b/vendor/google.golang.org/grpc/xds/internal/internal.go @@ -86,6 +86,12 @@ func SetLocalityID(addr resolver.Address, l LocalityID) resolver.Address { return addr } +// SetLocalityIDInEndpoint sets locality ID in endpoint to l. +func SetLocalityIDInEndpoint(endpoint resolver.Endpoint, l LocalityID) resolver.Endpoint { + endpoint.Attributes = endpoint.Attributes.WithValue(localityKey, l) + return endpoint +} + // ResourceTypeMapForTesting maps TypeUrl to corresponding ResourceType. 
var ResourceTypeMapForTesting map[string]any diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go index 24673a8d90779..f81685a45e692 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go @@ -639,6 +639,9 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w if a.logger.V(2) { a.logger.Infof("Resource type %q with resource name %q found in cache: %s", rType.TypeName(), resourceName, state.cache.ToJSON()) } + // state can only be accessed in the context of an + // xdsClientSerializer callback. Hence making a copy of the cached + // resource here for watchCallbackSerializer. resource := state.cache a.watcherCallbackSerializer.TrySchedule(func(context.Context) { watcher.OnUpdate(resource, func() {}) }) } @@ -646,9 +649,13 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w // immediately as well. if state.md.Status == xdsresource.ServiceStatusNACKed { if a.logger.V(2) { - a.logger.Infof("Resource type %q with resource name %q was NACKed: %s", rType.TypeName(), resourceName, state.cache.ToJSON()) + a.logger.Infof("Resource type %q with resource name %q was NACKed", rType.TypeName(), resourceName) } - a.watcherCallbackSerializer.TrySchedule(func(context.Context) { watcher.OnError(state.md.ErrState.Err, func() {}) }) + // state can only be accessed in the context of an + // xdsClientSerializer callback. Hence making a copy of the error + // here for watchCallbackSerializer. + err := state.md.ErrState.Err + a.watcherCallbackSerializer.TrySchedule(func(context.Context) { watcher.OnError(err, func() {}) }) } // If the metadata field is updated to indicate that the management // server does not have this resource, notify the new watcher. @@ -687,7 +694,7 @@ func (a *authority) unwatchResource(rType xdsresource.Type, resourceName string, delete(state.watchers, watcher) if len(state.watchers) > 0 { if a.logger.V(2) { - a.logger.Infof("%d more watchers exist for type %q, resource name %q", rType.TypeName(), resourceName) + a.logger.Infof("Other watchers exist for type %q, resource name %q", rType.TypeName(), resourceName) } return } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go index d8f9d6c9417bb..55299c457b25c 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go @@ -26,7 +26,6 @@ import ( "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/cache" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/xds/bootstrap" xdsclientinternal "google.golang.org/grpc/xds/internal/xdsclient/internal" @@ -61,11 +60,11 @@ func New(name string) (XDSClient, func(), error) { if err != nil { return nil, nil, fmt.Errorf("xds: failed to get xDS bootstrap config: %v", err) } - return newRefCounted(name, config, defaultWatchExpiryTimeout, defaultIdleChannelExpiryTimeout, backoff.DefaultExponential.Backoff) + return newRefCounted(name, config, defaultWatchExpiryTimeout, backoff.DefaultExponential.Backoff) } // newClientImpl returns a new xdsClient with the given config. 
-func newClientImpl(config *bootstrap.Config, watchExpiryTimeout, idleChannelExpiryTimeout time.Duration, streamBackoff func(int) time.Duration) (*clientImpl, error) { +func newClientImpl(config *bootstrap.Config, watchExpiryTimeout time.Duration, streamBackoff func(int) time.Duration) (*clientImpl, error) { ctx, cancel := context.WithCancel(context.Background()) c := &clientImpl{ done: grpcsync.NewEvent(), @@ -78,7 +77,6 @@ func newClientImpl(config *bootstrap.Config, watchExpiryTimeout, idleChannelExpi transportBuilder: &grpctransport.Builder{}, resourceTypes: newResourceTypeRegistry(), xdsActiveChannels: make(map[string]*channelState), - xdsIdleChannels: cache.NewTimeoutCache(idleChannelExpiryTimeout), } for name, cfg := range config.Authorities() { @@ -121,10 +119,6 @@ type OptionsForTesting struct { // unspecified, uses the default value used in non-test code. WatchExpiryTimeout time.Duration - // IdleChannelExpiryTimeout is the timeout before idle xdsChannels are - // deleted. If unspecified, uses the default value used in non-test code. - IdleChannelExpiryTimeout time.Duration - // StreamBackoffAfterFailure is the backoff function used to determine the // backoff duration after stream failures. // If unspecified, uses the default value used in non-test code. @@ -147,9 +141,6 @@ func NewForTesting(opts OptionsForTesting) (XDSClient, func(), error) { if opts.WatchExpiryTimeout == 0 { opts.WatchExpiryTimeout = defaultWatchExpiryTimeout } - if opts.IdleChannelExpiryTimeout == 0 { - opts.IdleChannelExpiryTimeout = defaultIdleChannelExpiryTimeout - } if opts.StreamBackoffAfterFailure == nil { opts.StreamBackoffAfterFailure = defaultStreamBackoffFunc } @@ -158,7 +149,7 @@ func NewForTesting(opts OptionsForTesting) (XDSClient, func(), error) { if err != nil { return nil, nil, err } - return newRefCounted(opts.Name, config, opts.WatchExpiryTimeout, opts.IdleChannelExpiryTimeout, opts.StreamBackoffAfterFailure) + return newRefCounted(opts.Name, config, opts.WatchExpiryTimeout, opts.StreamBackoffAfterFailure) } // GetForTesting returns an xDS client created earlier using the given name. diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go index 1c105ac4e061b..f5fc76d8a75c8 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go @@ -27,10 +27,7 @@ import ( "google.golang.org/grpc/internal/xds/bootstrap" ) -const ( - defaultWatchExpiryTimeout = 15 * time.Second - defaultIdleChannelExpiryTimeout = 5 * time.Minute -) +const defaultWatchExpiryTimeout = 15 * time.Second var ( // The following functions are no-ops in the actual code, but can be @@ -43,26 +40,31 @@ var ( func clientRefCountedClose(name string) { clientsMu.Lock() - defer clientsMu.Unlock() - client, ok := clients[name] if !ok { logger.Errorf("Attempt to close a non-existent xDS client with name %s", name) + clientsMu.Unlock() return } if client.decrRef() != 0 { + clientsMu.Unlock() return } + delete(clients, name) + clientsMu.Unlock() + + // This attempts to close the transport to the management server and could + // theoretically call back into the xdsclient package again and deadlock. + // Hence, this needs to be called without holding the lock. 
client.clientImpl.close() xdsClientImplCloseHook(name) - delete(clients, name) } // newRefCounted creates a new reference counted xDS client implementation for // name, if one does not exist already. If an xDS client for the given name // exists, it gets a reference to it and returns it. -func newRefCounted(name string, config *bootstrap.Config, watchExpiryTimeout, idleChannelExpiryTimeout time.Duration, streamBackoff func(int) time.Duration) (XDSClient, func(), error) { +func newRefCounted(name string, config *bootstrap.Config, watchExpiryTimeout time.Duration, streamBackoff func(int) time.Duration) (XDSClient, func(), error) { clientsMu.Lock() defer clientsMu.Unlock() @@ -72,7 +74,7 @@ func newRefCounted(name string, config *bootstrap.Config, watchExpiryTimeout, id } // Create the new client implementation. - c, err := newClientImpl(config, watchExpiryTimeout, idleChannelExpiryTimeout, streamBackoff) + c, err := newClientImpl(config, watchExpiryTimeout, streamBackoff) if err != nil { return nil, nil, err } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go index df0949e23cc7e..bb8d9040022f9 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go @@ -25,7 +25,6 @@ import ( "sync/atomic" "time" - "google.golang.org/grpc/internal/cache" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/xds/bootstrap" @@ -63,14 +62,9 @@ type clientImpl struct { // these channels, and forwards updates from the channels to each of these // authorities. // - // Once all references to a channel are dropped, the channel is moved to the - // idle cache where it lives for a configured duration before being closed. - // If the channel is required before the idle timeout fires, it is revived - // from the idle cache and used. + // Once all references to a channel are dropped, the channel is closed. channelsMu sync.Mutex xdsActiveChannels map[string]*channelState // Map from server config to in-use xdsChannels. - xdsIdleChannels *cache.TimeoutCache // Map from server config to idle xdsChannels. - closeCond *sync.Cond } // channelState represents the state of an xDS channel. It tracks the number of @@ -173,21 +167,6 @@ func (c *clientImpl) close() { c.close() } - // Similarly, closing idle channels cannot be done with the lock held, for - // the same reason as described above. So, we clear the idle cache in a - // goroutine and use a condition variable to wait on the condition that the - // idle cache has zero entries. The Wait() method on the condition variable - // releases the lock and blocks the goroutine until signaled (which happens - // when an idle channel is removed from the cache and closed), and grabs the - // lock before returning. - c.channelsMu.Lock() - c.closeCond = sync.NewCond(&c.channelsMu) - go c.xdsIdleChannels.Clear(true) - for c.xdsIdleChannels.Len() > 0 { - c.closeCond.Wait() - } - c.channelsMu.Unlock() - c.serializerClose() <-c.serializer.Done() @@ -289,27 +268,15 @@ func (c *clientImpl) getOrCreateChannel(serverConfig *bootstrap.ServerConfig, in c.logger.Infof("Received request for a reference to an xdsChannel for server config %q", serverConfig) } - // Use an active channel, if one exists for this server config. + // Use an existing channel, if one exists for this server config. 
if state, ok := c.xdsActiveChannels[serverConfig.String()]; ok { if c.logger.V(2) { - c.logger.Infof("Reusing an active xdsChannel for server config %q", serverConfig) + c.logger.Infof("Reusing an existing xdsChannel for server config %q", serverConfig) } initLocked(state) return state.channel, c.releaseChannel(serverConfig, state, deInitLocked), nil } - // If an idle channel exists for this server config, remove it from the - // idle cache and add it to the map of active channels, and return it. - if s, ok := c.xdsIdleChannels.Remove(serverConfig.String()); ok { - if c.logger.V(2) { - c.logger.Infof("Reviving an xdsChannel from the idle cache for server config %q", serverConfig) - } - state := s.(*channelState) - c.xdsActiveChannels[serverConfig.String()] = state - initLocked(state) - return state.channel, c.releaseChannel(serverConfig, state, deInitLocked), nil - } - if c.logger.V(2) { c.logger.Infof("Creating a new xdsChannel for server config %q", serverConfig) } @@ -345,9 +312,7 @@ func (c *clientImpl) getOrCreateChannel(serverConfig *bootstrap.ServerConfig, in } // releaseChannel is a function that is called when a reference to an xdsChannel -// needs to be released. It handles the logic of moving the channel to an idle -// cache if there are no other active references, and closing the channel if it -// remains in the idle cache for the configured duration. +// needs to be released. It handles closing channels with no active references. // // The function takes the following parameters: // - serverConfig: the server configuration for the xdsChannel @@ -360,7 +325,6 @@ func (c *clientImpl) getOrCreateChannel(serverConfig *bootstrap.ServerConfig, in func (c *clientImpl) releaseChannel(serverConfig *bootstrap.ServerConfig, state *channelState, deInitLocked func(*channelState)) func() { return grpcsync.OnceFunc(func() { c.channelsMu.Lock() - defer c.channelsMu.Unlock() if c.logger.V(2) { c.logger.Infof("Received request to release a reference to an xdsChannel for server config %q", serverConfig) @@ -372,40 +336,17 @@ func (c *clientImpl) releaseChannel(serverConfig *bootstrap.ServerConfig, state if c.logger.V(2) { c.logger.Infof("xdsChannel %p has other active references", state.channel) } + c.channelsMu.Unlock() return } - // Move the channel to the idle cache instead of closing - // immediately. If the channel remains in the idle cache for - // the configured duration, it will get closed. delete(c.xdsActiveChannels, serverConfig.String()) if c.logger.V(2) { - c.logger.Infof("Moving xdsChannel [%p] for server config %s to the idle cache", state.channel, serverConfig) + c.logger.Infof("Closing xdsChannel [%p] for server config %s", state.channel, serverConfig) } + channelToClose := state.channel + c.channelsMu.Unlock() - // The idle cache expiry timeout results in the channel getting - // closed in another serializer callback. - c.xdsIdleChannels.Add(serverConfig.String(), state, grpcsync.OnceFunc(func() { - c.channelsMu.Lock() - channelToClose := state.channel - c.channelsMu.Unlock() - - if c.logger.V(2) { - c.logger.Infof("Idle cache expiry timeout fired for xdsChannel [%p] for server config %s", state.channel, serverConfig) - } - channelToClose.close() - - // If the channel is being closed as a result of the xDS client - // being closed, closeCond is non-nil and we need to signal from - // here to unblock Close(). Holding the lock is not necessary - // to call Signal() on a condition variable. 
But the field - // `c.closeCond` needs to guarded by the lock, which is why we - // acquire it here. - c.channelsMu.Lock() - if c.closeCond != nil { - c.closeCond.Signal() - } - c.channelsMu.Unlock() - })) + channelToClose.close() }) } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/ads/ads_stream.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/ads/ads_stream.go index 4c4856a073476..bf7510058c5f4 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/ads/ads_stream.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/ads/ads_stream.go @@ -664,7 +664,7 @@ func (s *StreamImpl) onError(err error, msgReceived bool) { // connection hitting its max connection age limit. // (see [gRFC A9](https://github.com/grpc/proposal/blob/master/A9-server-side-conn-mgt.md)). if msgReceived { - err = xdsresource.NewErrorf(xdsresource.ErrTypeStreamFailedAfterRecv, err.Error()) + err = xdsresource.NewErrorf(xdsresource.ErrTypeStreamFailedAfterRecv, "%s", err.Error()) } s.eventHandler.OnADSStreamError(err) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go index 1254d250c99b0..f94a17e7c66ac 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go @@ -49,7 +49,7 @@ const ( // Endpoint contains information of an endpoint. type Endpoint struct { - Address string + Addresses []string HealthStatus EndpointHealthStatus Weight uint32 } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go index f65845b702c8c..fd780d6632d27 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go @@ -26,6 +26,7 @@ import ( v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal" "google.golang.org/protobuf/proto" @@ -93,14 +94,22 @@ func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint, uniqueEndpointAddrs } weight = w.GetValue() } - addr := parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress()) - if uniqueEndpointAddrs[addr] { - return nil, fmt.Errorf("duplicate endpoint with the same address %s", addr) + addrs := []string{parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress())} + if envconfig.XDSDualstackEndpointsEnabled { + for _, sa := range lbEndpoint.GetEndpoint().GetAdditionalAddresses() { + addrs = append(addrs, parseAddress(sa.GetAddress().GetSocketAddress())) + } + } + + for _, a := range addrs { + if uniqueEndpointAddrs[a] { + return nil, fmt.Errorf("duplicate endpoint with the same address %s", a) + } + uniqueEndpointAddrs[a] = true } - uniqueEndpointAddrs[addr] = true endpoints = append(endpoints, Endpoint{ HealthStatus: EndpointHealthStatus(lbEndpoint.GetHealthStatus()), - Address: addr, + Addresses: addrs, Weight: weight, }) } diff --git a/vendor/modules.txt b/vendor/modules.txt index 5e30ec37a3aba..b961906c11701 100644 --- a/vendor/modules.txt +++ 
b/vendor/modules.txt @@ -2049,7 +2049,7 @@ google.golang.org/genproto/googleapis/api/monitoredres google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.69.4 +# google.golang.org/grpc v1.70.0 ## explicit; go 1.22 google.golang.org/grpc google.golang.org/grpc/attributes
fix
update module google.golang.org/grpc to v1.70.0 (main) (#15955)
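The recurring pattern in the `clientimpl.go` and `client_refcounted.go` hunks above is that map bookkeeping happens while holding the mutex, but the actual `close()` call is deferred until after `Unlock()`, because closing the transport can re-enter the package and deadlock. A minimal, self-contained sketch of that locking discipline — the `registry` and `closer` names here are illustrative stand-ins, not grpc-go's actual types:

```go
package main

import (
	"fmt"
	"sync"
)

// closer is anything whose Close may call back into the registry,
// e.g. a transport that unregisters watchers on shutdown.
type closer interface{ Close() }

type registry struct {
	mu      sync.Mutex
	entries map[string]closer
}

// release removes the entry under the lock, but calls Close only after
// the lock is dropped, so a re-entrant Close cannot deadlock on mu.
func (r *registry) release(name string) {
	r.mu.Lock()
	c, ok := r.entries[name]
	if ok {
		delete(r.entries, name)
	}
	r.mu.Unlock() // drop the lock before any callback can re-enter

	if ok {
		c.Close()
	}
}

type entry struct {
	name string
	r    *registry
}

// Close re-enters the registry; this would deadlock if release still held r.mu.
func (e *entry) Close() {
	e.r.mu.Lock()
	n := len(e.r.entries)
	e.r.mu.Unlock()
	fmt.Printf("closed %s, %d entries remain\n", e.name, n)
}

func main() {
	r := &registry{entries: map[string]closer{}}
	r.entries["a"] = &entry{name: "a", r: r}
	r.entries["b"] = &entry{name: "b", r: r}
	r.release("a")
	r.release("b")
}
```

The same reasoning explains why the idle-cache removal above simplified `close()`: with channels closed eagerly outside the lock, the condition-variable dance previously needed to drain the idle cache disappears entirely.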
b63c191a5a115fa64a9efd031c85e66b40b7dfbc
2025-02-11 18:55:02
Ashwanth
docs(thanos): add migration doc for thanos storage clients (#16133)
false
diff --git a/docs/sources/configure/examples/_index.md b/docs/sources/configure/examples/_index.md
index 96a00ec3d076a..d4928ef2e113e 100644
--- a/docs/sources/configure/examples/_index.md
+++ b/docs/sources/configure/examples/_index.md
@@ -10,4 +10,5 @@ weight: 800
 The following pages contain examples of how to configure Grafana Loki.
 
 - [Configuration snippets and ready-to-use configuration examples]({{< relref "./configuration-examples" >}}).
-- [Deploy a query frontend on a existing cluster]({{< relref "./query-frontend" >}}).
\ No newline at end of file
+- [Deploy a query frontend on an existing cluster]({{< relref "./query-frontend" >}}).
+- [Configuration examples for using Thanos-based storage clients](./thanos-storage-configs).
diff --git a/docs/sources/configure/examples/thanos-storage-configs.md b/docs/sources/configure/examples/thanos-storage-configs.md
new file mode 100644
index 0000000000000..b24ba9698b7ae
--- /dev/null
+++ b/docs/sources/configure/examples/thanos-storage-configs.md
@@ -0,0 +1,79 @@
+---
+title: "Configuration examples for using Thanos-based storage clients"
+menuTitle: Thanos storage examples
+description: "Minimal examples for using Thanos-based S3, GCS, Azure, and filesystem clients in Grafana Loki."
+weight: 100
+---
+
+# Configuration examples for using Thanos-based storage clients
+
+Use these examples as a starting point for configuring [Thanos-based object storage clients](https://grafana.com/docs/loki/<LOKI_VERSION>/configure/#thanos_object_store_config) in Grafana Loki.
+
+## GCS example
+```yaml
+storage_config:
+  use_thanos_objstore: true
+  object_store:
+    gcs:
+      bucket_name: my-gcs-bucket
+
+      # JSON either from a Google Developers Console client_credentials.json file,
+      # or a Google Developers service account key. Needs to be valid JSON, not a
+      # filesystem path. If empty, fallback to Google default logic:
+      # 1. A JSON file whose path is specified by the GOOGLE_APPLICATION_CREDENTIALS
+      # environment variable. For workload identity federation, refer to
+      # https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation
+      # on how to generate the JSON configuration file for on-prem/non-Google cloud
+      # platforms.
+      # 2. A JSON file in a location known to the gcloud command-line tool:
+      # $HOME/.config/gcloud/application_default_credentials.json.
+      # 3. On Google Compute Engine it fetches credentials from the metadata server.
+      service_account: |-
+        {
+          "type": "service_account",
+          "project_id": "project",
+          "private_key_id": "abcdefghijklmnopqrstuvwxyz12345678906666",
+          "private_key": "-----BEGIN PRIVATE KEY-----\...\n-----END PRIVATE KEY-----\n",
+          "client_email": "[email protected]",
+          "client_id": "123456789012345678901",
+          "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+          "token_uri": "https://oauth2.googleapis.com/token",
+          "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+          "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/[email protected]"
+        }
+```
+
+## S3 example
+```yaml
+storage_config:
+  use_thanos_objstore: true
+  object_store:
+    s3:
+      bucket_name: my-s3-bucket
+      endpoint: s3.us-west-2.amazonaws.com
+      region: us-west-2
+      # You can either declare the access key and secret in the config or
+      # use environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY which will be picked up by the AWS SDK. 
+ access_key_id: access-key-id + secret_access_key: secret-access-key +``` + +## Azure example +```yaml +storage_config: + use_thanos_objstore: true + object_store: + azure: + account_name: myaccount + account_key: ${SECRET_ACCESS_KEY} # loki expands environment variables + container_name: example-container +``` + +## Filesystem example +```yaml +storage_config: + use_thanos_objstore: true + object_store: + filesystem: + dir: /var/loki/chunks +``` diff --git a/docs/sources/setup/migrate/_index.md b/docs/sources/setup/migrate/_index.md index 034791b38040d..f2982d66ddfa8 100644 --- a/docs/sources/setup/migrate/_index.md +++ b/docs/sources/setup/migrate/_index.md @@ -12,3 +12,4 @@ This section contains instructions for migrating from one Loki implementation to - [Migrate]({{< relref "./migrate-to-tsdb" >}}) to TSDB index. - [Migrate]({{< relref "./migrate-from-distributed" >}}) from the `Loki-distributed` Helm chart to the `loki` Helm chart. - [Migrate]({{< relref "./migrate-to-three-scalable-targets" >}}) from the two target Helm chart to the three target scalable configuration Helm chart. +- [Migrate]({{< relref "./migrate-storage-clients" >}}) from the legacy storage clients to the Thanos object storage client. \ No newline at end of file diff --git a/docs/sources/setup/migrate/migrate-storage-clients/_index.md b/docs/sources/setup/migrate/migrate-storage-clients/_index.md new file mode 100644 index 0000000000000..fad6d434eb1b6 --- /dev/null +++ b/docs/sources/setup/migrate/migrate-storage-clients/_index.md @@ -0,0 +1,233 @@ +--- +title: Migrate to Thanos storage clients +menuTitle: Migrate to Thanos storage clients +description: Migration guide for moving from existing storage clients to Thanos storage clients. +weight: +--- +# Migrate to Thanos storage clients + +Loki release 3.4 introduces new object storage clients based on the [Thanos Object Storage Client Go module](https://github.com/thanos-io/objstore). + +One of the reasons for making this change is to have a consistent storage configuration across Grafana Loki, Mimir and other telemetry databases from Grafana Labs. If you are already using Grafana Mimir or Pyroscope, you can reuse the storage configuration for setting up Loki. + +This is an opt-in feature with the Loki 3.4 release. In a future release, Thanos will become the default way of configuring storage and the existing storage clients will be deprecated. + +{{< admonition type="note" >}} +The new storage configuration deviates from the existing format. The following sections describe the changes in detail for each provider. +Refer to the [Thanos storage configuration reference](https://grafana.com/docs/loki/<LOKI_VERSION>/configure/#thanos_object_store_config) to view the complete list of supported storage providers and their configuration options. +{{< /admonition >}} + +### Enable the new storage clients + +1. Enable Thanos storage clients by setting `use_thanos_objstore` to `true` in the `storage_config` section or by setting the `-use-thanos-objstore` flag to true. When enabled, configuration under `storage_config.object_store` takes effect instead of existing storage configurations. + + ```yaml + # Uses the new storage clients for connecting to gcs backend + storage_config: + use_thanos_objstore: true # enable the new storage clients + object_store: + gcs: + bucket_name: "example-bucket" + ``` + +1. As an alternative, you can also configure the new clients in the common `storage` section if you prefer to use the `common` config section. 
+
+   ```yaml
+   storage_config:
+     use_thanos_objstore: true # enable the new storage clients
+   common:
+     storage:
+       object_store:
+         gcs:
+           bucket_name: "example-bucket"
+   ```
+
+1. Ruler storage should be configured under the `ruler_storage` section when using the new storage clients.
+
+   ```yaml
+   storage_config:
+     use_thanos_objstore: true # enable the new storage clients
+   ruler_storage:
+     backend: gcs
+     gcs:
+       bucket_name: "example-bucket"
+   ```
+
+1. If you are using the `store.object-prefix` flag or the corresponding `object_prefix` YAML setting, you'll need to update your configuration to use the new `object_store.storage-prefix` flag or the corresponding `storage_prefix` YAML setting.
+
+   ```yaml
+   # Example configuration to prefix all objects with "prefix"
+   storage_config:
+     use_thanos_objstore: true # enable the new storage clients
+     object_store:
+       storage_prefix: "prefix"
+   ```
+
+### GCS Storage Migration
+
+When migrating from the existing [Google Cloud Storage (GCS)](https://grafana.com/docs/loki/<LOKI_VERSION>/configure/#gcs_storage_config) storage client to the new Thanos-based client, you'll need to update your configuration parameters as follows:
+
+{{< responsive-table >}}
+| Existing Parameter  | New Parameter         | Required Changes                                                                                               |
+|---------------------|-----------------------|----------------------------------------------------------------------------------------------------------------|
+| `bucket_name`       | `bucket_name`         | No changes required |
+| `service_account`   | `service_account`     | No changes required |
+| `chunk_buffer_size` | `chunk_buffer_size`   | No changes required |
+| `enable_retries`    | `max_retries`         | Replace `enable_retries` (bool) with `max_retries` (int). Set a value > 1 to enable retries, or 1 to disable |
+| `request_timeout`   | Removed               | Remove parameter |
+| `enable_opencensus` | Removed               | Remove parameter |
+| `enable_http2`      | Removed               | Remove parameter |
+{{< /responsive-table >}}
+
+**Example configuration migration (GCS):**
+
+_**Existing configuration:**_
+
+```yaml
+storage_config:
+  gcs:
+    bucket_name: example-bucket
+    chunk_buffer_size: 10MB
+    enable_retries: true
+```
+
+_**New configuration**_ (Thanos-based):
+
+```yaml
+storage_config:
+  use_thanos_objstore: true
+  object_store:
+    gcs:
+      bucket_name: example-bucket
+      chunk_buffer_size: 10MB
+      max_retries: 5
+```
+
+### Amazon S3 Storage Migration
+
+When migrating from the existing [Amazon S3](https://grafana.com/docs/loki/<LOKI_VERSION>/configure/#aws_storage_config) storage client to the new Thanos-based client, update or remove parameters as follows:
+
+{{< responsive-table >}}
+| Existing Parameter  | New Parameter         | Required Changes                                                                                                                     |
+|---------------------|-----------------------|--------------------------------------------------------------------------------------------------------------------------------------|
+| `bucket_names`      | `bucket_name`         | Rename this parameter. If you previously used multiple buckets, you must consolidate to a single bucket (Thanos supports only one).  |
+| `endpoint`          | `endpoint`            | No changes required. |
+| `region`            | `region`              | No changes required. |
+| `access_key_id`     | `access_key_id`       | No changes required. |
+| `secret_access_key` | `secret_access_key`   | No changes required. |
+| `session_token`     | `session_token`       | No changes required. |
+| `insecure`          | `insecure`            | No changes required. |
+| `disable_dualstack` | `dualstack_enabled`   | Renamed and inverted. If you had `disable_dualstack: false`, set `dualstack_enabled: true`. |
+| `storage_class`     | `storage_class`       | No changes required. |
+| `s3`                | Removed               | Remove or replace with `endpoint` if you used the URL-based setup. |
+| `S3ForcePathStyle`  | Removed or replaced   | If you need path-based addressing, set `bucket_lookup_type: path` in the new config. Otherwise, remove it. |
+| `signature_version` | Removed               | Remove parameter. Thanos always uses Signature Version 4 (V4). |
+| `http_config`       | `http`                | Move subfields (such as timeouts, CA file, etc.) into the `http:` block in the Thanos configuration. |
+| `sse`               | `sse`                 | Migrate any SSE settings (e.g., `type`, `kms_key_id`) into the `sse:` block in the Thanos configuration. |
+| `backoff_config`    | `max_retries`         | Replace the advanced backoff settings with a single integer (`max_retries`). Set to 1 to disable retries, or a higher value to enable them. |
+{{< /responsive-table >}}
+
+**Example configuration migration (S3):**
+
+_**Existing configuration**_
+
+```yaml
+storage_config:
+  aws:
+    bucket_names: my-bucket1,my-bucket2 # multiple buckets no longer supported
+    endpoint: s3.amazonaws.com
+    region: us-west-2
+    access_key_id: example-key
+    secret_access_key: example-secret
+    signature_version: v4
+    disable_dualstack: true
+    storage_class: STANDARD
+    http_config:
+      timeout: 1m
+      insecure_skip_verify: false
+      # ...
+    backoff_config:
+      max_retries: 5
+    sse:
+      type: SSE-KMS
+      kms_key_id: mySSEKey
+```
+
+_**New configuration** (Thanos-based)_
+
+```yaml
+storage_config:
+  use_thanos_objstore: true
+  object_store:
+    s3:
+      bucket_name: my-bucket1 # single bucket
+      endpoint: s3.amazonaws.com
+      region: us-west-2
+      access_key_id: example-key
+      secret_access_key: example-secret
+      dualstack_enabled: false # was disable_dualstack: true
+      storage_class: STANDARD
+      max_retries: 5
+      http:
+        insecure_skip_verify: false
+      sse:
+        type: SSE-KMS
+        kms_key_id: mySSEKey
+```
+
+For more advanced configuration options (such as `list_objects_version`, `bucket_lookup_type`, etc.), see the [Thanos S3 configuration reference](https://grafana.com/docs/loki/<LOKI_VERSION>/configure/#thanos_object_store_config).
+
+### Azure Storage Migration
+
+When migrating from the existing [Azure](https://grafana.com/docs/loki/<LOKI_VERSION>/configure/#azure_storage_config) storage client to the new Thanos-based client, no changes are required if you are using the following parameters:
+
+{{< responsive-table >}}
+| Existing Parameter | New Parameter | Required Changes |
+|-----------------|---------------|------------------|
+| `account_name` | `account_name` | No changes required |
+| `account_key` | `account_key` | No changes required |
+| `container_name` | `container_name` | No changes required |
+| `endpoint_suffix` | `endpoint_suffix` | No changes required |
+| `user_assigned_id` | `user_assigned_id` | No changes required |
+| `connection_string` | `connection_string` | No changes required |
+| `max_retries` | `max_retries` | No changes required |
+| `chunk_delimiter` | `chunk_delimiter` | No changes required |
+{{< /responsive-table >}}
+
+If you are using an authentication method other than storage account key or user-assigned managed identity, you'll have to pass the necessary credentials using environment variables.
+For more details, refer to [Azure Identity Client Module for Go](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity). 
+ +### Filesystem Storage Migration + +When migrating from the existing [Filesystem storage](https://grafana.com/docs/loki/<LOKI_VERSION>/configure/#local_storage_config) +client to the new Thanos-based client, update or remove parameters as follows: + +{{< responsive-table >}} +| Existing Parameter | New Parameter | Required Changes | +|-----------------|---------------|-------------------------------| +| `directory` | `dir` | Rename `directory` to `dir`. | +{{< /responsive-table >}} + +**Example configuration migration (Filesystem):** + +_**Existing configuration** (`FSConfig`)_ + +```yaml +storage_config: + filesystem: + directory: /var/loki/chunks +``` + +_**New configuration** (Thanos-based)_ + +```yaml +storage_config: + use_thanos_objstore: true + object_store: + filesystem: + dir: /var/loki/chunks +``` + +{{< admonition type="note" >}} +For providers not listed here, refer to the [Thanos storage configuration reference](https://grafana.com/docs/loki/<LOKI_VERSION>/configure/#thanos_object_store_config). +{{< /admonition >}} \ No newline at end of file
docs
add migration doc for thanos storage clients (#16133)
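Several of the renames in the migration tables above are purely mechanical: `bucket_names` becomes a single `bucket_name`, `disable_dualstack` becomes `dualstack_enabled` with the boolean inverted, and `backoff_config.max_retries` is flattened into `max_retries`. A rough Go sketch of that mapping, using simplified, hypothetical stand-in structs rather than Loki's real configuration types:

```go
package main

import "fmt"

// Hypothetical, simplified views of the legacy aws options and the new
// Thanos-style s3 options; the real Loki structs have many more fields.
type legacyS3 struct {
	BucketNames      []string // bucket_names (comma-separated in YAML)
	DisableDualstack bool     // disable_dualstack
	BackoffRetries   int      // backoff_config.max_retries
}

type thanosS3 struct {
	BucketName       string // bucket_name
	DualstackEnabled bool   // dualstack_enabled
	MaxRetries       int    // max_retries
}

// migrate applies the renames and inversions described in the tables above.
func migrate(old legacyS3) (thanosS3, error) {
	// The Thanos-based client takes exactly one bucket.
	if len(old.BucketNames) != 1 {
		return thanosS3{}, fmt.Errorf("consolidate to a single bucket, got %d", len(old.BucketNames))
	}
	return thanosS3{
		BucketName:       old.BucketNames[0],
		DualstackEnabled: !old.DisableDualstack, // renamed and inverted
		MaxRetries:       old.BackoffRetries,    // flattened from backoff_config
	}, nil
}

func main() {
	out, err := migrate(legacyS3{
		BucketNames:      []string{"my-bucket1"},
		DisableDualstack: true,
		BackoffRetries:   5,
	})
	fmt.Printf("%+v err=%v\n", out, err)
}
```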
e11b244a8bcbc69d6829d31fb164dc43d505068e
2024-08-29 20:15:46
renovate[bot]
fix(deps): update module github.com/hashicorp/consul/api to v1.29.4 (#14002)
false
diff --git a/go.mod b/go.mod
index 3de9cb1460c2b..2857590e3857f 100644
--- a/go.mod
+++ b/go.mod
@@ -56,7 +56,7 @@ require (
 	github.com/grafana/tail v0.0.0-20230510142333-77b18831edf0
 	github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
 	github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645
-	github.com/hashicorp/consul/api v1.29.2
+	github.com/hashicorp/consul/api v1.29.4
 	github.com/hashicorp/golang-lru v0.6.0
 	github.com/imdario/mergo v0.3.16
 	github.com/influxdata/telegraf v1.16.3
diff --git a/go.sum b/go.sum
index ed75525e6a7a4..b889a88da3a17 100644
--- a/go.sum
+++ b/go.sum
@@ -1092,8 +1092,8 @@ github.com/hashicorp/consul-awsauth v0.0.0-20220713182709-05ac1c5c2706/go.mod h1
 github.com/hashicorp/consul-net-rpc v0.0.0-20220307172752-3602954411b4/go.mod h1:vWEAHAeAqfOwB3pSgHMQpIu8VH1jL+Ltg54Tw0wt/NI=
 github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
 github.com/hashicorp/consul/api v1.18.0/go.mod h1:owRRGJ9M5xReDC5nfT8FTJrNAPbT4NM6p/k+d03q2v4=
-github.com/hashicorp/consul/api v1.29.2 h1:aYyRn8EdE2mSfG14S1+L9Qkjtz8RzmaWh6AcNGRNwPw=
-github.com/hashicorp/consul/api v1.29.2/go.mod h1:0YObcaLNDSbtlgzIRtmRXI1ZkeuK0trCBxwZQ4MYnIk=
+github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE=
+github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg=
 github.com/hashicorp/consul/proto-public v0.2.1/go.mod h1:iWNlBDJIZQJC3bBiCThoqg9i7uk/4RQZYkqH1wiQrss=
 github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0=
 github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_mesh.go b/vendor/github.com/hashicorp/consul/api/config_entry_mesh.go
index 1a1ebb8b536bc..e035d15967783 100644
--- a/vendor/github.com/hashicorp/consul/api/config_entry_mesh.go
+++ b/vendor/github.com/hashicorp/consul/api/config_entry_mesh.go
@@ -26,6 +26,14 @@ type MeshConfigEntry struct {
 	// MutualTLSMode=permissive in either service-defaults or proxy-defaults.
 	AllowEnablingPermissiveMutualTLS bool `json:",omitempty" alias:"allow_enabling_permissive_mutual_tls"`
 
+	// ValidateClusters controls whether the clusters the route table refers to are validated. The default value is
+	// false. When set to false and a route refers to a cluster that does not exist, the route table loads and routing
+	// to a non-existent cluster results in a 404. When set to true and a route refers to a cluster that does not exist,
+	// the route table will not load. For more details, refer to
+	// [HTTP route configuration](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route.proto#envoy-v3-api-field-config-route-v3-routeconfiguration-validate-clusters)
+	// in the Envoy documentation.
+	ValidateClusters bool `json:",omitempty" alias:"validate_clusters"`
+
 	TLS *MeshTLSConfig `json:",omitempty"`
 
 	HTTP *MeshHTTPConfig `json:",omitempty"`
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 6e123ca45804a..a6444780a683d 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1068,7 +1068,7 @@ github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc
 # github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed
 ## explicit
 github.com/hailocab/go-hostpool
-# github.com/hashicorp/consul/api v1.29.2
+# github.com/hashicorp/consul/api v1.29.4
 ## explicit; go 1.19
 github.com/hashicorp/consul/api
 # github.com/hashicorp/errwrap v1.1.0
fix
update module github.com/hashicorp/consul/api to v1.29.4 (#14002)
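The only substantive API change in this bump is the new `ValidateClusters` field on `api.MeshConfigEntry`. Assuming a reachable Consul agent, a consumer might opt in roughly as follows; this is a sketch, not an officially documented recipe:

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Opt in to Envoy route-table cluster validation: with ValidateClusters
	// set, a route referencing a missing cluster keeps the route table from
	// loading, instead of surfacing 404s at request time.
	entry := &api.MeshConfigEntry{
		ValidateClusters: true,
	}
	if _, _, err := client.ConfigEntries().Set(entry, nil); err != nil {
		log.Fatal(err)
	}
}
```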
740551bb31e0c1806de8d87f02fa4f507aa24092
2024-06-07 17:22:53
benclive
fix: Correctly encode step when translating proto to http internally (#13171)
false
diff --git a/pkg/logproto/compat.go b/pkg/logproto/compat.go index a11467584b58f..4a296fd8e43b6 100644 --- a/pkg/logproto/compat.go +++ b/pkg/logproto/compat.go @@ -506,6 +506,33 @@ func (m *ShardsRequest) LogToSpan(sp opentracing.Span) { sp.LogFields(fields...) } +func (m *DetectedFieldsRequest) GetCachingOptions() (res definitions.CachingOptions) { return } + +func (m *DetectedFieldsRequest) WithStartEnd(start, end time.Time) definitions.Request { + clone := *m + clone.Start = start + clone.End = end + return &clone +} + +func (m *DetectedFieldsRequest) WithQuery(query string) definitions.Request { + clone := *m + clone.Query = query + return &clone +} + +func (m *DetectedFieldsRequest) LogToSpan(sp opentracing.Span) { + fields := []otlog.Field{ + otlog.String("query", m.GetQuery()), + otlog.String("start", m.Start.String()), + otlog.String("end", m.End.String()), + otlog.String("step", time.Duration(m.Step).String()), + otlog.String("field_limit", fmt.Sprintf("%d", m.FieldLimit)), + otlog.String("line_limit", fmt.Sprintf("%d", m.LineLimit)), + } + sp.LogFields(fields...) +} + func (m *QueryPatternsRequest) GetCachingOptions() (res definitions.CachingOptions) { return } func (m *QueryPatternsRequest) WithStartEnd(start, end time.Time) definitions.Request { @@ -534,3 +561,33 @@ func (m *QueryPatternsRequest) LogToSpan(sp opentracing.Span) { } sp.LogFields(fields...) } + +func (m *DetectedLabelsRequest) GetStep() int64 { return 0 } + +func (m *DetectedLabelsRequest) GetCachingOptions() (res definitions.CachingOptions) { return } + +func (m *DetectedLabelsRequest) WithStartEnd(start, end time.Time) definitions.Request { + clone := *m + clone.Start = start + clone.End = end + return &clone +} + +func (m *DetectedLabelsRequest) WithQuery(query string) definitions.Request { + clone := *m + clone.Query = query + return &clone +} + +func (m *DetectedLabelsRequest) WithStartEndForCache(start, end time.Time) resultscache.Request { + return m.WithStartEnd(start, end).(resultscache.Request) +} + +func (m *DetectedLabelsRequest) LogToSpan(sp opentracing.Span) { + fields := []otlog.Field{ + otlog.String("query", m.GetQuery()), + otlog.String("start", m.Start.String()), + otlog.String("end", m.End.String()), + } + sp.LogFields(fields...) 
+} diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go index 7bab6f6c5d054..d1d93b7bcfdfe 100644 --- a/pkg/querier/queryrange/codec.go +++ b/pkg/querier/queryrange/codec.go @@ -919,7 +919,10 @@ func (c Codec) EncodeRequest(ctx context.Context, r queryrangebase.Request) (*ht "end": []string{fmt.Sprintf("%d", request.End.UnixNano())}, "line_limit": []string{fmt.Sprintf("%d", request.GetLineLimit())}, "field_limit": []string{fmt.Sprintf("%d", request.GetFieldLimit())}, - "step": []string{fmt.Sprintf("%d", request.GetStep())}, + } + + if request.Step != 0 { + params["step"] = []string{fmt.Sprintf("%f", float64(request.Step)/float64(1e3))} } u := &url.URL{ @@ -940,7 +943,10 @@ func (c Codec) EncodeRequest(ctx context.Context, r queryrangebase.Request) (*ht "query": []string{request.GetQuery()}, "start": []string{fmt.Sprintf("%d", request.Start.UnixNano())}, "end": []string{fmt.Sprintf("%d", request.End.UnixNano())}, - "step": []string{fmt.Sprintf("%d", request.GetStep())}, + } + + if request.Step != 0 { + params["step"] = []string{fmt.Sprintf("%f", float64(request.Step)/float64(1e3))} } u := &url.URL{ diff --git a/pkg/querier/queryrange/codec_test.go b/pkg/querier/queryrange/codec_test.go index af047d8e84e1e..f5d00e263d0f6 100644 --- a/pkg/querier/queryrange/codec_test.go +++ b/pkg/querier/queryrange/codec_test.go @@ -10,7 +10,7 @@ import ( "net/http/httptest" "net/url" "strconv" - strings "strings" + "strings" "testing" "time" @@ -202,6 +202,57 @@ func Test_codec_EncodeDecodeRequest(t *testing.T) { Step: 30 * 1e3, // step is expected in ms; default is 0 or no step AggregateBy: "series", }, false}, + {"detected_fields", func() (*http.Request, error) { + return DefaultCodec.EncodeRequest(ctx, &DetectedFieldsRequest{ + logproto.DetectedFieldsRequest{ + Query: `{foo="bar"}`, + Start: start, + End: end, + Step: 30 * 1e3, // step is expected in ms; default is 0 or no step + LineLimit: 100, + FieldLimit: 100, + }, + "/loki/api/v1/detected_fields", + }) + }, &DetectedFieldsRequest{ + logproto.DetectedFieldsRequest{ + Query: `{foo="bar"}`, + Start: start, + End: end, + Step: 30 * 1e3, // step is expected in ms; default is 0 or no step + LineLimit: 100, + FieldLimit: 100, + }, + "/loki/api/v1/detected_fields", + }, false}, + {"patterns", func() (*http.Request, error) { + return DefaultCodec.EncodeRequest(ctx, &logproto.QueryPatternsRequest{ + Start: start, + End: end, + Step: 30 * 1e3, // step is expected in ms + }) + }, &logproto.QueryPatternsRequest{ + Start: start, + End: end, + Step: 30 * 1e3, // step is expected in ms; default is 0 or no step + }, false}, + {"detected_labels", func() (*http.Request, error) { + return DefaultCodec.EncodeRequest(ctx, &DetectedLabelsRequest{ + "/loki/api/v1/detected_labels", + logproto.DetectedLabelsRequest{ + Query: `{foo="bar"}`, + Start: start, + End: end, + }, + }) + }, &DetectedLabelsRequest{ + "/loki/api/v1/detected_labels", + logproto.DetectedLabelsRequest{ + Query: `{foo="bar"}`, + Start: start, + End: end, + }, + }, false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) {
fix
Correctly encode step when translating proto to http internally (#13171)
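The core of this fix is a unit conversion: the proto request carries `Step` in milliseconds, but Loki's HTTP API expects `step` in seconds, so formatting the raw value with `%d` inflated the step by a factor of 1000. A small stand-alone sketch of the corrected encoding — the `encodeStep` helper is illustrative, not a function in the codec itself:

```go
package main

import "fmt"

// encodeStep renders a step carried internally in milliseconds as the
// seconds-based float value Loki's HTTP API expects, mirroring the
// float64(request.Step)/float64(1e3) conversion in the codec change above.
// A zero step is omitted so the server can apply its default.
func encodeStep(stepMS int64) (string, bool) {
	if stepMS == 0 {
		return "", false
	}
	return fmt.Sprintf("%f", float64(stepMS)/1e3), true
}

func main() {
	if v, ok := encodeStep(30 * 1e3); ok {
		fmt.Println("step =", v) // step = 30.000000
	}
	if _, ok := encodeStep(0); !ok {
		fmt.Println("step omitted, server default applies")
	}
}
```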
38b01c974771244d1ee254ae414ccce0a8203e04
2024-09-30 22:15:04
Trevor Whitney
ci: update loki version in helm chart weekly and on Loki release (#14290)
false
diff --git a/.github/workflows/helm-tagged-release-pr.yaml b/.github/workflows/helm-tagged-release-pr.yaml
new file mode 100644
index 0000000000000..1a5e6bdeccff8
--- /dev/null
+++ b/.github/workflows/helm-tagged-release-pr.yaml
@@ -0,0 +1,35 @@
+name: helm-tagged-release-pr
+
+on:
+  release:
+    types:
+      - released
+
+jobs:
+  tagged-release-pr:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - id: "get_github_app_token"
+        name: "get github app token"
+        uses: "actions/create-github-app-token@v1"
+        with:
+          app-id: "${{ secrets.APP_ID }}"
+          owner: "${{ github.repository_owner }}"
+          private-key: "${{ secrets.APP_PRIVATE_KEY }}"
+
+      - name: Update/regenerate files
+        id: update
+        run: bash .github/workflows/scripts/helm-tagged-release.sh ${{ github.event.release.tag_name }}
+
+      - name: Create Pull Request
+        uses: peter-evans/create-pull-request@v5
+        with:
+          token: ${{ steps.get_github_app_token.outputs.token }}
+          title: Release loki Helm chart ${{ steps.update.outputs.new_chart_version }}
+          body: Automated PR created by [helm-tagged-release-pr.yaml](https://github.com/grafana/loki/blob/main/.github/workflows/helm-tagged-release-pr.yaml)
+          commit-message: Update loki chart to ${{ steps.update.outputs.new_chart_version }}
+          branch: helm-chart-tagged-${{ steps.update.outputs.new_chart_version }}
+          base: main
+          labels: helm
diff --git a/.github/workflows/helm-weekly-release-pr.yaml b/.github/workflows/helm-weekly-release-pr.yaml
new file mode 100644
index 0000000000000..7ac88b7b95841
--- /dev/null
+++ b/.github/workflows/helm-weekly-release-pr.yaml
@@ -0,0 +1,37 @@
+name: helm-weekly-release-pr
+
+on:
+  schedule:
+    - cron: '0 10 * * 1-5' # 10 UTC on weekdays; if we miss published images one day, they should align the day after
+
+  workflow_dispatch: # for manual testing
+
+jobs:
+  weekly-release-pr:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: imjasonh/[email protected]
+
+      - id: "get_github_app_token"
+        name: "get github app token"
+        uses: "actions/create-github-app-token@v1"
+        with:
+          app-id: "${{ secrets.APP_ID }}"
+          owner: "${{ github.repository_owner }}"
+          private-key: "${{ secrets.APP_PRIVATE_KEY }}"
+
+      - name: Update/regenerate files
+        id: update
+        run: bash .github/workflows/scripts/helm-weekly-release.sh
+
+      - name: Create Pull Request
+        uses: peter-evans/create-pull-request@v5
+        with:
+          token: ${{ steps.get_github_app_token.outputs.token }}
+          title: Release loki Helm chart ${{ steps.update.outputs.new_chart_version }}
+          body: Automated PR created by [helm-weekly-release-pr.yaml](https://github.com/grafana/loki/blob/main/.github/workflows/helm-weekly-release-pr.yaml)
+          commit-message: Update loki chart to ${{ steps.update.outputs.new_chart_version }}
+          branch: helm-chart-weekly-${{ steps.update.outputs.new_chart_version }}
+          base: main
+          labels: helm
diff --git a/.github/workflows/scripts/common.sh b/.github/workflows/scripts/common.sh
new file mode 100644
index 0000000000000..b5cba118af718
--- /dev/null
+++ b/.github/workflows/scripts/common.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+
+set -exo pipefail
+
+# This generates a new file where the yaml node is updated.
+# The problem is that yq strips new lines when you update the file.
+# So we use a workaround from https://github.com/mikefarah/yq/issues/515 which:
+# generates the new file, diffs it with the original, removes all non-whitespace changes, and applies that to the original file. 
+update_yaml_node() {
+  local filename=$1
+  local yaml_node=$2
+  local new_value=$3
+  patch "${filename}" <<<"$(diff -U0 -w -b --ignore-blank-lines "${filename}" <(yq eval "${yaml_node} = \"${new_value}\"" "${filename}"))"
+}
+
+get_yaml_node() {
+  local filename=$1
+  local yaml_node=$2
+  yq "${yaml_node}" "${filename}"
+}
+
+# Increments the part of the semver string
+# $1: version itself
+# $2: number of part: 0 – major, 1 – minor, 2 – patch
+increment_semver() {
+  local delimiter=.
+  local array=($(echo "$1" | tr "${delimiter}" '\n'))
+  array[$2]=$((array[$2] + 1))
+  echo "$(
+    local IFS=${delimiter}
+    echo "${array[*]}"
+  )"
+}
+
+# Sets the patch segment of a semver to 0
+# $1: version itself
+set_semver_patch_to_zero() {
+  local delimiter=.
+  local array=($(echo "$1" | tr "${delimiter}" '\n'))
+  array[2]="0"
+  echo "$(
+    local IFS=${delimiter}
+    echo "${array[*]}"
+  )"
+}
diff --git a/.github/workflows/scripts/helm-tagged-release.sh b/.github/workflows/scripts/helm-tagged-release.sh
new file mode 100755
index 0000000000000..fd6c06f520d04
--- /dev/null
+++ b/.github/workflows/scripts/helm-tagged-release.sh
@@ -0,0 +1,54 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: AGPL-3.0-only
+
+set -exo pipefail
+
+script_dir=$(cd "$(dirname "$0")" && pwd)
+# shellcheck disable=SC2250,SC1091
+source "${script_dir}/common.sh"
+
+calculate_next_chart_version() {
+  local current_chart_version=$1
+
+  local current_chart_semver
+  current_chart_semver="$(echo "${current_chart_version}" | grep -P -o '^(\d+.){2}\d+')"
+  local new_chart_semver="${current_chart_semver}"
+  new_chart_semver=$(increment_semver "${current_chart_semver}" 1)
+  new_chart_semver=$(set_semver_patch_to_zero "${new_chart_semver}")
+  echo "${new_chart_semver}"
+}
+
+validate_version_update() {
+  local new_chart_version=$1
+  local current_chart_version=$2
+  local latest_loki_tag=$3
+
+  if [[ "${new_chart_version}" == "${current_chart_version}" ]]; then
+    echo "New chart version (${new_chart_version}) is the same as current version (${current_chart_version}); not submitting PR"
+    exit 1
+  fi
+}
+
+latest_loki_tag=$(sed -E "s/v(.*)/\1/g" <<<"$1")
+
+values_file=production/helm/loki/values.yaml
+chart_file=production/helm/loki/Chart.yaml
+
+current_chart_version=$(get_yaml_node "${chart_file}" .version)
+new_chart_version=$(calculate_next_chart_version "${current_chart_version}")
+
+validate_version_update "${new_chart_version}" "${current_chart_version}" "${latest_loki_tag}"
+
+update_yaml_node "${values_file}" .loki.image.tag "${latest_loki_tag}"
+
+update_yaml_node "${values_file}" .enterprise.image.tag "${latest_loki_tag}"
+update_yaml_node "${chart_file}" .appVersion "${latest_loki_tag}"
+update_yaml_node "${chart_file}" .version "${new_chart_version}"
+
+sed --in-place \
+  --regexp-extended \
+  "s/(.*\<AUTOMATED_UPDATES_LOCATOR\>.*)/\1\n\n## ${new_chart_version}\n\n- \[CHANGE\] Changed version of Grafana Loki to ${latest_loki_tag}/g" production/helm/loki/CHANGELOG.md
+
+make TTY='' helm-docs
+
+echo "::set-output name=new_chart_version::${new_chart_version}"
diff --git a/.github/workflows/scripts/helm-weekly-release.sh b/.github/workflows/scripts/helm-weekly-release.sh
new file mode 100755
index 0000000000000..64d5f29f4557f
--- /dev/null
+++ b/.github/workflows/scripts/helm-weekly-release.sh
@@ -0,0 +1,84 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: AGPL-3.0-only
+
+set -exo pipefail
+
+script_dir=$(cd "$(dirname "$0")" && pwd)
+# shellcheck disable=SC2250,SC1091
+source "${script_dir}/common.sh"
+
+# Uses docker hub image tags to figure out what the latest image tag is
+find_latest_image_tag() {
+  local docker_hub_repo=$1
+  local regExp="^(k|weekly-k)\d+-[a-z0-9]+"
+  crane ls "${docker_hub_repo}" | grep -P "${regExp}" | sed -E "s/([weekly-]*k[[:digit:]]*)-([^-]*).*/\1-\2/g" | uniq | sort -Vur | head -1
+}
+
+# takes k197-abcdef and returns k197, k197-abcdef-arm64 and returns k197, weekly-k197-abcdef and returns k197
+extract_k_version() {
+  sed -E "s/[weekly-]*(k[[:digit:]]*).*/\1/g" <<<"$1"
+}
+
+calculate_next_chart_version() {
+  local current_chart_version=$1
+  local latest_image_tag=$2
+
+  local current_chart_semver
+  current_chart_semver=$(echo "${current_chart_version}" | grep -P -o '^(\d+.){2}\d+')
+  local new_chart_weekly
+  new_chart_weekly=$(extract_k_version "${latest_image_tag}" | grep -P -o '\d+')
+  local new_chart_semver="${current_chart_semver}"
+  if [[ "${current_chart_version}" != *weekly* ]]; then
+    # If previous version was not a weekly, then it was a stable release.
+    # _This_ weekly release should have a semver that's one above the stable release.
+    new_chart_semver=$(increment_semver "${current_chart_semver}" 1)
+    # Also reset the patch release number to 0.
+    new_chart_semver=$(set_semver_patch_to_zero "${new_chart_semver}")
+  fi
+  echo "${new_chart_semver}-weekly.${new_chart_weekly}"
+}
+
+validate_version_update() {
+  local new_chart_version=$1
+  local current_chart_version=$2
+  local latest_gel_tag=$3
+  local latest_loki_tag=$4
+
+  if [[ "${new_chart_version}" == "${current_chart_version}" ]]; then
+    echo "New chart version (${new_chart_version}) is the same as current version (${current_chart_version}); not submitting weekly PR"
+    exit 1
+  fi
+
+  local gel_weekly_version
+  gel_weekly_version=$(extract_k_version "${latest_gel_tag}")
+  local loki_weekly_version
+  loki_weekly_version=$(extract_k_version "${latest_loki_tag}")
+  echo "Comparing GEL weekly version (${gel_weekly_version}) with Loki weekly version (${loki_weekly_version})"
+  if [[ "${gel_weekly_version}" != "${loki_weekly_version}" ]]; then
+    echo "GEL weekly version (${gel_weekly_version}) does not match Loki weekly version (${loki_weekly_version}); not submitting PR"
+    exit 1
+  fi
+}
+
+values_file=production/helm/loki/values.yaml
+chart_file=production/helm/loki/Chart.yaml
+
+latest_loki_tag=$(find_latest_image_tag grafana/loki)
+latest_gel_tag=$(find_latest_image_tag grafana/enterprise-logs)
+current_chart_version=$(get_yaml_node "${chart_file}" .version)
+new_chart_version=$(calculate_next_chart_version "${current_chart_version}" "${latest_loki_tag}")
+
+validate_version_update "${new_chart_version}" "${current_chart_version}" "${latest_gel_tag}" "${latest_loki_tag}"
+
+update_yaml_node "${values_file}" .loki.image.tag "${latest_loki_tag}"
+update_yaml_node "${values_file}" .enterprise.image.tag "${latest_gel_tag}"
+update_yaml_node "${chart_file}" .appVersion "$(extract_k_version "${latest_loki_tag}")"
+update_yaml_node "${chart_file}" .version "${new_chart_version}"
+
+sed --in-place \
+  --regexp-extended \
+  "s/(.*\<AUTOMATED_UPDATES_LOCATOR\>.*)/\1\n\n## ${new_chart_version}\n\n- \[CHANGE\] Changed version of Grafana Loki to ${latest_loki_tag}\n- \[CHANGE\] Changed version of Grafana Enterprise Logs to ${latest_gel_tag}/g" production/helm/loki/CHANGELOG.md
+
+make TTY='' helm-docs
+
+echo "::set-output name=new_chart_version::${new_chart_version}"
diff --git a/.github/workflows/verify-drone.yml b/.github/workflows/verify-drone.yml
deleted file mode 100644
index 04f3c05ff43f7..0000000000000
--- 
a/.github/workflows/verify-drone.yml +++ /dev/null @@ -1,52 +0,0 @@ -name: Verify drone updates -on: [pull_request] -jobs: - check-drone-changes: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Get changed files - # we need continue on error because the git diff | grep pipe can return a non-zero error code if no result is found - continue-on-error: true - id: changed-files - run: | - echo "changed_files=$(git diff --name-only main -- .drone/ | xargs)" >> $GITHUB_OUTPUT - git diff main .drone/ | grep "+hmac " - echo "sha_updated=$?" >> $GITHUB_OUTPUT - - name: Check that drone was updated properly - if: always() - run: | - jsonnetChanged=false - yamlChanged=false - echo "sha updated? ${{ steps.changed-files.outputs.sha_updated }}" - - # check whether the drone jsonnet and yaml files were updated - for file in ${{ steps.changed-files.outputs.changed_files }}; do - if [ "$file" == ".drone/drone.jsonnet" ]; then - echo "$file was changed" - jsonnetChanged=true - fi - if [ "$file" == ".drone/drone.yml" ]; then - echo "$file was changed" - yamlChanged=true - fi - done - - # if niether file was changed we're okay - if { [ "$yamlChanged" = false ] && [ "$jsonnetChanged" = false ]; } then - echo "neither file was changed" - exit 0 - fi - # if both files were changed then we should ensure that the sha in the yaml was also updated - if { [ "$yamlChanged" = true ] && [ "$jsonnetChanged" = true ]; } then - # annoyingly, the return value is a string - if [ "${{ steps.changed-files.outputs.sha_updated }}" = "0" ]; then - echo "both files were changed and sha was updated" - exit 0 - fi - echo "both drone yaml and jsonnet were updated but the sha in the yaml file was not updated" - exit 1 - fi - # only one of the two files was updated - echo "if one of the drone files (yaml or jsonnet) was changed then bothy files must be updated" - exit 1 \ No newline at end of file diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md index 4185c5eb31ebf..b74a559b12e7f 100644 --- a/docs/sources/setup/install/helm/reference.md +++ b/docs/sources/setup/install/helm/reference.md @@ -5962,7 +5962,7 @@ null <tr> <td>loki.image.tag</td> <td>string</td> - <td>Overrides the image tag whose default is the chart's appVersion TODO: needed for 3rd target backend functionality revert to null or latest once this behavior is relased</td> + <td>Overrides the image tag whose default is the chart's appVersion</td> <td><pre lang="json"> null </pre> diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml index 3f44de3b3724f..ab62323c95f86 100644 --- a/production/helm/loki/values.yaml +++ b/production/helm/loki/values.yaml @@ -50,8 +50,6 @@ loki: # -- Docker image repository repository: grafana/loki # -- Overrides the image tag whose default is the chart's appVersion - # TODO: needed for 3rd target backend functionality - # revert to null or latest once this behavior is relased tag: null # -- Overrides the image tag with an image digest digest: null
ci
update loki version in helm chart weekly and on Loki release (#14290)
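As a worked illustration of the chart-version arithmetic in the helm-weekly-release script above: a stable chart version such as 5.8.2 combined with a weekly image tag weekly-k197-abcdef yields 5.9.0-weekly.197, while an already-weekly version only swaps the weekly suffix. The Go sketch below mirrors that logic; it is not part of the repo, increment_semver is assumed here to bump the minor component (its definition is not shown), and all names and versions are illustrative.

package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

// extractKVersion mirrors extract_k_version: "weekly-k197-abcdef" -> "k197".
func extractKVersion(tag string) string {
	return regexp.MustCompile(`k\d+`).FindString(tag)
}

// nextChartVersion mirrors calculate_next_chart_version for well-formed
// "major.minor.patch" chart versions.
func nextChartVersion(current, latestTag string) string {
	weekly := strings.TrimPrefix(extractKVersion(latestTag), "k")
	semver := strings.SplitN(current, "-", 2)[0] // strip any "-weekly.N" suffix
	if !strings.Contains(current, "weekly") {
		// Previous release was stable: bump the semver (assumed: minor+1)
		// and reset the patch number to zero, as in the script.
		parts := strings.Split(semver, ".")
		minor, _ := strconv.Atoi(parts[1])
		semver = fmt.Sprintf("%s.%d.0", parts[0], minor+1)
	}
	return semver + "-weekly." + weekly
}

func main() {
	fmt.Println(nextChartVersion("5.8.2", "weekly-k197-abcdef"))          // 5.9.0-weekly.197
	fmt.Println(nextChartVersion("5.9.0-weekly.197", "weekly-k198-bbbb")) // 5.9.0-weekly.198
}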
e7689b248dbe549b2ac61a0e335d8b5b999cc47d
2024-06-14 13:28:29
JordanRushing
feat: add recalculateOwnedStreams to check stream ownership if the ring is changed (#13103)
false
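The recalculation service added in the diff below is built on dskit's services.NewTimerService, which invokes an iteration function once per interval for as long as the service runs; the ingester uses exactly this to poll the ring every owned-streams-check-interval. A minimal self-contained sketch of that pattern (the interval and iteration body are illustrative):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/grafana/dskit/services"
)

func main() {
	// The third argument is the per-iteration function; returning a non-nil
	// error stops the service in a failed state.
	svc := services.NewTimerService(100*time.Millisecond, nil, func(_ context.Context) error {
		fmt.Println("checking ring for changes")
		return nil
	}, nil)

	ctx := context.Background()
	_ = services.StartAndAwaitRunning(ctx, svc)
	time.Sleep(350 * time.Millisecond)
	_ = services.StopAndAwaitTerminated(ctx, svc)
}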
diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md index b287bdea5f37f..5e119c6e5de11 100644 --- a/docs/sources/shared/configuration.md +++ b/docs/sources/shared/configuration.md @@ -2879,6 +2879,11 @@ wal: # common.path_prefix is set then common.path_prefix will be used. # CLI flag: -ingester.shutdown-marker-path [shutdown_marker_path: <string> | default = ""] + +# Interval at which the ingester ownedStreamService checks for changes in the +# ring to recalculate owned streams. +# CLI flag: -ingester.owned-streams-check-interval +[owned_streams_check_interval: <duration> | default = 30s] ``` ### ingester_client @@ -3540,7 +3545,7 @@ When a memberlist config with at least 1 join_members is defined, kvstore of type # The timeout for establishing a connection with a remote node, and for # read/write operations. # CLI flag: -memberlist.stream-timeout -[stream_timeout: <duration> | default = 10s] +[stream_timeout: <duration> | default = 2s] # Multiplication factor used when sending out messages (factor * log(N+1)). # CLI flag: -memberlist.retransmit-factor @@ -4755,6 +4760,10 @@ Configures the `server` of the launched module(s). # CLI flag: -server.grpc-conn-limit [grpc_listen_conn_limit: <int> | default = 0] +# Enables PROXY protocol. +# CLI flag: -server.proxy-protocol-enabled +[proxy_protocol_enabled: <boolean> | default = false] + # Comma-separated list of cipher suites to use. If blank, the default Go cipher # suites are used. # CLI flag: -server.tls-cipher-suites @@ -4909,6 +4918,21 @@ grpc_tls_config: # CLI flag: -server.grpc.num-workers [grpc_server_num_workers: <int> | default = 0] +# If true, the request_message_bytes, response_message_bytes, and +# inflight_requests metrics will be tracked. Enabling this option prevents the +# use of memory pools for parsing gRPC request bodies and may lead to more +# memory allocations. +# CLI flag: -server.grpc.stats-tracking-enabled +[grpc_server_stats_tracking_enabled: <boolean> | default = true] + +# If true, gRPC's buffer pools will be used to handle incoming requests. +# Enabling this feature can reduce memory allocation, but also requires +# disabling GRPC server stats tracking by setting +# `server.grpc.stats-tracking-enabled=false`. This is an experimental gRPC +# feature, so it might be removed in a future version of the gRPC library. +# CLI flag: -server.grpc.recv-buffer-pools-enabled +[grpc_server_recv_buffer_pools_enabled: <boolean> | default = false] + # Output log messages in the given format. Valid formats: [logfmt, json] # CLI flag: -log.format [log_format: <string> | default = "logfmt"] @@ -4922,6 +4946,11 @@ grpc_tls_config: # CLI flag: -server.log-source-ips-enabled [log_source_ips_enabled: <boolean> | default = false] +# Log all source IPs instead of only the originating one. Only used if +# server.log-source-ips-enabled is true +# CLI flag: -server.log-source-ips-full +[log_source_ips_full: <boolean> | default = false] + # Header field storing the source IPs. Only used if # server.log-source-ips-enabled is true. 
If not set the default Forwarded, # X-Real-IP and X-Forwarded-For headers are used diff --git a/go.mod b/go.mod index 051a18d2292f7..b4ed8ae7dea01 100644 --- a/go.mod +++ b/go.mod @@ -50,9 +50,9 @@ require ( github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.5.0 github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 - github.com/grafana/dskit v0.0.0-20240104111617-ea101a3b86eb + github.com/grafana/dskit v0.0.0-20240528015923-27d7d41066d3 github.com/grafana/go-gelf/v2 v2.0.1 - github.com/grafana/gomemcache v0.0.0-20231204155601-7de47a8c3cb0 + github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd github.com/grafana/tail v0.0.0-20230510142333-77b18831edf0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 @@ -153,6 +153,7 @@ require ( github.com/dlclark/regexp2 v1.4.0 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/pires/go-proxyproto v0.7.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect @@ -312,7 +313,6 @@ require ( github.com/sercand/kuberesolver/v5 v5.1.1 // indirect github.com/shopspring/decimal v1.2.0 // indirect github.com/sirupsen/logrus v1.9.3 - github.com/soheilhy/cmux v0.1.5 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spf13/cast v1.3.1 // indirect github.com/spf13/pflag v1.0.5 // indirect diff --git a/go.sum b/go.sum index 37ea77d492150..0c969948e1099 100644 --- a/go.sum +++ b/go.sum @@ -1017,14 +1017,14 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 h1:qhugDMdQ4Vp68H0tp/0iN17DM2ehRo1rLEdOFe/gB8I= github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2/go.mod h1:w/aiO1POVIeXUQyl0VQSZjl5OAGDTL5aX+4v0RA1tcw= -github.com/grafana/dskit v0.0.0-20240104111617-ea101a3b86eb h1:AWE6+kvtE18HP+lRWNUCyvymyrFSXs6TcS2vXIXGIuw= -github.com/grafana/dskit v0.0.0-20240104111617-ea101a3b86eb/go.mod h1:kkWM4WUV230bNG3urVRWPBnSJHs64y/0RmWjftnnn0c= +github.com/grafana/dskit v0.0.0-20240528015923-27d7d41066d3 h1:k8vINlI4w+RYc37NRwQlRe/IHYoEbu6KAe2XdGDeV1U= +github.com/grafana/dskit v0.0.0-20240528015923-27d7d41066d3/go.mod h1:HvSf3uf8Ps2vPpzHeAFyZTdUcbVr+Rxpq1xcx7J/muc= github.com/grafana/go-gelf/v2 v2.0.1 h1:BOChP0h/jLeD+7F9mL7tq10xVkDG15he3T1zHuQaWak= github.com/grafana/go-gelf/v2 v2.0.1/go.mod h1:lexHie0xzYGwCgiRGcvZ723bSNyNI8ZRD4s0CLobh90= github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 h1:xLuzPoOzdfNb/RF/IENCw+oLVdZB4G21VPhkHBgwSHY= github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85/go.mod h1:crI9WX6p0IhrqB+DqIUHulRW853PaNFf7o4UprV//3I= -github.com/grafana/gomemcache v0.0.0-20231204155601-7de47a8c3cb0 h1:aLBiDMjTtXx2800iCIp+8kdjIlvGX0MF/zICQMQO2qU= -github.com/grafana/gomemcache v0.0.0-20231204155601-7de47a8c3cb0/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= +github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 h1:X8IKQ0wu40wpvYcKfBcc5T4QnhdQjUhtUtB/1CY89lE= +github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= github.com/grafana/jsonparser v0.0.0-20240209175146-098958973a2d h1:YwbJJ/PrVWVdnR+j/EAVuazdeP+Za5qbiH1Vlr+wFXs= 
github.com/grafana/jsonparser v0.0.0-20240209175146-098958973a2d/go.mod h1:796sq+UcONnSlzA3RtlBZ+b/hrerkZXiEmO8oMjyRwY= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU= @@ -1541,6 +1541,8 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pires/go-proxyproto v0.7.0 h1:IukmRewDQFWC7kfnb66CSomk2q/seBuilHBYFwyq0Hs= +github.com/pires/go-proxyproto v0.7.0/go.mod h1:Vz/1JPY/OACxWGQNIRY2BeyDmpoaWmEP40O9LbuiFR4= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1697,8 +1699,6 @@ github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:s github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/soniah/gosnmp v1.25.0/go.mod h1:8YvfZxH388NIIw2A+X5z2Oh97VcNhtmxDLt5QeUzVuQ= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/sony/gobreaker v0.5.0 h1:dRCvqm0P490vZPmy7ppEk2qCnCieBooFJ+YoXGYB+yg= diff --git a/pkg/ingester/checkpoint_test.go b/pkg/ingester/checkpoint_test.go index d530d937d42fe..1b0c76466dc15 100644 --- a/pkg/ingester/checkpoint_test.go +++ b/pkg/ingester/checkpoint_test.go @@ -70,7 +70,9 @@ func TestIngesterWAL(t *testing.T) { } } - i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil) + readRingMock := mockReadRingWithOneActiveIngester() + + i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil, readRingMock) require.NoError(t, err) require.Nil(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -113,7 +115,7 @@ func TestIngesterWAL(t *testing.T) { expectCheckpoint(t, walDir, false, time.Second) // restart the ingester - i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil) + i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil, readRingMock) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck require.Nil(t, services.StartAndAwaitRunning(context.Background(), i)) @@ -127,7 +129,7 @@ func TestIngesterWAL(t *testing.T) { require.Nil(t, 
services.StopAndAwaitTerminated(context.Background(), i)) // restart the ingester - i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil) + i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil, readRingMock) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck require.Nil(t, services.StartAndAwaitRunning(context.Background(), i)) @@ -150,7 +152,9 @@ func TestIngesterWALIgnoresStreamLimits(t *testing.T) { } } - i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil) + readRingMock := mockReadRingWithOneActiveIngester() + + i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil, readRingMock) require.NoError(t, err) require.Nil(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -196,7 +200,7 @@ func TestIngesterWALIgnoresStreamLimits(t *testing.T) { require.NoError(t, err) // restart the ingester - i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil) + i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil, readRingMock) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck require.Nil(t, services.StartAndAwaitRunning(context.Background(), i)) @@ -253,7 +257,9 @@ func TestIngesterWALBackpressureSegments(t *testing.T) { } } - i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil) + readRingMock := mockReadRingWithOneActiveIngester() + + i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil, readRingMock) require.NoError(t, err) require.Nil(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -274,7 +280,7 @@ func TestIngesterWALBackpressureSegments(t *testing.T) { expectCheckpoint(t, walDir, false, time.Second) // restart the ingester, ensuring we replayed from WAL. 
- i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil) + i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil, readRingMock) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck require.Nil(t, services.StartAndAwaitRunning(context.Background(), i)) @@ -295,7 +301,9 @@ func TestIngesterWALBackpressureCheckpoint(t *testing.T) { } } - i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil) + readRingMock := mockReadRingWithOneActiveIngester() + + i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil, readRingMock) require.NoError(t, err) require.Nil(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -316,7 +324,7 @@ func TestIngesterWALBackpressureCheckpoint(t *testing.T) { require.Nil(t, services.StopAndAwaitTerminated(context.Background(), i)) // restart the ingester, ensuring we can replay from the checkpoint as well. - i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil) + i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil, readRingMock) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck require.Nil(t, services.StartAndAwaitRunning(context.Background(), i)) @@ -591,7 +599,9 @@ func TestIngesterWALReplaysUnorderedToOrdered(t *testing.T) { } } - i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil) + readRingMock := mockReadRingWithOneActiveIngester() + + i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil, readRingMock) require.NoError(t, err) require.Nil(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -663,7 +673,7 @@ func TestIngesterWALReplaysUnorderedToOrdered(t *testing.T) { require.NoError(t, err) // restart the ingester - i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil) + i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil, readRingMock) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck require.Nil(t, services.StartAndAwaitRunning(context.Background(), i)) diff --git a/pkg/ingester/flush_test.go b/pkg/ingester/flush_test.go index edd6084a2741b..460a50ffc8fac 100644 --- a/pkg/ingester/flush_test.go +++ b/pkg/ingester/flush_test.go @@ -337,10 +337,12 @@ func newTestStore(t 
require.TestingT, cfg Config, walOverride WAL) (*testStore, chunks: map[string][]chunk.Chunk{}, } + readRingMock := mockReadRingWithOneActiveIngester() + limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) require.NoError(t, err) - ing, err := New(cfg, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokitlog.NewNopLogger(), nil) + ing, err := New(cfg, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokitlog.NewNopLogger(), nil, readRingMock) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing)) @@ -376,6 +378,7 @@ func defaultIngesterTestConfig(t testing.TB) Config { cfg.BlockSize = 256 * 1024 cfg.TargetChunkSize = 1500 * 1024 cfg.WAL.Enabled = false + cfg.OwnedStreamsCheckInterval = 1 * time.Second return cfg } diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 1a89aebe6ef9f..464204334afcc 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -122,6 +122,8 @@ type Config struct { MaxDroppedStreams int `yaml:"max_dropped_streams"` ShutdownMarkerPath string `yaml:"shutdown_marker_path"` + + OwnedStreamsCheckInterval time.Duration `yaml:"owned_streams_check_interval" doc:"description=Interval at which the ingester ownedStreamService checks for changes in the ring to recalculate owned streams."` } // RegisterFlags registers the flags. @@ -149,6 +151,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.IndexShards, "ingester.index-shards", index.DefaultIndexShards, "Shard factor used in the ingesters for the in process reverse index. This MUST be evenly divisible by ALL schema shard factors or Loki will not start.") f.IntVar(&cfg.MaxDroppedStreams, "ingester.tailer.max-dropped-streams", 10, "Maximum number of dropped streams to keep in memory during tailing.") f.StringVar(&cfg.ShutdownMarkerPath, "ingester.shutdown-marker-path", "", "Path where the shutdown marker file is stored. If not set and common.path_prefix is set then common.path_prefix will be used.") + f.DurationVar(&cfg.OwnedStreamsCheckInterval, "ingester.owned-streams-check-interval", 30*time.Second, "Interval at which the ingester ownedStreamService checks for changes in the ring to recalculate owned streams.") } func (cfg *Config) Validate() error { @@ -262,10 +265,14 @@ type Ingester struct { writeLogManager *writefailures.Manager customStreamsTracker push.UsageTracker + + // recalculateOwnedStreams periodically checks the ring for changes and recalculates owned streams for each instance. + readRing ring.ReadRing + recalculateOwnedStreams *recalculateOwnedStreams } // New makes a new Ingester. 
-func New(cfg Config, clientConfig client.Config, store Store, limits Limits, configs *runtime.TenantConfigs, registerer prometheus.Registerer, writeFailuresCfg writefailures.Cfg, metricsNamespace string, logger log.Logger, customStreamsTracker push.UsageTracker) (*Ingester, error) { +func New(cfg Config, clientConfig client.Config, store Store, limits Limits, configs *runtime.TenantConfigs, registerer prometheus.Registerer, writeFailuresCfg writefailures.Cfg, metricsNamespace string, logger log.Logger, customStreamsTracker push.UsageTracker, readRing ring.ReadRing) (*Ingester, error) { if cfg.ingesterClientFactory == nil { cfg.ingesterClientFactory = client.New } @@ -294,6 +301,7 @@ func New(cfg Config, clientConfig client.Config, store Store, limits Limits, con streamRateCalculator: NewStreamRateCalculator(), writeLogManager: writefailures.NewManager(logger, registerer, writeFailuresCfg, configs, "ingester"), customStreamsTracker: customStreamsTracker, + readRing: readRing, } i.replayController = newReplayController(metrics, cfg.WAL, &replayFlusher{i}) @@ -343,6 +351,8 @@ func New(cfg Config, clientConfig client.Config, store Store, limits Limits, con i.SetExtractorWrapper(i.cfg.SampleExtractorWrapper) } + i.recalculateOwnedStreams = newRecalculateOwnedStreams(i.getInstances, i.lifecycler.ID, i.readRing, cfg.OwnedStreamsCheckInterval, util_log.Logger) + return i, nil } @@ -536,6 +546,16 @@ func (i *Ingester) starting(ctx context.Context) error { i.setPrepareShutdown() } + err = i.recalculateOwnedStreams.StartAsync(ctx) + if err != nil { + return fmt.Errorf("can not start recalculate owned streams service: %w", err) + } + + err = i.lifecycler.AwaitRunning(ctx) + if err != nil { + return fmt.Errorf("can not ensure recalculate owned streams service is running: %w", err) + } + // start our loop i.loopDone.Add(1) go i.loop() diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index 6bb27ad645cc9..444e1317e6972 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -2,6 +2,7 @@ package ingester import ( "fmt" + math "math" "net" "net/http" "net/http/httptest" @@ -16,8 +17,10 @@ import ( "github.com/grafana/dskit/flagext" "github.com/grafana/dskit/httpgrpc" "github.com/grafana/dskit/middleware" + "github.com/grafana/dskit/ring" "github.com/grafana/dskit/services" "github.com/grafana/dskit/user" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/require" @@ -58,7 +61,9 @@ func TestPrepareShutdownMarkerPathNotSet(t *testing.T) { chunks: map[string][]chunk.Chunk{}, } - i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) + mockRing := mockReadRingWithOneActiveIngester() + + i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, mockRing) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -81,7 +86,9 @@ func TestPrepareShutdown(t *testing.T) { chunks: map[string][]chunk.Chunk{}, } - i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) + readRingMock := mockReadRingWithOneActiveIngester() + + i, err := New(ingesterConfig, client.Config{}, store, limits, 
runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -142,7 +149,9 @@ func TestIngester_GetStreamRates_Correctness(t *testing.T) { chunks: map[string][]chunk.Chunk{}, } - i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) + readRingMock := mockReadRingWithOneActiveIngester() + + i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -173,8 +182,9 @@ func BenchmarkGetStreamRatesAllocs(b *testing.B) { store := &mockStore{ chunks: map[string][]chunk.Chunk{}, } + readRingMock := mockReadRingWithOneActiveIngester() - i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) + i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock) require.NoError(b, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -198,7 +208,9 @@ func TestIngester(t *testing.T) { chunks: map[string][]chunk.Chunk{}, } - i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) + readRingMock := mockReadRingWithOneActiveIngester() + + i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -383,7 +395,9 @@ func TestIngesterStreamLimitExceeded(t *testing.T) { chunks: map[string][]chunk.Chunk{}, } - i, err := New(ingesterConfig, client.Config{}, store, overrides, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) + readRingMock := mockReadRingWithOneActiveIngester() + + i, err := New(ingesterConfig, client.Config{}, store, overrides, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -803,7 +817,9 @@ func Test_InMemoryLabels(t *testing.T) { chunks: map[string][]chunk.Chunk{}, } - i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) + readRingMock := mockReadRingWithOneActiveIngester() + + i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -856,8 +872,9 @@ func TestIngester_GetDetectedLabels(t *testing.T) { store := &mockStore{ chunks: map[string][]chunk.Chunk{}, } + readRingMock := mockReadRingWithOneActiveIngester() - i, err := New(ingesterConfig, client.Config{}, store, limits, 
runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) + i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -919,8 +936,9 @@ func TestIngester_GetDetectedLabelsWithQuery(t *testing.T) { store := &mockStore{ chunks: map[string][]chunk.Chunk{}, } + readRingMock := mockReadRingWithOneActiveIngester() - i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) + i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -1286,8 +1304,9 @@ func TestStats(t *testing.T) { ingesterConfig := defaultIngesterTestConfig(t) limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) require.NoError(t, err) + readRingMock := mockReadRingWithOneActiveIngester() - i, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) + i, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock) require.NoError(t, err) i.instances["test"] = defaultInstance(t) @@ -1313,8 +1332,9 @@ func TestVolume(t *testing.T) { ingesterConfig := defaultIngesterTestConfig(t) limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) require.NoError(t, err) + readRingMock := mockReadRingWithOneActiveIngester() - i, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) + i, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock) require.NoError(t, err) i.instances["test"] = defaultInstance(t) @@ -1392,8 +1412,9 @@ func createIngesterServer(t *testing.T, ingesterConfig Config) (ingesterClient, t.Helper() limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) require.NoError(t, err) + readRingMock := mockReadRingWithOneActiveIngester() - ing, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) + ing, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock) require.NoError(t, err) listener := bufconn.Listen(1024 * 1024) @@ -1472,3 +1493,150 @@ func jsonLine(ts int64, i int) string { } return fmt.Sprintf(`{"e":"f", "h":"i", "j":"k", "g":"h", "ts":"%d"}`, ts) } + +type readRingMock struct { + replicationSet ring.ReplicationSet + getAllHealthyCallsCount int + tokenRangesByIngester map[string]ring.TokenRanges +} + +func (r *readRingMock) HealthyInstancesCount() int { + return len(r.replicationSet.Instances) +} + +func newReadRingMock(ingesters []ring.InstanceDesc, maxErrors int) *readRingMock { + return &readRingMock{ + 
replicationSet: ring.ReplicationSet{ + Instances: ingesters, + MaxErrors: maxErrors, + }, + } +} + +func (r *readRingMock) Describe(_ chan<- *prometheus.Desc) { +} + +func (r *readRingMock) Collect(_ chan<- prometheus.Metric) { +} + +func (r *readRingMock) Get(_ uint32, _ ring.Operation, _ []ring.InstanceDesc, _ []string, _ []string) (ring.ReplicationSet, error) { + return r.replicationSet, nil +} + +func (r *readRingMock) ShuffleShard(_ string, size int) ring.ReadRing { + // pass by value to copy + return func(r readRingMock) *readRingMock { + r.replicationSet.Instances = r.replicationSet.Instances[:size] + return &r + }(*r) +} + +func (r *readRingMock) BatchGet(_ []uint32, _ ring.Operation) ([]ring.ReplicationSet, error) { + return []ring.ReplicationSet{r.replicationSet}, nil +} + +func (r *readRingMock) GetAllHealthy(_ ring.Operation) (ring.ReplicationSet, error) { + r.getAllHealthyCallsCount++ + return r.replicationSet, nil +} + +func (r *readRingMock) GetReplicationSetForOperation(_ ring.Operation) (ring.ReplicationSet, error) { + return r.replicationSet, nil +} + +func (r *readRingMock) ReplicationFactor() int { + return 1 +} + +func (r *readRingMock) InstancesCount() int { + return len(r.replicationSet.Instances) +} + +func (r *readRingMock) InstancesInZoneCount(_ string) int { + return len(r.replicationSet.Instances) +} + +func (r *readRingMock) InstancesWithTokensCount() int { + return len(r.replicationSet.Instances) +} + +func (r *readRingMock) InstancesWithTokensInZoneCount(_ string) int { + return len(r.replicationSet.Instances) +} + +func (r *readRingMock) ZonesCount() int { + return 1 +} + +func (r *readRingMock) Subring(_ uint32, _ int) ring.ReadRing { + return r +} + +func (r *readRingMock) HasInstance(instanceID string) bool { + for _, ing := range r.replicationSet.Instances { + if ing.Addr != instanceID { + return true + } + } + return false +} + +func (r *readRingMock) ShuffleShardWithLookback(_ string, _ int, _ time.Duration, _ time.Time) ring.ReadRing { + return r +} + +func (r *readRingMock) CleanupShuffleShardCache(_ string) {} + +func (r *readRingMock) GetInstanceState(_ string) (ring.InstanceState, error) { + return 0, nil +} + +func (r *readRingMock) GetTokenRangesForInstance(instance string) (ring.TokenRanges, error) { + if r.tokenRangesByIngester != nil { + ranges, exists := r.tokenRangesByIngester[instance] + if !exists { + return nil, ring.ErrInstanceNotFound + } + return ranges, nil + } + tr := ring.TokenRanges{0, math.MaxUint32} + return tr, nil +} + +func mockReadRingWithOneActiveIngester() *readRingMock { + return newReadRingMock([]ring.InstanceDesc{ + {Addr: "test", Timestamp: time.Now().UnixNano(), State: ring.ACTIVE, Tokens: []uint32{1, 2, 3}}, + }, 0) +} + +func TestUpdateOwnedStreams(t *testing.T) { + ingesterConfig := defaultIngesterTestConfig(t) + limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) + require.NoError(t, err) + readRingMock := mockReadRingWithOneActiveIngester() + + i, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock) + require.NoError(t, err) + + i.instances["test"] = defaultInstance(t) + + tt := time.Now().Add(-5 * time.Minute) + err = i.instances["test"].Push(context.Background(), &logproto.PushRequest{Streams: []logproto.Stream{ + // both label sets have FastFingerprint=e002a3a451262627 + {Labels: "{app=\"l\",uniq0=\"0\",uniq1=\"1\"}", Entries: entries(5, tt.Add(time.Minute))}, + 
{Labels: "{uniq0=\"1\",app=\"m\",uniq1=\"1\"}", Entries: entries(5, tt)}, + + // e002a3a451262247 + {Labels: "{app=\"l\",uniq0=\"1\",uniq1=\"0\"}", Entries: entries(5, tt.Add(time.Minute))}, + {Labels: "{uniq1=\"0\",app=\"m\",uniq0=\"0\"}", Entries: entries(5, tt)}, + + // e002a2a4512624f4 + {Labels: "{app=\"l\",uniq0=\"0\",uniq1=\"0\"}", Entries: entries(5, tt.Add(time.Minute))}, + {Labels: "{uniq0=\"1\",uniq1=\"0\",app=\"m\"}", Entries: entries(5, tt)}, + }}) + require.NoError(t, err) + + // streams are pushed, let's check owned stream counts + ownedStreams := i.instances["test"].ownedStreamsSvc.getOwnedStreamCount() + require.Equal(t, 8, ownedStreams) +} diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go index ecef3f10347b8..65389a3cb04a0 100644 --- a/pkg/ingester/instance.go +++ b/pkg/ingester/instance.go @@ -16,6 +16,7 @@ import ( "github.com/go-kit/log/level" "github.com/grafana/dskit/httpgrpc" + "github.com/grafana/dskit/ring" "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" @@ -186,6 +187,7 @@ func newInstance( customStreamsTracker: customStreamsTracker, } i.mapper = NewFPMapper(i.getLabelsFromFingerprint) + return i, err } @@ -375,10 +377,7 @@ func (i *instance) createStreamByFP(ls labels.Labels, fp model.Fingerprint) (*st s := newStream(chunkfmt, headfmt, i.cfg, i.limiter, i.instanceID, fp, sortedLabels, i.limiter.UnorderedWrites(i.instanceID), i.streamRateCalculator, i.metrics, i.writeFailures) - i.streamsCreatedTotal.Inc() - memoryStreams.WithLabelValues(i.instanceID).Inc() - memoryStreamsLabelsBytes.Add(float64(len(s.labels.String()))) - i.addTailersToNewStream(s) + i.onStreamCreated(s) return s, nil } @@ -1175,3 +1174,23 @@ func minTs(stream *logproto.Stream) model.Time { } return model.TimeFromUnixNano(streamMinTs) } + +// For each stream, we check if the stream is owned by the ingester or not and increment/decrement the owned stream count. 
+func (i *instance) updateOwnedStreams(ownedTokenRange ring.TokenRanges) error { + var err error + i.streams.WithLock(func() { + i.ownedStreamsSvc.resetStreamCounts() + err = i.streams.ForEach(func(s *stream) (bool, error) { + if ownedTokenRange.IncludesKey(uint32(s.fp)) { + i.ownedStreamsSvc.incOwnedStreamCount() + } else { + i.ownedStreamsSvc.incNotOwnedStreamCount() + } + return true, nil + }) + }) + if err != nil { + return fmt.Errorf("error checking streams ownership: %w", err) + } + return nil +} diff --git a/pkg/ingester/instance_test.go b/pkg/ingester/instance_test.go index 3055a7fb0c5b7..f5e9591654819 100644 --- a/pkg/ingester/instance_test.go +++ b/pkg/ingester/instance_test.go @@ -50,6 +50,7 @@ func defaultConfig() *Config { MaxBackoff: 10 * time.Second, MaxRetries: 1, }, + OwnedStreamsCheckInterval: 1 * time.Second, } if err := cfg.Validate(); err != nil { panic(errors.Wrap(err, "error building default test config")) @@ -1103,7 +1104,8 @@ func TestStreamShardingUsage(t *testing.T) { t.Run("invalid push returns error", func(t *testing.T) { tracker := &mockUsageTracker{} - i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, customTenant1, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, tracker) + + i, _ := newInstance(&Config{IndexShards: 1, OwnedStreamsCheckInterval: 1 * time.Second}, defaultPeriodConfigs, customTenant1, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, tracker) ctx := context.Background() err = i.Push(ctx, &logproto.PushRequest{ @@ -1123,7 +1125,7 @@ func TestStreamShardingUsage(t *testing.T) { }) t.Run("valid push returns no error", func(t *testing.T) { - i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, customTenant2, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil) + i, _ := newInstance(&Config{IndexShards: 1, OwnedStreamsCheckInterval: 1 * time.Second}, defaultPeriodConfigs, customTenant2, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil) ctx := context.Background() err = i.Push(ctx, &logproto.PushRequest{ diff --git a/pkg/ingester/owned_streams.go b/pkg/ingester/owned_streams.go index 3be6fb40fdd86..7a405d684dc55 100644 --- a/pkg/ingester/owned_streams.go +++ b/pkg/ingester/owned_streams.go @@ -3,15 +3,16 @@ package ingester import ( "sync" + "github.com/grafana/dskit/services" "go.uber.org/atomic" ) type ownedStreamService struct { - tenantID string - limiter *Limiter - fixedLimit *atomic.Int32 + services.Service - //todo: implement job to recalculate it + tenantID string + limiter *Limiter + fixedLimit *atomic.Int32 ownedStreamCount int notOwnedStreamCount int lock sync.RWMutex @@ -23,6 +24,7 @@ func newOwnedStreamService(tenantID string, limiter *Limiter) *ownedStreamServic limiter: limiter, fixedLimit: atomic.NewInt32(0), } + svc.updateFixedLimit() return svc } @@ -48,6 +50,12 @@ func (s *ownedStreamService) incOwnedStreamCount() { s.ownedStreamCount++ } +func (s *ownedStreamService) incNotOwnedStreamCount() { + s.lock.Lock() + defer s.lock.Unlock() + s.notOwnedStreamCount++ +} + func (s *ownedStreamService) decOwnedStreamCount() { s.lock.Lock() defer s.lock.Unlock() @@ -57,3 +65,10 @@ func (s *ownedStreamService) decOwnedStreamCount() { } s.ownedStreamCount-- } + +func (s 
*ownedStreamService) resetStreamCounts() { + s.lock.Lock() + defer s.lock.Unlock() + s.ownedStreamCount = 0 + s.notOwnedStreamCount = 0 +} diff --git a/pkg/ingester/owned_streams_test.go b/pkg/ingester/owned_streams_test.go index 759927a1d0cfe..876954b8579a0 100644 --- a/pkg/ingester/owned_streams_test.go +++ b/pkg/ingester/owned_streams_test.go @@ -33,9 +33,12 @@ func Test_OwnedStreamService(t *testing.T) { service.incOwnedStreamCount() require.Equal(t, 3, service.getOwnedStreamCount()) - // simulate the effect from the recalculation job + service.incOwnedStreamCount() + service.decOwnedStreamCount() service.notOwnedStreamCount = 1 service.ownedStreamCount = 2 + require.Equal(t, 2, service.getOwnedStreamCount()) + require.Equal(t, 1, service.notOwnedStreamCount) service.decOwnedStreamCount() require.Equal(t, 2, service.getOwnedStreamCount(), "owned stream count must be decremented only when notOwnedStreamCount is set to 0") @@ -63,4 +66,13 @@ func Test_OwnedStreamService(t *testing.T) { group.Wait() require.Equal(t, 1, service.getOwnedStreamCount(), "owned stream count must not be changed") + + // simulate the effect from the recalculation job + service.notOwnedStreamCount = 1 + service.ownedStreamCount = 2 + + service.resetStreamCounts() + + require.Equal(t, 0, service.getOwnedStreamCount()) + require.Equal(t, 0, service.notOwnedStreamCount) } diff --git a/pkg/ingester/recalculate_owned_streams.go b/pkg/ingester/recalculate_owned_streams.go new file mode 100644 index 0000000000000..1346033cb5819 --- /dev/null +++ b/pkg/ingester/recalculate_owned_streams.go @@ -0,0 +1,91 @@ +package ingester + +import ( + "context" + "errors" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/grafana/dskit/ring" + "github.com/grafana/dskit/services" +) + +var ownedStreamRingOp = ring.NewOp([]ring.InstanceState{ring.PENDING, ring.JOINING, ring.ACTIVE, ring.LEAVING}, nil) + +type recalculateOwnedStreams struct { + services.Service + + logger log.Logger + + instancesSupplier func() []*instance + ingesterID string + previousRing ring.ReplicationSet + ingestersRing ring.ReadRing + ticker *time.Ticker +} + +func newRecalculateOwnedStreams(instancesSupplier func() []*instance, ingesterID string, ring ring.ReadRing, ringPollInterval time.Duration, logger log.Logger) *recalculateOwnedStreams { + svc := &recalculateOwnedStreams{ + ingestersRing: ring, + instancesSupplier: instancesSupplier, + ingesterID: ingesterID, + logger: logger, + } + svc.Service = services.NewTimerService(ringPollInterval, nil, svc.iteration, nil) + return svc +} + +func (s *recalculateOwnedStreams) iteration(_ context.Context) error { + s.recalculate() + return nil +} + +func (s *recalculateOwnedStreams) recalculate() { + ringChanged, err := s.checkRingForChanges() + if err != nil { + level.Error(s.logger).Log("msg", "failed to check ring for changes", "err", err) + return + } + if !ringChanged { + return + } + ownedTokenRange, err := s.getTokenRangesForIngester() + if err != nil { + level.Error(s.logger).Log("msg", "failed to get token ranges for ingester", "err", err) + return + } + + for _, instance := range s.instancesSupplier() { + if !instance.limiter.limits.UseOwnedStreamCount(instance.instanceID) { + continue + } + err = instance.updateOwnedStreams(ownedTokenRange) + if err != nil { + level.Error(s.logger).Log("msg", "failed to update owned streams", "err", err) + } + } +} + +func (s *recalculateOwnedStreams) checkRingForChanges() (bool, error) { + rs, err := 
s.ingestersRing.GetAllHealthy(ownedStreamRingOp) + if err != nil { + return false, err + } + + ringChanged := ring.HasReplicationSetChangedWithoutStateOrAddr(s.previousRing, rs) + s.previousRing = rs + return ringChanged, nil +} + +func (s *recalculateOwnedStreams) getTokenRangesForIngester() (ring.TokenRanges, error) { + ranges, err := s.ingestersRing.GetTokenRangesForInstance(s.ingesterID) + if err != nil { + if errors.Is(err, ring.ErrInstanceNotFound) { + return nil, nil + } + return nil, err + } + + return ranges, nil +} diff --git a/pkg/ingester/recalculate_owned_streams_test.go b/pkg/ingester/recalculate_owned_streams_test.go new file mode 100644 index 0000000000000..6ac0f54f06e8c --- /dev/null +++ b/pkg/ingester/recalculate_owned_streams_test.go @@ -0,0 +1,154 @@ +package ingester + +import ( + "context" + "strconv" + "testing" + "time" + + "github.com/go-kit/log" + "github.com/grafana/dskit/ring" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/v3/pkg/runtime" + "github.com/grafana/loki/v3/pkg/validation" +) + +func Test_recalculateOwnedStreams_newRecalculateOwnedStreams(t *testing.T) { + mockInstancesSupplier := &mockTenantsSuplier{tenants: []*instance{}} + mockRing := newReadRingMock([]ring.InstanceDesc{ + {Addr: "test", Timestamp: time.Now().UnixNano(), State: ring.ACTIVE, Tokens: []uint32{1, 2, 3}}, + }, 0) + service := newRecalculateOwnedStreams(mockInstancesSupplier.get, "test", mockRing, 50*time.Millisecond, log.NewNopLogger()) + require.Equal(t, 0, mockRing.getAllHealthyCallsCount, "ring must be called only after service's start up") + ctx := context.Background() + require.NoError(t, service.StartAsync(ctx)) + require.NoError(t, service.AwaitRunning(ctx)) + require.Eventually(t, func() bool { + return mockRing.getAllHealthyCallsCount >= 2 + }, 1*time.Second, 50*time.Millisecond, "expected at least two runs of the iteration") +} + +func Test_recalculateOwnedStreams_recalculate(t *testing.T) { + tests := map[string]struct { + featureEnabled bool + expectedOwnedStreamCount int + expectedNotOwnedStreamCount int + }{ + "expected streams ownership to be recalculated": { + featureEnabled: true, + expectedOwnedStreamCount: 4, + expectedNotOwnedStreamCount: 3, + }, + "expected streams ownership recalculation to be skipped": { + featureEnabled: false, + expectedOwnedStreamCount: 7, + }, + } + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + mockRing := &readRingMock{ + replicationSet: ring.ReplicationSet{ + Instances: []ring.InstanceDesc{{Addr: "ingester-0", Timestamp: time.Now().UnixNano(), State: ring.ACTIVE, Tokens: []uint32{100, 200, 300}}}, + }, + tokenRangesByIngester: map[string]ring.TokenRanges{ + // this ingester owns token ranges [50, 100] and [200, 300] + "ingester-0": {50, 100, 200, 300}, + }, + } + + limits, err := validation.NewOverrides(validation.Limits{ + MaxGlobalStreamsPerUser: 100, + UseOwnedStreamCount: testData.featureEnabled, + }, nil) + require.NoError(t, err) + limiter := NewLimiter(limits, NilMetrics, mockRing, 1) + + tenant, err := newInstance( + defaultConfig(), + defaultPeriodConfigs, + "tenant-a", + limiter, + runtime.DefaultTenantConfigs(), + noopWAL{}, + NilMetrics, + nil, + nil, + nil, + nil, + NewStreamRateCalculator(), + nil, + nil, + ) + require.NoError(t, err) + // not owned streams + createStream(t, tenant, 49) + createStream(t, tenant, 101) + createStream(t, tenant, 301) + + // owned streams + 
createStream(t, tenant, 50) + createStream(t, tenant, 60) + createStream(t, tenant, 100) + createStream(t, tenant, 250) + + require.Equal(t, 7, tenant.ownedStreamsSvc.ownedStreamCount) + require.Equal(t, 0, tenant.ownedStreamsSvc.notOwnedStreamCount) + + mockTenantsSupplier := &mockTenantsSuplier{tenants: []*instance{tenant}} + + service := newRecalculateOwnedStreams(mockTenantsSupplier.get, "ingester-0", mockRing, 50*time.Millisecond, log.NewNopLogger()) + + service.recalculate() + + require.Equal(t, testData.expectedOwnedStreamCount, tenant.ownedStreamsSvc.ownedStreamCount) + require.Equal(t, testData.expectedNotOwnedStreamCount, tenant.ownedStreamsSvc.notOwnedStreamCount) + }) + } + +} + +func Test_recalculateOwnedStreams_checkRingForChanges(t *testing.T) { + mockRing := &readRingMock{ + replicationSet: ring.ReplicationSet{ + Instances: []ring.InstanceDesc{{Addr: "ingester-0", Timestamp: time.Now().UnixNano(), State: ring.ACTIVE, Tokens: []uint32{100, 200, 300}}}, + }, + } + mockTenantsSupplier := &mockTenantsSuplier{tenants: []*instance{{}}} + service := newRecalculateOwnedStreams(mockTenantsSupplier.get, "ingester-0", mockRing, 50*time.Millisecond, log.NewNopLogger()) + + ringChanged, err := service.checkRingForChanges() + require.NoError(t, err) + require.True(t, ringChanged, "expected ring to be changed because it was not initialized yet") + + ringChanged, err = service.checkRingForChanges() + require.NoError(t, err) + require.False(t, ringChanged, "expected ring not to be changed because token ranges is not changed") + + anotherIngester := ring.InstanceDesc{Addr: "ingester-1", Timestamp: time.Now().UnixNano(), State: ring.ACTIVE, Tokens: []uint32{150, 250, 350}} + mockRing.replicationSet.Instances = append(mockRing.replicationSet.Instances, anotherIngester) + + ringChanged, err = service.checkRingForChanges() + require.NoError(t, err) + require.True(t, ringChanged) +} + +func createStream(t *testing.T, inst *instance, fingerprint int) { + lbls := labels.Labels{ + labels.Label{Name: "mock", Value: strconv.Itoa(fingerprint)}} + + _, _, err := inst.streams.LoadOrStoreNew(lbls.String(), func() (*stream, error) { + return inst.createStreamByFP(lbls, model.Fingerprint(fingerprint)) + }, nil) + require.NoError(t, err) +} + +type mockTenantsSuplier struct { + tenants []*instance +} + +func (m *mockTenantsSuplier) get() []*instance { + return m.tenants +} diff --git a/pkg/ingester/recovery_test.go b/pkg/ingester/recovery_test.go index 9176ff3c6ad2f..4c5a4ce815d8d 100644 --- a/pkg/ingester/recovery_test.go +++ b/pkg/ingester/recovery_test.go @@ -228,7 +228,9 @@ func TestSeriesRecoveryNoDuplicates(t *testing.T) { chunks: map[string][]chunk.Chunk{}, } - i, err := New(ingesterConfig, client.Config{}, store, limits, loki_runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) + readRingMock := mockReadRingWithOneActiveIngester() + + i, err := New(ingesterConfig, client.Config{}, store, limits, loki_runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock) require.NoError(t, err) mkSample := func(i int) *logproto.PushRequest { @@ -262,7 +264,7 @@ func TestSeriesRecoveryNoDuplicates(t *testing.T) { require.Equal(t, false, iter.Next()) // create a new ingester now - i, err = New(ingesterConfig, client.Config{}, store, limits, loki_runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) + i, err = New(ingesterConfig, client.Config{}, store, limits, 
loki_runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock) require.NoError(t, err) // recover the checkpointed series diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 22cd46743ea27..c3904d1e681db 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -587,7 +587,7 @@ func (t *Loki) initIngester() (_ services.Service, err error) { level.Warn(util_log.Logger).Log("msg", "The config setting shutdown marker path is not set. The /ingester/prepare_shutdown endpoint won't work") } - t.Ingester, err = ingester.New(t.Cfg.Ingester, t.Cfg.IngesterClient, t.Store, t.Overrides, t.tenantConfigs, prometheus.DefaultRegisterer, t.Cfg.Distributor.WriteFailuresLogging, t.Cfg.MetricsNamespace, logger, t.UsageTracker) + t.Ingester, err = ingester.New(t.Cfg.Ingester, t.Cfg.IngesterClient, t.Store, t.Overrides, t.tenantConfigs, prometheus.DefaultRegisterer, t.Cfg.Distributor.WriteFailuresLogging, t.Cfg.MetricsNamespace, logger, t.UsageTracker, t.ring) if err != nil { return } diff --git a/pkg/querier/querier_mock_test.go b/pkg/querier/querier_mock_test.go index b70ba0cc3963b..4d5d2f22c532b 100644 --- a/pkg/querier/querier_mock_test.go +++ b/pkg/querier/querier_mock_test.go @@ -451,6 +451,22 @@ func (r *readRingMock) InstancesCount() int { return len(r.replicationSet.Instances) } +func (r *readRingMock) InstancesInZoneCount(_ string) int { + return len(r.replicationSet.Instances) +} + +func (r *readRingMock) InstancesWithTokensCount() int { + return len(r.replicationSet.Instances) +} + +func (r *readRingMock) InstancesWithTokensInZoneCount(_ string) int { + return len(r.replicationSet.Instances) +} + +func (r *readRingMock) ZonesCount() int { + return 1 +} + func (r *readRingMock) Subring(_ uint32, _ int) ring.ReadRing { return r } diff --git a/pkg/util/ring/ring_test.go b/pkg/util/ring/ring_test.go index 8e765d4ef6d41..fa285c0cb4ae7 100644 --- a/pkg/util/ring/ring_test.go +++ b/pkg/util/ring/ring_test.go @@ -95,6 +95,44 @@ func (r *readRingMock) GetTokenRangesForInstance(_ string) (ring.TokenRanges, er return tr, nil } +func (r *readRingMock) InstancesInZoneCount(zone string) int { + count := 0 + for _, instance := range r.replicationSet.Instances { + if instance.Zone == zone { + count++ + } + } + return count +} + +func (r *readRingMock) InstancesWithTokensCount() int { + count := 0 + for _, instance := range r.replicationSet.Instances { + if len(instance.Tokens) > 0 { + count++ + } + } + return count +} + +func (r *readRingMock) InstancesWithTokensInZoneCount(zone string) int { + count := 0 + for _, instance := range r.replicationSet.Instances { + if len(instance.Tokens) > 0 && instance.Zone == zone { + count++ + } + } + return count +} + +func (r *readRingMock) ZonesCount() int { + uniqueZone := make(map[string]any) + for _, instance := range r.replicationSet.Instances { + uniqueZone[instance.Zone] = nil + } + return len(uniqueZone) +} + type readLifecyclerMock struct { mock.Mock addr string diff --git a/vendor/github.com/grafana/dskit/concurrency/runner.go b/vendor/github.com/grafana/dskit/concurrency/runner.go index 023be10d7a0a3..fcc8929971491 100644 --- a/vendor/github.com/grafana/dskit/concurrency/runner.go +++ b/vendor/github.com/grafana/dskit/concurrency/runner.go @@ -83,11 +83,25 @@ func CreateJobsFromStrings(values []string) []interface{} { } // ForEachJob runs the provided jobFunc for each job index in [0, jobs) up to concurrency concurrent workers. 
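+// For example, concurrency.ForEachJob(ctx, len(items), 8, func(ctx context.Context, idx int) error { return handle(ctx, items[idx]) })
+// processes all items with at most 8 concurrent workers (items and handle stand in for caller code).
+//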
+// If the concurrency value is <= 0 all jobs will be executed in parallel. +// // The execution breaks on first error encountered. +// +// ForEachJob cancels the context.Context passed to each invocation of jobFunc before ForEachJob returns. func ForEachJob(ctx context.Context, jobs int, concurrency int, jobFunc func(ctx context.Context, idx int) error) error { if jobs == 0 { return nil } + if jobs == 1 { + // Honor the function contract, cancelling the context passed to the jobFunc once it has completed. + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + return jobFunc(ctx, 0) + } + if concurrency <= 0 { + concurrency = jobs + } // Initialise indexes with -1 so first Inc() returns index 0. indexes := atomic.NewInt64(-1) @@ -113,3 +127,35 @@ func ForEachJob(ctx context.Context, jobs int, concurrency int, jobFunc func(ctx // Wait until done (or context has canceled). return g.Wait() } + +// ForEachJobMergeResults is like ForEachJob but expects jobFunc to return a slice of results which are then +// merged with results from all jobs. This function returns no results if an error occurred running any jobFunc. +// +// ForEachJobMergeResults cancels the context.Context passed to each invocation of jobFunc before ForEachJobMergeResults returns. +func ForEachJobMergeResults[J any, R any](ctx context.Context, jobs []J, concurrency int, jobFunc func(ctx context.Context, job J) ([]R, error)) ([]R, error) { + var ( + resultsMx sync.Mutex + results = make([]R, 0, len(jobs)) // Assume at least 1 result per job. + ) + + err := ForEachJob(ctx, len(jobs), concurrency, func(ctx context.Context, idx int) error { + jobResult, jobErr := jobFunc(ctx, jobs[idx]) + if jobErr != nil { + return jobErr + } + + resultsMx.Lock() + results = append(results, jobResult...) + resultsMx.Unlock() + + return nil + }) + + if err != nil { + return nil, err + } + + // Given no error occurred, it means that all job results have already been collected + // and so it's safe to access results slice with no locking. + return results, nil +} diff --git a/vendor/github.com/grafana/dskit/grpcclient/backoff_retry.go b/vendor/github.com/grafana/dskit/grpcclient/backoff_retry.go index 21abbb7865689..b0d7f9004f8d1 100644 --- a/vendor/github.com/grafana/dskit/grpcclient/backoff_retry.go +++ b/vendor/github.com/grafana/dskit/grpcclient/backoff_retry.go @@ -2,6 +2,7 @@ package grpcclient import ( "context" + "errors" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -10,22 +11,27 @@ import ( "github.com/grafana/dskit/backoff" ) -// NewBackoffRetry gRPC middleware. -func NewBackoffRetry(cfg backoff.Config) grpc.UnaryClientInterceptor { +// NewRateLimitRetrier creates a UnaryClientInterceptor which retries with backoff +// the calls from invoker when the executed RPC is rate limited. +func NewRateLimitRetrier(cfg backoff.Config) grpc.UnaryClientInterceptor { return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { backoff := backoff.New(ctx, cfg) + var err error for backoff.Ongoing() { - err := invoker(ctx, method, req, reply, cc, opts...) + err = invoker(ctx, method, req, reply, cc, opts...) if err == nil { return nil } + // Only ResourceExhausted statuses are handled as signals of being rate limited, + // following the implementation of the package's RateLimiter interceptor. + // All other errors are propagated as-is upstream. 
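+ // For example, a codes.Unavailable error is returned to the caller
+ // immediately, while codes.ResourceExhausted falls through to
+ // backoff.Wait() below and the call is attempted again.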
if status.Code(err) != codes.ResourceExhausted { return err } backoff.Wait() } - return backoff.Err() + return errors.Join(err, backoff.Err()) } } diff --git a/vendor/github.com/grafana/dskit/grpcclient/grpcclient.go b/vendor/github.com/grafana/dskit/grpcclient/grpcclient.go index b171889d0a048..7518990471549 100644 --- a/vendor/github.com/grafana/dskit/grpcclient/grpcclient.go +++ b/vendor/github.com/grafana/dskit/grpcclient/grpcclient.go @@ -108,7 +108,7 @@ func (cfg *Config) DialOption(unaryClientInterceptors []grpc.UnaryClientIntercep streamClientInterceptors = append(streamClientInterceptors, cfg.StreamMiddleware...) if cfg.BackoffOnRatelimits { - unaryClientInterceptors = append([]grpc.UnaryClientInterceptor{NewBackoffRetry(cfg.BackoffConfig)}, unaryClientInterceptors...) + unaryClientInterceptors = append([]grpc.UnaryClientInterceptor{NewRateLimitRetrier(cfg.BackoffConfig)}, unaryClientInterceptors...) } if cfg.RateLimit > 0 { diff --git a/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go b/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go index e1f044d8650bb..b755e2adceaeb 100644 --- a/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go +++ b/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go @@ -89,7 +89,9 @@ func WriteError(w http.ResponseWriter, err error) { func ToHeader(hs []*Header, header http.Header) { for _, h := range hs { - header[h.Key] = h.Values + // http.Header expects headers to be stored in canonical form, + // otherwise they are inaccessible with Get() on a http.Header struct. + header[http.CanonicalHeaderKey(h.Key)] = h.Values } } diff --git a/vendor/github.com/grafana/dskit/instrument/instrument.go b/vendor/github.com/grafana/dskit/instrument/instrument.go index 4ea480b29d60e..f54e49def3086 100644 --- a/vendor/github.com/grafana/dskit/instrument/instrument.go +++ b/vendor/github.com/grafana/dskit/instrument/instrument.go @@ -75,7 +75,7 @@ func ObserveWithExemplar(ctx context.Context, histogram prometheus.Observer, sec if traceID, ok := tracing.ExtractSampledTraceID(ctx); ok { histogram.(prometheus.ExemplarObserver).ObserveWithExemplar( seconds, - prometheus.Labels{"traceID": traceID}, + prometheus.Labels{"trace_id": traceID, "traceID": traceID}, ) return } diff --git a/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go b/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go index 693964b5ad067..e8a94debe181c 100644 --- a/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go +++ b/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go @@ -177,7 +177,7 @@ func (cfg *KVConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { // "Defaults to hostname" -- memberlist sets it to hostname by default. f.StringVar(&cfg.NodeName, prefix+"memberlist.nodename", "", "Name of the node in memberlist cluster. Defaults to hostname.") // memberlist.DefaultLANConfig will put hostname here. 
 f.BoolVar(&cfg.RandomizeNodeName, prefix+"memberlist.randomize-node-name", true, "Add random suffix to the node name.")
- f.DurationVar(&cfg.StreamTimeout, prefix+"memberlist.stream-timeout", mlDefaults.TCPTimeout, "The timeout for establishing a connection with a remote node, and for read/write operations.")
+ f.DurationVar(&cfg.StreamTimeout, prefix+"memberlist.stream-timeout", 2*time.Second, "The timeout for establishing a connection with a remote node, and for read/write operations.")
 f.IntVar(&cfg.RetransmitMult, prefix+"memberlist.retransmit-factor", mlDefaults.RetransmitMult, "Multiplication factor used when sending out messages (factor * log(N+1)).")
 f.Var(&cfg.JoinMembers, prefix+"memberlist.join", "Other cluster members to join. Can be specified multiple times. It can be an IP, hostname or an entry specified in the DNS Service Discovery format.")
 f.DurationVar(&cfg.MinJoinBackoff, prefix+"memberlist.min-join-backoff", 1*time.Second, "Min backoff duration to join other cluster members.")
diff --git a/vendor/github.com/grafana/dskit/kv/memberlist/mergeable.go b/vendor/github.com/grafana/dskit/kv/memberlist/mergeable.go
index 2c02acfa468e4..9833a858b4761 100644
--- a/vendor/github.com/grafana/dskit/kv/memberlist/mergeable.go
+++ b/vendor/github.com/grafana/dskit/kv/memberlist/mergeable.go
@@ -35,17 +35,17 @@ type Mergeable interface {
 // used when doing CAS operation)
 Merge(other Mergeable, localCAS bool) (change Mergeable, error error)
 
- // Describes the content of this mergeable value. Used by memberlist client to decide if
+ // MergeContent describes the content of this mergeable value. Used by memberlist client to decide if
 // one change-value can invalidate some other value, that was received previously.
 // Invalidation can happen only if output of MergeContent is a superset of some other MergeContent.
 MergeContent() []string
 
- // Remove tombstones older than given limit from this mergeable.
+ // RemoveTombstones removes tombstones older than the given limit from this mergeable.
 // If limit is zero time, remove all tombstones. Memberlist client calls this method with zero limit each
 // time when client is accessing value from the store. It can be used to hide tombstones from the clients.
 // Returns the total number of tombstones present and the number of removed tombstones by this invocation.
 RemoveTombstones(limit time.Time) (total, removed int)
 
- // Clone should return a deep copy of the state.
+ // Clone returns a deep copy of the state.
 Clone() Mergeable
 }
diff --git a/vendor/github.com/grafana/dskit/kv/metrics.go b/vendor/github.com/grafana/dskit/kv/metrics.go
index 7361b8c41c789..954f06ed30b46 100644
--- a/vendor/github.com/grafana/dskit/kv/metrics.go
+++ b/vendor/github.com/grafana/dskit/kv/metrics.go
@@ -3,6 +3,7 @@ package kv
 
 import (
 "context"
 "strconv"
+ "time"
 
 "github.com/prometheus/client_golang/prometheus"
 "github.com/prometheus/client_golang/prometheus/promauto"
@@ -53,6 +54,10 @@ func newMetricsClient(backend string, c Client, reg prometheus.Registerer) Clien
 Name: "kv_request_duration_seconds",
 Help: "Time spent on kv store requests.",
 Buckets: prometheus.DefBuckets,
+ // Use defaults recommended by Prometheus for native histograms.
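+ // (Note: actually emitting native histograms also assumes a client_golang version
+ // with native-histogram support and a Prometheus server started with
+ // --enable-feature=native-histograms; the classic buckets above keep working either way.)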
+ NativeHistogramBucketFactor: 1.1, + NativeHistogramMaxBucketNumber: 100, + NativeHistogramMinResetDuration: time.Hour, ConstLabels: prometheus.Labels{ "type": backend, }, diff --git a/vendor/github.com/grafana/dskit/middleware/grpc_instrumentation.go b/vendor/github.com/grafana/dskit/middleware/grpc_instrumentation.go index e4052b8ed05ff..d15402ea484d7 100644 --- a/vendor/github.com/grafana/dskit/middleware/grpc_instrumentation.go +++ b/vendor/github.com/grafana/dskit/middleware/grpc_instrumentation.go @@ -22,7 +22,20 @@ import ( ) func observe(ctx context.Context, hist *prometheus.HistogramVec, method string, err error, duration time.Duration, instrumentLabel instrumentationLabel) { - instrument.ObserveWithExemplar(ctx, hist.WithLabelValues(gRPC, method, instrumentLabel.getInstrumentationLabel(err), "false"), duration.Seconds()) + labelValues := []string{ + gRPC, + method, + instrumentLabel.getInstrumentationLabel(err), + "false", + "", // this is a placeholder for the tenant ID + } + labelValues = labelValues[:len(labelValues)-1] + + instrument.ObserveWithExemplar(ctx, hist.WithLabelValues(labelValues...), duration.Seconds()) + if tenantID, ok := instrumentLabel.perTenantInstrumentation.shouldInstrument(ctx); ok { + labelValues = append(labelValues, tenantID) + instrument.ObserveWithExemplar(ctx, instrumentLabel.perTenantDuration.WithLabelValues(labelValues...), duration.Seconds()) + } } // UnaryServerInstrumentInterceptor instruments gRPC requests for errors and latency. @@ -182,8 +195,17 @@ var ( } ) +func WithPerTenantInstrumentation(m *prometheus.HistogramVec, f PerTenantCallback) InstrumentationOption { + return func(instrumentationLabel *instrumentationLabel) { + instrumentationLabel.perTenantInstrumentation = f + instrumentationLabel.perTenantDuration = m + } +} + func applyInstrumentationOptions(maskHTTPStatuses bool, options ...InstrumentationOption) instrumentationLabel { - instrumentationLabel := instrumentationLabel{maskHTTPStatus: maskHTTPStatuses} + instrumentationLabel := instrumentationLabel{ + maskHTTPStatus: maskHTTPStatuses, + } for _, opt := range options { opt(&instrumentationLabel) } @@ -191,8 +213,10 @@ func applyInstrumentationOptions(maskHTTPStatuses bool, options ...Instrumentati } type instrumentationLabel struct { - reportGRPCStatus bool - maskHTTPStatus bool + reportGRPCStatus bool + maskHTTPStatus bool + perTenantInstrumentation PerTenantCallback + perTenantDuration *prometheus.HistogramVec } // getInstrumentationLabel converts an error into an error code string by applying the configurations diff --git a/vendor/github.com/grafana/dskit/middleware/grpc_logging.go b/vendor/github.com/grafana/dskit/middleware/grpc_logging.go index feab364743225..68a2ce037ee64 100644 --- a/vendor/github.com/grafana/dskit/middleware/grpc_logging.go +++ b/vendor/github.com/grafana/dskit/middleware/grpc_logging.go @@ -6,11 +6,12 @@ package middleware import ( "context" - "errors" + "fmt" "time" "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/pkg/errors" dskit_log "github.com/grafana/dskit/log" @@ -24,16 +25,20 @@ const ( gRPC = "gRPC" ) -// An error can implement ShouldLog() to control whether GRPCServerLog will log. +// OptionalLogging is the interface that needs be implemented by an error that wants to control whether the log +// should be logged by GRPCServerLog. type OptionalLogging interface { - ShouldLog(ctx context.Context, duration time.Duration) bool + // ShouldLog returns whether the error should be logged and the reason. 
For example, if the error should be sampled,
+ // the returned reason could be something like "sampled 1/10". The reason, if any, is used to decorate the error,
+ // whether the error ends up being logged or skipped.
+ ShouldLog(ctx context.Context) (bool, string)
 }
 
 type DoNotLogError struct{ Err error }
 
-func (i DoNotLogError) Error() string { return i.Err.Error() }
-func (i DoNotLogError) Unwrap() error { return i.Err }
-func (i DoNotLogError) ShouldLog(_ context.Context, _ time.Duration) bool { return false }
+func (i DoNotLogError) Error() string { return i.Err.Error() }
+func (i DoNotLogError) Unwrap() error { return i.Err }
+func (i DoNotLogError) ShouldLog(_ context.Context) (bool, string) { return false, "" }
 
 // GRPCServerLog logs grpc requests, errors, and latency.
 type GRPCServerLog struct {
@@ -50,8 +55,13 @@ func (s GRPCServerLog) UnaryServerInterceptor(ctx context.Context, req interface
 if err == nil && s.DisableRequestSuccessLog {
 return resp, nil
 }
- var optional OptionalLogging
- if errors.As(err, &optional) && !optional.ShouldLog(ctx, time.Since(begin)) {
+
+ // Honor sampled error logging.
+ keep, reason := shouldLog(ctx, err)
+ if reason != "" {
+ err = fmt.Errorf("%w (%s)", err, reason)
+ }
+ if !keep {
 return resp, err
 }
 
@@ -91,3 +101,12 @@ func (s GRPCServerLog) StreamServerInterceptor(srv interface{}, ss grpc.ServerSt
 }
 return err
 }
+
+func shouldLog(ctx context.Context, err error) (bool, string) {
+ var optional OptionalLogging
+ if !errors.As(err, &optional) {
+ return true, ""
+ }
+
+ return optional.ShouldLog(ctx)
+}
diff --git a/vendor/github.com/grafana/dskit/middleware/grpc_stats.go b/vendor/github.com/grafana/dskit/middleware/grpc_stats.go
index 3d29d9baabbe1..ec766d640acf2 100644
--- a/vendor/github.com/grafana/dskit/middleware/grpc_stats.go
+++ b/vendor/github.com/grafana/dskit/middleware/grpc_stats.go
@@ -31,6 +31,7 @@ type contextKey int
 
 const (
 contextKeyMethodName contextKey = 1
+ contextKeyRouteName contextKey = 2
 )
 
 func (g *grpcStatsHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
diff --git a/vendor/github.com/grafana/dskit/middleware/http_timeout.go b/vendor/github.com/grafana/dskit/middleware/http_timeout.go
new file mode 100644
index 0000000000000..15b1a3f2e92fe
--- /dev/null
+++ b/vendor/github.com/grafana/dskit/middleware/http_timeout.go
@@ -0,0 +1,50 @@
+package middleware
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
+)
+
+// NewTimeoutMiddleware returns a new timeout middleware that returns a 503 Service Unavailable
+// using the http.TimeoutHandler. Note also that the middleware disables the http server write timeout
+// to ensure the two timeouts don't conflict. We disable the server write timeout because its behavior may
+// be unintuitive. See below.
+//
+// Server.WriteTimeout:
+// - does not cancel context and instead allows the request to go until the next write. 
in practice this +// means that an http server with a write timeout of 10s may go for significantly longer +// - closes the tcp connection on the next write after the timeout has elapsed instead of sending a +// meaningful http response +// - allows streaming of http response back to caller +// +// http.TimeoutHandler +// - cancels context allowing downstream code to abandon the request +// - returns a 503 Service Unavailable with the provided message +// - buffers response in memory which may be undesirable for large responses +func NewTimeoutMiddleware(dt time.Duration, msg string, log log.Logger) Func { + return func(next http.Handler) http.Handler { + return &timeoutHandler{ + log: log, + handler: http.TimeoutHandler(next, dt, msg), + } + } +} + +type timeoutHandler struct { + log log.Logger + handler http.Handler +} + +func (t timeoutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + rc := http.NewResponseController(w) + // setting the write deadline to the zero time disables it + err := rc.SetWriteDeadline(time.Time{}) + if err != nil { + level.Warn(t.log).Log("msg", "failed to set write deadline in timeout handler. server WriteTimeout is still enforced", "err", err) + } + + t.handler.ServeHTTP(w, r) +} diff --git a/vendor/github.com/grafana/dskit/middleware/http_tracing.go b/vendor/github.com/grafana/dskit/middleware/http_tracing.go index 901970a4a6b1c..d75535ebe38c9 100644 --- a/vendor/github.com/grafana/dskit/middleware/http_tracing.go +++ b/vendor/github.com/grafana/dskit/middleware/http_tracing.go @@ -24,14 +24,13 @@ var _ = nethttp.MWURLTagFunc // Tracer is a middleware which traces incoming requests. type Tracer struct { - RouteMatcher RouteMatcher - SourceIPs *SourceIPExtractor + SourceIPs *SourceIPExtractor } // Wrap implements Interface func (t Tracer) Wrap(next http.Handler) http.Handler { options := []nethttp.MWOption{ - nethttp.OperationNameFunc(makeHTTPOperationNameFunc(t.RouteMatcher)), + nethttp.OperationNameFunc(httpOperationNameFunc), nethttp.MWSpanObserver(func(sp opentracing.Span, r *http.Request) { // add a tag with the client's user agent to the span userAgent := r.Header.Get("User-Agent") @@ -130,11 +129,9 @@ func HTTPGRPCTracingInterceptor(router *mux.Router) grpc.UnaryServerInterceptor } } -func makeHTTPOperationNameFunc(routeMatcher RouteMatcher) func(r *http.Request) string { - return func(r *http.Request) string { - routeName := getRouteName(routeMatcher, r) - return getOperationName(routeName, r) - } +func httpOperationNameFunc(r *http.Request) string { + routeName := ExtractRouteName(r.Context()) + return getOperationName(routeName, r) } func getOperationName(routeName string, r *http.Request) string { diff --git a/vendor/github.com/grafana/dskit/middleware/instrument.go b/vendor/github.com/grafana/dskit/middleware/instrument.go index e5ae9c53c98c3..9813077ce6c2a 100644 --- a/vendor/github.com/grafana/dskit/middleware/instrument.go +++ b/vendor/github.com/grafana/dskit/middleware/instrument.go @@ -5,9 +5,9 @@ package middleware import ( + "context" "io" "net/http" - "regexp" "strconv" "strings" @@ -28,13 +28,28 @@ type RouteMatcher interface { Match(*http.Request, *mux.RouteMatch) bool } +// PerTenantCallback is a function that returns a tenant ID for a given request. When the returned tenant ID is not empty, it is used to label the duration histogram. 
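+//
+// A minimal sketch of a callback (the tenant extraction below is an assumption,
+// e.g. dskit's tenant package; substitute whatever your service actually uses):
+//
+//	perTenant := middleware.PerTenantCallback(func(ctx context.Context) string {
+//		tenantID, _ := tenant.TenantID(ctx)
+//		return tenantID // an empty string disables the per-tenant observation
+//	})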
+type PerTenantCallback func(context.Context) string + +func (f PerTenantCallback) shouldInstrument(ctx context.Context) (string, bool) { + if f == nil { + return "", false + } + tenantID := f(ctx) + if tenantID == "" { + return "", false + } + return tenantID, true +} + // Instrument is a Middleware which records timings for every HTTP request type Instrument struct { - RouteMatcher RouteMatcher - Duration *prometheus.HistogramVec - RequestBodySize *prometheus.HistogramVec - ResponseBodySize *prometheus.HistogramVec - InflightRequests *prometheus.GaugeVec + Duration *prometheus.HistogramVec + PerTenantDuration *prometheus.HistogramVec + PerTenantCallback PerTenantCallback + RequestBodySize *prometheus.HistogramVec + ResponseBodySize *prometheus.HistogramVec + InflightRequests *prometheus.GaugeVec } // IsWSHandshakeRequest returns true if the given request is a websocket handshake request. @@ -77,7 +92,19 @@ func (i Instrument) Wrap(next http.Handler) http.Handler { i.RequestBodySize.WithLabelValues(r.Method, route).Observe(float64(rBody.read)) i.ResponseBodySize.WithLabelValues(r.Method, route).Observe(float64(respMetrics.Written)) - instrument.ObserveWithExemplar(r.Context(), i.Duration.WithLabelValues(r.Method, route, strconv.Itoa(respMetrics.Code), isWS), respMetrics.Duration.Seconds()) + labelValues := []string{ + r.Method, + route, + strconv.Itoa(respMetrics.Code), + isWS, + "", // this is a placeholder for the tenant ID + } + labelValues = labelValues[:len(labelValues)-1] + instrument.ObserveWithExemplar(r.Context(), i.Duration.WithLabelValues(labelValues...), respMetrics.Duration.Seconds()) + if tenantID, ok := i.PerTenantCallback.shouldInstrument(r.Context()); ok { + labelValues = append(labelValues, tenantID) + instrument.ObserveWithExemplar(r.Context(), i.PerTenantDuration.WithLabelValues(labelValues...), respMetrics.Duration.Seconds()) + } }) } @@ -91,7 +118,7 @@ func (i Instrument) Wrap(next http.Handler) http.Handler { // We do all this as we do not wish to emit high cardinality labels to // prometheus. func (i Instrument) getRouteName(r *http.Request) string { - route := getRouteName(i.RouteMatcher, r) + route := ExtractRouteName(r.Context()) if route == "" { route = "other" } @@ -99,53 +126,6 @@ func (i Instrument) getRouteName(r *http.Request) string { return route } -func getRouteName(routeMatcher RouteMatcher, r *http.Request) string { - var routeMatch mux.RouteMatch - if routeMatcher == nil || !routeMatcher.Match(r, &routeMatch) { - return "" - } - - if routeMatch.MatchErr == mux.ErrNotFound { - return "notfound" - } - - if routeMatch.Route == nil { - return "" - } - - if name := routeMatch.Route.GetName(); name != "" { - return name - } - - tmpl, err := routeMatch.Route.GetPathTemplate() - if err == nil { - return MakeLabelValue(tmpl) - } - - return "" -} - -var invalidChars = regexp.MustCompile(`[^a-zA-Z0-9]+`) - -// MakeLabelValue converts a Gorilla mux path to a string suitable for use in -// a Prometheus label value. -func MakeLabelValue(path string) string { - // Convert non-alnums to underscores. - result := invalidChars.ReplaceAllString(path, "_") - - // Trim leading and trailing underscores. - result = strings.Trim(result, "_") - - // Make it all lowercase - result = strings.ToLower(result) - - // Special case. 
- if result == "" { - result = "root" - } - return result -} - type reqBody struct { b io.ReadCloser read int64 diff --git a/vendor/github.com/grafana/dskit/middleware/logging.go b/vendor/github.com/grafana/dskit/middleware/logging.go index aeb15cc6b63a1..fe00d3a82846c 100644 --- a/vendor/github.com/grafana/dskit/middleware/logging.go +++ b/vendor/github.com/grafana/dskit/middleware/logging.go @@ -58,7 +58,7 @@ func (l Log) logWithRequest(r *http.Request) log.Logger { localLog := l.Log traceID, ok := tracing.ExtractTraceID(r.Context()) if ok { - localLog = log.With(localLog, "traceID", traceID) + localLog = log.With(localLog, "trace_id", traceID) } if l.SourceIPs != nil { diff --git a/vendor/github.com/grafana/dskit/middleware/route_injector.go b/vendor/github.com/grafana/dskit/middleware/route_injector.go new file mode 100644 index 0000000000000..7b275f74f7564 --- /dev/null +++ b/vendor/github.com/grafana/dskit/middleware/route_injector.go @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package middleware + +import ( + "context" + "net/http" + "regexp" + "strings" + + "github.com/gorilla/mux" +) + +// RouteInjector is a middleware that injects the route name for the current request into the request context. +// +// The route name can be retrieved by calling ExtractRouteName. +type RouteInjector struct { + RouteMatcher RouteMatcher +} + +func (i RouteInjector) Wrap(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + routeName := getRouteName(i.RouteMatcher, r) + handler.ServeHTTP(w, WithRouteName(r, routeName)) + }) +} + +// WithRouteName annotates r's context with the provided route name. +// +// The provided value must be suitable to use as a Prometheus label value. +// +// This method should generally only be used in tests: in production code, use RouteInjector instead. +func WithRouteName(r *http.Request, routeName string) *http.Request { + ctx := context.WithValue(r.Context(), contextKeyRouteName, routeName) + return r.WithContext(ctx) +} + +// ExtractRouteName returns the route name associated with this request that was previously injected by the +// RouteInjector middleware or WithRouteName. +// +// This is the same route name used for trace and metric names, and is already suitable for use as a Prometheus label +// value. +func ExtractRouteName(ctx context.Context) string { + routeName, ok := ctx.Value(contextKeyRouteName).(string) + if !ok { + return "" + } + + return routeName +} + +func getRouteName(routeMatcher RouteMatcher, r *http.Request) string { + var routeMatch mux.RouteMatch + if routeMatcher == nil || !routeMatcher.Match(r, &routeMatch) { + return "" + } + + if routeMatch.MatchErr == mux.ErrNotFound { + return "notfound" + } + + if routeMatch.Route == nil { + return "" + } + + if name := routeMatch.Route.GetName(); name != "" { + return name + } + + tmpl, err := routeMatch.Route.GetPathTemplate() + if err == nil { + return MakeLabelValue(tmpl) + } + + return "" +} + +var invalidChars = regexp.MustCompile(`[^a-zA-Z0-9]+`) + +// MakeLabelValue converts a Gorilla mux path to a string suitable for use in +// a Prometheus label value. +func MakeLabelValue(path string) string { + // Convert non-alnums to underscores. + result := invalidChars.ReplaceAllString(path, "_") + + // Trim leading and trailing underscores. + result = strings.Trim(result, "_") + + // Make it all lowercase + result = strings.ToLower(result) + + // Special case. 
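+ // (This happens e.g. for a path made only of separators, such as "/",
+ // which trims down to an empty string.)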
+ if result == "" {
+ result = "root"
+ }
+ return result
+}
diff --git a/vendor/github.com/grafana/dskit/middleware/source_ips.go b/vendor/github.com/grafana/dskit/middleware/source_ips.go
index 7c035ddbf47e6..d08797abb09b5 100644
--- a/vendor/github.com/grafana/dskit/middleware/source_ips.go
+++ b/vendor/github.com/grafana/dskit/middleware/source_ips.go
@@ -18,6 +18,9 @@ var (
 // De-facto standard header keys.
 xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For")
 xRealIP = http.CanonicalHeaderKey("X-Real-IP")
+ // Allows extracting the host from the X-Forwarded-For header.
+ // Strips out any spaces or double quotes surrounding the host.
+ xForwardedForRegex = regexp.MustCompile(`(?: *"?([^,]+)"? *)`)
 )
 
 var (
 // existing use of X-Forwarded-* headers.
 // e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43
 forwarded = http.CanonicalHeaderKey("Forwarded")
- // Allows for a sub-match of the first value after 'for=' to the next
- // comma, semi-colon or space. The match is case-insensitive.
- forRegex = regexp.MustCompile(`(?i)(?:for=)([^(;|,| )]+)`)
+ // Allows extracting the host from the for clause of the Forwarded header.
+ // Strips out any spaces or double quotes surrounding the host.
+ forwardedRegex = regexp.MustCompile(`(?i)(?:for=)(?: *"?([^;,]+)"? *)`)
 )
 
 // SourceIPExtractor extracts the source IPs from a HTTP request
@@ -37,10 +40,12 @@ type SourceIPExtractor struct {
 // A regex that extracts the IP address from the header.
 // It should contain at least one capturing group the first of which will be returned.
 regex *regexp.Regexp
+ // A boolean to choose whether to return all found IPs or just the first match
+ extractAllHosts bool
 }
 
 // NewSourceIPs creates a new SourceIPs
-func NewSourceIPs(header, regex string) (*SourceIPExtractor, error) {
+func NewSourceIPs(header, regex string, extractAllHosts bool) (*SourceIPExtractor, error) {
 if (header == "" && regex != "") || (header != "" && regex == "") {
 return nil, fmt.Errorf("either both a header field and a regex have to be given or neither")
 }
@@ -50,8 +55,9 @@ func NewSourceIPs(header, regex string) (*SourceIPExtractor, error) {
 }
 
 return &SourceIPExtractor{
- header: header,
- regex: re,
+ header: header,
+ regex: re,
+ extractAllHosts: extractAllHosts,
 }, nil
 }
 
@@ -72,7 +78,15 @@ func extractHost(address string) string {
 
 // Get returns any source addresses we can find in the request, comma-separated
 func (sips SourceIPExtractor) Get(req *http.Request) string {
- fwd := extractHost(sips.getIP(req))
+ hosts := []string{}
+
+ // Remove port information from the extracted addresses
+ for _, addr := range sips.getIP(req) {
+ hosts = append(hosts, extractHost(addr))
+ }
+
+ fwd := strings.Join(hosts, ", ")
+
 if fwd == "" {
 if req.RemoteAddr == "" {
 return ""
@@ -94,52 +108,45 @@ 
 // getIP retrieves the IP from the RFC7239 Forwarded headers,
 // X-Real-IP and X-Forwarded-For (in that order) or from the
 // custom regex. 
-func (sips SourceIPExtractor) getIP(r *http.Request) string { - var addr string +func (sips SourceIPExtractor) getIP(r *http.Request) []string { + var addrs = []string{} // Use the custom regex only if it was setup if sips.header != "" { hdr := r.Header.Get(sips.header) if hdr == "" { - return "" - } - allMatches := sips.regex.FindAllStringSubmatch(hdr, 1) - if len(allMatches) == 0 { - return "" - } - firstMatch := allMatches[0] - // Check there is at least 1 submatch - if len(firstMatch) < 2 { - return "" + return addrs } - return firstMatch[1] - } - if fwd := r.Header.Get(forwarded); fwd != "" { - // match should contain at least two elements if the protocol was - // specified in the Forwarded header. The first element will always be - // the 'for=' capture, which we ignore. In the case of multiple IP - // addresses (for=8.8.8.8, 8.8.4.4,172.16.1.20 is valid) we only - // extract the first, which should be the client IP. - if match := forRegex.FindStringSubmatch(fwd); len(match) > 1 { - // IPv6 addresses in Forwarded headers are quoted-strings. We strip - // these quotes. - addr = strings.Trim(match[1], `"`) - } + addrs = sips.extractHeader(hdr, sips.regex) + } else if fwd := r.Header.Get(forwarded); fwd != "" { + addrs = sips.extractHeader(fwd, forwardedRegex) } else if fwd := r.Header.Get(xRealIP); fwd != "" { // X-Real-IP should only contain one IP address (the client making the // request). - addr = fwd + addrs = append([]string{}, fwd) } else if fwd := strings.ReplaceAll(r.Header.Get(xForwardedFor), " ", ""); fwd != "" { - // Only grab the first (client) address. Note that '192.168.0.1, - // 10.1.1.1' is a valid key for X-Forwarded-For where addresses after - // the first may represent forwarding proxies earlier in the chain. - s := strings.Index(fwd, ",") - if s == -1 { - s = len(fwd) + addrs = sips.extractHeader(fwd, xForwardedForRegex) + } + + return addrs +} + +// extractHeader is a toolbox function that will parse a header content with a regex and return a list +// of all matching groups as string. +func (sips SourceIPExtractor) extractHeader(header string, regex *regexp.Regexp) []string { + var addrs = []string{} + + if allMatches := regex.FindAllStringSubmatch(header, -1); len(allMatches) > 0 { + for _, match := range allMatches { + if len(match) > 1 { + addrs = append(addrs, match[1]) + } + if !sips.extractAllHosts { + break + } } - addr = fwd[:s] } - return addr + return addrs } diff --git a/vendor/github.com/grafana/dskit/multierror/multierror.go b/vendor/github.com/grafana/dskit/multierror/multierror.go index 68b73e201c1e3..d48b76e6f1a1e 100644 --- a/vendor/github.com/grafana/dskit/multierror/multierror.go +++ b/vendor/github.com/grafana/dskit/multierror/multierror.go @@ -5,7 +5,6 @@ package multierror import ( "bytes" - "errors" "fmt" ) @@ -62,14 +61,6 @@ func (es nonNilMultiError) Error() string { return buf.String() } -// Is attempts to match the provided error against errors in the error list. -// -// This function allows errors.Is to traverse the values stored in the MultiError. 
-func (es nonNilMultiError) Is(target error) bool {
- for _, err := range es {
- if errors.Is(err, target) {
- return true
- }
- }
- return false
+func (es nonNilMultiError) Unwrap() []error {
+ return es
 }
diff --git a/vendor/github.com/grafana/dskit/ring/batch.go b/vendor/github.com/grafana/dskit/ring/batch.go
index 7781fe67a5ae0..f982bd6c68c3e 100644
--- a/vendor/github.com/grafana/dskit/ring/batch.go
+++ b/vendor/github.com/grafana/dskit/ring/batch.go
@@ -49,9 +49,26 @@ func isHTTPStatus4xx(err error) bool {
 return code/100 == 4
 }
 
+// DoBatchRing defines the interface required by a ring implementation to use DoBatch() and DoBatchWithOptions().
+type DoBatchRing interface {
+ // Get returns a ReplicationSet containing the instances to which the input key should be sharded
+ // for the input Operation.
+ //
+ // The input buffers may be referenced in the returned ReplicationSet. This means that it's unsafe to call
+ // Get() multiple times passing the same buffers if ReplicationSet is retained between two different Get()
+ // calls. In this case, you can pass nil buffers.
+ Get(key uint32, op Operation, bufInstances []InstanceDesc, bufStrings1, bufStrings2 []string) (ReplicationSet, error)
+
+ // ReplicationFactor returns the number of instances each key is expected to be sharded to.
+ ReplicationFactor() int
+
+ // InstancesCount returns the number of instances in the ring eligible to have keys sharded to them.
+ InstancesCount() int
+}
+
 // DoBatch is a deprecated version of DoBatchWithOptions where grpc errors containing status codes 4xx are treated as client errors.
 // Deprecated. Use DoBatchWithOptions instead.
-func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(InstanceDesc, []int) error, cleanup func()) error {
+func DoBatch(ctx context.Context, op Operation, r DoBatchRing, keys []uint32, callback func(InstanceDesc, []int) error, cleanup func()) error {
 return DoBatchWithOptions(ctx, op, r, keys, callback, DoBatchOptions{
 Cleanup: cleanup,
 IsClientError: isHTTPStatus4xx,
@@ -94,14 +111,14 @@ func (o *DoBatchOptions) replaceZeroValuesWithDefaults() {
 // See comments on DoBatchOptions for available options for this call.
 //
 // Not implemented as a method on Ring, so we can test separately. 
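+//
+// A hedged usage sketch (the callback body and cleanup below are placeholders,
+// not part of this package):
+//
+//	err := ring.DoBatchWithOptions(ctx, ring.Write, batchRing, keys,
+//		func(instance ring.InstanceDesc, indexes []int) error {
+//			// send the items at `indexes` to instance.Addr here
+//			return nil
+//		},
+//		ring.DoBatchOptions{Cleanup: func() { /* release request buffers */ }})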
-func DoBatchWithOptions(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(InstanceDesc, []int) error, o DoBatchOptions) error { +func DoBatchWithOptions(ctx context.Context, op Operation, r DoBatchRing, keys []uint32, callback func(InstanceDesc, []int) error, o DoBatchOptions) error { o.replaceZeroValuesWithDefaults() if r.InstancesCount() <= 0 { o.Cleanup() return fmt.Errorf("DoBatch: InstancesCount <= 0") } - expectedTrackers := len(keys) * (r.ReplicationFactor() + 1) / r.InstancesCount() + expectedTrackersPerInstance := len(keys) * (r.ReplicationFactor() + 1) / r.InstancesCount() itemTrackers := make([]itemTracker, len(keys)) instances := make(map[string]instance, r.InstancesCount()) @@ -132,8 +149,8 @@ func DoBatchWithOptions(ctx context.Context, op Operation, r ReadRing, keys []ui for _, desc := range replicationSet.Instances { curr, found := instances[desc.Addr] if !found { - curr.itemTrackers = make([]*itemTracker, 0, expectedTrackers) - curr.indexes = make([]int, 0, expectedTrackers) + curr.itemTrackers = make([]*itemTracker, 0, expectedTrackersPerInstance) + curr.indexes = make([]int, 0, expectedTrackersPerInstance) } instances[desc.Addr] = instance{ desc: desc, diff --git a/vendor/github.com/grafana/dskit/ring/model.go b/vendor/github.com/grafana/dskit/ring/model.go index 956dbe0cf4224..334b027d0f8b1 100644 --- a/vendor/github.com/grafana/dskit/ring/model.go +++ b/vendor/github.com/grafana/dskit/ring/model.go @@ -21,6 +21,13 @@ func (ts ByAddr) Len() int { return len(ts) } func (ts ByAddr) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] } func (ts ByAddr) Less(i, j int) bool { return ts[i].Addr < ts[j].Addr } +// ByID is a sortable list of InstanceDesc. +type ByID []InstanceDesc + +func (ts ByID) Len() int { return len(ts) } +func (ts ByID) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] } +func (ts ByID) Less(i, j int) bool { return ts[i].Id < ts[j].Id } + // ProtoDescFactory makes new Descs func ProtoDescFactory() proto.Message { return NewDesc() @@ -195,7 +202,6 @@ func (d *Desc) mergeWithTime(mergeable memberlist.Mergeable, localCAS bool, now other, ok := mergeable.(*Desc) if !ok { - // This method only deals with non-nil rings. 
return nil, fmt.Errorf("expected *ring.Desc, got %T", mergeable) } @@ -512,6 +518,40 @@ func (d *Desc) getOldestRegisteredTimestamp() int64 { return result } +func (d *Desc) instancesWithTokensCount() int { + count := 0 + if d != nil { + for _, ingester := range d.Ingesters { + if len(ingester.Tokens) > 0 { + count++ + } + } + } + return count +} + +func (d *Desc) instancesCountPerZone() map[string]int { + instancesCountPerZone := map[string]int{} + if d != nil { + for _, ingester := range d.Ingesters { + instancesCountPerZone[ingester.Zone]++ + } + } + return instancesCountPerZone +} + +func (d *Desc) instancesWithTokensCountPerZone() map[string]int { + instancesCountPerZone := map[string]int{} + if d != nil { + for _, ingester := range d.Ingesters { + if len(ingester.Tokens) > 0 { + instancesCountPerZone[ingester.Zone]++ + } + } + } + return instancesCountPerZone +} + type CompareResult int // CompareResult responses diff --git a/vendor/github.com/grafana/dskit/ring/partition_instance_lifecycler.go b/vendor/github.com/grafana/dskit/ring/partition_instance_lifecycler.go new file mode 100644 index 0000000000000..09fef7223370f --- /dev/null +++ b/vendor/github.com/grafana/dskit/ring/partition_instance_lifecycler.go @@ -0,0 +1,412 @@ +package ring + +import ( + "context" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "go.uber.org/atomic" + + "github.com/grafana/dskit/kv" + "github.com/grafana/dskit/services" +) + +var ( + ErrPartitionDoesNotExist = errors.New("the partition does not exist") + ErrPartitionStateMismatch = errors.New("the partition state does not match the expected one") + ErrPartitionStateChangeNotAllowed = errors.New("partition state change not allowed") + + allowedPartitionStateChanges = map[PartitionState][]PartitionState{ + PartitionPending: {PartitionActive, PartitionInactive}, + PartitionActive: {PartitionInactive}, + PartitionInactive: {PartitionActive}, + } +) + +type PartitionInstanceLifecyclerConfig struct { + // PartitionID is the ID of the partition managed by the lifecycler. + PartitionID int32 + + // InstanceID is the ID of the instance managed by the lifecycler. + InstanceID string + + // WaitOwnersCountOnPending is the minimum number of owners to wait before switching a + // PENDING partition to ACTIVE. + WaitOwnersCountOnPending int + + // WaitOwnersDurationOnPending is how long each owner should have been added to the + // partition before it's considered eligible for the WaitOwnersCountOnPending count. + WaitOwnersDurationOnPending time.Duration + + // DeleteInactivePartitionAfterDuration is how long the lifecycler should wait before + // deleting inactive partitions with no owners. Inactive partitions are never removed + // if this value is 0. + DeleteInactivePartitionAfterDuration time.Duration + + // PollingInterval is the internal polling interval. This setting is useful to let + // upstream projects to lower it in unit tests. + PollingInterval time.Duration +} + +// PartitionInstanceLifecycler is responsible to manage the lifecycle of a single +// partition and partition owner in the ring. +type PartitionInstanceLifecycler struct { + *services.BasicService + + // These values are initialised at startup, and never change. + cfg PartitionInstanceLifecyclerConfig + ringName string + ringKey string + store kv.Client + logger log.Logger + + // Channel used to execute logic within the lifecycler loop. 
+ actorChan chan func() + + // Whether the partitions should be created on startup if it doesn't exist yet. + createPartitionOnStartup *atomic.Bool + + // Whether the lifecycler should remove the partition owner (identified by instance ID) on shutdown. + removeOwnerOnShutdown *atomic.Bool + + // Metrics. + reconcilesTotal *prometheus.CounterVec + reconcilesFailedTotal *prometheus.CounterVec +} + +func NewPartitionInstanceLifecycler(cfg PartitionInstanceLifecyclerConfig, ringName, ringKey string, store kv.Client, logger log.Logger, reg prometheus.Registerer) *PartitionInstanceLifecycler { + if cfg.PollingInterval == 0 { + cfg.PollingInterval = 5 * time.Second + } + + l := &PartitionInstanceLifecycler{ + cfg: cfg, + ringName: ringName, + ringKey: ringKey, + store: store, + logger: log.With(logger, "ring", ringName), + actorChan: make(chan func()), + createPartitionOnStartup: atomic.NewBool(true), + removeOwnerOnShutdown: atomic.NewBool(false), + reconcilesTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Name: "partition_ring_lifecycler_reconciles_total", + Help: "Total number of reconciliations started.", + ConstLabels: map[string]string{"name": ringName}, + }, []string{"type"}), + reconcilesFailedTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Name: "partition_ring_lifecycler_reconciles_failed_total", + Help: "Total number of reconciliations failed.", + ConstLabels: map[string]string{"name": ringName}, + }, []string{"type"}), + } + + l.BasicService = services.NewBasicService(l.starting, l.running, l.stopping) + + return l +} + +// CreatePartitionOnStartup returns whether the lifecycle creates the partition on startup +// if it doesn't exist. +func (l *PartitionInstanceLifecycler) CreatePartitionOnStartup() bool { + return l.createPartitionOnStartup.Load() +} + +// SetCreatePartitionOnStartup sets whether the lifecycler should create the partition on +// startup if it doesn't exist. +func (l *PartitionInstanceLifecycler) SetCreatePartitionOnStartup(create bool) { + l.createPartitionOnStartup.Store(create) +} + +// RemoveOwnerOnShutdown returns whether the lifecycler has been configured to remove the partition +// owner on shutdown. +func (l *PartitionInstanceLifecycler) RemoveOwnerOnShutdown() bool { + return l.removeOwnerOnShutdown.Load() +} + +// SetRemoveOwnerOnShutdown sets whether the lifecycler should remove the partition owner on shutdown. +func (l *PartitionInstanceLifecycler) SetRemoveOwnerOnShutdown(remove bool) { + l.removeOwnerOnShutdown.Store(remove) +} + +// GetPartitionState returns the current state of the partition, and the timestamp when the state was +// changed the last time. +func (l *PartitionInstanceLifecycler) GetPartitionState(ctx context.Context) (PartitionState, time.Time, error) { + ring, err := l.getRing(ctx) + if err != nil { + return PartitionUnknown, time.Time{}, err + } + + partition, exists := ring.Partitions[l.cfg.PartitionID] + if !exists { + return PartitionUnknown, time.Time{}, ErrPartitionDoesNotExist + } + + return partition.GetState(), partition.GetStateTime(), nil +} + +// ChangePartitionState changes the partition state to toState. +// This function returns ErrPartitionDoesNotExist if the partition doesn't exist, +// and ErrPartitionStateChangeNotAllowed if the state change is not allowed. 
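+//
+// Allowed transitions, per allowedPartitionStateChanges above: PENDING may move to
+// ACTIVE or INACTIVE, while ACTIVE and INACTIVE may move to each other.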
+func (l *PartitionInstanceLifecycler) ChangePartitionState(ctx context.Context, toState PartitionState) error {
+ return l.runOnLifecyclerLoop(func() error {
+ err := l.updateRing(ctx, func(ring *PartitionRingDesc) (bool, error) {
+ return changePartitionState(ring, l.cfg.PartitionID, toState)
+ })
+
+ if err != nil {
+ level.Warn(l.logger).Log("msg", "failed to change partition state", "partition", l.cfg.PartitionID, "to_state", toState, "err", err)
+ }
+
+ return err
+ })
+}
+
+func (l *PartitionInstanceLifecycler) starting(ctx context.Context) error {
+ if l.CreatePartitionOnStartup() {
+ return errors.Wrap(l.createPartitionAndRegisterOwner(ctx), "create partition and register owner")
+ }
+
+ return errors.Wrap(l.waitPartitionAndRegisterOwner(ctx), "wait partition and register owner")
+}
+
+func (l *PartitionInstanceLifecycler) running(ctx context.Context) error {
+ reconcile := func() {
+ l.reconcileOwnedPartition(ctx, time.Now())
+ l.reconcileOtherPartitions(ctx, time.Now())
+ }
+
+ // Run a reconciliation as soon as the lifecycler starts, in order to avoid waiting for the 1st timer tick.
+ reconcile()
+
+ reconcileTicker := time.NewTicker(l.cfg.PollingInterval)
+ defer reconcileTicker.Stop()
+
+ for {
+ select {
+ case <-reconcileTicker.C:
+ reconcile()
+
+ case f := <-l.actorChan:
+ f()
+
+ case <-ctx.Done():
+ return nil
+ }
+ }
+}
+
+func (l *PartitionInstanceLifecycler) stopping(_ error) error {
+ level.Info(l.logger).Log("msg", "partition ring lifecycler is shutting down", "ring", l.ringName)
+
+ // Remove the instance from partition owners, if configured to do so.
+ if l.RemoveOwnerOnShutdown() {
+ err := l.updateRing(context.Background(), func(ring *PartitionRingDesc) (bool, error) {
+ return ring.RemoveOwner(l.cfg.InstanceID), nil
+ })
+
+ if err != nil {
+ level.Error(l.logger).Log("msg", "failed to remove instance from partition owners on shutdown", "instance", l.cfg.InstanceID, "partition", l.cfg.PartitionID, "err", err)
+ } else {
+ level.Info(l.logger).Log("msg", "instance removed from partition owners", "instance", l.cfg.InstanceID, "partition", l.cfg.PartitionID)
+ }
+ }
+
+ return nil
+}
+
+// runOnLifecyclerLoop runs fn within the lifecycler loop. 
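+// Funneling fn through actorChan serialises it with the periodic reconciliation
+// performed by running(), so ring updates from the same lifecycler can't interleave.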
+func (l *PartitionInstanceLifecycler) runOnLifecyclerLoop(fn func() error) error {
+ sc := l.ServiceContext()
+ if sc == nil {
+ return errors.New("lifecycler not running")
+ }
+
+ errCh := make(chan error)
+ wrappedFn := func() {
+ errCh <- fn()
+ }
+
+ select {
+ case <-sc.Done():
+ return errors.New("lifecycler not running")
+ case l.actorChan <- wrappedFn:
+ return <-errCh
+ }
+}
+
+func (l *PartitionInstanceLifecycler) getRing(ctx context.Context) (*PartitionRingDesc, error) {
+ in, err := l.store.Get(ctx, l.ringKey)
+ if err != nil {
+ return nil, err
+ }
+
+ return GetOrCreatePartitionRingDesc(in), nil
+}
+
+func (l *PartitionInstanceLifecycler) updateRing(ctx context.Context, update func(ring *PartitionRingDesc) (bool, error)) error {
+ return l.store.CAS(ctx, l.ringKey, func(in interface{}) (out interface{}, retry bool, err error) {
+ ringDesc := GetOrCreatePartitionRingDesc(in)
+
+ if changed, err := update(ringDesc); err != nil {
+ return nil, false, err
+ } else if !changed {
+ return nil, false, nil
+ }
+
+ return ringDesc, true, nil
+ })
+}
+
+func (l *PartitionInstanceLifecycler) createPartitionAndRegisterOwner(ctx context.Context) error {
+ return l.updateRing(ctx, func(ring *PartitionRingDesc) (bool, error) {
+ now := time.Now()
+ changed := false
+
+ partitionDesc, exists := ring.Partitions[l.cfg.PartitionID]
+ if exists {
+ level.Info(l.logger).Log("msg", "partition found in the ring", "partition", l.cfg.PartitionID, "state", partitionDesc.GetState(), "state_timestamp", partitionDesc.GetStateTime().String(), "tokens", len(partitionDesc.GetTokens()))
+ } else {
+ level.Info(l.logger).Log("msg", "partition not found in the ring", "partition", l.cfg.PartitionID)
+ }
+
+ if !exists {
+ // The partition doesn't exist, so we create a new one. A new partition should always be created
+ // in PENDING state.
+ ring.AddPartition(l.cfg.PartitionID, PartitionPending, now)
+ changed = true
+ }
+
+ // Ensure the instance is added as partition owner.
+ if ring.AddOrUpdateOwner(l.cfg.InstanceID, OwnerActive, l.cfg.PartitionID, now) {
+ changed = true
+ }
+
+ return changed, nil
+ })
+}
+
+func (l *PartitionInstanceLifecycler) waitPartitionAndRegisterOwner(ctx context.Context) error {
+ pollTicker := time.NewTicker(l.cfg.PollingInterval)
+ defer pollTicker.Stop()
+
+ // Wait until the partition exists.
+ checkPartitionExist := func() (bool, error) {
+ level.Info(l.logger).Log("msg", "checking if the partition exists in the ring", "partition", l.cfg.PartitionID)
+
+ ring, err := l.getRing(ctx)
+ if err != nil {
+ return false, errors.Wrap(err, "read partition ring")
+ }
+
+ if ring.HasPartition(l.cfg.PartitionID) {
+ level.Info(l.logger).Log("msg", "partition found in the ring", "partition", l.cfg.PartitionID)
+ return true, nil
+ }
+
+ level.Info(l.logger).Log("msg", "partition not found in the ring", "partition", l.cfg.PartitionID)
+ return false, nil
+ }
+
+ for {
+ if exists, err := checkPartitionExist(); err != nil {
+ return err
+ } else if exists {
+ break
+ }
+
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+
+ case <-pollTicker.C:
+ // Throttle.
+ }
+ }
+
+ // Ensure the instance is added as partition owner.
+ return l.updateRing(ctx, func(ring *PartitionRingDesc) (bool, error) {
+ return ring.AddOrUpdateOwner(l.cfg.InstanceID, OwnerActive, l.cfg.PartitionID, time.Now()), nil
+ })
+}
+
+// reconcileOwnedPartition reconciles the owned partition.
+// This function should be called periodically. 
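+// Currently its only duty is the PENDING -> ACTIVE switch, performed once at least
+// WaitOwnersCountOnPending owners have been registered for longer than WaitOwnersDurationOnPending.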
+func (l *PartitionInstanceLifecycler) reconcileOwnedPartition(ctx context.Context, now time.Time) { + const reconcileType = "owned-partition" + l.reconcilesTotal.WithLabelValues(reconcileType).Inc() + + err := l.updateRing(ctx, func(ring *PartitionRingDesc) (bool, error) { + partitionID := l.cfg.PartitionID + + partition, exists := ring.Partitions[partitionID] + if !exists { + return false, ErrPartitionDoesNotExist + } + + // A pending partition should be switched to active if there are enough owners that + // have been added since more than the waiting period. + if partition.IsPending() && ring.PartitionOwnersCountUpdatedBefore(partitionID, now.Add(-l.cfg.WaitOwnersDurationOnPending)) >= l.cfg.WaitOwnersCountOnPending { + level.Info(l.logger).Log("msg", "switching partition state because enough owners have been registered and minimum waiting time has elapsed", "partition", l.cfg.PartitionID, "from_state", PartitionPending, "to_state", PartitionActive) + return ring.UpdatePartitionState(partitionID, PartitionActive, now), nil + } + + return false, nil + }) + + if err != nil { + l.reconcilesFailedTotal.WithLabelValues(reconcileType).Inc() + level.Warn(l.logger).Log("msg", "failed to reconcile owned partition", "partition", l.cfg.PartitionID, "err", err) + } +} + +// reconcileOtherPartitions reconciles other partitions. +// This function should be called periodically. +func (l *PartitionInstanceLifecycler) reconcileOtherPartitions(ctx context.Context, now time.Time) { + const reconcileType = "other-partitions" + l.reconcilesTotal.WithLabelValues(reconcileType).Inc() + + err := l.updateRing(ctx, func(ring *PartitionRingDesc) (bool, error) { + changed := false + + if l.cfg.DeleteInactivePartitionAfterDuration > 0 { + deleteBefore := now.Add(-l.cfg.DeleteInactivePartitionAfterDuration) + + for partitionID, partition := range ring.Partitions { + // Never delete the partition owned by this lifecycler, since it's expected to have at least + // this instance as owner. + if partitionID == l.cfg.PartitionID { + continue + } + + // A partition is safe to be removed only if it's inactive since longer than the wait period + // and it has no owners registered. + if partition.IsInactiveSince(deleteBefore) && ring.PartitionOwnersCount(partitionID) == 0 { + level.Info(l.logger).Log("msg", "removing inactive partition with no owners from ring", "partition", partitionID, "state", partition.State.CleanName(), "state_timestamp", partition.GetStateTime().String()) + ring.RemovePartition(partitionID) + changed = true + } + } + } + + return changed, nil + }) + + if err != nil { + l.reconcilesFailedTotal.WithLabelValues(reconcileType).Inc() + level.Warn(l.logger).Log("msg", "failed to reconcile other partitions", "err", err) + } +} + +func isPartitionStateChangeAllowed(from, to PartitionState) bool { + for _, allowed := range allowedPartitionStateChanges[from] { + if to == allowed { + return true + } + } + + return false +} diff --git a/vendor/github.com/grafana/dskit/ring/partition_instance_ring.go b/vendor/github.com/grafana/dskit/ring/partition_instance_ring.go new file mode 100644 index 0000000000000..2fb15d8af98d7 --- /dev/null +++ b/vendor/github.com/grafana/dskit/ring/partition_instance_ring.go @@ -0,0 +1,150 @@ +package ring + +import ( + "fmt" + "time" + + "golang.org/x/exp/slices" +) + +type PartitionRingReader interface { + // PartitionRing returns a snapshot of the PartitionRing. This function must never return nil. + // If the ring is empty or unknown, an empty PartitionRing can be returned. 
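+ // (staticPartitionRingReader further below is a trivial implementation backed
+ // by a fixed snapshot, as used by the ShuffleShard* helpers.)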
+ PartitionRing() *PartitionRing
+}
+
+// PartitionInstanceRing holds a partitions ring and an instances ring, and provides functions
+// to look up the intersection of the two (e.g. healthy instances by partition).
+type PartitionInstanceRing struct {
+ partitionsRingReader PartitionRingReader
+ instancesRing *Ring
+ heartbeatTimeout time.Duration
+}
+
+func NewPartitionInstanceRing(partitionsRingWatcher PartitionRingReader, instancesRing *Ring, heartbeatTimeout time.Duration) *PartitionInstanceRing {
+ return &PartitionInstanceRing{
+ partitionsRingReader: partitionsRingWatcher,
+ instancesRing: instancesRing,
+ heartbeatTimeout: heartbeatTimeout,
+ }
+}
+
+func (r *PartitionInstanceRing) PartitionRing() *PartitionRing {
+ return r.partitionsRingReader.PartitionRing()
+}
+
+func (r *PartitionInstanceRing) InstanceRing() *Ring {
+ return r.instancesRing
+}
+
+// GetReplicationSetsForOperation returns one ReplicationSet for each partition in the ring.
+// If there are no healthy owners for a partition, an error is returned.
+func (r *PartitionInstanceRing) GetReplicationSetsForOperation(op Operation) ([]ReplicationSet, error) {
+ partitionsRing := r.PartitionRing()
+ partitionsRingDesc := partitionsRing.desc
+
+ if len(partitionsRingDesc.Partitions) == 0 {
+ return nil, ErrEmptyRing
+ }
+
+ now := time.Now()
+ result := make([]ReplicationSet, 0, len(partitionsRingDesc.Partitions))
+ zonesBuffer := make([]string, 0, 3) // Pre-allocate buffer assuming 3 zones.
+
+ for partitionID := range partitionsRingDesc.Partitions {
+ ownerIDs := partitionsRing.PartitionOwnerIDs(partitionID)
+ instances := make([]InstanceDesc, 0, len(ownerIDs))
+
+ for _, instanceID := range ownerIDs {
+ instance, err := r.instancesRing.GetInstance(instanceID)
+ if err != nil {
+ // If an instance doesn't exist in the instances ring we don't return an error
+ // but look up other instances of the partition.
+ continue
+ }
+
+ if !instance.IsHealthy(op, r.heartbeatTimeout, now) {
+ continue
+ }
+
+ instances = append(instances, instance)
+ }
+
+ if len(instances) == 0 {
+ return nil, fmt.Errorf("partition %d: %w", partitionID, ErrTooManyUnhealthyInstances)
+ }
+
+ // Count the number of unique zones among instances.
+ zonesBuffer = uniqueZonesFromInstances(instances, zonesBuffer[:0])
+ uniqueZones := len(zonesBuffer)
+
+ result = append(result, ReplicationSet{
+ Instances: instances,
+
+ // Partitions have no concept of zone, but we enable it in order to support the ring's request
+ // minimization feature.
+ ZoneAwarenessEnabled: true,
+
+ // We need a response from at least 1 owner. The assumption is that we have 1 owner per zone
+ // but it's not guaranteed (depends on how the application was deployed). The safest thing
+ // we can do here is to just request a successful response from at least 1 zone.
+ MaxUnavailableZones: uniqueZones - 1,
+ })
+ }
+ return result, nil
+}
+
+// ShuffleShard wraps PartitionRing.ShuffleShard().
+//
+// The PartitionRing embedded in the returned PartitionInstanceRing is based on a snapshot of the partitions ring
+// at the time this function gets called. This means that subsequent changes to the partitions ring will not
+// be reflected in the returned PartitionInstanceRing. 
+func (r *PartitionInstanceRing) ShuffleShard(identifier string, size int) (*PartitionInstanceRing, error) {
+ partitionsSubring, err := r.PartitionRing().ShuffleShard(identifier, size)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewPartitionInstanceRing(newStaticPartitionRingReader(partitionsSubring), r.instancesRing, r.heartbeatTimeout), nil
+}
+
+// ShuffleShardWithLookback wraps PartitionRing.ShuffleShardWithLookback().
+//
+// The PartitionRing embedded in the returned PartitionInstanceRing is based on a snapshot of the partitions ring
+// at the time this function gets called. This means that subsequent changes to the partitions ring will not
+// be reflected in the returned PartitionInstanceRing.
+func (r *PartitionInstanceRing) ShuffleShardWithLookback(identifier string, size int, lookbackPeriod time.Duration, now time.Time) (*PartitionInstanceRing, error) {
+ partitionsSubring, err := r.PartitionRing().ShuffleShardWithLookback(identifier, size, lookbackPeriod, now)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewPartitionInstanceRing(newStaticPartitionRingReader(partitionsSubring), r.instancesRing, r.heartbeatTimeout), nil
+}
+
+type staticPartitionRingReader struct {
+ ring *PartitionRing
+}
+
+func newStaticPartitionRingReader(ring *PartitionRing) staticPartitionRingReader {
+ return staticPartitionRingReader{
+ ring: ring,
+ }
+}
+
+func (m staticPartitionRingReader) PartitionRing() *PartitionRing {
+ return m.ring
+}
+
+// uniqueZonesFromInstances returns the unique list of zones among the input instances. The input buf MUST have
+// zero length, but may have spare capacity in order to avoid memory allocations.
+func uniqueZonesFromInstances(instances []InstanceDesc, buf []string) []string {
+ for _, instance := range instances {
+ if !slices.Contains(buf, instance.Zone) {
+ buf = append(buf, instance.Zone)
+ }
+ }
+
+ return buf
+}
diff --git a/vendor/github.com/grafana/dskit/ring/partition_ring.go b/vendor/github.com/grafana/dskit/ring/partition_ring.go
new file mode 100644
index 0000000000000..911de476c865f
--- /dev/null
+++ b/vendor/github.com/grafana/dskit/ring/partition_ring.go
@@ -0,0 +1,487 @@
+package ring
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "math/rand"
+ "strconv"
+ "time"
+
+ "golang.org/x/exp/slices"
+
+ shardUtil "github.com/grafana/dskit/ring/shard"
+)
+
+var ErrNoActivePartitionFound = fmt.Errorf("no active partition found")
+
+// PartitionRing holds an immutable view of the partitions ring.
+//
+// Design principles:
+// - Immutable: the PartitionRingDesc held by PartitionRing is immutable. When PartitionRingDesc changes
+// a new instance of PartitionRing should be created. The partitions ring is expected to change infrequently
+// (e.g. there's no heartbeat), so creating a new PartitionRing each time the partitions ring changes is
+// not expected to have a significant overhead.
+type PartitionRing struct {
+ // desc is a snapshot of the partition ring. This data is immutable and MUST NOT be modified.
+ desc PartitionRingDesc
+
+ // ringTokens is a sorted list of all tokens registered by all partitions.
+ ringTokens Tokens
+
+ // partitionByToken is a map where the key is a registered token and the value is the ID of the partition
+ // that registered that token.
+ partitionByToken map[Token]int32
+
+ // ownersByPartition is a map where the key is the partition ID and the value is a list of owner IDs.
+ ownersByPartition map[int32][]string
+
+ // shuffleShardCache is used to cache subrings generated with shuffle sharding. 
+ shuffleShardCache *partitionRingShuffleShardCache
+
+ // activePartitionsCount is a saved count of active partitions to avoid recomputing it.
+ activePartitionsCount int
+}
+
+func NewPartitionRing(desc PartitionRingDesc) *PartitionRing {
+ return &PartitionRing{
+ desc: desc,
+ ringTokens: desc.tokens(),
+ partitionByToken: desc.partitionByToken(),
+ ownersByPartition: desc.ownersByPartition(),
+ activePartitionsCount: desc.activePartitionsCount(),
+ shuffleShardCache: newPartitionRingShuffleShardCache(),
+ }
+}
+
+// ActivePartitionForKey returns the partition for the given key. Only active partitions are considered.
+// Only one partition is returned: in other terms, the replication factor is always 1.
+func (r *PartitionRing) ActivePartitionForKey(key uint32) (int32, error) {
+ var (
+ start = searchToken(r.ringTokens, key)
+ iterations = 0
+ tokensCount = len(r.ringTokens)
+ )
+
+ for i := start; iterations < tokensCount; i++ {
+ iterations++
+
+ if i >= tokensCount {
+ i %= len(r.ringTokens)
+ }
+
+ token := r.ringTokens[i]
+
+ partitionID, ok := r.partitionByToken[Token(token)]
+ if !ok {
+ return 0, ErrInconsistentTokensInfo
+ }
+
+ partition, ok := r.desc.Partitions[partitionID]
+ if !ok {
+ return 0, ErrInconsistentTokensInfo
+ }
+
+ // If the partition is not active we'll keep walking the ring.
+ if partition.IsActive() {
+ return partitionID, nil
+ }
+ }
+
+ return 0, ErrNoActivePartitionFound
+}
+
+// ShuffleShardSize returns the number of partitions that would be in the result of a ShuffleShard call with the same size.
+func (r *PartitionRing) ShuffleShardSize(size int) int {
+ if size <= 0 || size >= r.activePartitionsCount {
+ return r.activePartitionsCount
+ }
+ return size
+}
+
+// ShuffleShard returns a subring for the provided identifier (eg. a tenant ID)
+// and size (number of partitions).
+//
+// The algorithm used to build the subring is a shuffle sharder based on probabilistic
+// hashing. We pick N unique partitions, walking the ring starting from random but
+// predictable numbers. The random generator is initialised with a seed based on the
+// provided identifier.
+//
+// This function returns a subring containing ONLY ACTIVE partitions.
+//
+// This function supports caching.
+//
+// This implementation guarantees:
+//
+// - Stability: given the same ring, two invocations return the same result.
+//
+// - Consistency: adding/removing 1 partition from the ring generates a resulting
+// subring with no more than 1 difference.
+//
+// - Shuffling: probabilistically, for a large enough cluster each identifier gets a different
+// set of instances, with a reduced number of overlapping instances between two identifiers.
+func (r *PartitionRing) ShuffleShard(identifier string, size int) (*PartitionRing, error) {
+ if cached := r.shuffleShardCache.getSubring(identifier, size); cached != nil {
+ return cached, nil
+ }
+
+ // No need to pass the time if there's no lookback.
+ subring, err := r.shuffleShard(identifier, size, 0, time.Time{})
+ if err != nil {
+ return nil, err
+ }
+
+ r.shuffleShardCache.setSubring(identifier, size, subring)
+ return subring, nil
+}
+
+// ShuffleShardWithLookback is like ShuffleShard() but the returned subring includes all instances
+// that have been part of the identifier's shard in the [now - lookbackPeriod, now] time window.
+//
+// This function can return a mix of ACTIVE and INACTIVE partitions. 
INACTIVE partitions are only
+// included if they were part of the identifier's shard within the lookbackPeriod. PENDING partitions
+// are never returned.
+//
+// This function supports caching, but the cache will only be effective if successive calls for the
+// same identifier are with the same lookbackPeriod and increasing values of now.
+func (r *PartitionRing) ShuffleShardWithLookback(identifier string, size int, lookbackPeriod time.Duration, now time.Time) (*PartitionRing, error) {
+ if cached := r.shuffleShardCache.getSubringWithLookback(identifier, size, lookbackPeriod, now); cached != nil {
+ return cached, nil
+ }
+
+ subring, err := r.shuffleShard(identifier, size, lookbackPeriod, now)
+ if err != nil {
+ return nil, err
+ }
+
+ r.shuffleShardCache.setSubringWithLookback(identifier, size, lookbackPeriod, now, subring)
+ return subring, nil
+}
+
+func (r *PartitionRing) shuffleShard(identifier string, size int, lookbackPeriod time.Duration, now time.Time) (*PartitionRing, error) {
+ // If the size is too small or too large, run with a size equal to the total number of partitions.
+ // We have to run the function anyway because the logic may filter out some INACTIVE partitions.
+ if size <= 0 || size >= len(r.desc.Partitions) {
+ size = len(r.desc.Partitions)
+ }
+
+ var lookbackUntil int64
+ if lookbackPeriod > 0 {
+ lookbackUntil = now.Add(-lookbackPeriod).Unix()
+ }
+
+ // Initialise the random generator used to select partitions in the ring.
+ // Since there are no zones in the partitions ring, the seed is built with an empty zone.
+ random := rand.New(rand.NewSource(shardUtil.ShuffleShardSeed(identifier, "")))
+
+ // To select one more partition while guaranteeing the "consistency" property,
+ // we pick a random value from the generator and resolve uniqueness collisions
+ // (if any) by continuing to walk the ring.
+ tokensCount := len(r.ringTokens)
+
+ result := make(map[int32]struct{}, size)
+ exclude := map[int32]struct{}{}
+
+ for len(result) < size {
+ start := searchToken(r.ringTokens, random.Uint32())
+ iterations := 0
+ found := false
+
+ for p := start; !found && iterations < tokensCount; p++ {
+ iterations++
+
+ // Wrap p around in the ring.
+ if p >= tokensCount {
+ p %= tokensCount
+ }
+
+ pid, ok := r.partitionByToken[Token(r.ringTokens[p])]
+ if !ok {
+ return nil, ErrInconsistentTokensInfo
+ }
+
+ // Ensure the partition has not already been included or excluded.
+ if _, ok := result[pid]; ok {
+ continue
+ }
+ if _, ok := exclude[pid]; ok {
+ continue
+ }
+
+ p, ok := r.desc.Partitions[pid]
+ if !ok {
+ return nil, ErrInconsistentTokensInfo
+ }
+
+ // PENDING partitions should be skipped because they're not ready for read or write yet,
+ // and they don't need to be looked back.
+ if p.IsPending() {
+ exclude[pid] = struct{}{}
+ continue
+ }
+
+ var (
+ withinLookbackPeriod = lookbackPeriod > 0 && p.GetStateTimestamp() >= lookbackUntil
+ shouldExtend = withinLookbackPeriod
+ shouldInclude = p.IsActive() || withinLookbackPeriod
+ )
+
+ // Either include or exclude the found partition.
+ if shouldInclude {
+ result[pid] = struct{}{}
+ } else {
+ exclude[pid] = struct{}{}
+ }
+
+ // Extend the shard, if requested.
+ if shouldExtend {
+ size++
+ }
+
+ // We can stop searching for other partitions only if this partition was included
+ // and no extension was requested, which means it's the "stop partition" for this cycle.
+ if shouldInclude && !shouldExtend {
+ found = true
+ }
+ }
+
+ // If we iterated over all tokens, and no new partition has been found, we can stop looking for more partitions. 
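+ // (This can happen, for example, when lookback extended the requested size
+ // beyond the number of partitions eligible for inclusion.)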
+ if !found { + break + } + } + + return NewPartitionRing(r.desc.WithPartitions(result)), nil +} + +// PartitionsCount returns the number of partitions in the ring. +func (r *PartitionRing) PartitionsCount() int { + return len(r.desc.Partitions) +} + +// ActivePartitionsCount returns the number of active partitions in the ring. +func (r *PartitionRing) ActivePartitionsCount() int { + return r.activePartitionsCount +} + +// Partitions returns the partitions in the ring. +// The returned slice is a deep copy, so the caller can freely manipulate it. +func (r *PartitionRing) Partitions() []PartitionDesc { + res := make([]PartitionDesc, 0, len(r.desc.Partitions)) + + for _, partition := range r.desc.Partitions { + res = append(res, partition.Clone()) + } + + return res +} + +// PartitionIDs returns a sorted list of all partition IDs in the ring. +// The returned slice is a copy, so the caller can freely manipulate it. +func (r *PartitionRing) PartitionIDs() []int32 { + ids := make([]int32, 0, len(r.desc.Partitions)) + + for id := range r.desc.Partitions { + ids = append(ids, id) + } + + slices.Sort(ids) + return ids +} + +// PendingPartitionIDs returns a sorted list of all PENDING partition IDs in the ring. +// The returned slice is a copy, so the caller can freely manipulate it. +func (r *PartitionRing) PendingPartitionIDs() []int32 { + ids := make([]int32, 0, len(r.desc.Partitions)) + + for id, partition := range r.desc.Partitions { + if partition.IsPending() { + ids = append(ids, id) + } + } + + slices.Sort(ids) + return ids +} + +// ActivePartitionIDs returns a sorted list of all ACTIVE partition IDs in the ring. +// The returned slice is a copy, so the caller can freely manipulate it. +func (r *PartitionRing) ActivePartitionIDs() []int32 { + ids := make([]int32, 0, len(r.desc.Partitions)) + + for id, partition := range r.desc.Partitions { + if partition.IsActive() { + ids = append(ids, id) + } + } + + slices.Sort(ids) + return ids +} + +// InactivePartitionIDs returns a sorted list of all INACTIVE partition IDs in the ring. +// The returned slice is a copy, so the caller can freely manipulate it. +func (r *PartitionRing) InactivePartitionIDs() []int32 { + ids := make([]int32, 0, len(r.desc.Partitions)) + + for id, partition := range r.desc.Partitions { + if partition.IsInactive() { + ids = append(ids, id) + } + } + + slices.Sort(ids) + return ids +} + +// PartitionOwnerIDs returns a list of owner IDs for the given partitionID. +// The returned slice is NOT a copy and should be never modified by the caller. +func (r *PartitionRing) PartitionOwnerIDs(partitionID int32) (doNotModify []string) { + return r.ownersByPartition[partitionID] +} + +// PartitionOwnerIDsCopy is like PartitionOwnerIDs(), but the returned slice is a copy, +// so the caller can freely manipulate it. +func (r *PartitionRing) PartitionOwnerIDsCopy(partitionID int32) []string { + ids := r.ownersByPartition[partitionID] + if len(ids) == 0 { + return nil + } + + return slices.Clone(ids) +} + +func (r *PartitionRing) String() string { + buf := bytes.Buffer{} + for pid, pd := range r.desc.Partitions { + buf.WriteString(fmt.Sprintf(" %d:%v", pid, pd.State.String())) + } + + return fmt.Sprintf("PartitionRing{ownersCount: %d, partitionsCount: %d, partitions: {%s}}", len(r.desc.Owners), len(r.desc.Partitions), buf.String()) +} + +// GetTokenRangesForPartition returns token-range owned by given partition. 
Note that this +// method does NOT take partition state into account, so if only active partitions should be +// considered, then PartitionRing with only active partitions must be created first (e.g. using ShuffleShard method). +func (r *PartitionRing) GetTokenRangesForPartition(partitionID int32) (TokenRanges, error) { + partition, ok := r.desc.Partitions[partitionID] + if !ok { + return nil, ErrPartitionDoesNotExist + } + + // 1 range (2 values) per token + one additional if we need to split the rollover range. + ranges := make(TokenRanges, 0, 2*(len(partition.Tokens)+1)) + + addRange := func(start, end uint32) { + // check if we can group ranges. If so, we just update end of previous range. + if len(ranges) > 0 && ranges[len(ranges)-1] == start-1 { + ranges[len(ranges)-1] = end + } else { + ranges = append(ranges, start, end) + } + } + + // "last" range is range that includes token math.MaxUint32. + ownsLastRange := false + startOfLastRange := uint32(0) + + // We start with all tokens, but will remove tokens we already skipped, to let binary search do less work. + ringTokens := r.ringTokens + + for iter, t := range partition.Tokens { + lastOwnedToken := t - 1 + + ix := searchToken(ringTokens, lastOwnedToken) + prevIx := ix - 1 + + if prevIx < 0 { + // We can only find "last" range during first iteration. + if iter > 0 { + return nil, ErrInconsistentTokensInfo + } + + prevIx = len(ringTokens) - 1 + ownsLastRange = true + + startOfLastRange = ringTokens[prevIx] + + // We can only claim token 0 if our actual token in the ring (which is exclusive end of range) was not 0. + if t > 0 { + addRange(0, lastOwnedToken) + } + } else { + addRange(ringTokens[prevIx], lastOwnedToken) + } + + // Reduce number of tokens we need to search through. We keep current token to serve as min boundary for next search, + // to make sure we don't find another "last" range (where prevIx < 0). + ringTokens = ringTokens[ix:] + } + + if ownsLastRange { + addRange(startOfLastRange, math.MaxUint32) + } + + return ranges, nil +} + +// ActivePartitionBatchRing wraps PartitionRing and implements DoBatchRing to lookup ACTIVE partitions. +type ActivePartitionBatchRing struct { + ring *PartitionRing +} + +func NewActivePartitionBatchRing(ring *PartitionRing) *ActivePartitionBatchRing { + return &ActivePartitionBatchRing{ + ring: ring, + } +} + +// InstancesCount returns the number of active partitions in the ring. +// +// InstancesCount implements DoBatchRing.InstancesCount. +func (r *ActivePartitionBatchRing) InstancesCount() int { + return r.ring.ActivePartitionsCount() +} + +// ReplicationFactor returns 1 as partitions replication factor: an entry (looked by key via Get()) +// is always stored in 1 and only 1 partition. +// +// ReplicationFactor implements DoBatchRing.ReplicationFactor. +func (r *ActivePartitionBatchRing) ReplicationFactor() int { + return 1 +} + +// Get implements DoBatchRing.Get. +func (r *ActivePartitionBatchRing) Get(key uint32, _ Operation, bufInstances []InstanceDesc, _, _ []string) (ReplicationSet, error) { + partitionID, err := r.ring.ActivePartitionForKey(key) + if err != nil { + return ReplicationSet{}, err + } + + // Ensure we have enough capacity in bufInstances. 
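+ // bufInstances is a scratch buffer the caller may pass to avoid an allocation per
+ // lookup: reuse it when it can hold at least one entry, otherwise allocate a
+ // single-element slice.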
+ if cap(bufInstances) < 1 { + bufInstances = []InstanceDesc{{}} + } else { + bufInstances = bufInstances[:1] + } + + partitionIDString := strconv.Itoa(int(partitionID)) + + bufInstances[0] = InstanceDesc{ + Addr: partitionIDString, + Timestamp: 0, + State: ACTIVE, + Id: partitionIDString, + } + + return ReplicationSet{ + Instances: bufInstances, + MaxErrors: 0, + MaxUnavailableZones: 0, + ZoneAwarenessEnabled: false, + }, nil +} diff --git a/vendor/github.com/grafana/dskit/ring/partition_ring_desc.pb.go b/vendor/github.com/grafana/dskit/ring/partition_ring_desc.pb.go new file mode 100644 index 0000000000000..8f47b1c562ea7 --- /dev/null +++ b/vendor/github.com/grafana/dskit/ring/partition_ring_desc.pb.go @@ -0,0 +1,1545 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: partition_ring_desc.proto + +package ring + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type PartitionState int32 + +const ( + PartitionUnknown PartitionState = 0 + // Pending partition is a partition that is about to be switched to ACTIVE. This state is used + // to let owners to attach to the partition and get ready to handle the partition. + // + // When a partition is in this state, it must not be used for writing or reading. + PartitionPending PartitionState = 1 + // Active partition in read-write mode. + PartitionActive PartitionState = 2 + // Inactive partition in read-only mode. This partition will be deleted after a grace period, + // unless its state changes to Active again. + PartitionInactive PartitionState = 3 + // Deleted partition. This state is not visible to ring clients: it's only used to propagate + // via memberlist the information that a partition has been deleted. + PartitionDeleted PartitionState = 4 +) + +var PartitionState_name = map[int32]string{ + 0: "PartitionUnknown", + 1: "PartitionPending", + 2: "PartitionActive", + 3: "PartitionInactive", + 4: "PartitionDeleted", +} + +var PartitionState_value = map[string]int32{ + "PartitionUnknown": 0, + "PartitionPending": 1, + "PartitionActive": 2, + "PartitionInactive": 3, + "PartitionDeleted": 4, +} + +func (PartitionState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_4df2762174d93dc4, []int{0} +} + +type OwnerState int32 + +const ( + OwnerUnknown OwnerState = 0 + // Active owner. + OwnerActive OwnerState = 1 + // Deleted owner. This state is not visible to ring clients: it's only used to propagate + // via memberlist the information that a owner has been deleted. Owners in this state + // are removed before client can see them. 
+ OwnerDeleted OwnerState = 2 +) + +var OwnerState_name = map[int32]string{ + 0: "OwnerUnknown", + 1: "OwnerActive", + 2: "OwnerDeleted", +} + +var OwnerState_value = map[string]int32{ + "OwnerUnknown": 0, + "OwnerActive": 1, + "OwnerDeleted": 2, +} + +func (OwnerState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_4df2762174d93dc4, []int{1} +} + +// PartitionRingDesc holds the state of the partitions ring. +type PartitionRingDesc struct { + // Mapping between partition ID and partition info. + Partitions map[int32]PartitionDesc `protobuf:"bytes,1,rep,name=partitions,proto3" json:"partitions" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Mapping between instance ID and partition ownership info. + Owners map[string]OwnerDesc `protobuf:"bytes,2,rep,name=owners,proto3" json:"owners" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *PartitionRingDesc) Reset() { *m = PartitionRingDesc{} } +func (*PartitionRingDesc) ProtoMessage() {} +func (*PartitionRingDesc) Descriptor() ([]byte, []int) { + return fileDescriptor_4df2762174d93dc4, []int{0} +} +func (m *PartitionRingDesc) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartitionRingDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PartitionRingDesc.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PartitionRingDesc) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartitionRingDesc.Merge(m, src) +} +func (m *PartitionRingDesc) XXX_Size() int { + return m.Size() +} +func (m *PartitionRingDesc) XXX_DiscardUnknown() { + xxx_messageInfo_PartitionRingDesc.DiscardUnknown(m) +} + +var xxx_messageInfo_PartitionRingDesc proto.InternalMessageInfo + +func (m *PartitionRingDesc) GetPartitions() map[int32]PartitionDesc { + if m != nil { + return m.Partitions + } + return nil +} + +func (m *PartitionRingDesc) GetOwners() map[string]OwnerDesc { + if m != nil { + return m.Owners + } + return nil +} + +// PartitionDesc holds the state of a single partition. +type PartitionDesc struct { + // The partition ID. This value is the same as the key in the partitions map in PartitionRingDesc. + Id int32 `protobuf:"varint,4,opt,name=id,proto3" json:"id,omitempty"` + // Unique tokens, generated with deterministic token generator. Tokens MUST be immutable: + // if tokens get changed, the change will not be propagated via memberlist. + Tokens []uint32 `protobuf:"varint,1,rep,packed,name=tokens,proto3" json:"tokens,omitempty"` + // The state of the partition. + State PartitionState `protobuf:"varint,2,opt,name=state,proto3,enum=ring.PartitionState" json:"state,omitempty"` + // Unix timestamp (with seconds precision) of when has the state changed last time for this partition. 
+ StateTimestamp int64 `protobuf:"varint,3,opt,name=stateTimestamp,proto3" json:"stateTimestamp,omitempty"` +} + +func (m *PartitionDesc) Reset() { *m = PartitionDesc{} } +func (*PartitionDesc) ProtoMessage() {} +func (*PartitionDesc) Descriptor() ([]byte, []int) { + return fileDescriptor_4df2762174d93dc4, []int{1} +} +func (m *PartitionDesc) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartitionDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PartitionDesc.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PartitionDesc) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartitionDesc.Merge(m, src) +} +func (m *PartitionDesc) XXX_Size() int { + return m.Size() +} +func (m *PartitionDesc) XXX_DiscardUnknown() { + xxx_messageInfo_PartitionDesc.DiscardUnknown(m) +} + +var xxx_messageInfo_PartitionDesc proto.InternalMessageInfo + +func (m *PartitionDesc) GetId() int32 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *PartitionDesc) GetTokens() []uint32 { + if m != nil { + return m.Tokens + } + return nil +} + +func (m *PartitionDesc) GetState() PartitionState { + if m != nil { + return m.State + } + return PartitionUnknown +} + +func (m *PartitionDesc) GetStateTimestamp() int64 { + if m != nil { + return m.StateTimestamp + } + return 0 +} + +// OwnerDesc holds the information of a partition owner. +type OwnerDesc struct { + // Partition that belongs to this owner. A owner can own only 1 partition, but 1 partition can be + // owned by multiple owners. + OwnedPartition int32 `protobuf:"varint,1,opt,name=ownedPartition,proto3" json:"ownedPartition,omitempty"` + // The owner state. This field is used to propagate deletions via memberlist. + State OwnerState `protobuf:"varint,2,opt,name=state,proto3,enum=ring.OwnerState" json:"state,omitempty"` + // Unix timestamp (with seconds precision) of when the data for the owner has been updated the last time. + // This timestamp is used to resolve conflicts when merging updates via memberlist (the most recent + // update wins). 
+ UpdatedTimestamp int64 `protobuf:"varint,3,opt,name=updatedTimestamp,proto3" json:"updatedTimestamp,omitempty"` +} + +func (m *OwnerDesc) Reset() { *m = OwnerDesc{} } +func (*OwnerDesc) ProtoMessage() {} +func (*OwnerDesc) Descriptor() ([]byte, []int) { + return fileDescriptor_4df2762174d93dc4, []int{2} +} +func (m *OwnerDesc) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OwnerDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_OwnerDesc.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *OwnerDesc) XXX_Merge(src proto.Message) { + xxx_messageInfo_OwnerDesc.Merge(m, src) +} +func (m *OwnerDesc) XXX_Size() int { + return m.Size() +} +func (m *OwnerDesc) XXX_DiscardUnknown() { + xxx_messageInfo_OwnerDesc.DiscardUnknown(m) +} + +var xxx_messageInfo_OwnerDesc proto.InternalMessageInfo + +func (m *OwnerDesc) GetOwnedPartition() int32 { + if m != nil { + return m.OwnedPartition + } + return 0 +} + +func (m *OwnerDesc) GetState() OwnerState { + if m != nil { + return m.State + } + return OwnerUnknown +} + +func (m *OwnerDesc) GetUpdatedTimestamp() int64 { + if m != nil { + return m.UpdatedTimestamp + } + return 0 +} + +func init() { + proto.RegisterEnum("ring.PartitionState", PartitionState_name, PartitionState_value) + proto.RegisterEnum("ring.OwnerState", OwnerState_name, OwnerState_value) + proto.RegisterType((*PartitionRingDesc)(nil), "ring.PartitionRingDesc") + proto.RegisterMapType((map[string]OwnerDesc)(nil), "ring.PartitionRingDesc.OwnersEntry") + proto.RegisterMapType((map[int32]PartitionDesc)(nil), "ring.PartitionRingDesc.PartitionsEntry") + proto.RegisterType((*PartitionDesc)(nil), "ring.PartitionDesc") + proto.RegisterType((*OwnerDesc)(nil), "ring.OwnerDesc") +} + +func init() { proto.RegisterFile("partition_ring_desc.proto", fileDescriptor_4df2762174d93dc4) } + +var fileDescriptor_4df2762174d93dc4 = []byte{ + // 497 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x93, 0x31, 0x6f, 0xd3, 0x40, + 0x14, 0xc7, 0x7d, 0x76, 0x12, 0xa9, 0x2f, 0x34, 0x39, 0xae, 0x05, 0x99, 0x0c, 0x47, 0x14, 0x44, + 0x09, 0x91, 0x48, 0xa5, 0xc0, 0x80, 0xd8, 0x52, 0x95, 0x01, 0x24, 0x44, 0x65, 0x60, 0xae, 0x9c, + 0xf8, 0x30, 0xa7, 0x34, 0x77, 0x91, 0x7d, 0x6e, 0xd5, 0x05, 0xb1, 0x31, 0xb0, 0xf0, 0x31, 0xf8, + 0x22, 0x48, 0x1d, 0x33, 0x76, 0x42, 0xc4, 0x59, 0x18, 0xfb, 0x11, 0x90, 0xcf, 0xae, 0x63, 0xbb, + 0xea, 0x76, 0xef, 0x7f, 0xef, 0xfd, 0xfe, 0xff, 0x3b, 0x9f, 0xe1, 0xc1, 0xc2, 0x0d, 0x14, 0x57, + 0x5c, 0x8a, 0xe3, 0x80, 0x0b, 0xff, 0xd8, 0x63, 0xe1, 0x74, 0xb8, 0x08, 0xa4, 0x92, 0xa4, 0x96, + 0x08, 0x9d, 0x67, 0x3e, 0x57, 0x5f, 0xa2, 0xc9, 0x70, 0x2a, 0xe7, 0xfb, 0xbe, 0xf4, 0xe5, 0xbe, + 0xde, 0x9c, 0x44, 0x9f, 0x75, 0xa5, 0x0b, 0xbd, 0x4a, 0x87, 0x7a, 0xbf, 0x4d, 0xb8, 0x7b, 0x74, + 0x8d, 0x74, 0xb8, 0xf0, 0x0f, 0x59, 0x38, 0x25, 0xef, 0x00, 0x72, 0x9f, 0xd0, 0x46, 0x5d, 0xab, + 0xdf, 0x1c, 0x3d, 0x19, 0x26, 0xfc, 0xe1, 0x8d, 0xe6, 0x8d, 0x12, 0xbe, 0x16, 0x2a, 0x38, 0x3f, + 0xa8, 0x5d, 0xfc, 0x79, 0x68, 0x38, 0x05, 0x00, 0x19, 0x43, 0x43, 0x9e, 0x09, 0x16, 0x84, 0xb6, + 0xa9, 0x51, 0x8f, 0x6e, 0x43, 0xbd, 0xd7, 0x5d, 0x45, 0x4c, 0x36, 0xd8, 0x71, 0xa0, 0x5d, 0xf1, + 0x21, 0x18, 0xac, 0x19, 0x3b, 0xb7, 0x51, 0x17, 0xf5, 0xeb, 0x4e, 0xb2, 0x24, 0x4f, 0xa1, 0x7e, + 0xea, 0x9e, 0x44, 0xcc, 0x36, 0xbb, 0xa8, 0xdf, 0x1c, 0xed, 0x54, 0x6c, 0x12, 
0x0b, 0x27, 0xed, + 0x78, 0x65, 0xbe, 0x44, 0x9d, 0xb7, 0xd0, 0x2c, 0x18, 0x16, 0x79, 0x5b, 0x29, 0xef, 0x71, 0x99, + 0xd7, 0x4e, 0x79, 0x7a, 0xa6, 0xc2, 0xea, 0xfd, 0x40, 0xb0, 0x5d, 0x32, 0x22, 0x2d, 0x30, 0xb9, + 0x67, 0xd7, 0x74, 0x3a, 0x93, 0x7b, 0xe4, 0x3e, 0x34, 0x94, 0x9c, 0xb1, 0xec, 0x3e, 0xb7, 0x9d, + 0xac, 0x22, 0x03, 0xa8, 0x87, 0xca, 0x55, 0xa9, 0x49, 0x6b, 0xb4, 0x5b, 0x09, 0xfd, 0x21, 0xd9, + 0x73, 0xd2, 0x16, 0xb2, 0x07, 0x2d, 0xbd, 0xf8, 0xc8, 0xe7, 0x2c, 0x54, 0xee, 0x7c, 0x61, 0x5b, + 0x5d, 0xd4, 0xb7, 0x9c, 0x8a, 0xda, 0xfb, 0x8e, 0x60, 0x2b, 0x8f, 0x99, 0x4c, 0x25, 0xb7, 0xe8, + 0xe5, 0xcc, 0xec, 0xce, 0x2a, 0x2a, 0xd9, 0x2b, 0x27, 0xc1, 0x85, 0xe3, 0x96, 0x52, 0x0c, 0x00, + 0x47, 0x0b, 0xcf, 0x55, 0xcc, 0xab, 0xe6, 0xb8, 0xa1, 0x0f, 0xbe, 0x42, 0xab, 0x7c, 0x14, 0xb2, + 0x0b, 0x38, 0x57, 0x3e, 0x89, 0x99, 0x90, 0x67, 0x02, 0x1b, 0x25, 0xf5, 0x88, 0x09, 0x8f, 0x0b, + 0x1f, 0x23, 0xb2, 0x53, 0xf8, 0xea, 0xe3, 0xa9, 0xe2, 0xa7, 0x0c, 0x9b, 0xe4, 0x5e, 0xe1, 0xc5, + 0xbe, 0x11, 0x6e, 0x2a, 0x5b, 0x25, 0xc2, 0x21, 0x3b, 0x61, 0x8a, 0x79, 0xb8, 0x36, 0x18, 0x03, + 0x6c, 0x0e, 0x40, 0x30, 0xdc, 0xd1, 0xd5, 0xc6, 0xb7, 0x9d, 0xbd, 0x81, 0x8c, 0x8e, 0xf2, 0x96, + 0x6b, 0x84, 0x79, 0xf0, 0x62, 0xb9, 0xa2, 0xc6, 0xe5, 0x8a, 0x1a, 0x57, 0x2b, 0x8a, 0xbe, 0xc5, + 0x14, 0xfd, 0x8a, 0x29, 0xba, 0x88, 0x29, 0x5a, 0xc6, 0x14, 0xfd, 0x8d, 0x29, 0xfa, 0x17, 0x53, + 0xe3, 0x2a, 0xa6, 0xe8, 0xe7, 0x9a, 0x1a, 0xcb, 0x35, 0x35, 0x2e, 0xd7, 0xd4, 0x98, 0x34, 0xf4, + 0xff, 0xf5, 0xfc, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc9, 0xd1, 0xa7, 0xbd, 0xb1, 0x03, 0x00, + 0x00, +} + +func (x PartitionState) String() string { + s, ok := PartitionState_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x OwnerState) String() string { + s, ok := OwnerState_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *PartitionRingDesc) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PartitionRingDesc) + if !ok { + that2, ok := that.(PartitionRingDesc) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Partitions) != len(that1.Partitions) { + return false + } + for i := range this.Partitions { + a := this.Partitions[i] + b := that1.Partitions[i] + if !(&a).Equal(&b) { + return false + } + } + if len(this.Owners) != len(that1.Owners) { + return false + } + for i := range this.Owners { + a := this.Owners[i] + b := that1.Owners[i] + if !(&a).Equal(&b) { + return false + } + } + return true +} +func (this *PartitionDesc) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PartitionDesc) + if !ok { + that2, ok := that.(PartitionDesc) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Id != that1.Id { + return false + } + if len(this.Tokens) != len(that1.Tokens) { + return false + } + for i := range this.Tokens { + if this.Tokens[i] != that1.Tokens[i] { + return false + } + } + if this.State != that1.State { + return false + } + if this.StateTimestamp != that1.StateTimestamp { + return false + } + return true +} +func (this *OwnerDesc) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*OwnerDesc) + if !ok { + that2, ok := that.(OwnerDesc) + if ok { + that1 = &that2 + } else { + return false + } + } + if 
that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.OwnedPartition != that1.OwnedPartition { + return false + } + if this.State != that1.State { + return false + } + if this.UpdatedTimestamp != that1.UpdatedTimestamp { + return false + } + return true +} +func (this *PartitionRingDesc) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&ring.PartitionRingDesc{") + keysForPartitions := make([]int32, 0, len(this.Partitions)) + for k, _ := range this.Partitions { + keysForPartitions = append(keysForPartitions, k) + } + github_com_gogo_protobuf_sortkeys.Int32s(keysForPartitions) + mapStringForPartitions := "map[int32]PartitionDesc{" + for _, k := range keysForPartitions { + mapStringForPartitions += fmt.Sprintf("%#v: %#v,", k, this.Partitions[k]) + } + mapStringForPartitions += "}" + if this.Partitions != nil { + s = append(s, "Partitions: "+mapStringForPartitions+",\n") + } + keysForOwners := make([]string, 0, len(this.Owners)) + for k, _ := range this.Owners { + keysForOwners = append(keysForOwners, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOwners) + mapStringForOwners := "map[string]OwnerDesc{" + for _, k := range keysForOwners { + mapStringForOwners += fmt.Sprintf("%#v: %#v,", k, this.Owners[k]) + } + mapStringForOwners += "}" + if this.Owners != nil { + s = append(s, "Owners: "+mapStringForOwners+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *PartitionDesc) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&ring.PartitionDesc{") + s = append(s, "Id: "+fmt.Sprintf("%#v", this.Id)+",\n") + s = append(s, "Tokens: "+fmt.Sprintf("%#v", this.Tokens)+",\n") + s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") + s = append(s, "StateTimestamp: "+fmt.Sprintf("%#v", this.StateTimestamp)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OwnerDesc) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&ring.OwnerDesc{") + s = append(s, "OwnedPartition: "+fmt.Sprintf("%#v", this.OwnedPartition)+",\n") + s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") + s = append(s, "UpdatedTimestamp: "+fmt.Sprintf("%#v", this.UpdatedTimestamp)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringPartitionRingDesc(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *PartitionRingDesc) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PartitionRingDesc) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartitionRingDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Owners) > 0 { + for k := range m.Owners { + v := m.Owners[k] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = 
encodeVarintPartitionRingDesc(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Partitions) > 0 { + for k := range m.Partitions { + v := m.Partitions[k] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(k)) + i-- + dAtA[i] = 0x8 + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PartitionDesc) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PartitionDesc) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartitionDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Id != 0 { + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x20 + } + if m.StateTimestamp != 0 { + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(m.StateTimestamp)) + i-- + dAtA[i] = 0x18 + } + if m.State != 0 { + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + if len(m.Tokens) > 0 { + dAtA4 := make([]byte, len(m.Tokens)*10) + var j3 int + for _, num := range m.Tokens { + for num >= 1<<7 { + dAtA4[j3] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j3++ + } + dAtA4[j3] = uint8(num) + j3++ + } + i -= j3 + copy(dAtA[i:], dAtA4[:j3]) + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(j3)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OwnerDesc) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OwnerDesc) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OwnerDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.UpdatedTimestamp != 0 { + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(m.UpdatedTimestamp)) + i-- + dAtA[i] = 0x18 + } + if m.State != 0 { + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + if m.OwnedPartition != 0 { + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(m.OwnedPartition)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintPartitionRingDesc(dAtA []byte, offset int, v uint64) int { + offset -= sovPartitionRingDesc(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PartitionRingDesc) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Partitions) > 0 { + for k, v := range m.Partitions { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + sovPartitionRingDesc(uint64(k)) + 1 + l + sovPartitionRingDesc(uint64(l)) + n += mapEntrySize + 1 + sovPartitionRingDesc(uint64(mapEntrySize)) + } + } + if len(m.Owners) > 0 { + for k, v := range m.Owners { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovPartitionRingDesc(uint64(len(k))) + 1 + l + sovPartitionRingDesc(uint64(l)) + n += mapEntrySize + 1 + sovPartitionRingDesc(uint64(mapEntrySize)) + } + } + return n +} 
+ +func (m *PartitionDesc) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Tokens) > 0 { + l = 0 + for _, e := range m.Tokens { + l += sovPartitionRingDesc(uint64(e)) + } + n += 1 + sovPartitionRingDesc(uint64(l)) + l + } + if m.State != 0 { + n += 1 + sovPartitionRingDesc(uint64(m.State)) + } + if m.StateTimestamp != 0 { + n += 1 + sovPartitionRingDesc(uint64(m.StateTimestamp)) + } + if m.Id != 0 { + n += 1 + sovPartitionRingDesc(uint64(m.Id)) + } + return n +} + +func (m *OwnerDesc) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OwnedPartition != 0 { + n += 1 + sovPartitionRingDesc(uint64(m.OwnedPartition)) + } + if m.State != 0 { + n += 1 + sovPartitionRingDesc(uint64(m.State)) + } + if m.UpdatedTimestamp != 0 { + n += 1 + sovPartitionRingDesc(uint64(m.UpdatedTimestamp)) + } + return n +} + +func sovPartitionRingDesc(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozPartitionRingDesc(x uint64) (n int) { + return sovPartitionRingDesc(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PartitionRingDesc) String() string { + if this == nil { + return "nil" + } + keysForPartitions := make([]int32, 0, len(this.Partitions)) + for k, _ := range this.Partitions { + keysForPartitions = append(keysForPartitions, k) + } + github_com_gogo_protobuf_sortkeys.Int32s(keysForPartitions) + mapStringForPartitions := "map[int32]PartitionDesc{" + for _, k := range keysForPartitions { + mapStringForPartitions += fmt.Sprintf("%v: %v,", k, this.Partitions[k]) + } + mapStringForPartitions += "}" + keysForOwners := make([]string, 0, len(this.Owners)) + for k, _ := range this.Owners { + keysForOwners = append(keysForOwners, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOwners) + mapStringForOwners := "map[string]OwnerDesc{" + for _, k := range keysForOwners { + mapStringForOwners += fmt.Sprintf("%v: %v,", k, this.Owners[k]) + } + mapStringForOwners += "}" + s := strings.Join([]string{`&PartitionRingDesc{`, + `Partitions:` + mapStringForPartitions + `,`, + `Owners:` + mapStringForOwners + `,`, + `}`, + }, "") + return s +} +func (this *PartitionDesc) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PartitionDesc{`, + `Tokens:` + fmt.Sprintf("%v", this.Tokens) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `StateTimestamp:` + fmt.Sprintf("%v", this.StateTimestamp) + `,`, + `Id:` + fmt.Sprintf("%v", this.Id) + `,`, + `}`, + }, "") + return s +} +func (this *OwnerDesc) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OwnerDesc{`, + `OwnedPartition:` + fmt.Sprintf("%v", this.OwnedPartition) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `UpdatedTimestamp:` + fmt.Sprintf("%v", this.UpdatedTimestamp) + `,`, + `}`, + }, "") + return s +} +func valueToStringPartitionRingDesc(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PartitionRingDesc) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: PartitionRingDesc: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartitionRingDesc: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Partitions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPartitionRingDesc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Partitions == nil { + m.Partitions = make(map[int32]PartitionDesc) + } + var mapkey int32 + mapvalue := &PartitionDesc{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapkey |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthPartitionRingDesc + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &PartitionDesc{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipPartitionRingDesc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Partitions[mapkey] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owners", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPartitionRingDesc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Owners == nil { + m.Owners = make(map[string]OwnerDesc) + } + var mapkey string + mapvalue := &OwnerDesc{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthPartitionRingDesc + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthPartitionRingDesc + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &OwnerDesc{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipPartitionRingDesc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Owners[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPartitionRingDesc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PartitionDesc) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartitionDesc: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartitionDesc: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Tokens = append(m.Tokens, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { 
+ return ErrInvalidLengthPartitionRingDesc + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Tokens) == 0 { + m.Tokens = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Tokens = append(m.Tokens, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Tokens", wireType) + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= PartitionState(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StateTimestamp", wireType) + } + m.StateTimestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StateTimestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPartitionRingDesc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OwnerDesc) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OwnerDesc: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OwnerDesc: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OwnedPartition", wireType) + } + m.OwnedPartition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OwnedPartition |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= OwnerState(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedTimestamp", wireType) + } + m.UpdatedTimestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedTimestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPartitionRingDesc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPartitionRingDesc(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthPartitionRingDesc + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthPartitionRingDesc + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPartitionRingDesc(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthPartitionRingDesc + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPartitionRingDesc = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPartitionRingDesc = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/grafana/dskit/ring/partition_ring_desc.proto b/vendor/github.com/grafana/dskit/ring/partition_ring_desc.proto new file mode 100644 index 0000000000000..d8fb9316f01db --- /dev/null +++ 
b/vendor/github.com/grafana/dskit/ring/partition_ring_desc.proto
@@ -0,0 +1,81 @@
+syntax = "proto3";
+
+package ring;
+
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+// PartitionRingDesc holds the state of the partitions ring.
+message PartitionRingDesc {
+ // Mapping between partition ID and partition info.
+ map<int32, PartitionDesc> partitions = 1 [(gogoproto.nullable) = false];
+
+ // Mapping between instance ID and partition ownership info.
+ map<string, OwnerDesc> owners = 2 [(gogoproto.nullable) = false];
+}
+
+// PartitionDesc holds the state of a single partition.
+message PartitionDesc {
+ // The partition ID. This value is the same as the key in the partitions map in PartitionRingDesc.
+ int32 id = 4;
+
+ // Unique tokens, generated with a deterministic token generator. Tokens MUST be immutable:
+ // if tokens get changed, the change will not be propagated via memberlist.
+ repeated uint32 tokens = 1;
+
+ // The state of the partition.
+ PartitionState state = 2;
+
+ // Unix timestamp (with seconds precision) of when the state last changed for this partition.
+ int64 stateTimestamp = 3;
+}
+
+enum PartitionState {
+ PartitionUnknown = 0;
+
+ // Pending partition is a partition that is about to be switched to ACTIVE. This state is used
+ // to let owners attach to the partition and get ready to handle it.
+ //
+ // When a partition is in this state, it must not be used for writing or reading.
+ PartitionPending = 1;
+
+ // Active partition in read-write mode.
+ PartitionActive = 2;
+
+ // Inactive partition in read-only mode. This partition will be deleted after a grace period,
+ // unless its state changes to Active again.
+ PartitionInactive = 3;
+
+ // Deleted partition. This state is not visible to ring clients: it's only used to propagate
+ // via memberlist the information that a partition has been deleted.
+ PartitionDeleted = 4;
+}
+
+// OwnerDesc holds the information of a partition owner.
+message OwnerDesc {
+ // Partition that belongs to this owner. An owner can own only 1 partition, but 1 partition can be
+ // owned by multiple owners.
+ int32 ownedPartition = 1;
+
+ // The owner state. This field is used to propagate deletions via memberlist.
+ OwnerState state = 2;
+
+ // Unix timestamp (with seconds precision) of when the data for the owner was last updated.
+ // This timestamp is used to resolve conflicts when merging updates via memberlist (the most recent
+ // update wins).
+ int64 updatedTimestamp = 3;
+}
+
+enum OwnerState {
+ OwnerUnknown = 0;
+
+ // Active owner.
+ OwnerActive = 1;
+
+ // Deleted owner. This state is not visible to ring clients: it's only used to propagate
+ // via memberlist the information that an owner has been deleted. Owners in this state
+ // are removed before clients can see them.
+ OwnerDeleted = 2;
+}
diff --git a/vendor/github.com/grafana/dskit/ring/partition_ring_editor.go b/vendor/github.com/grafana/dskit/ring/partition_ring_editor.go
new file mode 100644
index 0000000000000..a816693e55caf
--- /dev/null
+++ b/vendor/github.com/grafana/dskit/ring/partition_ring_editor.go
@@ -0,0 +1,64 @@
+package ring
+
+import (
+ "context"
+ "time"
+
+ "github.com/pkg/errors"
+
+ "github.com/grafana/dskit/kv"
+)
+
+// PartitionRingEditor is a standalone component that can be used to modify the partitions ring.
+// If you want to implement the partition lifecycle you should use PartitionInstanceLifecycler instead.
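+//
+// A minimal usage sketch (the ringKey string and kv.Client named store below are assumed
+// to already exist; they are not defined in this file):
+//
+//	editor := ring.NewPartitionRingEditor(ringKey, store)
+//	if err := editor.ChangePartitionState(ctx, 1, ring.PartitionInactive); err != nil {
+//		// Possible errors include ErrPartitionDoesNotExist and ErrPartitionStateChangeNotAllowed.
+//	}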
+type PartitionRingEditor struct { + ringKey string + store kv.Client +} + +func NewPartitionRingEditor(ringKey string, store kv.Client) *PartitionRingEditor { + return &PartitionRingEditor{ + ringKey: ringKey, + store: store, + } +} + +// ChangePartitionState changes the partition state to toState. +// This function returns ErrPartitionDoesNotExist if the partition doesn't exist, +// and ErrPartitionStateChangeNotAllowed if the state change is not allowed. +func (l *PartitionRingEditor) ChangePartitionState(ctx context.Context, partitionID int32, toState PartitionState) error { + return l.updateRing(ctx, func(ring *PartitionRingDesc) (bool, error) { + return changePartitionState(ring, partitionID, toState) + }) +} + +func (l *PartitionRingEditor) updateRing(ctx context.Context, update func(ring *PartitionRingDesc) (bool, error)) error { + return l.store.CAS(ctx, l.ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + ringDesc := GetOrCreatePartitionRingDesc(in) + + if changed, err := update(ringDesc); err != nil { + return nil, false, err + } else if !changed { + return nil, false, nil + } + + return ringDesc, true, nil + }) +} + +func changePartitionState(ring *PartitionRingDesc, partitionID int32, toState PartitionState) (changed bool, _ error) { + partition, exists := ring.Partitions[partitionID] + if !exists { + return false, ErrPartitionDoesNotExist + } + + if partition.State == toState { + return false, nil + } + + if !isPartitionStateChangeAllowed(partition.State, toState) { + return false, errors.Wrapf(ErrPartitionStateChangeNotAllowed, "change partition state from %s to %s", partition.State.CleanName(), toState.CleanName()) + } + + return ring.UpdatePartitionState(partitionID, toState, time.Now()), nil +} diff --git a/vendor/github.com/grafana/dskit/ring/partition_ring_http.go b/vendor/github.com/grafana/dskit/ring/partition_ring_http.go new file mode 100644 index 0000000000000..8e58c58c7afc8 --- /dev/null +++ b/vendor/github.com/grafana/dskit/ring/partition_ring_http.go @@ -0,0 +1,158 @@ +package ring + +import ( + "context" + _ "embed" + "fmt" + "html/template" + "net/http" + "sort" + "strconv" + "time" + + "golang.org/x/exp/slices" +) + +//go:embed partition_ring_status.gohtml +var partitionRingPageContent string +var partitionRingPageTemplate = template.Must(template.New("webpage").Funcs(template.FuncMap{ + "mod": func(i, j int32) bool { + return i%j == 0 + }, + "formatTimestamp": func(ts time.Time) string { + return ts.Format("2006-01-02 15:04:05 MST") + }, +}).Parse(partitionRingPageContent)) + +type PartitionRingUpdater interface { + ChangePartitionState(ctx context.Context, partitionID int32, toState PartitionState) error +} + +type PartitionRingPageHandler struct { + reader PartitionRingReader + updater PartitionRingUpdater +} + +func NewPartitionRingPageHandler(reader PartitionRingReader, updater PartitionRingUpdater) *PartitionRingPageHandler { + return &PartitionRingPageHandler{ + reader: reader, + updater: updater, + } +} + +func (h *PartitionRingPageHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + switch req.Method { + case http.MethodGet: + h.handleGetRequest(w, req) + case http.MethodPost: + h.handlePostRequest(w, req) + default: + http.Error(w, "Unsupported HTTP method", http.StatusMethodNotAllowed) + } +} + +func (h *PartitionRingPageHandler) handleGetRequest(w http.ResponseWriter, req *http.Request) { + var ( + ring = h.reader.PartitionRing() + ringDesc = ring.desc + ) + + // Prepare the data to render partitions in the 
page.
+ partitionsByID := make(map[int32]partitionPageData, len(ringDesc.Partitions))
+ for id, partition := range ringDesc.Partitions {
+ owners := ring.PartitionOwnerIDsCopy(id)
+ slices.Sort(owners)
+
+ partitionsByID[id] = partitionPageData{
+ ID: id,
+ Corrupted: false,
+ State: partition.State,
+ StateTimestamp: partition.GetStateTime(),
+ OwnerIDs: owners,
+ }
+ }
+
+ // Look for owners of non-existing partitions. We want to provide visibility for such a case,
+ // so we report the partition as corrupted.
+ for ownerID, owner := range ringDesc.Owners {
+ partition, exists := partitionsByID[owner.OwnedPartition]
+
+ if !exists {
+ partition = partitionPageData{
+ ID: owner.OwnedPartition,
+ Corrupted: true,
+ State: PartitionUnknown,
+ StateTimestamp: time.Time{},
+ OwnerIDs: []string{ownerID},
+ }
+
+ partitionsByID[owner.OwnedPartition] = partition
+ }
+
+ if !slices.Contains(partition.OwnerIDs, ownerID) {
+ partition.OwnerIDs = append(partition.OwnerIDs, ownerID)
+ partitionsByID[owner.OwnedPartition] = partition
+ }
+ }
+
+ // Convert partitions to a list and sort it by ID.
+ partitions := make([]partitionPageData, 0, len(partitionsByID))
+
+ for _, partition := range partitionsByID {
+ partitions = append(partitions, partition)
+ }
+
+ sort.Slice(partitions, func(i, j int) bool {
+ return partitions[i].ID < partitions[j].ID
+ })
+
+ renderHTTPResponse(w, partitionRingPageData{
+ Partitions: partitions,
+ PartitionStateChanges: map[PartitionState]PartitionState{
+ PartitionPending: PartitionActive,
+ PartitionActive: PartitionInactive,
+ PartitionInactive: PartitionActive,
+ },
+ }, partitionRingPageTemplate, req)
+}
+
+func (h *PartitionRingPageHandler) handlePostRequest(w http.ResponseWriter, req *http.Request) {
+ if req.FormValue("action") == "change_state" {
+ partitionID, err := strconv.Atoi(req.FormValue("partition_id"))
+ if err != nil {
+ http.Error(w, fmt.Sprintf("invalid partition ID: %s", err.Error()), http.StatusBadRequest)
+ return
+ }
+
+ toState, ok := PartitionState_value[req.FormValue("partition_state")]
+ if !ok {
+ http.Error(w, "invalid partition state", http.StatusBadRequest)
+ return
+ }
+
+ if err := h.updater.ChangePartitionState(req.Context(), int32(partitionID), PartitionState(toState)); err != nil {
+ http.Error(w, fmt.Sprintf("failed to change partition state: %s", err.Error()), http.StatusBadRequest)
+ return
+ }
+ }
+
+ // Implement the PRG pattern to prevent double-POST and work with CSRF middleware.
+ // https://en.wikipedia.org/wiki/Post/Redirect/Get
+ w.Header().Set("Location", "#")
+ w.WriteHeader(http.StatusFound)
+}
+
+type partitionRingPageData struct {
+ Partitions []partitionPageData `json:"partitions"`
+
+ // PartitionStateChanges maps the allowed state changes through the UI.
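+ // Only pending -> active, active -> inactive and inactive -> active are offered;
+ // the UI does not expose partition deletion.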
+ PartitionStateChanges map[PartitionState]PartitionState `json:"-"`
+}
+
+type partitionPageData struct {
+ ID int32 `json:"id"`
+ Corrupted bool `json:"corrupted"`
+ State PartitionState `json:"state"`
+ StateTimestamp time.Time `json:"state_timestamp"`
+ OwnerIDs []string `json:"owner_ids"`
+}
diff --git a/vendor/github.com/grafana/dskit/ring/partition_ring_model.go b/vendor/github.com/grafana/dskit/ring/partition_ring_model.go
new file mode 100644
index 0000000000000..c95380756a3c5
--- /dev/null
+++ b/vendor/github.com/grafana/dskit/ring/partition_ring_model.go
@@ -0,0 +1,460 @@
+package ring
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/gogo/protobuf/proto"
+ "golang.org/x/exp/slices"
+
+ "github.com/grafana/dskit/kv/codec"
+ "github.com/grafana/dskit/kv/memberlist"
+)
+
+type partitionRingCodec struct {
+ codec.Codec
+}
+
+// Decode wraps Codec.Decode and ensures PartitionRingDesc maps are not nil.
+func (c *partitionRingCodec) Decode(in []byte) (interface{}, error) {
+ out, err := c.Codec.Decode(in)
+ if err != nil {
+ return out, err
+ }
+
+ // Ensure maps are initialised. This makes working with PartitionRingDesc more convenient.
+ if actual, ok := out.(*PartitionRingDesc); ok {
+ if actual.Partitions == nil {
+ actual.Partitions = map[int32]PartitionDesc{}
+ }
+ if actual.Owners == nil {
+ actual.Owners = map[string]OwnerDesc{}
+ }
+ }
+
+ return out, nil
+}
+
+func GetPartitionRingCodec() codec.Codec {
+ return &partitionRingCodec{
+ Codec: codec.NewProtoCodec("partitionRingDesc", PartitionRingDescFactory),
+ }
+}
+
+// PartitionRingDescFactory makes a new PartitionRingDesc.
+func PartitionRingDescFactory() proto.Message {
+ return NewPartitionRingDesc()
+}
+
+func GetOrCreatePartitionRingDesc(in any) *PartitionRingDesc {
+ if in == nil {
+ return NewPartitionRingDesc()
+ }
+
+ desc := in.(*PartitionRingDesc)
+ if desc == nil {
+ return NewPartitionRingDesc()
+ }
+
+ return desc
+}
+
+func NewPartitionRingDesc() *PartitionRingDesc {
+ return &PartitionRingDesc{
+ Partitions: map[int32]PartitionDesc{},
+ Owners: map[string]OwnerDesc{},
+ }
+}
+
+// tokens returns a sorted list of tokens registered by all partitions.
+func (m *PartitionRingDesc) tokens() Tokens {
+ allTokens := make(Tokens, 0, len(m.Partitions)*optimalTokensPerInstance)
+
+ for _, partition := range m.Partitions {
+ allTokens = append(allTokens, partition.Tokens...)
+ }
+
+ slices.Sort(allTokens)
+ return allTokens
+}
+
+// partitionByToken returns a map where the key is a registered token and the value is the ID of the partition
+// that registered that token.
+func (m *PartitionRingDesc) partitionByToken() map[Token]int32 {
+ out := make(map[Token]int32, len(m.Partitions)*optimalTokensPerInstance)
+
+ for partitionID, partition := range m.Partitions {
+ for _, token := range partition.Tokens {
+ out[Token(token)] = partitionID
+ }
+ }
+
+ return out
+}
+
+// ownersByPartition returns a map where the key is the partition ID and the value is a list of owner IDs.
+func (m *PartitionRingDesc) ownersByPartition() map[int32][]string {
+ out := make(map[int32][]string, len(m.Partitions))
+ for id, o := range m.Owners {
+ out[o.OwnedPartition] = append(out[o.OwnedPartition], id)
+ }
+
+ // Sort owners to have predictable tests.
+ for id := range out {
+ slices.Sort(out[id])
+ }
+
+ return out
+}
+
+// countPartitionsByState returns a map containing the number of partitions by state.
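+// PartitionUnknown and PartitionDeleted are intentionally excluded from the result.
+// Illustrative example: a ring holding two ACTIVE partitions and one INACTIVE partition yields
+//
+//	map[PartitionState]int{
+//		PartitionPending:  0,
+//		PartitionActive:   2,
+//		PartitionInactive: 1,
+//	}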
+func (m *PartitionRingDesc) countPartitionsByState() map[PartitionState]int {
+ // Init the map to have zero values for all states.
+ out := make(map[PartitionState]int, len(PartitionState_value)-2)
+ for _, state := range PartitionState_value {
+ if PartitionState(state) == PartitionUnknown || PartitionState(state) == PartitionDeleted {
+ continue
+ }
+
+ out[PartitionState(state)] = 0
+ }
+
+ for _, partition := range m.Partitions {
+ out[partition.State]++
+ }
+
+ return out
+}
+
+func (m *PartitionRingDesc) activePartitionsCount() int {
+ count := 0
+ for _, partition := range m.Partitions {
+ if partition.IsActive() {
+ count++
+ }
+ }
+ return count
+}
+
+// WithPartitions returns a new PartitionRingDesc with only the specified partitions and their owners included.
+func (m *PartitionRingDesc) WithPartitions(partitions map[int32]struct{}) PartitionRingDesc {
+ newPartitions := make(map[int32]PartitionDesc, len(partitions))
+ newOwners := make(map[string]OwnerDesc, len(partitions)*2) // assuming two owners per partition.
+
+ for pid, p := range m.Partitions {
+ if _, ok := partitions[pid]; ok {
+ newPartitions[pid] = p
+ }
+ }
+
+ for oid, o := range m.Owners {
+ if _, ok := partitions[o.OwnedPartition]; ok {
+ newOwners[oid] = o
+ }
+ }
+
+ return PartitionRingDesc{
+ Partitions: newPartitions,
+ Owners: newOwners,
+ }
+}
+
+// AddPartition adds a new partition to the ring. Tokens are auto-generated using the spread minimizing strategy
+// which generates deterministic unique tokens.
+func (m *PartitionRingDesc) AddPartition(id int32, state PartitionState, now time.Time) {
+ // The spread-minimizing token generator is a deterministic unique-token generator for the given id and zone.
+ // Partitions don't use zones.
+ spreadMinimizing := NewSpreadMinimizingTokenGeneratorForInstanceAndZoneID("", int(id), 0, false)
+
+ m.Partitions[id] = PartitionDesc{
+ Id: id,
+ Tokens: spreadMinimizing.GenerateTokens(optimalTokensPerInstance, nil),
+ State: state,
+ StateTimestamp: now.Unix(),
+ }
+}
+
+// UpdatePartitionState changes the state of a partition. Returns true if the state was changed,
+// or false if the update was a no-op.
+func (m *PartitionRingDesc) UpdatePartitionState(id int32, state PartitionState, now time.Time) bool {
+ d, ok := m.Partitions[id]
+ if !ok {
+ return false
+ }
+
+ if d.State == state {
+ return false
+ }
+
+ d.State = state
+ d.StateTimestamp = now.Unix()
+ m.Partitions[id] = d
+ return true
+}
+
+// RemovePartition removes a partition.
+func (m *PartitionRingDesc) RemovePartition(id int32) {
+ delete(m.Partitions, id)
+}
+
+// HasPartition returns whether a partition exists.
+func (m *PartitionRingDesc) HasPartition(id int32) bool {
+ _, ok := m.Partitions[id]
+ return ok
+}
+
+// AddOrUpdateOwner adds or updates a partition owner in the ring. Returns true if the
+// owner was added or updated, false if it was left unchanged.
+func (m *PartitionRingDesc) AddOrUpdateOwner(id string, state OwnerState, ownedPartition int32, now time.Time) bool {
+ prev, ok := m.Owners[id]
+ updated := OwnerDesc{
+ State: state,
+ OwnedPartition: ownedPartition,
+
+ // Preserve the previous timestamp so that we'll NOT compare it.
+ // Then, if we detect that the OwnerDesc should be updated, we'll
+ // also update the UpdatedTimestamp.
+ UpdatedTimestamp: prev.UpdatedTimestamp,
+ }
+
+ if ok && prev.Equal(updated) {
+ return false
+ }
+
+ updated.UpdatedTimestamp = now.Unix()
+ m.Owners[id] = updated
+
+ return true
+}
+
+// RemoveOwner removes a partition owner.
Returns true if the ring has been changed.
+func (m *PartitionRingDesc) RemoveOwner(id string) bool {
+ if _, ok := m.Owners[id]; !ok {
+ return false
+ }
+
+ delete(m.Owners, id)
+ return true
+}
+
+// HasOwner returns whether an owner exists.
+func (m *PartitionRingDesc) HasOwner(id string) bool {
+ _, ok := m.Owners[id]
+ return ok
+}
+
+// PartitionOwnersCount returns the number of owners for a given partition.
+func (m *PartitionRingDesc) PartitionOwnersCount(partitionID int32) int {
+ count := 0
+ for _, o := range m.Owners {
+ if o.OwnedPartition == partitionID {
+ count++
+ }
+ }
+ return count
+}
+
+// PartitionOwnersCountUpdatedBefore returns the number of owners for a given partition,
+// including only owners which were last updated before the input timestamp.
+func (m *PartitionRingDesc) PartitionOwnersCountUpdatedBefore(partitionID int32, before time.Time) int {
+ count := 0
+ beforeSeconds := before.Unix()
+
+ for _, o := range m.Owners {
+ if o.OwnedPartition == partitionID && o.GetUpdatedTimestamp() < beforeSeconds {
+ count++
+ }
+ }
+ return count
+}
+
+// Merge implements memberlist.Mergeable.
+func (m *PartitionRingDesc) Merge(mergeable memberlist.Mergeable, localCAS bool) (memberlist.Mergeable, error) {
+ return m.mergeWithTime(mergeable, localCAS, time.Now())
+}
+
+func (m *PartitionRingDesc) mergeWithTime(mergeable memberlist.Mergeable, localCAS bool, now time.Time) (memberlist.Mergeable, error) {
+ if mergeable == nil {
+ return nil, nil
+ }
+
+ other, ok := mergeable.(*PartitionRingDesc)
+ if !ok {
+ return nil, fmt.Errorf("expected *PartitionRingDesc, got %T", mergeable)
+ }
+
+ if other == nil {
+ return nil, nil
+ }
+
+ change := NewPartitionRingDesc()
+
+ // Handle partitions.
+ for id, otherPart := range other.Partitions {
+ changed := false
+
+ thisPart, exists := m.Partitions[id]
+ if !exists {
+ changed = true
+ thisPart = otherPart
+ } else {
+ // We don't merge changes to partition ID and tokens because we expect them to be immutable.
+ //
+ // If in the future we'll change the tokens generation algorithm and we'll have to handle migration to
+ // a different set of tokens then we'll add the support. For example, we could add "token generation version"
+ // to PartitionDesc and then preserve tokens generated by latest version only, or a timestamp for tokens
+ // update too.
+
+ // In case the timestamp is equal we give priority to the deleted state.
+ // Reason is that timestamp has second precision, so we cover the case an
+ // update and subsequent deletion occur within the same second.
+ if otherPart.StateTimestamp > thisPart.StateTimestamp || (otherPart.StateTimestamp == thisPart.StateTimestamp && otherPart.State == PartitionDeleted && thisPart.State != PartitionDeleted) {
+ changed = true
+
+ thisPart.State = otherPart.State
+ thisPart.StateTimestamp = otherPart.StateTimestamp
+ }
+ }
+
+ if changed {
+ m.Partitions[id] = thisPart
+ change.Partitions[id] = thisPart
+ }
+ }
+
+ if localCAS {
+ // Let's mark all missing partitions in incoming change as deleted.
+ // This breaks commutativity! But we only do it locally, not when gossiping with others.
+ for pid, thisPart := range m.Partitions {
+ if _, exists := other.Partitions[pid]; !exists && thisPart.State != PartitionDeleted {
+ // Partition was removed from the ring. We need to preserve it locally, but we set state to PartitionDeleted.
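+ // Keeping a tombstone instead of deleting the entry lets the removal win later
+ // merges: a peer that still has the partition observes the newer StateTimestamp
+ // with PartitionDeleted and applies it, instead of resurrecting the partition
+ // from its own copy. Tombstones are eventually dropped by RemoveTombstones().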
+ thisPart.State = PartitionDeleted
+ thisPart.StateTimestamp = now.Unix()
+ m.Partitions[pid] = thisPart
+ change.Partitions[pid] = thisPart
+ }
+ }
+ }
+
+ // Now let's handle owners.
+ for id, otherOwner := range other.Owners {
+ thisOwner := m.Owners[id]
+
+ // In case the timestamp is equal we give priority to the deleted state.
+ // Reason is that timestamp has second precision, so we cover the case an
+ // update and subsequent deletion occur within the same second.
+ if otherOwner.UpdatedTimestamp > thisOwner.UpdatedTimestamp || (otherOwner.UpdatedTimestamp == thisOwner.UpdatedTimestamp && otherOwner.State == OwnerDeleted && thisOwner.State != OwnerDeleted) {
+ m.Owners[id] = otherOwner
+ change.Owners[id] = otherOwner
+ }
+ }
+
+ if localCAS {
+ // Mark all missing owners as deleted.
+ // This breaks commutativity! But we only do it locally, not when gossiping with others.
+ for id, thisOwner := range m.Owners {
+ if _, exists := other.Owners[id]; !exists && thisOwner.State != OwnerDeleted {
+ // Owner was removed from the ring. We need to preserve it locally, but we set state to OwnerDeleted.
+ thisOwner.State = OwnerDeleted
+ thisOwner.UpdatedTimestamp = now.Unix()
+ m.Owners[id] = thisOwner
+ change.Owners[id] = thisOwner
+ }
+ }
+ }
+
+ // If nothing changed, report nothing.
+ if len(change.Partitions) == 0 && len(change.Owners) == 0 {
+ return nil, nil
+ }
+
+ return change, nil
+}
+
+// MergeContent implements memberlist.Mergeable.
+func (m *PartitionRingDesc) MergeContent() []string {
+ result := make([]string, 0, len(m.Partitions)+len(m.Owners))
+
+ // We're assuming that partition IDs and instance IDs are not colliding (i.e. no instance is called "1").
+ for pid := range m.Partitions {
+ result = append(result, strconv.Itoa(int(pid)))
+ }
+
+ for id := range m.Owners {
+ result = append(result, id)
+ }
+ return result
+}
+
+// RemoveTombstones implements memberlist.Mergeable.
+func (m *PartitionRingDesc) RemoveTombstones(limit time.Time) (total, removed int) {
+ for pid, part := range m.Partitions {
+ if part.State == PartitionDeleted {
+ if limit.IsZero() || time.Unix(part.StateTimestamp, 0).Before(limit) {
+ delete(m.Partitions, pid)
+ removed++
+ } else {
+ total++
+ }
+ }
+ }
+
+ for n, owner := range m.Owners {
+ if owner.State == OwnerDeleted {
+ if limit.IsZero() || time.Unix(owner.UpdatedTimestamp, 0).Before(limit) {
+ delete(m.Owners, n)
+ removed++
+ } else {
+ total++
+ }
+ }
+ }
+
+ return
+}
+
+// Clone implements memberlist.Mergeable.
+func (m *PartitionRingDesc) Clone() memberlist.Mergeable {
+ clone := proto.Clone(m).(*PartitionRingDesc)
+
+ // Ensure empty maps are preserved (easier to compare with a deep equal in tests).
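+ // Illustrative sketch of the behaviour being fixed up below:
+ //
+ //	src := NewPartitionRingDesc()                // empty, non-nil maps
+ //	dst := proto.Clone(src).(*PartitionRingDesc) // dst.Partitions is nil here
+ //
+ // so nil maps are re-initialised whenever the source maps were non-nil.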
+ if m.Partitions != nil && clone.Partitions == nil { + clone.Partitions = map[int32]PartitionDesc{} + } + if m.Owners != nil && clone.Owners == nil { + clone.Owners = map[string]OwnerDesc{} + } + + return clone +} + +func (m *PartitionDesc) IsPending() bool { + return m.GetState() == PartitionPending +} + +func (m *PartitionDesc) IsActive() bool { + return m.GetState() == PartitionActive +} + +func (m *PartitionDesc) IsInactive() bool { + return m.GetState() == PartitionInactive +} + +func (m *PartitionDesc) IsInactiveSince(since time.Time) bool { + return m.IsInactive() && m.GetStateTimestamp() < since.Unix() +} + +func (m *PartitionDesc) GetStateTime() time.Time { + return time.Unix(m.GetStateTimestamp(), 0) +} + +func (m *PartitionDesc) Clone() PartitionDesc { + return *(proto.Clone(m).(*PartitionDesc)) +} + +// CleanName returns the PartitionState name without the "Partition" prefix. +func (s PartitionState) CleanName() string { + return strings.TrimPrefix(s.String(), "Partition") +} diff --git a/vendor/github.com/grafana/dskit/ring/partition_ring_status.gohtml b/vendor/github.com/grafana/dskit/ring/partition_ring_status.gohtml new file mode 100644 index 0000000000000..f4f9afe87d88f --- /dev/null +++ b/vendor/github.com/grafana/dskit/ring/partition_ring_status.gohtml @@ -0,0 +1,63 @@ +{{- /*gotype: github.com/grafana/dskit/ring.partitionRingPageData */ -}} +<!DOCTYPE html> +<html> +<head> + <meta charset="UTF-8"> + <title>Partitions Ring Status</title> +</head> +<body> + <h1>Partitions Ring Status</h1> + + <table width="100%" border="1"> + <thead> + <tr> + <th>Partition ID</th> + <th>State</th> + <th>State updated at</th> + <th>Owners</th> + <th>Actions</th> + </tr> + </thead> + <tbody> + {{ $stateChanges := .PartitionStateChanges }} + {{ range $partition := .Partitions }} + <tr {{ if .Corrupted }}bgcolor="#FFDEDE"{{ else if mod $partition.ID 2 }}bgcolor="#BEBEBE"{{ end }}> + <td>{{ .ID }}</td> + <td> + {{ if .Corrupted }} + Corrupt + {{ else }} + {{ .State.CleanName }} + {{ end }} + </td> + <td> + {{ if not .StateTimestamp.IsZero }} + {{ .StateTimestamp | formatTimestamp }} + {{ else }} + N/A + {{ end }} + </td> + <td> + {{ range $ownerID := $partition.OwnerIDs }} + {{$ownerID}} <br /> + {{ end }} + </td> + <td> + <!-- Allow to force a state change --> + {{ if and (not .Corrupted) (ne (index $stateChanges .State) 0) }} + {{ $toState := index $stateChanges .State }} + <form action="" method="POST" onsubmit="return confirm('Do you confirm you want to change the state to {{ $toState.CleanName }}?');"> + <input type="hidden" name="csrf_token" value="$__CSRF_TOKEN_PLACEHOLDER__"> + <input type="hidden" name="partition_id" value="{{ .ID }}"> + <input type="hidden" name="partition_state" value="{{ $toState.String }}"> + + <button name="action" value="change_state" type="submit">Change state to {{ $toState.CleanName }}</button> + </form> + {{ end }} + </td> + </tr> + {{ end }} + </tbody> + </table> +</body> +</html> \ No newline at end of file diff --git a/vendor/github.com/grafana/dskit/ring/partition_ring_watcher.go b/vendor/github.com/grafana/dskit/ring/partition_ring_watcher.go new file mode 100644 index 0000000000000..39225697eb0ef --- /dev/null +++ b/vendor/github.com/grafana/dskit/ring/partition_ring_watcher.go @@ -0,0 +1,100 @@ +package ring + +import ( + "context" + "sync" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + 
"github.com/grafana/dskit/kv" + "github.com/grafana/dskit/services" +) + +// PartitionRingWatcher watches the partitions ring for changes in the KV store. +type PartitionRingWatcher struct { + services.Service + + key string + kv kv.Client + logger log.Logger + + ringMx sync.Mutex + ring *PartitionRing + + // Metrics. + numPartitionsGaugeVec *prometheus.GaugeVec +} + +func NewPartitionRingWatcher(name, key string, kv kv.Client, logger log.Logger, reg prometheus.Registerer) *PartitionRingWatcher { + r := &PartitionRingWatcher{ + key: key, + kv: kv, + logger: logger, + ring: NewPartitionRing(*NewPartitionRingDesc()), + numPartitionsGaugeVec: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ + Name: "partition_ring_partitions", + Help: "Number of partitions by state in the partitions ring.", + ConstLabels: map[string]string{"name": name}, + }, []string{"state"}), + } + + r.Service = services.NewBasicService(r.starting, r.loop, nil).WithName("partitions-ring-watcher") + return r +} + +func (w *PartitionRingWatcher) starting(ctx context.Context) error { + // Get the initial ring state so that, as soon as the service will be running, the in-memory + // ring would be already populated and there's no race condition between when the service is + // running and the WatchKey() callback is called for the first time. + value, err := w.kv.Get(ctx, w.key) + if err != nil { + return errors.Wrap(err, "unable to initialise ring state") + } + + if value == nil { + level.Info(w.logger).Log("msg", "partition ring doesn't exist in KV store yet") + value = NewPartitionRingDesc() + } + + w.updatePartitionRing(value.(*PartitionRingDesc)) + return nil +} + +func (w *PartitionRingWatcher) loop(ctx context.Context) error { + w.kv.WatchKey(ctx, w.key, func(value interface{}) bool { + if value == nil { + level.Info(w.logger).Log("msg", "partition ring doesn't exist in KV store yet") + return true + } + + w.updatePartitionRing(value.(*PartitionRingDesc)) + return true + }) + return nil +} + +func (w *PartitionRingWatcher) updatePartitionRing(desc *PartitionRingDesc) { + newRing := NewPartitionRing(*desc) + + w.ringMx.Lock() + w.ring = newRing + w.ringMx.Unlock() + + // Update metrics. + for state, count := range desc.countPartitionsByState() { + w.numPartitionsGaugeVec.WithLabelValues(state.CleanName()).Set(float64(count)) + } +} + +// PartitionRing returns the most updated snapshot of the PartitionRing. The returned instance +// is immutable and will not be updated if new changes are done to the ring. 
+func (w *PartitionRingWatcher) PartitionRing() *PartitionRing { + w.ringMx.Lock() + defer w.ringMx.Unlock() + + return w.ring +} diff --git a/vendor/github.com/grafana/dskit/ring/partitions_ring_shuffle_shard_cache.go b/vendor/github.com/grafana/dskit/ring/partitions_ring_shuffle_shard_cache.go new file mode 100644 index 0000000000000..ce80d2c14adcf --- /dev/null +++ b/vendor/github.com/grafana/dskit/ring/partitions_ring_shuffle_shard_cache.go @@ -0,0 +1,96 @@ +package ring + +import ( + "math" + "sync" + "time" +) + +type partitionRingShuffleShardCache struct { + mtx sync.RWMutex + cacheWithoutLookback map[subringCacheKey]*PartitionRing + cacheWithLookback map[subringCacheKey]cachedSubringWithLookback[*PartitionRing] +} + +func newPartitionRingShuffleShardCache() *partitionRingShuffleShardCache { + return &partitionRingShuffleShardCache{ + cacheWithoutLookback: map[subringCacheKey]*PartitionRing{}, + cacheWithLookback: map[subringCacheKey]cachedSubringWithLookback[*PartitionRing]{}, + } +} + +func (r *partitionRingShuffleShardCache) setSubring(identifier string, size int, subring *PartitionRing) { + if subring == nil { + return + } + + r.mtx.Lock() + defer r.mtx.Unlock() + + r.cacheWithoutLookback[subringCacheKey{identifier: identifier, shardSize: size}] = subring +} + +func (r *partitionRingShuffleShardCache) getSubring(identifier string, size int) *PartitionRing { + r.mtx.RLock() + defer r.mtx.RUnlock() + + cached := r.cacheWithoutLookback[subringCacheKey{identifier: identifier, shardSize: size}] + if cached == nil { + return nil + } + + return cached +} + +func (r *partitionRingShuffleShardCache) setSubringWithLookback(identifier string, size int, lookbackPeriod time.Duration, now time.Time, subring *PartitionRing) { + if subring == nil { + return + } + + var ( + lookbackWindowStart = now.Add(-lookbackPeriod).Unix() + validForLookbackWindowsStartingBefore = int64(math.MaxInt64) + ) + + for _, partition := range subring.desc.Partitions { + stateChangedDuringLookbackWindow := partition.StateTimestamp >= lookbackWindowStart + + if stateChangedDuringLookbackWindow && partition.StateTimestamp < validForLookbackWindowsStartingBefore { + validForLookbackWindowsStartingBefore = partition.StateTimestamp + } + } + + r.mtx.Lock() + defer r.mtx.Unlock() + + // Only update cache if subring's lookback window starts later than the previously cached subring for this identifier, + // if there is one. This prevents cache thrashing due to different calls competing if their lookback windows start + // before and after the time a partition state has changed. 
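+ // Illustrative example: if a partition changed state at T=100, a caller whose lookback
+ // window starts at T=90 and one whose window starts at T=110 need different subrings;
+ // preferring the entry with the later window start stops the two callers from
+ // overwriting each other's cache entry on every call.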
+ key := subringCacheKey{identifier: identifier, shardSize: size, lookbackPeriod: lookbackPeriod} + + if existingEntry, haveCached := r.cacheWithLookback[key]; !haveCached || existingEntry.validForLookbackWindowsStartingAfter < lookbackWindowStart { + r.cacheWithLookback[key] = cachedSubringWithLookback[*PartitionRing]{ + subring: subring, + validForLookbackWindowsStartingAfter: lookbackWindowStart, + validForLookbackWindowsStartingBefore: validForLookbackWindowsStartingBefore, + } + } +} + +func (r *partitionRingShuffleShardCache) getSubringWithLookback(identifier string, size int, lookbackPeriod time.Duration, now time.Time) *PartitionRing { + r.mtx.RLock() + defer r.mtx.RUnlock() + + cached, ok := r.cacheWithLookback[subringCacheKey{identifier: identifier, shardSize: size, lookbackPeriod: lookbackPeriod}] + if !ok { + return nil + } + + lookbackWindowStart := now.Add(-lookbackPeriod).Unix() + if lookbackWindowStart < cached.validForLookbackWindowsStartingAfter || lookbackWindowStart > cached.validForLookbackWindowsStartingBefore { + // The cached subring is not valid for the lookback window that has been requested. + return nil + } + + return cached.subring +} diff --git a/vendor/github.com/grafana/dskit/ring/replication_set.go b/vendor/github.com/grafana/dskit/ring/replication_set.go index f05153c0525cb..ffdcf80ab5268 100644 --- a/vendor/github.com/grafana/dskit/ring/replication_set.go +++ b/vendor/github.com/grafana/dskit/ring/replication_set.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "sort" + "sync" "time" kitlog "github.com/go-kit/log" @@ -388,6 +389,111 @@ func DoUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Contex return results, nil } +// DoMultiUntilQuorumWithoutSuccessfulContextCancellation behaves similar to DoUntilQuorumWithoutSuccessfulContextCancellation +// with the following exceptions: +// +// - This function calls DoUntilQuorumWithoutSuccessfulContextCancellation for each input ReplicationSet and requires +// DoUntilQuorumWithoutSuccessfulContextCancellation to successfully run for each of them. Execution breaks on the +// first error returned by DoUntilQuorumWithoutSuccessfulContextCancellation on any ReplicationSet. +// +// - This function requires that the callback function f always call context.CancelCauseFunc once done. Failing to +// cancel the context will leak resources. +func DoMultiUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Context, sets []ReplicationSet, cfg DoUntilQuorumConfig, f func(context.Context, *InstanceDesc, context.CancelCauseFunc) (T, error), cleanupFunc func(T)) ([]T, error) { + if len(sets) == 0 { + return nil, errors.New("no replication sets") + } + if len(sets) == 1 { + return DoUntilQuorumWithoutSuccessfulContextCancellation[T](ctx, sets[0], cfg, f, cleanupFunc) + } + + results, _, err := doMultiUntilQuorumWithoutSuccessfulContextCancellation[T](ctx, sets, cfg, f, cleanupFunc) + return results, err +} + +// See DoMultiUntilQuorumWithoutSuccessfulContextCancellation(). +// +// The returned context.Context is the internal context used by workers and it's used for testing purposes. 
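+//
+// Illustrative callback shape satisfying the contract described above (doRequest is a placeholder):
+//
+//	f := func(ctx context.Context, inst *InstanceDesc, cancel context.CancelCauseFunc) (T, error) {
+//		res, err := doRequest(ctx, inst)
+//		cancel(errors.New("request completed")) // must always be called once done, or resources leak
+//		return res, err
+//	}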
+func doMultiUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Context, sets []ReplicationSet, cfg DoUntilQuorumConfig, f func(context.Context, *InstanceDesc, context.CancelCauseFunc) (T, error), cleanupFunc func(T)) ([]T, context.Context, error) { + var ( + returnResultsMx = sync.Mutex{} + returnResults = make([]T, 0, len(sets)*len(sets[0].Instances)) // Assume all replication sets have the same number of instances. + + returnErrOnce sync.Once + returnErr error // The first error occurred. + + workersGroup = sync.WaitGroup{} + workersCtx, cancelWorkersCtx = context.WithCancelCause(ctx) + + inflightTracker = newInflightInstanceTracker(sets) + ) + + cancelWorkersCtxIfSafe := func() { + if inflightTracker.allInstancesCompleted() { + cancelWorkersCtx(errors.New("all requests completed")) + } + } + + // Start a worker for each set. A worker is responsible to call DoUntilQuorumWithoutSuccessfulContextCancellation() + // for the given replication set and handle the result. + workersGroup.Add(len(sets)) + + for idx, set := range sets { + go func(idx int, set ReplicationSet) { + defer workersGroup.Done() + + wrappedFn := func(ctx context.Context, instance *InstanceDesc, cancelCtx context.CancelCauseFunc) (T, error) { + // The callback function has been called, so we need to track it. + inflightTracker.addInstance(idx, instance) + + // Inject custom logic in the context.CancelCauseFunc. + return f(ctx, instance, func(cause error) { + // Call the original one. + cancelCtx(cause) + + // The callback has done, so we can remove it from tracker and then check if it's safe + // to cancel the workers context. + inflightTracker.removeInstance(idx, instance) + cancelWorkersCtxIfSafe() + }) + } + + setResults, setErr := DoUntilQuorumWithoutSuccessfulContextCancellation[T](workersCtx, set, cfg, wrappedFn, cleanupFunc) + + if setErr != nil { + returnErrOnce.Do(func() { + returnErr = setErr + + // Interrupt the execution of all workers. + cancelWorkersCtx(setErr) + }) + + return + } + + // Keep track of the results. + returnResultsMx.Lock() + returnResults = append(returnResults, setResults...) + returnResultsMx.Unlock() + }(idx, set) + } + + // Wait until all goroutines have terminated. + workersGroup.Wait() + + // All workers completed, so it's guaranteed returnResults and returnErr won't be accessed by workers anymore, + // and it's safe to read them with no locking. + if returnErr != nil { + return nil, workersCtx, returnErr + } + + // No error occurred. It means workers context hasn't been canceled yet, and we don't expect more callbacks + // to get tracked, so we can check if the cancelling condition has already been reached and eventually do it. + inflightTracker.allInstancesAdded() + cancelWorkersCtxIfSafe() + + return returnResults, workersCtx, nil +} + type instanceResult[T any] struct { result T err error @@ -405,6 +511,16 @@ func (r ReplicationSet) Includes(addr string) bool { return false } +// GetIDs returns the IDs of all instances within the replication set. Returned slice +// order is not guaranteed. +func (r ReplicationSet) GetIDs() []string { + ids := make([]string, 0, len(r.Instances)) + for _, desc := range r.Instances { + ids = append(ids, desc.Id) + } + return ids +} + // GetAddresses returns the addresses of all instances within the replication set. Returned slice // order is not guaranteed. 
func (r ReplicationSet) GetAddresses() []string {
@@ -468,6 +584,17 @@ func HasReplicationSetChangedWithoutState(before, after ReplicationSet) bool {
 })
 }
 
+// HasReplicationSetChangedWithoutStateOrAddr returns false if two replication sets
+// are the same (with possibly different timestamps, instance states, and IP addresses),
+// true if they differ in any other way (number of instances, tokens, zones, ...).
+func HasReplicationSetChangedWithoutStateOrAddr(before, after ReplicationSet) bool {
+ return hasReplicationSetChangedExcluding(before, after, func(i *InstanceDesc) {
+ i.Timestamp = 0
+ i.State = PENDING
+ i.Addr = ""
+ })
+}
+
 // Do comparison of replicasets, but apply a function first
 // to be able to exclude (reset) some values
 func hasReplicationSetChangedExcluding(before, after ReplicationSet, exclude func(*InstanceDesc)) bool {
@@ -478,8 +605,8 @@ func hasReplicationSetChangedExcluding(before, after ReplicationSet, exclude fun
 return true
 }
 
- sort.Sort(ByAddr(beforeInstances))
- sort.Sort(ByAddr(afterInstances))
+ sort.Sort(ByID(beforeInstances))
+ sort.Sort(ByID(afterInstances))
 
 for i := 0; i < len(beforeInstances); i++ {
 b := beforeInstances[i]
diff --git a/vendor/github.com/grafana/dskit/ring/replication_set_tracker.go b/vendor/github.com/grafana/dskit/ring/replication_set_tracker.go
index 202b568bb9567..73da1bc37f8ac 100644
--- a/vendor/github.com/grafana/dskit/ring/replication_set_tracker.go
+++ b/vendor/github.com/grafana/dskit/ring/replication_set_tracker.go
@@ -4,6 +4,7 @@ import (
 "context"
 "errors"
 "math/rand"
+ "sync"
 
 "github.com/go-kit/log"
 "github.com/go-kit/log/level"
@@ -465,3 +466,91 @@ func (t *zoneAwareContextTracker) cancelAllContexts(cause error) {
 delete(t.cancelFuncs, instance)
 }
 }
+
+type inflightInstanceTracker struct {
+ mx sync.Mutex
+ inflight [][]*InstanceDesc
+
+ // expectMoreInstances is true if more instances are expected to be added to the tracker.
+ expectMoreInstances bool
+}
+
+func newInflightInstanceTracker(sets []ReplicationSet) *inflightInstanceTracker {
+ // Init the inflight tracker.
+ inflight := make([][]*InstanceDesc, len(sets))
+ for idx, set := range sets {
+ inflight[idx] = make([]*InstanceDesc, 0, len(set.Instances))
+ }
+
+ return &inflightInstanceTracker{
+ inflight: inflight,
+ expectMoreInstances: true,
+ }
+}
+
+// addInstance adds the instance for replicationSetIdx to the tracker.
+//
+// addInstance is idempotent.
+func (t *inflightInstanceTracker) addInstance(replicationSetIdx int, instance *InstanceDesc) {
+ t.mx.Lock()
+ defer t.mx.Unlock()
+
+ // Check if the instance has already been added.
+ for _, curr := range t.inflight[replicationSetIdx] {
+ if curr == instance {
+ return
+ }
+ }
+
+ t.inflight[replicationSetIdx] = append(t.inflight[replicationSetIdx], instance)
+}
+
+// removeInstance removes the instance for replicationSetIdx from the tracker.
+//
+// removeInstance is idempotent.
+func (t *inflightInstanceTracker) removeInstance(replicationSetIdx int, instance *InstanceDesc) {
+ t.mx.Lock()
+ defer t.mx.Unlock()
+
+ for i, curr := range t.inflight[replicationSetIdx] {
+ if curr == instance {
+ instances := t.inflight[replicationSetIdx]
+ t.inflight[replicationSetIdx] = append(instances[:i], instances[i+1:]...)
+
+ // We can safely break the loop because we don't expect multiple occurrences of the same instance.
+ return
+ }
+ }
+}
+
+// allInstancesAdded signals the tracker that all expected instances have been added.
+//
+// allInstancesAdded is idempotent.
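+//
+// Illustrative call ordering, as used by doMultiUntilQuorumWithoutSuccessfulContextCancellation:
+// each wrapped callback calls addInstance() when invoked and removeInstance() from its cancel
+// func; once every per-set call has returned, the caller invokes allInstancesAdded(), after
+// which allInstancesCompleted() can report true and the shared workers context gets canceled.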
+func (t *inflightInstanceTracker) allInstancesAdded() {
+ t.mx.Lock()
+ defer t.mx.Unlock()
+
+ t.expectMoreInstances = false
+}
+
+// allInstancesCompleted returns true if and only if no more instances are expected to be
+// added to the tracker and all previously tracked instances have been removed by calling removeInstance().
+func (t *inflightInstanceTracker) allInstancesCompleted() bool {
+ t.mx.Lock()
+ defer t.mx.Unlock()
+
+ // We can't assert all instances have completed if it's still possible
+ // to add new ones to the tracker.
+ if t.expectMoreInstances {
+ return false
+ }
+
+ // Ensure there are no inflight instances for any replication set.
+ for _, instances := range t.inflight {
+ if len(instances) > 0 {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/grafana/dskit/ring/replication_strategy.go b/vendor/github.com/grafana/dskit/ring/replication_strategy.go
index 44e05a53833c3..db2b283548f26 100644
--- a/vendor/github.com/grafana/dskit/ring/replication_strategy.go
+++ b/vendor/github.com/grafana/dskit/ring/replication_strategy.go
@@ -109,11 +109,3 @@ func (r *Ring) IsHealthy(instance *InstanceDesc, op Operation, now time.Time) bo
 func (r *Ring) ReplicationFactor() int {
 return r.cfg.ReplicationFactor
 }
-
-// InstancesCount returns the number of instances in the ring.
-func (r *Ring) InstancesCount() int {
- r.mtx.RLock()
- c := len(r.ringDesc.Ingesters)
- r.mtx.RUnlock()
- return c
-}
diff --git a/vendor/github.com/grafana/dskit/ring/ring.go b/vendor/github.com/grafana/dskit/ring/ring.go
index 0c54bb1c5433e..e1c1f6a5159d9 100644
--- a/vendor/github.com/grafana/dskit/ring/ring.go
+++ b/vendor/github.com/grafana/dskit/ring/ring.go
@@ -58,6 +58,9 @@ type ReadRing interface {
 // InstancesCount returns the number of instances in the ring.
 InstancesCount() int
 
+ // InstancesWithTokensCount returns the number of instances in the ring that have tokens.
+ InstancesWithTokensCount() int
+
 // ShuffleShard returns a subring for the provided identifier (eg. a tenant ID)
 // and size (number of instances).
 ShuffleShard(identifier string, size int) ReadRing
@@ -78,6 +81,15 @@ type ReadRing interface {
 
 // GetTokenRangesForInstance returns the token ranges owned by an instance in the ring
 GetTokenRangesForInstance(instanceID string) (TokenRanges, error)
+
+ // InstancesInZoneCount returns the number of instances in the ring that are registered in given zone.
+ InstancesInZoneCount(zone string) int
+
+ // InstancesWithTokensInZoneCount returns the number of instances in the ring that are registered in given zone and have tokens.
+ InstancesWithTokensInZoneCount(zone string) int
+
+ // ZonesCount returns the number of zones for which there's at least 1 instance registered in the ring.
+ ZonesCount() int
 }
 
 var (
@@ -184,10 +196,19 @@ type Ring struct {
 // to be sorted alphabetically.
 ringZones []string
 
+ // Number of registered instances with tokens.
+ instancesWithTokensCount int
+
+ // Number of registered instances per zone.
+ instancesCountPerZone map[string]int
+
+ // Number of registered instances with tokens per zone.
+ instancesWithTokensCountPerZone map[string]int
+
 // Cache of shuffle-sharded subrings per identifier. Invalidated when topology changes.
 // If set to nil, no caching is done (used by tests, and subrings).
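 // A topology change re-creates both cache maps in updateRingState (see below), so cached
 // subrings never outlive the ring layout they were computed from.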
 shuffledSubringCache map[subringCacheKey]*Ring
- shuffledSubringWithLookbackCache map[subringCacheKey]cachedSubringWithLookback
+ shuffledSubringWithLookbackCache map[subringCacheKey]cachedSubringWithLookback[*Ring]
 
 numMembersGaugeVec *prometheus.GaugeVec
 totalTokensGauge prometheus.Gauge
@@ -202,8 +223,8 @@ type subringCacheKey struct {
 lookbackPeriod time.Duration
 }
 
-type cachedSubringWithLookback struct {
- subring *Ring
+type cachedSubringWithLookback[R any] struct {
+ subring R
 validForLookbackWindowsStartingAfter int64 // if the lookback window is from T to S, validForLookbackWindowsStartingAfter is the earliest value of T this cache entry is valid for
 validForLookbackWindowsStartingBefore int64 // if the lookback window is from T to S, validForLookbackWindowsStartingBefore is the latest value of T this cache entry is valid for
 }
@@ -237,7 +258,7 @@ func NewWithStoreClientAndStrategy(cfg Config, name, key string, store kv.Client
 strategy: strategy,
 ringDesc: &Desc{},
 shuffledSubringCache: map[subringCacheKey]*Ring{},
- shuffledSubringWithLookbackCache: map[subringCacheKey]cachedSubringWithLookback{},
+ shuffledSubringWithLookbackCache: map[subringCacheKey]cachedSubringWithLookback[*Ring]{},
 numMembersGaugeVec: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
 Name: "ring_members",
 Help: "Number of members in the ring",
@@ -333,6 +354,9 @@ func (r *Ring) updateRingState(ringDesc *Desc) {
 ringInstanceByToken := ringDesc.getTokensInfo()
 ringZones := getZones(ringTokensByZone)
 oldestRegisteredTimestamp := ringDesc.getOldestRegisteredTimestamp()
+ instancesWithTokensCount := ringDesc.instancesWithTokensCount()
+ instancesCountPerZone := ringDesc.instancesCountPerZone()
+ instancesWithTokensCountPerZone := ringDesc.instancesWithTokensCountPerZone()
 
 r.mtx.Lock()
 defer r.mtx.Unlock()
@@ -341,6 +365,9 @@ func (r *Ring) updateRingState(ringDesc *Desc) {
 r.ringTokensByZone = ringTokensByZone
 r.ringInstanceByToken = ringInstanceByToken
 r.ringZones = ringZones
+ r.instancesWithTokensCount = instancesWithTokensCount
+ r.instancesCountPerZone = instancesCountPerZone
+ r.instancesWithTokensCountPerZone = instancesWithTokensCountPerZone
 r.oldestRegisteredTimestamp = oldestRegisteredTimestamp
 
 r.lastTopologyChange = now
@@ -349,7 +376,7 @@
 r.shuffledSubringCache = make(map[subringCacheKey]*Ring)
 }
 if r.shuffledSubringWithLookbackCache != nil {
- r.shuffledSubringWithLookbackCache = make(map[subringCacheKey]cachedSubringWithLookback)
+ r.shuffledSubringWithLookbackCache = make(map[subringCacheKey]cachedSubringWithLookback[*Ring])
 }
 
 r.updateRingMetrics(rc)
@@ -676,7 +703,7 @@ func (r *Ring) ShuffleShard(identifier string, size int) ReadRing {
 // operations (read only).
 //
 // This function supports caching, but the cache will only be effective if successive calls for the
-// same identifier are for increasing values of (now-lookbackPeriod).
+// same identifier use the same lookbackPeriod and increasing values of now.
 func (r *Ring) ShuffleShardWithLookback(identifier string, size int, lookbackPeriod time.Duration, now time.Time) ReadRing {
 // Nothing to do if the shard size is not smaller than the actual ring.
 if size <= 0 || r.InstancesCount() <= size {
@@ -797,12 +824,15 @@ func (r *Ring) shuffleShard(identifier string, size int, lookbackPeriod time.Dur
 shardTokens := mergeTokenGroups(shardTokensByZone)
 
 return &Ring{
- cfg: r.cfg,
- strategy: r.strategy,
- ringDesc: shardDesc,
- ringTokens: shardTokens,
- ringTokensByZone: shardTokensByZone,
- ringZones: getZones(shardTokensByZone),
+ cfg: r.cfg,
+ strategy: r.strategy,
+ ringDesc: shardDesc,
+ ringTokens: shardTokens,
+ ringTokensByZone: shardTokensByZone,
+ ringZones: getZones(shardTokensByZone),
+ instancesWithTokensCount: shardDesc.instancesWithTokensCount(),
+ instancesCountPerZone: shardDesc.instancesCountPerZone(),
+ instancesWithTokensCountPerZone: shardDesc.instancesWithTokensCountPerZone(),
 
 oldestRegisteredTimestamp: shardDesc.getOldestRegisteredTimestamp(),
 
@@ -866,16 +896,32 @@ func mergeTokenGroups(groupsByName map[string][]uint32) []uint32 {
 return merged
 }
 
-// GetInstanceState returns the current state of an instance or an error if the
-// instance does not exist in the ring.
-func (r *Ring) GetInstanceState(instanceID string) (InstanceState, error) {
+// GetInstance returns the InstanceDesc for the given instanceID or an error
+// if the instance doesn't exist in the ring. The returned InstanceDesc is NOT a
+// deep copy, so the caller should never modify it.
+func (r *Ring) GetInstance(instanceID string) (doNotModify InstanceDesc, _ error) {
 r.mtx.RLock()
 defer r.mtx.RUnlock()
 
 instances := r.ringDesc.GetIngesters()
+ if instances == nil {
+ return InstanceDesc{}, ErrInstanceNotFound
+ }
+
 instance, ok := instances[instanceID]
 if !ok {
- return PENDING, ErrInstanceNotFound
+ return InstanceDesc{}, ErrInstanceNotFound
+ }
+
+ return instance, nil
+}
+
+// GetInstanceState returns the current state of an instance or an error if the
+// instance does not exist in the ring.
+func (r *Ring) GetInstanceState(instanceID string) (InstanceState, error) {
+ instance, err := r.GetInstance(instanceID)
+ if err != nil {
+ return PENDING, err
 }
 
 return instance.GetState(), nil
@@ -1017,7 +1063,7 @@ func (r *Ring) setCachedShuffledSubringWithLookback(identifier string, size int,
 key := subringCacheKey{identifier: identifier, shardSize: size, lookbackPeriod: lookbackPeriod}
 
 if existingEntry, haveCached := r.shuffledSubringWithLookbackCache[key]; !haveCached || existingEntry.validForLookbackWindowsStartingAfter < lookbackWindowStart {
- r.shuffledSubringWithLookbackCache[key] = cachedSubringWithLookback{
+ r.shuffledSubringWithLookbackCache[key] = cachedSubringWithLookback[*Ring]{
 subring: subring,
 validForLookbackWindowsStartingAfter: lookbackWindowStart,
 validForLookbackWindowsStartingBefore: validForLookbackWindowsStartingBefore,
@@ -1063,6 +1109,45 @@ func (r *Ring) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 newRingPageHandler(r, r.cfg.HeartbeatTimeout).handle(w, req)
 }
 
+// InstancesCount returns the number of instances in the ring.
+func (r *Ring) InstancesCount() int {
+ r.mtx.RLock()
+ c := len(r.ringDesc.Ingesters)
+ r.mtx.RUnlock()
+ return c
+}
+
+// InstancesWithTokensCount returns the number of instances in the ring that have tokens.
+func (r *Ring) InstancesWithTokensCount() int {
+ r.mtx.RLock()
+ defer r.mtx.RUnlock()
+
+ return r.instancesWithTokensCount
+}
+
+// InstancesInZoneCount returns the number of instances in the ring that are registered in given zone.
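+//
+// For example (illustrative), callers can use the per-zone counters to detect empty zones
+// before doing zone-aware operations:
+//
+//	if r.InstancesInZoneCount("zone-a") == 0 {
+//		// zone-a has no registered instances; skip it or fail fast
+//	}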
+func (r *Ring) InstancesInZoneCount(zone string) int { + r.mtx.RLock() + defer r.mtx.RUnlock() + + return r.instancesCountPerZone[zone] +} + +// InstancesWithTokensInZoneCount returns the number of instances in the ring that are registered in given zone and have tokens. +func (r *Ring) InstancesWithTokensInZoneCount(zone string) int { + r.mtx.RLock() + defer r.mtx.RUnlock() + + return r.instancesWithTokensCountPerZone[zone] +} + +func (r *Ring) ZonesCount() int { + r.mtx.RLock() + defer r.mtx.RUnlock() + + return len(r.ringZones) +} + // Operation describes which instances can be included in the replica set, based on their state. // // Implemented as bitmap, with upper 16-bits used for encoding extendReplicaSet, and lower 16-bits used for encoding healthy states. diff --git a/vendor/github.com/grafana/dskit/ring/http.go b/vendor/github.com/grafana/dskit/ring/ring_http.go similarity index 96% rename from vendor/github.com/grafana/dskit/ring/http.go rename to vendor/github.com/grafana/dskit/ring/ring_http.go index e70b3e6f0a1f9..7300430ddac18 100644 --- a/vendor/github.com/grafana/dskit/ring/http.go +++ b/vendor/github.com/grafana/dskit/ring/ring_http.go @@ -13,7 +13,7 @@ import ( "time" ) -//go:embed status.gohtml +//go:embed ring_status.gohtml var defaultPageContent string var defaultPageTemplate = template.Must(template.New("webpage").Funcs(template.FuncMap{ "mod": func(i, j int) bool { return i%j == 0 }, @@ -134,7 +134,7 @@ func (h *ringPageHandler) handle(w http.ResponseWriter, req *http.Request) { // RenderHTTPResponse either responds with json or a rendered html page using the passed in template // by checking the Accepts header -func renderHTTPResponse(w http.ResponseWriter, v httpResponse, t *template.Template, r *http.Request) { +func renderHTTPResponse(w http.ResponseWriter, v any, t *template.Template, r *http.Request) { accept := r.Header.Get("Accept") if strings.Contains(accept, "application/json") { writeJSONResponse(w, v) @@ -161,7 +161,7 @@ func (h *ringPageHandler) forget(ctx context.Context, id string) error { } // WriteJSONResponse writes some JSON as a HTTP response. 
-func writeJSONResponse(w http.ResponseWriter, v httpResponse) { +func writeJSONResponse(w http.ResponseWriter, v any) { w.Header().Set("Content-Type", "application/json") if err := json.NewEncoder(w).Encode(v); err != nil { diff --git a/vendor/github.com/grafana/dskit/ring/status.gohtml b/vendor/github.com/grafana/dskit/ring/ring_status.gohtml similarity index 100% rename from vendor/github.com/grafana/dskit/ring/status.gohtml rename to vendor/github.com/grafana/dskit/ring/ring_status.gohtml diff --git a/vendor/github.com/grafana/dskit/ring/spread_minimizing_token_generator.go b/vendor/github.com/grafana/dskit/ring/spread_minimizing_token_generator.go index 2363825076fcd..bd2ed9970a594 100644 --- a/vendor/github.com/grafana/dskit/ring/spread_minimizing_token_generator.go +++ b/vendor/github.com/grafana/dskit/ring/spread_minimizing_token_generator.go @@ -8,10 +8,6 @@ import ( "sort" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "golang.org/x/exp/slices" ) @@ -22,11 +18,10 @@ const ( ) var ( - instanceIDRegex = regexp.MustCompile(`^(.*)-(\d+)$`) + instanceIDRegex = regexp.MustCompile(`^(.*-)(\d+)$`) errorBadInstanceIDFormat = func(instanceID string) error { return fmt.Errorf("unable to extract instance id from %q", instanceID) } - errorNoPreviousInstance = fmt.Errorf("impossible to find the instance preceding the target instance, because it is the first instance") errorMissingPreviousInstance = func(requiredInstanceID string) error { return fmt.Errorf("the instance %q has not been registered to the ring or has no tokens yet", requiredInstanceID) @@ -49,15 +44,13 @@ var ( ) type SpreadMinimizingTokenGenerator struct { - instanceID int - instance string - zoneID int - spreadMinimizingZones []string - canJoinEnabled bool - logger log.Logger + instanceID int + instancePrefix string + zoneID int + canJoinEnabled bool } -func NewSpreadMinimizingTokenGenerator(instance, zone string, spreadMinimizingZones []string, canJoinEnabled bool, logger log.Logger) (*SpreadMinimizingTokenGenerator, error) { +func NewSpreadMinimizingTokenGenerator(instance, zone string, spreadMinimizingZones []string, canJoinEnabled bool) (*SpreadMinimizingTokenGenerator, error) { if len(spreadMinimizingZones) <= 0 || len(spreadMinimizingZones) > maxZonesCount { return nil, errorZoneCountOutOfBound(len(spreadMinimizingZones)) } @@ -66,52 +59,35 @@ func NewSpreadMinimizingTokenGenerator(instance, zone string, spreadMinimizingZo if !slices.IsSorted(sortedZones) { sort.Strings(sortedZones) } - instanceID, err := parseInstanceID(instance) + zoneID, err := findZoneID(zone, sortedZones) if err != nil { return nil, err } - zoneID, err := findZoneID(zone, sortedZones) + + prefix, instanceID, err := parseInstanceID(instance) if err != nil { return nil, err } - tokenGenerator := &SpreadMinimizingTokenGenerator{ - instanceID: instanceID, - instance: instance, - zoneID: zoneID, - spreadMinimizingZones: sortedZones, - canJoinEnabled: canJoinEnabled, - logger: logger, - } - return tokenGenerator, nil + return NewSpreadMinimizingTokenGeneratorForInstanceAndZoneID(prefix, instanceID, zoneID, canJoinEnabled), nil } -func parseInstanceID(instanceID string) (int, error) { - parts := instanceIDRegex.FindStringSubmatch(instanceID) - if len(parts) != 3 { - return 0, errorBadInstanceIDFormat(instanceID) +func NewSpreadMinimizingTokenGeneratorForInstanceAndZoneID(instancePrefix string, instanceID, zoneID int, canJoinEnabled bool) *SpreadMinimizingTokenGenerator { + return 
&SpreadMinimizingTokenGenerator{ + instanceID: instanceID, + instancePrefix: instancePrefix, + zoneID: zoneID, + canJoinEnabled: canJoinEnabled, } - return strconv.Atoi(parts[2]) } -// previousInstance determines the string id of the instance preceding the given instance string id. -// If it is impossible to parse the given instanceID, or it is impossible to determine its predecessor -// because the passed instanceID has a bad format, or has no predecessor, an error is returned. -// For examples, my-instance-1 is preceded by instance my-instance-0, but my-instance-0 has no -// predecessor because its index is 0. -func previousInstance(instanceID string) (string, error) { +func parseInstanceID(instanceID string) (string, int, error) { parts := instanceIDRegex.FindStringSubmatch(instanceID) if len(parts) != 3 { - return "", errorBadInstanceIDFormat(instanceID) - } - id, err := strconv.Atoi(parts[2]) - if err != nil { - return "", err - } - if id == 0 { - return "", errorNoPreviousInstance + return "", 0, errorBadInstanceIDFormat(instanceID) } - return fmt.Sprintf("%s-%d", parts[1], id-1), nil + val, err := strconv.Atoi(parts[2]) + return parts[1], val, err } // findZoneID gets a zone name and a slice of sorted zones, @@ -193,7 +169,11 @@ func (t *SpreadMinimizingTokenGenerator) GenerateTokens(requestedTokensCount int used[v] = true } - allTokens := t.generateAllTokens() + allTokens, err := t.generateAllTokens() + if err != nil { + // we were unable to generate required tokens, so we panic. + panic(err) + } uniqueTokens := make(Tokens, 0, requestedTokensCount) // allTokens is a sorted slice of tokens for instance t.cfg.InstanceID in zone t.cfg.zone @@ -214,11 +194,14 @@ func (t *SpreadMinimizingTokenGenerator) GenerateTokens(requestedTokensCount int // placed in the ring that already contains instances with all the ids lower that t.instanceID // is optimal. // Calls to this method will always return the same set of tokens. -func (t *SpreadMinimizingTokenGenerator) generateAllTokens() Tokens { - tokensByInstanceID := t.generateTokensByInstanceID() +func (t *SpreadMinimizingTokenGenerator) generateAllTokens() (Tokens, error) { + tokensByInstanceID, err := t.generateTokensByInstanceID() + if err != nil { + return nil, err + } allTokens := tokensByInstanceID[t.instanceID] slices.Sort(allTokens) - return allTokens + return allTokens, nil } // generateTokensByInstanceID generates the optimal number of tokens (optimalTokenPerInstance), @@ -226,13 +209,13 @@ func (t *SpreadMinimizingTokenGenerator) generateAllTokens() Tokens { // (with id t.instanceID). Generated tokens are not sorted, but they are distributed in such a // way that registered ownership of all the instances is optimal. // Calls to this method will always return the same set of tokens. -func (t *SpreadMinimizingTokenGenerator) generateTokensByInstanceID() map[int]Tokens { +func (t *SpreadMinimizingTokenGenerator) generateTokensByInstanceID() (map[int]Tokens, error) { firstInstanceTokens := t.generateFirstInstanceTokens() tokensByInstanceID := make(map[int]Tokens, t.instanceID+1) tokensByInstanceID[0] = firstInstanceTokens if t.instanceID == 0 { - return tokensByInstanceID + return tokensByInstanceID, nil } // tokensQueues is a slice of priority queues. 
Slice indexes correspond @@ -272,10 +255,8 @@ func (t *SpreadMinimizingTokenGenerator) generateTokensByInstanceID() map[int]To optimalTokenOwnership := t.optimalTokenOwnership(optimalInstanceOwnership, currInstanceOwnership, uint32(optimalTokensPerInstance-addedTokens)) highestOwnershipInstance := instanceQueue.Peek() if highestOwnershipInstance == nil || highestOwnershipInstance.ownership <= float64(optimalTokenOwnership) { - level.Warn(t.logger).Log("msg", "it was impossible to add a token because the instance with the highest ownership cannot satisfy the request", "added tokens", addedTokens+1, "highest ownership", highestOwnershipInstance.ownership, "requested ownership", optimalTokenOwnership) - // if this happens, it means that we cannot accommodate other tokens, so we panic - err := fmt.Errorf("it was impossible to add %dth token for instance with id %d in zone %s because the instance with the highest ownership cannot satisfy the requested ownership %d", addedTokens+1, i, t.spreadMinimizingZones[t.zoneID], optimalTokenOwnership) - panic(err) + // if this happens, it means that we cannot accommodate other tokens + return nil, fmt.Errorf("it was impossible to add %dth token for instance with id %d in zone id %d because the instance with the highest ownership cannot satisfy the requested ownership %d", addedTokens+1, i, t.zoneID, optimalTokenOwnership) } tokensQueue := tokensQueues[highestOwnershipInstance.item.instanceID] highestOwnershipToken := tokensQueue.Peek() @@ -288,10 +269,8 @@ func (t *SpreadMinimizingTokenGenerator) generateTokensByInstanceID() map[int]To token := highestOwnershipToken.item newToken, err := t.calculateNewToken(token, optimalTokenOwnership) if err != nil { - level.Error(t.logger).Log("msg", "it was impossible to calculate a new token because an error occurred", "err", err) - // if this happens, it means that we cannot accommodate additional tokens, so we panic - err := fmt.Errorf("it was impossible to calculate the %dth token for instance with id %d in zone %s", addedTokens+1, i, t.spreadMinimizingZones[t.zoneID]) - panic(err) + // if this happens, it means that we cannot accommodate additional tokens + return nil, fmt.Errorf("it was impossible to calculate the %dth token for instance with id %d in zone id %d", addedTokens+1, i, t.zoneID) } tokens = append(tokens, newToken) // add the new token to currInstanceTokenQueue @@ -317,7 +296,7 @@ func (t *SpreadMinimizingTokenGenerator) generateTokensByInstanceID() map[int]To tokensByInstanceID[i] = tokens // if this is the last iteration we return, so we avoid to call additional heap.Pushs if i == t.instanceID { - return tokensByInstanceID + return tokensByInstanceID, nil } // If there were some ignored instances, we put them back on the queue. 
@@ -331,7 +310,7 @@ func (t *SpreadMinimizingTokenGenerator) generateTokensByInstanceID() map[int]To heap.Push(&instanceQueue, newRingInstanceOwnershipInfo(i, currInstanceOwnership)) } - return tokensByInstanceID + return tokensByInstanceID, nil } func (t *SpreadMinimizingTokenGenerator) CanJoin(instances map[string]InstanceDesc) error { @@ -339,13 +318,10 @@ func (t *SpreadMinimizingTokenGenerator) CanJoin(instances map[string]InstanceDe return nil } - prevInstance, err := previousInstance(t.instance) - if err != nil { - if errors.Is(err, errorNoPreviousInstance) { - return nil - } - return err + if t.instanceID == 0 { + return nil } + prevInstance := fmt.Sprintf("%s%d", t.instancePrefix, t.instanceID-1) instanceDesc, ok := instances[prevInstance] if ok && len(instanceDesc.Tokens) != 0 { return nil diff --git a/vendor/github.com/grafana/dskit/ring/tokens.go b/vendor/github.com/grafana/dskit/ring/tokens.go index cf4999ff5d21f..7f0780639421b 100644 --- a/vendor/github.com/grafana/dskit/ring/tokens.go +++ b/vendor/github.com/grafana/dskit/ring/tokens.go @@ -7,6 +7,8 @@ import ( "sort" ) +type Token uint32 + // Tokens is a simple list of tokens. type Tokens []uint32 diff --git a/vendor/github.com/grafana/dskit/server/PROXYPROTOCOL.md b/vendor/github.com/grafana/dskit/server/PROXYPROTOCOL.md new file mode 100644 index 0000000000000..726bde758dc82 --- /dev/null +++ b/vendor/github.com/grafana/dskit/server/PROXYPROTOCOL.md @@ -0,0 +1,28 @@ +# PROXY protocol support + +> **Note:** enabling PROXY protocol support does not break existing setups (e.g. non-PROXY connections are still accepted), however it does add a small overhead to the connection handling. + +To enable PROXY protocol support, set `Config.ProxyProtocolEnabled` to `true` before initializing a `Server` in your application. This enables PROXY protocol for both HTTP and gRPC servers. + +```go +cfg := &Config{ + ProxyProtocolEnabled: true, + // ... +} + +server := NewServer(cfg) +// ... +``` + +PROXY protocol is supported by using [go-proxyproto](https://github.com/pires/go-proxyproto). +Both PROXY v1 and PROXY v2 are supported out of the box. + +When enabled, incoming connections are checked for the PROXY header, and if present, the connection information is updated to reflect the original source address. +Most commonly, you will use the source address via [Request.RemoteAddr](https://pkg.go.dev/net/http#Request.RemoteAddr). + +```go +server.HTTP.HandleFunc("/your-endpoint", func(w http.ResponseWriter, r *http.Request) { + ip, _, err := net.SplitHostPort(r.RemoteAddr) + // ... +}) +``` diff --git a/vendor/github.com/grafana/dskit/server/fake_server.pb.go b/vendor/github.com/grafana/dskit/server/fake_server.pb.go index 75ee6b0a14e3e..4bb2d5a1f3901 100644 --- a/vendor/github.com/grafana/dskit/server/fake_server.pb.go +++ b/vendor/github.com/grafana/dskit/server/fake_server.pb.go @@ -29,6 +29,49 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +type ProxyProtoIPResponse struct { + IP string `protobuf:"bytes,1,opt,name=IP,proto3" json:"IP,omitempty"` +} + +func (m *ProxyProtoIPResponse) Reset() { *m = ProxyProtoIPResponse{} } +func (*ProxyProtoIPResponse) ProtoMessage() {} +func (*ProxyProtoIPResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a932e7b7b9f5c118, []int{0} +} +func (m *ProxyProtoIPResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProxyProtoIPResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProxyProtoIPResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ProxyProtoIPResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProxyProtoIPResponse.Merge(m, src) +} +func (m *ProxyProtoIPResponse) XXX_Size() int { + return m.Size() +} +func (m *ProxyProtoIPResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ProxyProtoIPResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ProxyProtoIPResponse proto.InternalMessageInfo + +func (m *ProxyProtoIPResponse) GetIP() string { + if m != nil { + return m.IP + } + return "" +} + type FailWithHTTPErrorRequest struct { Code int32 `protobuf:"varint,1,opt,name=Code,proto3" json:"Code,omitempty"` } @@ -36,7 +79,7 @@ type FailWithHTTPErrorRequest struct { func (m *FailWithHTTPErrorRequest) Reset() { *m = FailWithHTTPErrorRequest{} } func (*FailWithHTTPErrorRequest) ProtoMessage() {} func (*FailWithHTTPErrorRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a932e7b7b9f5c118, []int{0} + return fileDescriptor_a932e7b7b9f5c118, []int{1} } func (m *FailWithHTTPErrorRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -73,32 +116,61 @@ func (m *FailWithHTTPErrorRequest) GetCode() int32 { } func init() { + proto.RegisterType((*ProxyProtoIPResponse)(nil), "server.ProxyProtoIPResponse") proto.RegisterType((*FailWithHTTPErrorRequest)(nil), "server.FailWithHTTPErrorRequest") } func init() { proto.RegisterFile("fake_server.proto", fileDescriptor_a932e7b7b9f5c118) } var fileDescriptor_a932e7b7b9f5c118 = []byte{ - // 265 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x4b, 0xcc, 0x4e, - 0x8d, 0x2f, 0x4e, 0x2d, 0x2a, 0x4b, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x83, - 0xf0, 0xa4, 0xa4, 0xd3, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0xc1, 0xa2, 0x49, 0xa5, 0x69, 0xfa, - 0xa9, 0xb9, 0x05, 0x25, 0x95, 0x10, 0x45, 0x4a, 0x7a, 0x5c, 0x12, 0x6e, 0x89, 0x99, 0x39, 0xe1, - 0x99, 0x25, 0x19, 0x1e, 0x21, 0x21, 0x01, 0xae, 0x45, 0x45, 0xf9, 0x45, 0x41, 0xa9, 0x85, 0xa5, - 0xa9, 0xc5, 0x25, 0x42, 0x42, 0x5c, 0x2c, 0xce, 0xf9, 0x29, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, - 0xac, 0x41, 0x60, 0xb6, 0xd1, 0x6d, 0x26, 0x2e, 0x2e, 0xb7, 0xc4, 0xec, 0xd4, 0x60, 0xb0, 0xd9, - 0x42, 0xd6, 0x5c, 0xec, 0xc1, 0xa5, 0xc9, 0xc9, 0xa9, 0xa9, 0x29, 0x42, 0x62, 0x7a, 0x10, 0x7b, - 0xf4, 0x60, 0xf6, 0xe8, 0xb9, 0x82, 0xec, 0x91, 0xc2, 0x21, 0xae, 0xc4, 0x20, 0xe4, 0xc8, 0xc5, - 0x0b, 0xb3, 0x1b, 0x6c, 0x2f, 0x19, 0x46, 0xf8, 0x73, 0x09, 0x62, 0x38, 0x5f, 0x48, 0x41, 0x0f, - 0x1a, 0x0e, 0xb8, 0x7c, 0x86, 0xc7, 0x40, 0x4b, 0x2e, 0xd6, 0xe0, 0x9c, 0xd4, 0xd4, 0x02, 0xb2, - 0xbc, 0xc3, 0x1d, 0x5c, 0x52, 0x94, 0x9a, 0x98, 0x4b, 0xa6, 0x01, 0x06, 0x8c, 0x4e, 0x26, 0x17, - 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xf0, 0xe1, 
0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, 0xc6, - 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, - 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, - 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0x92, 0xd8, 0xc0, 0x26, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, - 0xff, 0x43, 0x2b, 0x71, 0x6d, 0x04, 0x02, 0x00, 0x00, -} + // 330 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x91, 0xb1, 0x4e, 0x02, 0x41, + 0x10, 0x86, 0x77, 0x51, 0x30, 0xae, 0xd1, 0x84, 0x8d, 0x31, 0x04, 0xcd, 0x84, 0x5c, 0x61, 0xac, + 0x0e, 0xa3, 0x36, 0xc6, 0x4a, 0x09, 0xc4, 0xab, 0xdc, 0xdc, 0x91, 0x58, 0x9a, 0x03, 0x06, 0x24, + 0x1c, 0xec, 0xb9, 0x77, 0x67, 0xa4, 0xf3, 0x11, 0x7c, 0x0c, 0x3b, 0x5f, 0xc3, 0x92, 0x92, 0x52, + 0x96, 0xc6, 0x92, 0x47, 0x30, 0x2c, 0x12, 0x0b, 0xc5, 0xe2, 0xba, 0x9d, 0xc9, 0xe4, 0xff, 0xbf, + 0x7f, 0x7f, 0x96, 0x6f, 0xfb, 0x3d, 0xbc, 0x8b, 0x50, 0x3d, 0xa2, 0xb2, 0x43, 0x25, 0x63, 0xc9, + 0x73, 0x8b, 0xa9, 0xb8, 0xdf, 0x91, 0xb2, 0x13, 0x60, 0xd9, 0x6c, 0x1b, 0x49, 0xbb, 0x8c, 0xfd, + 0x30, 0x1e, 0x2e, 0x8e, 0xac, 0x43, 0xb6, 0x2b, 0x94, 0x7c, 0x1a, 0x8a, 0xf9, 0xe4, 0x08, 0x17, + 0xa3, 0x50, 0x0e, 0x22, 0xe4, 0x3b, 0x2c, 0xe3, 0x88, 0x02, 0x2d, 0xd1, 0xa3, 0x4d, 0x37, 0xe3, + 0x08, 0xcb, 0x66, 0x85, 0x9a, 0xdf, 0x0d, 0x6e, 0xbb, 0xf1, 0xfd, 0x75, 0xbd, 0x2e, 0xaa, 0x4a, + 0x49, 0xe5, 0xe2, 0x43, 0x82, 0x51, 0xcc, 0x39, 0x5b, 0xaf, 0xc8, 0x16, 0x9a, 0xeb, 0xac, 0x6b, + 0xde, 0x27, 0x6f, 0x6b, 0x8c, 0xd5, 0xfc, 0x1e, 0x7a, 0x86, 0x81, 0x5f, 0xb0, 0x0d, 0x2f, 0x69, + 0x36, 0x11, 0x5b, 0x7c, 0xcf, 0x5e, 0xf0, 0xd8, 0x4b, 0x1e, 0xbb, 0x3a, 0xe7, 0x29, 0xae, 0xd8, + 0x5b, 0x84, 0x5f, 0xb2, 0xed, 0xa5, 0xb7, 0xf1, 0x4d, 0x21, 0x71, 0xc3, 0xf2, 0xbf, 0xf0, 0x79, + 0xc9, 0xfe, 0xfe, 0xaf, 0x55, 0xc9, 0xfe, 0x11, 0x3c, 0x67, 0x59, 0x2f, 0x40, 0x0c, 0x53, 0xc5, + 0xd9, 0xf2, 0x62, 0x85, 0x7e, 0x3f, 0xa5, 0xc0, 0x31, 0xe5, 0x2e, 0x2b, 0xb8, 0x18, 0x27, 0x6a, + 0xf0, 0xd3, 0x5d, 0xc5, 0x0f, 0x02, 0x54, 0x8e, 0x58, 0xa9, 0x77, 0xb0, 0x4c, 0xfb, 0x57, 0xdf, + 0x16, 0xb9, 0x3a, 0x1b, 0x4d, 0x80, 0x8c, 0x27, 0x40, 0x66, 0x13, 0xa0, 0xcf, 0x1a, 0xe8, 0xab, + 0x06, 0xfa, 0xae, 0x81, 0x8e, 0x34, 0xd0, 0x0f, 0x0d, 0xf4, 0x53, 0x03, 0x99, 0x69, 0xa0, 0x2f, + 0x53, 0x20, 0xa3, 0x29, 0x90, 0xf1, 0x14, 0x48, 0x23, 0x67, 0x5c, 0x4e, 0xbf, 0x02, 0x00, 0x00, + 0xff, 0xff, 0xf3, 0x3d, 0xce, 0x89, 0x80, 0x02, 0x00, 0x00, +} + +func (this *ProxyProtoIPResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + that1, ok := that.(*ProxyProtoIPResponse) + if !ok { + that2, ok := that.(ProxyProtoIPResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.IP != that1.IP { + return false + } + return true +} func (this *FailWithHTTPErrorRequest) Equal(that interface{}) bool { if that == nil { return this == nil @@ -123,6 +195,16 @@ func (this *FailWithHTTPErrorRequest) Equal(that interface{}) bool { } return true } +func (this *ProxyProtoIPResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&server.ProxyProtoIPResponse{") + s = append(s, "IP: "+fmt.Sprintf("%#v", this.IP)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} func (this *FailWithHTTPErrorRequest) GoString() string { if this == nil { return "nil" @@ -159,6 +241,7 @@ type FakeServerClient interface { FailWithHTTPError(ctx 
context.Context, in *FailWithHTTPErrorRequest, opts ...grpc.CallOption) (*empty.Empty, error) Sleep(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) StreamSleep(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (FakeServer_StreamSleepClient, error) + ReturnProxyProtoCallerIP(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ProxyProtoIPResponse, error) } type fakeServerClient struct { @@ -237,6 +320,15 @@ func (x *fakeServerStreamSleepClient) Recv() (*empty.Empty, error) { return m, nil } +func (c *fakeServerClient) ReturnProxyProtoCallerIP(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ProxyProtoIPResponse, error) { + out := new(ProxyProtoIPResponse) + err := c.cc.Invoke(ctx, "/server.FakeServer/ReturnProxyProtoCallerIP", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // FakeServerServer is the server API for FakeServer service. type FakeServerServer interface { Succeed(context.Context, *empty.Empty) (*empty.Empty, error) @@ -244,6 +336,7 @@ type FakeServerServer interface { FailWithHTTPError(context.Context, *FailWithHTTPErrorRequest) (*empty.Empty, error) Sleep(context.Context, *empty.Empty) (*empty.Empty, error) StreamSleep(*empty.Empty, FakeServer_StreamSleepServer) error + ReturnProxyProtoCallerIP(context.Context, *empty.Empty) (*ProxyProtoIPResponse, error) } // UnimplementedFakeServerServer can be embedded to have forward compatible implementations. @@ -265,6 +358,9 @@ func (*UnimplementedFakeServerServer) Sleep(ctx context.Context, req *empty.Empt func (*UnimplementedFakeServerServer) StreamSleep(req *empty.Empty, srv FakeServer_StreamSleepServer) error { return status.Errorf(codes.Unimplemented, "method StreamSleep not implemented") } +func (*UnimplementedFakeServerServer) ReturnProxyProtoCallerIP(ctx context.Context, req *empty.Empty) (*ProxyProtoIPResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReturnProxyProtoCallerIP not implemented") +} func RegisterFakeServerServer(s *grpc.Server, srv FakeServerServer) { s.RegisterService(&_FakeServer_serviceDesc, srv) @@ -363,6 +459,24 @@ func (x *fakeServerStreamSleepServer) Send(m *empty.Empty) error { return x.ServerStream.SendMsg(m) } +func _FakeServer_ReturnProxyProtoCallerIP_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FakeServerServer).ReturnProxyProtoCallerIP(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/server.FakeServer/ReturnProxyProtoCallerIP", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FakeServerServer).ReturnProxyProtoCallerIP(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + var _FakeServer_serviceDesc = grpc.ServiceDesc{ ServiceName: "server.FakeServer", HandlerType: (*FakeServerServer)(nil), @@ -383,6 +497,10 @@ var _FakeServer_serviceDesc = grpc.ServiceDesc{ MethodName: "Sleep", Handler: _FakeServer_Sleep_Handler, }, + { + MethodName: "ReturnProxyProtoCallerIP", + Handler: _FakeServer_ReturnProxyProtoCallerIP_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -394,6 +512,36 @@ var _FakeServer_serviceDesc = grpc.ServiceDesc{ Metadata: "fake_server.proto", } +func (m *ProxyProtoIPResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProxyProtoIPResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProxyProtoIPResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.IP) > 0 { + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintFakeServer(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *FailWithHTTPErrorRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -433,6 +581,19 @@ func encodeVarintFakeServer(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *ProxyProtoIPResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.IP) + if l > 0 { + n += 1 + l + sovFakeServer(uint64(l)) + } + return n +} + func (m *FailWithHTTPErrorRequest) Size() (n int) { if m == nil { return 0 @@ -451,6 +612,16 @@ func sovFakeServer(x uint64) (n int) { func sozFakeServer(x uint64) (n int) { return sovFakeServer(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *ProxyProtoIPResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProxyProtoIPResponse{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `}`, + }, "") + return s +} func (this *FailWithHTTPErrorRequest) String() string { if this == nil { return "nil" @@ -469,6 +640,91 @@ func valueToStringFakeServer(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *ProxyProtoIPResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFakeServer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProxyProtoIPResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProxyProtoIPResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFakeServer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFakeServer + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFakeServer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFakeServer(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthFakeServer + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthFakeServer + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *FailWithHTTPErrorRequest) Unmarshal(dAtA []byte) error { l := 
len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/grafana/dskit/server/fake_server.proto b/vendor/github.com/grafana/dskit/server/fake_server.proto index 248a6f244bdae..0c4780cda0d48 100644 --- a/vendor/github.com/grafana/dskit/server/fake_server.proto +++ b/vendor/github.com/grafana/dskit/server/fake_server.proto @@ -10,6 +10,11 @@ service FakeServer { rpc FailWithHTTPError(FailWithHTTPErrorRequest) returns (google.protobuf.Empty) {}; rpc Sleep(google.protobuf.Empty) returns (google.protobuf.Empty) {}; rpc StreamSleep(google.protobuf.Empty) returns (stream google.protobuf.Empty) {}; + rpc ReturnProxyProtoCallerIP(google.protobuf.Empty) returns (ProxyProtoIPResponse) {}; +} + +message ProxyProtoIPResponse { + string IP = 1; } message FailWithHTTPErrorRequest { diff --git a/vendor/github.com/grafana/dskit/server/metrics.go b/vendor/github.com/grafana/dskit/server/metrics.go index aa1c3e53aef93..d6011525da3ab 100644 --- a/vendor/github.com/grafana/dskit/server/metrics.go +++ b/vendor/github.com/grafana/dskit/server/metrics.go @@ -15,12 +15,13 @@ import ( ) type Metrics struct { - TCPConnections *prometheus.GaugeVec - TCPConnectionsLimit *prometheus.GaugeVec - RequestDuration *prometheus.HistogramVec - ReceivedMessageSize *prometheus.HistogramVec - SentMessageSize *prometheus.HistogramVec - InflightRequests *prometheus.GaugeVec + TCPConnections *prometheus.GaugeVec + TCPConnectionsLimit *prometheus.GaugeVec + RequestDuration *prometheus.HistogramVec + PerTenantRequestDuration *prometheus.HistogramVec + ReceivedMessageSize *prometheus.HistogramVec + SentMessageSize *prometheus.HistogramVec + InflightRequests *prometheus.GaugeVec } func NewServerMetrics(cfg Config) *Metrics { @@ -46,6 +47,15 @@ func NewServerMetrics(cfg Config) *Metrics { NativeHistogramMaxBucketNumber: 100, NativeHistogramMinResetDuration: time.Hour, }, []string{"method", "route", "status_code", "ws"}), + PerTenantRequestDuration: reg.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: cfg.MetricsNamespace, + Name: "per_tenant_request_duration_seconds", + Help: "Time (in seconds) spent serving HTTP requests for a particular tenant.", + Buckets: instrument.DefBuckets, + NativeHistogramBucketFactor: cfg.MetricsNativeHistogramFactor, + NativeHistogramMaxBucketNumber: 100, + NativeHistogramMinResetDuration: time.Hour, + }, []string{"method", "route", "status_code", "ws", "tenant"}), ReceivedMessageSize: reg.NewHistogramVec(prometheus.HistogramOpts{ Namespace: cfg.MetricsNamespace, Name: "request_message_bytes", diff --git a/vendor/github.com/grafana/dskit/server/server.go b/vendor/github.com/grafana/dskit/server/server.go index 6c2133a9bc242..a23eead3891e4 100644 --- a/vendor/github.com/grafana/dskit/server/server.go +++ b/vendor/github.com/grafana/dskit/server/server.go @@ -17,21 +17,21 @@ import ( "strings" "time" - _ "github.com/grafana/pyroscope-go/godeltaprof/http/pprof" // anonymous import to get godelatprof handlers registered - gokit_log "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/gorilla/mux" + _ "github.com/grafana/pyroscope-go/godeltaprof/http/pprof" // anonymous import to get godelatprof handlers registered otgrpc "github.com/opentracing-contrib/go-grpc" "github.com/opentracing/opentracing-go" + "github.com/pires/go-proxyproto" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/config" "github.com/prometheus/exporter-toolkit/web" - "github.com/soheilhy/cmux" "golang.org/x/net/netutil" 
"google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/experimental" "google.golang.org/grpc/keepalive" "github.com/grafana/dskit/httpgrpc" @@ -80,14 +80,15 @@ type Config struct { // for details. A generally useful value is 1.1. MetricsNativeHistogramFactor float64 `yaml:"-"` - HTTPListenNetwork string `yaml:"http_listen_network"` - HTTPListenAddress string `yaml:"http_listen_address"` - HTTPListenPort int `yaml:"http_listen_port"` - HTTPConnLimit int `yaml:"http_listen_conn_limit"` - GRPCListenNetwork string `yaml:"grpc_listen_network"` - GRPCListenAddress string `yaml:"grpc_listen_address"` - GRPCListenPort int `yaml:"grpc_listen_port"` - GRPCConnLimit int `yaml:"grpc_listen_conn_limit"` + HTTPListenNetwork string `yaml:"http_listen_network"` + HTTPListenAddress string `yaml:"http_listen_address"` + HTTPListenPort int `yaml:"http_listen_port"` + HTTPConnLimit int `yaml:"http_listen_conn_limit"` + GRPCListenNetwork string `yaml:"grpc_listen_network"` + GRPCListenAddress string `yaml:"grpc_listen_address"` + GRPCListenPort int `yaml:"grpc_listen_port"` + GRPCConnLimit int `yaml:"grpc_listen_conn_limit"` + ProxyProtocolEnabled bool `yaml:"proxy_protocol_enabled"` CipherSuites string `yaml:"tls_cipher_suites"` MinVersion string `yaml:"tls_min_version"` @@ -100,6 +101,8 @@ type Config struct { ExcludeRequestInLog bool `yaml:"-"` DisableRequestSuccessLog bool `yaml:"-"` + PerTenantDurationInstrumentation middleware.PerTenantCallback `yaml:"-"` + ServerGracefulShutdownTimeout time.Duration `yaml:"graceful_shutdown_timeout"` HTTPServerReadTimeout time.Duration `yaml:"http_server_read_timeout"` HTTPServerReadHeaderTimeout time.Duration `yaml:"http_server_read_header_timeout"` @@ -114,7 +117,6 @@ type Config struct { HTTPMiddleware []middleware.Interface `yaml:"-"` Router *mux.Router `yaml:"-"` DoNotAddDefaultHTTPMiddleware bool `yaml:"-"` - RouteHTTPToGRPC bool `yaml:"-"` GRPCServerMaxRecvMsgSize int `yaml:"grpc_server_max_recv_msg_size"` GRPCServerMaxSendMsgSize int `yaml:"grpc_server_max_send_msg_size"` @@ -127,11 +129,14 @@ type Config struct { GRPCServerMinTimeBetweenPings time.Duration `yaml:"grpc_server_min_time_between_pings"` GRPCServerPingWithoutStreamAllowed bool `yaml:"grpc_server_ping_without_stream_allowed"` GRPCServerNumWorkers int `yaml:"grpc_server_num_workers"` + GRPCServerStatsTrackingEnabled bool `yaml:"grpc_server_stats_tracking_enabled"` + GRPCServerRecvBufferPoolsEnabled bool `yaml:"grpc_server_recv_buffer_pools_enabled"` LogFormat string `yaml:"log_format"` LogLevel log.Level `yaml:"log_level"` Log gokit_log.Logger `yaml:"-"` LogSourceIPs bool `yaml:"log_source_ips_enabled"` + LogSourceIPsFull bool `yaml:"log_source_ips_full"` LogSourceIPsHeader string `yaml:"log_source_ips_header"` LogSourceIPsRegex string `yaml:"log_source_ips_regex"` LogRequestHeaders bool `yaml:"log_request_headers"` @@ -191,16 +196,20 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.GRPCServerTimeout, "server.grpc.keepalive.timeout", time.Second*20, "After having pinged for keepalive check, the duration after which an idle connection should be closed, Default: 20s") f.DurationVar(&cfg.GRPCServerMinTimeBetweenPings, "server.grpc.keepalive.min-time-between-pings", 5*time.Minute, "Minimum amount of time a client should wait before sending a keepalive ping. 
If client sends keepalive ping more often, server will send GOAWAY and close the connection.")
 	f.BoolVar(&cfg.GRPCServerPingWithoutStreamAllowed, "server.grpc.keepalive.ping-without-stream-allowed", false, "If true, server allows keepalive pings even when there are no active streams (RPCs). If false, and client sends ping when there are no active streams, server will send GOAWAY and close the connection.")
+	f.BoolVar(&cfg.GRPCServerStatsTrackingEnabled, "server.grpc.stats-tracking-enabled", true, "If true, the request_message_bytes, response_message_bytes, and inflight_requests metrics will be tracked. Enabling this option prevents the use of memory pools for parsing gRPC request bodies and may lead to more memory allocations.")
+	f.BoolVar(&cfg.GRPCServerRecvBufferPoolsEnabled, "server.grpc.recv-buffer-pools-enabled", false, "If true, gRPC's buffer pools will be used to handle incoming requests. Enabling this feature can reduce memory allocation, but also requires disabling GRPC server stats tracking by setting `server.grpc.stats-tracking-enabled=false`. This is an experimental gRPC feature, so it might be removed in a future version of the gRPC library.")
 	f.IntVar(&cfg.GRPCServerNumWorkers, "server.grpc.num-workers", 0, "If non-zero, configures the number of GRPC server workers used to serve the requests.")
 	f.StringVar(&cfg.PathPrefix, "server.path-prefix", "", "Base path to serve all API routes from (e.g. /v1/)")
 	f.StringVar(&cfg.LogFormat, "log.format", log.LogfmtFormat, "Output log messages in the given format. Valid formats: [logfmt, json]")
 	cfg.LogLevel.RegisterFlags(f)
 	f.BoolVar(&cfg.LogSourceIPs, "server.log-source-ips-enabled", false, "Optionally log the source IPs.")
+	f.BoolVar(&cfg.LogSourceIPsFull, "server.log-source-ips-full", false, "Log all source IPs instead of only the originating one. Only used if server.log-source-ips-enabled is true")
 	f.StringVar(&cfg.LogSourceIPsHeader, "server.log-source-ips-header", "", "Header field storing the source IPs. Only used if server.log-source-ips-enabled is true. If not set the default Forwarded, X-Real-IP and X-Forwarded-For headers are used")
 	f.StringVar(&cfg.LogSourceIPsRegex, "server.log-source-ips-regex", "", "Regex for matching the source IPs. Only used if server.log-source-ips-enabled is true. If not set the default Forwarded, X-Real-IP and X-Forwarded-For headers are used")
 	f.BoolVar(&cfg.LogRequestHeaders, "server.log-request-headers", false, "Optionally log request headers.")
 	f.StringVar(&cfg.LogRequestExcludeHeadersList, "server.log-request-headers-exclude-list", "", "Comma separated list of headers to exclude from logging. Only used if server.log-request-headers is true.")
 	f.BoolVar(&cfg.LogRequestAtInfoLevel, "server.log-request-at-info-level-enabled", false, "Optionally log requests at info level instead of debug level. Applies to request headers as well if server.log-request-headers is enabled.")
+	f.BoolVar(&cfg.ProxyProtocolEnabled, "server.proxy-protocol-enabled", false, "Enables PROXY protocol.")
 }
 
 func (cfg *Config) registererOrDefault() prometheus.Registerer {
@@ -220,13 +229,6 @@ type Server struct {
 	grpcListener net.Listener
 	httpListener net.Listener
 
-	// These fields are used to support grpc over the http server
-	// if RouteHTTPToGRPC is set.
the fields are kept here - // so they can be initialized in New() and started in Run() - grpchttpmux cmux.CMux - grpcOnHTTPListener net.Listener - GRPCOnHTTPServer *grpc.Server - HTTP *mux.Router HTTPServer *http.Server GRPC *grpc.Server @@ -278,15 +280,6 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { httpListener = netutil.LimitListener(httpListener, cfg.HTTPConnLimit) } - var grpcOnHTTPListener net.Listener - var grpchttpmux cmux.CMux - if cfg.RouteHTTPToGRPC { - grpchttpmux = cmux.New(httpListener) - - httpListener = grpchttpmux.Match(cmux.HTTP1Fast("PATCH")) - grpcOnHTTPListener = grpchttpmux.Match(cmux.HTTP2()) - } - network = cfg.GRPCListenNetwork if network == "" { network = DefaultNetwork @@ -302,6 +295,11 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { grpcListener = netutil.LimitListener(grpcListener, cfg.GRPCConnLimit) } + if cfg.ProxyProtocolEnabled { + httpListener = newProxyProtocolListener(httpListener, cfg.HTTPServerReadHeaderTimeout) + grpcListener = newProxyProtocolListener(grpcListener, cfg.HTTPServerReadHeaderTimeout) + } + cipherSuites, err := stringToCipherSuites(cfg.CipherSuites) if err != nil { return nil, err @@ -375,22 +373,29 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { WithRequest: !cfg.ExcludeRequestInLog, DisableRequestSuccessLog: cfg.DisableRequestSuccessLog, } - var reportGRPCStatusesOptions []middleware.InstrumentationOption + var grpcInstrumentationOptions []middleware.InstrumentationOption if cfg.ReportGRPCCodesInInstrumentationLabel { - reportGRPCStatusesOptions = []middleware.InstrumentationOption{middleware.ReportGRPCStatusOption} + grpcInstrumentationOptions = append(grpcInstrumentationOptions, middleware.ReportGRPCStatusOption) + } + if cfg.PerTenantDurationInstrumentation != nil { + grpcInstrumentationOptions = append(grpcInstrumentationOptions, + middleware.WithPerTenantInstrumentation( + metrics.PerTenantRequestDuration, + cfg.PerTenantDurationInstrumentation, + )) } grpcMiddleware := []grpc.UnaryServerInterceptor{ serverLog.UnaryServerInterceptor, otgrpc.OpenTracingServerInterceptor(opentracing.GlobalTracer()), middleware.HTTPGRPCTracingInterceptor(router), // This must appear after the OpenTracingServerInterceptor. - middleware.UnaryServerInstrumentInterceptor(metrics.RequestDuration, reportGRPCStatusesOptions...), + middleware.UnaryServerInstrumentInterceptor(metrics.RequestDuration, grpcInstrumentationOptions...), } grpcMiddleware = append(grpcMiddleware, cfg.GRPCMiddleware...) grpcStreamMiddleware := []grpc.StreamServerInterceptor{ serverLog.StreamServerInterceptor, otgrpc.OpenTracingStreamServerInterceptor(opentracing.GlobalTracer()), - middleware.StreamServerInstrumentInterceptor(metrics.RequestDuration, reportGRPCStatusesOptions...), + middleware.StreamServerInstrumentInterceptor(metrics.RequestDuration, grpcInstrumentationOptions...), } grpcStreamMiddleware = append(grpcStreamMiddleware, cfg.GRPCStreamMiddleware...) 
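The interceptor hunk above only attaches the per-tenant instrumentation option when `cfg.PerTenantDurationInstrumentation` is set, so callers that do not opt in never record samples into the new `per_tenant_request_duration_seconds` histogram. A minimal sketch of opting in, assuming `middleware.PerTenantCallback` is a `func(context.Context) string` and that returning an empty string skips the per-tenant observation (both are assumptions based on this diff, not confirmed documentation):

```go
package main

import (
	"context"

	"github.com/grafana/dskit/server"
	"github.com/grafana/dskit/user"
)

// perTenantConfig returns a server.Config with per-tenant duration tracking
// enabled. Sketch only: the callback signature and the empty-string skip
// behavior are assumptions inferred from this diff.
func perTenantConfig() server.Config {
	var cfg server.Config
	cfg.PerTenantDurationInstrumentation = func(ctx context.Context) string {
		// The org ID is expected to have been injected upstream, e.g. by an
		// auth middleware calling user.InjectOrgID.
		tenant, err := user.ExtractOrgID(ctx)
		if err != nil {
			return "" // no tenant available: skip the per-tenant sample (assumed)
		}
		return tenant
	}
	return cfg
}
```

Because the callback's return value becomes a `tenant` label on the histogram, it should map requests to a bounded set of values to keep metric cardinality under control.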
@@ -423,13 +428,22 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { grpcOptions = append(grpcOptions, grpc.InTapHandle(grpcServerLimit.TapHandle), grpc.StatsHandler(grpcServerLimit)) } - grpcOptions = append(grpcOptions, - grpc.StatsHandler(middleware.NewStatsHandler( - metrics.ReceivedMessageSize, - metrics.SentMessageSize, - metrics.InflightRequests, - )), - ) + if cfg.GRPCServerStatsTrackingEnabled { + grpcOptions = append(grpcOptions, + grpc.StatsHandler(middleware.NewStatsHandler( + metrics.ReceivedMessageSize, + metrics.SentMessageSize, + metrics.InflightRequests, + )), + ) + } + + if cfg.GRPCServerRecvBufferPoolsEnabled { + if cfg.GRPCServerStatsTrackingEnabled { + return nil, fmt.Errorf("grpc_server_stats_tracking_enabled must be set to false if grpc_server_recv_buffer_pools_enabled is true") + } + grpcOptions = append(grpcOptions, experimental.RecvBufferPool(grpc.NewSharedBufferPool())) + } grpcOptions = append(grpcOptions, cfg.GRPCOptions...) if grpcTLSConfig != nil { @@ -437,41 +451,10 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { grpcOptions = append(grpcOptions, grpc.Creds(grpcCreds)) } grpcServer := grpc.NewServer(grpcOptions...) - grpcOnHTTPServer := grpc.NewServer(grpcOptions...) - sourceIPs, err := middleware.NewSourceIPs(cfg.LogSourceIPsHeader, cfg.LogSourceIPsRegex) + httpMiddleware, err := BuildHTTPMiddleware(cfg, router, metrics, logger) if err != nil { - return nil, fmt.Errorf("error setting up source IP extraction: %v", err) - } - logSourceIPs := sourceIPs - if !cfg.LogSourceIPs { - // We always include the source IPs for traces, - // but only want to log them in the middleware if that is enabled. - logSourceIPs = nil - } - - defaultLogMiddleware := middleware.NewLogMiddleware(logger, cfg.LogRequestHeaders, cfg.LogRequestAtInfoLevel, logSourceIPs, strings.Split(cfg.LogRequestExcludeHeadersList, ",")) - defaultLogMiddleware.DisableRequestSuccessLog = cfg.DisableRequestSuccessLog - - defaultHTTPMiddleware := []middleware.Interface{ - middleware.Tracer{ - RouteMatcher: router, - SourceIPs: sourceIPs, - }, - defaultLogMiddleware, - middleware.Instrument{ - RouteMatcher: router, - Duration: metrics.RequestDuration, - RequestBodySize: metrics.ReceivedMessageSize, - ResponseBodySize: metrics.SentMessageSize, - InflightRequests: metrics.InflightRequests, - }, - } - var httpMiddleware []middleware.Interface - if cfg.DoNotAddDefaultHTTPMiddleware { - httpMiddleware = cfg.HTTPMiddleware - } else { - httpMiddleware = append(defaultHTTPMiddleware, cfg.HTTPMiddleware...) 
+ return nil, fmt.Errorf("error building http middleware: %w", err) } httpServer := &http.Server{ @@ -491,20 +474,17 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { } return &Server{ - cfg: cfg, - httpListener: httpListener, - grpcListener: grpcListener, - grpcOnHTTPListener: grpcOnHTTPListener, - handler: handler, - grpchttpmux: grpchttpmux, - - HTTP: router, - HTTPServer: httpServer, - GRPC: grpcServer, - GRPCOnHTTPServer: grpcOnHTTPServer, - Log: logger, - Registerer: cfg.registererOrDefault(), - Gatherer: gatherer, + cfg: cfg, + httpListener: httpListener, + grpcListener: grpcListener, + handler: handler, + + HTTP: router, + HTTPServer: httpServer, + GRPC: grpcServer, + Log: logger, + Registerer: cfg.registererOrDefault(), + Gatherer: gatherer, }, nil } @@ -521,6 +501,48 @@ func RegisterInstrumentationWithGatherer(router *mux.Router, gatherer prometheus router.PathPrefix("/debug/pprof").Handler(http.DefaultServeMux) } +func BuildHTTPMiddleware(cfg Config, router *mux.Router, metrics *Metrics, logger gokit_log.Logger) ([]middleware.Interface, error) { + sourceIPs, err := middleware.NewSourceIPs(cfg.LogSourceIPsHeader, cfg.LogSourceIPsRegex, cfg.LogSourceIPsFull) + if err != nil { + return nil, fmt.Errorf("error setting up source IP extraction: %w", err) + } + logSourceIPs := sourceIPs + if !cfg.LogSourceIPs { + // We always include the source IPs for traces, + // but only want to log them in the middleware if that is enabled. + logSourceIPs = nil + } + + defaultLogMiddleware := middleware.NewLogMiddleware(logger, cfg.LogRequestHeaders, cfg.LogRequestAtInfoLevel, logSourceIPs, strings.Split(cfg.LogRequestExcludeHeadersList, ",")) + defaultLogMiddleware.DisableRequestSuccessLog = cfg.DisableRequestSuccessLog + + defaultHTTPMiddleware := []middleware.Interface{ + middleware.RouteInjector{ + RouteMatcher: router, + }, + middleware.Tracer{ + SourceIPs: sourceIPs, + }, + defaultLogMiddleware, + middleware.Instrument{ + Duration: metrics.RequestDuration, + PerTenantDuration: metrics.PerTenantRequestDuration, + PerTenantCallback: cfg.PerTenantDurationInstrumentation, + RequestBodySize: metrics.ReceivedMessageSize, + ResponseBodySize: metrics.SentMessageSize, + InflightRequests: metrics.InflightRequests, + }, + } + var httpMiddleware []middleware.Interface + if cfg.DoNotAddDefaultHTTPMiddleware { + httpMiddleware = cfg.HTTPMiddleware + } else { + httpMiddleware = append(defaultHTTPMiddleware, cfg.HTTPMiddleware...) + } + + return httpMiddleware, nil +} + // Run the server; blocks until SIGTERM (if signal handling is enabled), an error is received, or Stop() is called. func (s *Server) Run() error { errChan := make(chan error, 1) @@ -563,18 +585,6 @@ func (s *Server) Run() error { handleGRPCError(err, errChan) }() - // grpchttpmux will only be set if grpchttpmux RouteHTTPToGRPC is set - if s.grpchttpmux != nil { - go func() { - err := s.grpchttpmux.Serve() - handleGRPCError(err, errChan) - }() - go func() { - err := s.GRPCOnHTTPServer.Serve(s.grpcOnHTTPListener) - handleGRPCError(err, errChan) - }() - } - return <-errChan } @@ -615,3 +625,13 @@ func (s *Server) Shutdown() { _ = s.HTTPServer.Shutdown(ctx) s.GRPC.GracefulStop() } + +func newProxyProtocolListener(httpListener net.Listener, readHeaderTimeout time.Duration) net.Listener { + // Wraps the listener with a proxy protocol listener. + // NOTE: go-proxyproto supports non-PROXY, PROXY v1 and PROXY v2 protocols via the same listener. + // Therefore, enabling this feature does not break existing setups. 
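+	// Both the HTTP and gRPC listeners are wrapped with the same
+	// cfg.HTTPServerReadHeaderTimeout (a zero value falls back to
+	// go-proxyproto's DefaultReadHeaderTimeout). If a connection never
+	// delivers a PROXY header within that window, go-proxyproto reports
+	// ErrNoProxyProtocol and the connection is handled as a plain one.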
+ return &proxyproto.Listener{ + Listener: httpListener, + ReadHeaderTimeout: readHeaderTimeout, + } +} diff --git a/vendor/github.com/grafana/dskit/spanlogger/spanlogger.go b/vendor/github.com/grafana/dskit/spanlogger/spanlogger.go index 08653eda38abd..70c86d16d85dd 100644 --- a/vendor/github.com/grafana/dskit/spanlogger/spanlogger.go +++ b/vendor/github.com/grafana/dskit/spanlogger/spanlogger.go @@ -158,7 +158,7 @@ func (s *SpanLogger) getLogger() log.Logger { traceID, ok := tracing.ExtractSampledTraceID(s.ctx) if ok { - logger = log.With(logger, "traceID", traceID) + logger = log.With(logger, "trace_id", traceID) } // If the value has been set by another goroutine, fetch that other value and discard the one we made. if !s.logger.CompareAndSwap(nil, &logger) { @@ -167,3 +167,17 @@ func (s *SpanLogger) getLogger() log.Logger { } return logger } + +// SetSpanAndLogTag sets a tag on the span used by this SpanLogger, and appends a key/value pair to the logger used for +// future log lines emitted by this SpanLogger. +// +// It is not safe to call this method from multiple goroutines simultaneously. +// It is safe to call this method at the same time as calling other SpanLogger methods, however, this may produce +// inconsistent results (eg. some log lines may be emitted with the provided key/value pair, and others may not). +func (s *SpanLogger) SetSpanAndLogTag(key string, value interface{}) { + s.Span.SetTag(key, value) + + logger := s.getLogger() + wrappedLogger := log.With(logger, key, value) + s.logger.Store(&wrappedLogger) +} diff --git a/vendor/github.com/grafana/dskit/user/grpc.go b/vendor/github.com/grafana/dskit/user/grpc.go index 201b835eeab7d..fcfd3d7a91cdc 100644 --- a/vendor/github.com/grafana/dskit/user/grpc.go +++ b/vendor/github.com/grafana/dskit/user/grpc.go @@ -13,13 +13,8 @@ import ( // ExtractFromGRPCRequest extracts the user ID from the request metadata and returns // the user ID and a context with the user ID injected. func ExtractFromGRPCRequest(ctx context.Context) (string, context.Context, error) { - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return "", ctx, ErrNoOrgID - } - - orgIDs, ok := md[lowerOrgIDHeaderName] - if !ok || len(orgIDs) != 1 { + orgIDs := metadata.ValueFromIncomingContext(ctx, lowerOrgIDHeaderName) + if len(orgIDs) != 1 { return "", ctx, ErrNoOrgID } diff --git a/vendor/github.com/grafana/gomemcache/memcache/memcache.go b/vendor/github.com/grafana/gomemcache/memcache/memcache.go index c5962d092e0f6..c627cbdf9834c 100644 --- a/vendor/github.com/grafana/gomemcache/memcache/memcache.go +++ b/vendor/github.com/grafana/gomemcache/memcache/memcache.go @@ -619,7 +619,7 @@ func (c *Client) GetMulti(keys []string, opts ...Option) (map[string]*Item, erro options := newOptions(opts...) 
var lk sync.Mutex
-	m := make(map[string]*Item)
+	m := make(map[string]*Item, len(keys))
 	addItemToMap := func(it *Item) {
 		lk.Lock()
 		defer lk.Unlock()
diff --git a/vendor/github.com/pires/go-proxyproto/.gitignore b/vendor/github.com/pires/go-proxyproto/.gitignore
new file mode 100644
index 0000000000000..a2d2c30197697
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/.gitignore
@@ -0,0 +1,11 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+.idea
+bin
+pkg
+
+*.out
diff --git a/vendor/github.com/soheilhy/cmux/LICENSE b/vendor/github.com/pires/go-proxyproto/LICENSE
similarity index 99%
rename from vendor/github.com/soheilhy/cmux/LICENSE
rename to vendor/github.com/pires/go-proxyproto/LICENSE
index d645695673349..a65c05a627170 100644
--- a/vendor/github.com/soheilhy/cmux/LICENSE
+++ b/vendor/github.com/pires/go-proxyproto/LICENSE
@@ -1,4 +1,3 @@
-
                                  Apache License
                            Version 2.0, January 2004
                         http://www.apache.org/licenses/
@@ -179,7 +178,7 @@ APPENDIX: How to apply the Apache License to your work.
 
       To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
+      boilerplate notice, with the fields enclosed by brackets "{}"
       replaced with your own identifying information. (Don't include
       the brackets!)  The text should be enclosed in the appropriate
       comment syntax for the file format. We also recommend that a
@@ -187,7 +186,7 @@ same "printed page" as the copyright notice for easier
      identification within third-party archives.
 
-   Copyright [yyyy] [name of copyright owner]
+   Copyright 2016 Paulo Pires
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/pires/go-proxyproto/README.md b/vendor/github.com/pires/go-proxyproto/README.md
new file mode 100644
index 0000000000000..982707cceef8e
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/README.md
@@ -0,0 +1,162 @@
+# go-proxyproto
+
+[![Actions Status](https://github.com/pires/go-proxyproto/workflows/test/badge.svg)](https://github.com/pires/go-proxyproto/actions)
+[![Coverage Status](https://coveralls.io/repos/github/pires/go-proxyproto/badge.svg?branch=master)](https://coveralls.io/github/pires/go-proxyproto?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/github.com/pires/go-proxyproto)](https://goreportcard.com/report/github.com/pires/go-proxyproto)
+[![](https://godoc.org/github.com/pires/go-proxyproto?status.svg)](https://pkg.go.dev/github.com/pires/go-proxyproto?tab=doc)
+
+
+A Go library implementation of the [PROXY protocol, versions 1 and 2](https://www.haproxy.org/download/2.3/doc/proxy-protocol.txt),
+which provides, as per specification:
+> (...) a convenient way to safely transport connection
+> information such as a client's address across multiple layers of NAT or TCP
+> proxies. It is designed to require little changes to existing components and
+> to limit the performance impact caused by the processing of the transported
+> information.
+
+This library can be used in proxy clients, proxy servers, or both, wherever said protocol must be supported.
+Both protocol versions, 1 (text-based) and 2 (binary-based), are supported.
+
+## Installation
+
+```shell
+$ go get -u github.com/pires/go-proxyproto
+```
+
+## Usage
+
+### Client
+
+```go
+package main
+
+import (
+	"io"
+	"log"
+	"net"
+
+	proxyproto "github.com/pires/go-proxyproto"
+)
+
+func chkErr(err error) {
+	if err != nil {
+		log.Fatalf("Error: %s", err.Error())
+	}
+}
+
+func main() {
+	// Dial some proxy listener e.g. https://github.com/mailgun/proxyproto
+	target, err := net.ResolveTCPAddr("tcp", "127.0.0.1:2319")
+	chkErr(err)
+
+	conn, err := net.DialTCP("tcp", nil, target)
+	chkErr(err)
+
+	defer conn.Close()
+
+	// Create a proxyprotocol header or use HeaderProxyFromAddrs() if you
+	// have two conn's
+	header := &proxyproto.Header{
+		Version:           1,
+		Command:           proxyproto.PROXY,
+		TransportProtocol: proxyproto.TCPv4,
+		SourceAddr: &net.TCPAddr{
+			IP:   net.ParseIP("10.1.1.1"),
+			Port: 1000,
+		},
+		DestinationAddr: &net.TCPAddr{
+			IP:   net.ParseIP("20.2.2.2"),
+			Port: 2000,
+		},
+	}
+	// After the connection was created write the proxy headers first
+	_, err = header.WriteTo(conn)
+	chkErr(err)
+	// Then your data... e.g.:
+	_, err = io.WriteString(conn, "HELO")
+	chkErr(err)
+}
+```
+
+### Server
+
+```go
+package main
+
+import (
+	"log"
+	"net"
+
+	proxyproto "github.com/pires/go-proxyproto"
+)
+
+func main() {
+	// Create a listener
+	addr := "localhost:9876"
+	list, err := net.Listen("tcp", addr)
+	if err != nil {
+		log.Fatalf("couldn't listen to %q: %q\n", addr, err.Error())
+	}
+
+	// Wrap listener in a proxyproto listener
+	proxyListener := &proxyproto.Listener{Listener: list}
+	defer proxyListener.Close()
+
+	// Wait for a connection and accept it
+	conn, err := proxyListener.Accept()
+	if err != nil {
+		log.Fatalf("couldn't accept connection: %q\n", err.Error())
+	}
+	defer conn.Close()
+
+	// Print connection details
+	if conn.LocalAddr() == nil {
+		log.Fatal("couldn't retrieve local address")
+	}
+	log.Printf("local address: %q", conn.LocalAddr().String())
+
+	if conn.RemoteAddr() == nil {
+		log.Fatal("couldn't retrieve remote address")
+	}
+	log.Printf("remote address: %q", conn.RemoteAddr().String())
+}
+```
+
+### HTTP Server
+```go
+package main
+
+import (
+	"net"
+	"net/http"
+	"time"
+
+	"github.com/pires/go-proxyproto"
+)
+
+func main() {
+	server := http.Server{
+		Addr: ":8080",
+	}
+
+	ln, err := net.Listen("tcp", server.Addr)
+	if err != nil {
+		panic(err)
+	}
+
+	proxyListener := &proxyproto.Listener{
+		Listener:          ln,
+		ReadHeaderTimeout: 10 * time.Second,
+	}
+	defer proxyListener.Close()
+
+	server.Serve(proxyListener)
+}
+```
+
+## Special notes
+
+### AWS
+
+AWS Network Load Balancer (NLB) does not push the PPV2 header until the client starts sending the data. This is a problem if your server speaks first, e.g. SMTP, FTP, SSH etc.
+
+By default, NLB target group attribute `proxy_protocol_v2.client_to_server.header_placement` has the value `on_first_ack_with_payload`. You need to contact AWS support to change it to `on_first_ack` instead.
+
+Just to be clear, you need this fix only if your server is designed to speak first.
diff --git a/vendor/github.com/pires/go-proxyproto/addr_proto.go b/vendor/github.com/pires/go-proxyproto/addr_proto.go
new file mode 100644
index 0000000000000..d254fc41317c4
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/addr_proto.go
@@ -0,0 +1,62 @@
+package proxyproto
+
+// AddressFamilyAndProtocol represents address family and transport protocol.
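+// The high nibble of the byte carries the address family and the low nibble
+// the transport protocol; the Is* helpers below each mask one nibble.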
+type AddressFamilyAndProtocol byte + +const ( + UNSPEC AddressFamilyAndProtocol = '\x00' + TCPv4 AddressFamilyAndProtocol = '\x11' + UDPv4 AddressFamilyAndProtocol = '\x12' + TCPv6 AddressFamilyAndProtocol = '\x21' + UDPv6 AddressFamilyAndProtocol = '\x22' + UnixStream AddressFamilyAndProtocol = '\x31' + UnixDatagram AddressFamilyAndProtocol = '\x32' +) + +// IsIPv4 returns true if the address family is IPv4 (AF_INET4), false otherwise. +func (ap AddressFamilyAndProtocol) IsIPv4() bool { + return ap&0xF0 == 0x10 +} + +// IsIPv6 returns true if the address family is IPv6 (AF_INET6), false otherwise. +func (ap AddressFamilyAndProtocol) IsIPv6() bool { + return ap&0xF0 == 0x20 +} + +// IsUnix returns true if the address family is UNIX (AF_UNIX), false otherwise. +func (ap AddressFamilyAndProtocol) IsUnix() bool { + return ap&0xF0 == 0x30 +} + +// IsStream returns true if the transport protocol is TCP or STREAM (SOCK_STREAM), false otherwise. +func (ap AddressFamilyAndProtocol) IsStream() bool { + return ap&0x0F == 0x01 +} + +// IsDatagram returns true if the transport protocol is UDP or DGRAM (SOCK_DGRAM), false otherwise. +func (ap AddressFamilyAndProtocol) IsDatagram() bool { + return ap&0x0F == 0x02 +} + +// IsUnspec returns true if the transport protocol or address family is unspecified, false otherwise. +func (ap AddressFamilyAndProtocol) IsUnspec() bool { + return (ap&0xF0 == 0x00) || (ap&0x0F == 0x00) +} + +func (ap AddressFamilyAndProtocol) toByte() byte { + if ap.IsIPv4() && ap.IsStream() { + return byte(TCPv4) + } else if ap.IsIPv4() && ap.IsDatagram() { + return byte(UDPv4) + } else if ap.IsIPv6() && ap.IsStream() { + return byte(TCPv6) + } else if ap.IsIPv6() && ap.IsDatagram() { + return byte(UDPv6) + } else if ap.IsUnix() && ap.IsStream() { + return byte(UnixStream) + } else if ap.IsUnix() && ap.IsDatagram() { + return byte(UnixDatagram) + } + + return byte(UNSPEC) +} diff --git a/vendor/github.com/pires/go-proxyproto/header.go b/vendor/github.com/pires/go-proxyproto/header.go new file mode 100644 index 0000000000000..81ebeb387eb1e --- /dev/null +++ b/vendor/github.com/pires/go-proxyproto/header.go @@ -0,0 +1,280 @@ +// Package proxyproto implements Proxy Protocol (v1 and v2) parser and writer, as per specification: +// https://www.haproxy.org/download/2.3/doc/proxy-protocol.txt +package proxyproto + +import ( + "bufio" + "bytes" + "errors" + "io" + "net" + "time" +) + +var ( + // Protocol + SIGV1 = []byte{'\x50', '\x52', '\x4F', '\x58', '\x59'} + SIGV2 = []byte{'\x0D', '\x0A', '\x0D', '\x0A', '\x00', '\x0D', '\x0A', '\x51', '\x55', '\x49', '\x54', '\x0A'} + + ErrCantReadVersion1Header = errors.New("proxyproto: can't read version 1 header") + ErrVersion1HeaderTooLong = errors.New("proxyproto: version 1 header must be 107 bytes or less") + ErrLineMustEndWithCrlf = errors.New("proxyproto: version 1 header is invalid, must end with \\r\\n") + ErrCantReadProtocolVersionAndCommand = errors.New("proxyproto: can't read proxy protocol version and command") + ErrCantReadAddressFamilyAndProtocol = errors.New("proxyproto: can't read address family or protocol") + ErrCantReadLength = errors.New("proxyproto: can't read length") + ErrCantResolveSourceUnixAddress = errors.New("proxyproto: can't resolve source Unix address") + ErrCantResolveDestinationUnixAddress = errors.New("proxyproto: can't resolve destination Unix address") + ErrNoProxyProtocol = errors.New("proxyproto: proxy protocol signature not present") + ErrUnknownProxyProtocolVersion = errors.New("proxyproto: unknown proxy protocol 
version") + ErrUnsupportedProtocolVersionAndCommand = errors.New("proxyproto: unsupported proxy protocol version and command") + ErrUnsupportedAddressFamilyAndProtocol = errors.New("proxyproto: unsupported address family and protocol") + ErrInvalidLength = errors.New("proxyproto: invalid length") + ErrInvalidAddress = errors.New("proxyproto: invalid address") + ErrInvalidPortNumber = errors.New("proxyproto: invalid port number") + ErrSuperfluousProxyHeader = errors.New("proxyproto: upstream connection sent PROXY header but isn't allowed to send one") +) + +// Header is the placeholder for proxy protocol header. +type Header struct { + Version byte + Command ProtocolVersionAndCommand + TransportProtocol AddressFamilyAndProtocol + SourceAddr net.Addr + DestinationAddr net.Addr + rawTLVs []byte +} + +// HeaderProxyFromAddrs creates a new PROXY header from a source and a +// destination address. If version is zero, the latest protocol version is +// used. +// +// The header is filled on a best-effort basis: if hints cannot be inferred +// from the provided addresses, the header will be left unspecified. +func HeaderProxyFromAddrs(version byte, sourceAddr, destAddr net.Addr) *Header { + if version < 1 || version > 2 { + version = 2 + } + h := &Header{ + Version: version, + Command: LOCAL, + TransportProtocol: UNSPEC, + } + switch sourceAddr := sourceAddr.(type) { + case *net.TCPAddr: + if _, ok := destAddr.(*net.TCPAddr); !ok { + break + } + if len(sourceAddr.IP.To4()) == net.IPv4len { + h.TransportProtocol = TCPv4 + } else if len(sourceAddr.IP) == net.IPv6len { + h.TransportProtocol = TCPv6 + } + case *net.UDPAddr: + if _, ok := destAddr.(*net.UDPAddr); !ok { + break + } + if len(sourceAddr.IP.To4()) == net.IPv4len { + h.TransportProtocol = UDPv4 + } else if len(sourceAddr.IP) == net.IPv6len { + h.TransportProtocol = UDPv6 + } + case *net.UnixAddr: + if _, ok := destAddr.(*net.UnixAddr); !ok { + break + } + switch sourceAddr.Net { + case "unix": + h.TransportProtocol = UnixStream + case "unixgram": + h.TransportProtocol = UnixDatagram + } + } + if h.TransportProtocol != UNSPEC { + h.Command = PROXY + h.SourceAddr = sourceAddr + h.DestinationAddr = destAddr + } + return h +} + +func (header *Header) TCPAddrs() (sourceAddr, destAddr *net.TCPAddr, ok bool) { + if !header.TransportProtocol.IsStream() { + return nil, nil, false + } + sourceAddr, sourceOK := header.SourceAddr.(*net.TCPAddr) + destAddr, destOK := header.DestinationAddr.(*net.TCPAddr) + return sourceAddr, destAddr, sourceOK && destOK +} + +func (header *Header) UDPAddrs() (sourceAddr, destAddr *net.UDPAddr, ok bool) { + if !header.TransportProtocol.IsDatagram() { + return nil, nil, false + } + sourceAddr, sourceOK := header.SourceAddr.(*net.UDPAddr) + destAddr, destOK := header.DestinationAddr.(*net.UDPAddr) + return sourceAddr, destAddr, sourceOK && destOK +} + +func (header *Header) UnixAddrs() (sourceAddr, destAddr *net.UnixAddr, ok bool) { + if !header.TransportProtocol.IsUnix() { + return nil, nil, false + } + sourceAddr, sourceOK := header.SourceAddr.(*net.UnixAddr) + destAddr, destOK := header.DestinationAddr.(*net.UnixAddr) + return sourceAddr, destAddr, sourceOK && destOK +} + +func (header *Header) IPs() (sourceIP, destIP net.IP, ok bool) { + if sourceAddr, destAddr, ok := header.TCPAddrs(); ok { + return sourceAddr.IP, destAddr.IP, true + } else if sourceAddr, destAddr, ok := header.UDPAddrs(); ok { + return sourceAddr.IP, destAddr.IP, true + } else { + return nil, nil, false + } +} + +func (header *Header) Ports() 
(sourcePort, destPort int, ok bool) {
+	if sourceAddr, destAddr, ok := header.TCPAddrs(); ok {
+		return sourceAddr.Port, destAddr.Port, true
+	} else if sourceAddr, destAddr, ok := header.UDPAddrs(); ok {
+		return sourceAddr.Port, destAddr.Port, true
+	} else {
+		return 0, 0, false
+	}
+}
+
+// EqualTo returns true if headers are equivalent, false otherwise.
+// Deprecated: use EqualsTo instead. This method will eventually be removed.
+func (header *Header) EqualTo(otherHeader *Header) bool {
+	return header.EqualsTo(otherHeader)
+}
+
+// EqualsTo returns true if headers are equivalent, false otherwise.
+func (header *Header) EqualsTo(otherHeader *Header) bool {
+	if otherHeader == nil {
+		return false
+	}
+	// TLVs only exist for version 2
+	if header.Version == 2 && !bytes.Equal(header.rawTLVs, otherHeader.rawTLVs) {
+		return false
+	}
+	if header.Version != otherHeader.Version || header.Command != otherHeader.Command || header.TransportProtocol != otherHeader.TransportProtocol {
+		return false
+	}
+	// Return early for header with LOCAL command, which contains no address information
+	if header.Command == LOCAL {
+		return true
+	}
+	return header.SourceAddr.String() == otherHeader.SourceAddr.String() &&
+		header.DestinationAddr.String() == otherHeader.DestinationAddr.String()
+}
+
+// WriteTo renders a proxy protocol header in its wire format and writes it to an io.Writer.
+func (header *Header) WriteTo(w io.Writer) (int64, error) {
+	buf, err := header.Format()
+	if err != nil {
+		return 0, err
+	}
+
+	return bytes.NewBuffer(buf).WriteTo(w)
+}
+
+// Format renders a proxy protocol header in the format written over the wire.
+func (header *Header) Format() ([]byte, error) {
+	switch header.Version {
+	case 1:
+		return header.formatVersion1()
+	case 2:
+		return header.formatVersion2()
+	default:
+		return nil, ErrUnknownProxyProtocolVersion
+	}
+}
+
+// TLVs returns the TLVs stored in this header, if they exist. TLVs are optional for v2 of the protocol.
+func (header *Header) TLVs() ([]TLV, error) {
+	return SplitTLVs(header.rawTLVs)
+}
+
+// SetTLVs sets the TLVs stored in this header. This method replaces any
+// previous TLV.
+func (header *Header) SetTLVs(tlvs []TLV) error {
+	raw, err := JoinTLVs(tlvs)
+	if err != nil {
+		return err
+	}
+	header.rawTLVs = raw
+	return nil
+}
+
+// Read identifies the proxy protocol version and reads the remainder of
+// the header accordingly.
+//
+// If the proxy protocol header signature is not present, the reader buffer remains untouched
+// and is safe for reading outside of this code.
+//
+// If the proxy protocol header signature is present but an error is raised while processing
+// the remaining header, assume the reader buffer to be in a corrupt state.
+// Also, this operation will block until enough bytes are available for peeking.
+func Read(reader *bufio.Reader) (*Header, error) {
+	// In order to improve speed for small non-PROXYed packets, take a peek at the first byte alone.
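+	// 'P' (0x50) opens the v1 signature "PROXY" and 0x0D opens the v2
+	// signature, so comparing a single byte rules out most non-proxied
+	// connections before committing to the longer 5-byte and 12-byte peeks below.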
+ b1, err := reader.Peek(1) + if err != nil { + if err == io.EOF { + return nil, ErrNoProxyProtocol + } + return nil, err + } + + if bytes.Equal(b1[:1], SIGV1[:1]) || bytes.Equal(b1[:1], SIGV2[:1]) { + signature, err := reader.Peek(5) + if err != nil { + if err == io.EOF { + return nil, ErrNoProxyProtocol + } + return nil, err + } + if bytes.Equal(signature[:5], SIGV1) { + return parseVersion1(reader) + } + + signature, err = reader.Peek(12) + if err != nil { + if err == io.EOF { + return nil, ErrNoProxyProtocol + } + return nil, err + } + if bytes.Equal(signature[:12], SIGV2) { + return parseVersion2(reader) + } + } + + return nil, ErrNoProxyProtocol +} + +// ReadTimeout acts as Read but takes a timeout. If that timeout is reached, it's assumed +// there's no proxy protocol header. +func ReadTimeout(reader *bufio.Reader, timeout time.Duration) (*Header, error) { + type header struct { + h *Header + e error + } + read := make(chan *header, 1) + + go func() { + h := &header{} + h.h, h.e = Read(reader) + read <- h + }() + + timer := time.NewTimer(timeout) + select { + case result := <-read: + timer.Stop() + return result.h, result.e + case <-timer.C: + return nil, ErrNoProxyProtocol + } +} diff --git a/vendor/github.com/pires/go-proxyproto/policy.go b/vendor/github.com/pires/go-proxyproto/policy.go new file mode 100644 index 0000000000000..6d505be4c8037 --- /dev/null +++ b/vendor/github.com/pires/go-proxyproto/policy.go @@ -0,0 +1,172 @@ +package proxyproto + +import ( + "fmt" + "net" + "strings" +) + +// PolicyFunc can be used to decide whether to trust the PROXY info from +// upstream. If set, the connecting address is passed in as an argument. +// +// See below for the different policies. +// +// In case an error is returned the connection is denied. +type PolicyFunc func(upstream net.Addr) (Policy, error) + +// Policy defines how a connection with a PROXY header address is treated. +type Policy int + +const ( + // USE address from PROXY header + USE Policy = iota + // IGNORE address from PROXY header, but accept connection + IGNORE + // REJECT connection when PROXY header is sent + // Note: even though the first read on the connection returns an error if + // a PROXY header is present, subsequent reads do not. It is the task of + // the code using the connection to handle that case properly. + REJECT + // REQUIRE connection to send PROXY header, reject if not present + // Note: even though the first read on the connection returns an error if + // a PROXY header is not present, subsequent reads do not. It is the task + // of the code using the connection to handle that case properly. + REQUIRE + // SKIP accepts a connection without requiring the PROXY header + // Note: an example usage can be found in the SkipProxyHeaderForCIDR + // function. + SKIP +) + +// SkipProxyHeaderForCIDR returns a PolicyFunc which can be used to accept a +// connection from a skipHeaderCIDR without requiring a PROXY header, e.g. +// Kubernetes pods local traffic. The def is a policy to use when an upstream +// address doesn't match the skipHeaderCIDR. 
+func SkipProxyHeaderForCIDR(skipHeaderCIDR *net.IPNet, def Policy) PolicyFunc { + return func(upstream net.Addr) (Policy, error) { + ip, err := ipFromAddr(upstream) + if err != nil { + return def, err + } + + if skipHeaderCIDR != nil && skipHeaderCIDR.Contains(ip) { + return SKIP, nil + } + + return def, nil + } +} + +// WithPolicy adds given policy to a connection when passed as option to NewConn() +func WithPolicy(p Policy) func(*Conn) { + return func(c *Conn) { + c.ProxyHeaderPolicy = p + } +} + +// LaxWhiteListPolicy returns a PolicyFunc which decides whether the +// upstream ip is allowed to send a proxy header based on a list of allowed +// IP addresses and IP ranges. In case upstream IP is not in list the proxy +// header will be ignored. If one of the provided IP addresses or IP ranges +// is invalid it will return an error instead of a PolicyFunc. +func LaxWhiteListPolicy(allowed []string) (PolicyFunc, error) { + allowFrom, err := parse(allowed) + if err != nil { + return nil, err + } + + return whitelistPolicy(allowFrom, IGNORE), nil +} + +// MustLaxWhiteListPolicy returns a LaxWhiteListPolicy but will panic if one +// of the provided IP addresses or IP ranges is invalid. +func MustLaxWhiteListPolicy(allowed []string) PolicyFunc { + pfunc, err := LaxWhiteListPolicy(allowed) + if err != nil { + panic(err) + } + + return pfunc +} + +// StrictWhiteListPolicy returns a PolicyFunc which decides whether the +// upstream ip is allowed to send a proxy header based on a list of allowed +// IP addresses and IP ranges. In case upstream IP is not in list reading on +// the connection will be refused on the first read. Please note: subsequent +// reads do not error. It is the task of the code using the connection to +// handle that case properly. If one of the provided IP addresses or IP +// ranges is invalid it will return an error instead of a PolicyFunc. +func StrictWhiteListPolicy(allowed []string) (PolicyFunc, error) { + allowFrom, err := parse(allowed) + if err != nil { + return nil, err + } + + return whitelistPolicy(allowFrom, REJECT), nil +} + +// MustStrictWhiteListPolicy returns a StrictWhiteListPolicy but will panic +// if one of the provided IP addresses or IP ranges is invalid. 
+func MustStrictWhiteListPolicy(allowed []string) PolicyFunc {
+	pfunc, err := StrictWhiteListPolicy(allowed)
+	if err != nil {
+		panic(err)
+	}
+
+	return pfunc
+}
+
+func whitelistPolicy(allowed []func(net.IP) bool, def Policy) PolicyFunc {
+	return func(upstream net.Addr) (Policy, error) {
+		upstreamIP, err := ipFromAddr(upstream)
+		if err != nil {
+			// something is wrong with the source IP, better reject the connection
+			return REJECT, err
+		}
+
+		for _, allowFrom := range allowed {
+			if allowFrom(upstreamIP) {
+				return USE, nil
+			}
+		}
+
+		return def, nil
+	}
+}
+
+func parse(allowed []string) ([]func(net.IP) bool, error) {
+	a := make([]func(net.IP) bool, len(allowed))
+	for i, allowFrom := range allowed {
+		if strings.LastIndex(allowFrom, "/") > 0 {
+			_, ipRange, err := net.ParseCIDR(allowFrom)
+			if err != nil {
+				return nil, fmt.Errorf("proxyproto: given string %q is not a valid IP range: %v", allowFrom, err)
+			}
+
+			a[i] = ipRange.Contains
+		} else {
+			allowed := net.ParseIP(allowFrom)
+			if allowed == nil {
+				return nil, fmt.Errorf("proxyproto: given string %q is not a valid IP address", allowFrom)
+			}
+
+			a[i] = allowed.Equal
+		}
+	}
+
+	return a, nil
+}
+
+func ipFromAddr(upstream net.Addr) (net.IP, error) {
+	upstreamString, _, err := net.SplitHostPort(upstream.String())
+	if err != nil {
+		return nil, err
+	}
+
+	upstreamIP := net.ParseIP(upstreamString)
+	if nil == upstreamIP {
+		return nil, fmt.Errorf("proxyproto: invalid IP address")
+	}
+
+	return upstreamIP, nil
+}
diff --git a/vendor/github.com/pires/go-proxyproto/protocol.go b/vendor/github.com/pires/go-proxyproto/protocol.go
new file mode 100644
index 0000000000000..4ce16a2765bad
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/protocol.go
@@ -0,0 +1,319 @@
+package proxyproto
+
+import (
+	"bufio"
+	"io"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// DefaultReadHeaderTimeout is how long header processing waits for the header to
+// be read from the wire, if Listener.ReadHeaderTimeout is not set.
+// It's kept as a global variable to make it easier to find and override,
+// e.g. go build -ldflags -X "github.com/pires/go-proxyproto.DefaultReadHeaderTimeout=1s"
+var DefaultReadHeaderTimeout = 10 * time.Second
+
+// Listener is used to wrap an underlying listener,
+// whose connections may be using the HAProxy Proxy Protocol.
+// If the connection is using the protocol, the RemoteAddr() will return
+// the correct client address. ReadHeaderTimeout will be applied to all
+// connections in order to prevent blocking operations. If no ReadHeaderTimeout
+// is set, DefaultReadHeaderTimeout (10 seconds) will be used. This can be
+// disabled by setting the timeout to < 0.
+type Listener struct {
+	Listener          net.Listener
+	Policy            PolicyFunc
+	ValidateHeader    Validator
+	ReadHeaderTimeout time.Duration
+}
+
+// Conn is used to wrap an underlying connection which
+// may be speaking the Proxy Protocol. If it is, the RemoteAddr() will
+// return the address of the client instead of the proxy address. Each connection
+// will have its own readHeaderTimeout and readDeadline set by the Accept() call.
+type Conn struct {
+	readDeadline      atomic.Value // time.Time
+	once              sync.Once
+	readErr           error
+	conn              net.Conn
+	Validate          Validator
+	bufReader         *bufio.Reader
+	header            *Header
+	ProxyHeaderPolicy Policy
+	readHeaderTimeout time.Duration
+}
+
+// Validator receives a header and decides whether it is a valid one.
+// In case the header is not deemed valid it should return an error.
+type Validator func(*Header) error
+
+// ValidateHeader adds given validator for proxy headers to a connection when passed as option to NewConn()
+func ValidateHeader(v Validator) func(*Conn) {
+	return func(c *Conn) {
+		if v != nil {
+			c.Validate = v
+		}
+	}
+}
+
+// Accept waits for and returns the next connection to the listener.
+func (p *Listener) Accept() (net.Conn, error) {
+	// Get the underlying connection
+	conn, err := p.Listener.Accept()
+	if err != nil {
+		return nil, err
+	}
+
+	proxyHeaderPolicy := USE
+	if p.Policy != nil {
+		proxyHeaderPolicy, err = p.Policy(conn.RemoteAddr())
+		if err != nil {
+			// can't decide the policy, we can't accept the connection
+			conn.Close()
+			return nil, err
+		}
+		// Handle a connection as a regular one
+		if proxyHeaderPolicy == SKIP {
+			return conn, nil
+		}
+	}
+
+	newConn := NewConn(
+		conn,
+		WithPolicy(proxyHeaderPolicy),
+		ValidateHeader(p.ValidateHeader),
+	)
+
+	// If the ReadHeaderTimeout for the listener is unset, use the default timeout.
+	if p.ReadHeaderTimeout == 0 {
+		p.ReadHeaderTimeout = DefaultReadHeaderTimeout
+	}
+
+	// Set the readHeaderTimeout of the new conn to the value of the listener
+	newConn.readHeaderTimeout = p.ReadHeaderTimeout
+
+	return newConn, nil
+}
+
+// Close closes the underlying listener.
+func (p *Listener) Close() error {
+	return p.Listener.Close()
+}
+
+// Addr returns the underlying listener's network address.
+func (p *Listener) Addr() net.Addr {
+	return p.Listener.Addr()
+}
+
+// NewConn is used to wrap a net.Conn that may be speaking
+// the proxy protocol into a proxyproto.Conn
+func NewConn(conn net.Conn, opts ...func(*Conn)) *Conn {
+	pConn := &Conn{
+		bufReader: bufio.NewReader(conn),
+		conn:      conn,
+	}
+
+	for _, opt := range opts {
+		opt(pConn)
+	}
+
+	return pConn
+}
+
+// Read checks for the proxy protocol header when doing
+// the initial scan. If there is an error parsing the header,
+// it is returned.
+func (p *Conn) Read(b []byte) (int, error) {
+	p.once.Do(func() {
+		p.readErr = p.readHeader()
+	})
+	if p.readErr != nil {
+		return 0, p.readErr
+	}
+
+	return p.bufReader.Read(b)
+}
+
+// Write wraps original conn.Write
+func (p *Conn) Write(b []byte) (int, error) {
+	return p.conn.Write(b)
+}
+
+// Close wraps original conn.Close
+func (p *Conn) Close() error {
+	return p.conn.Close()
+}
+
+// ProxyHeader returns the proxy protocol header, if any. If an error occurs
+// while reading the proxy header, nil is returned.
+func (p *Conn) ProxyHeader() *Header {
+	p.once.Do(func() { p.readErr = p.readHeader() })
+	return p.header
+}
+
+// LocalAddr returns the address of the server if the proxy
+// protocol is being used, otherwise just returns the address of
+// the socket server. In case an error happens on reading the
+// proxy header the original LocalAddr is returned, not the one
+// from the proxy header even if the proxy header itself is
+// syntactically correct.
+func (p *Conn) LocalAddr() net.Addr {
+	p.once.Do(func() { p.readErr = p.readHeader() })
+	if p.header == nil || p.header.Command.IsLocal() || p.readErr != nil {
+		return p.conn.LocalAddr()
+	}
+
+	return p.header.DestinationAddr
+}
+
+// RemoteAddr returns the address of the client if the proxy
+// protocol is being used, otherwise just returns the address of
+// the socket peer. In case an error happens on reading the
+// proxy header the original RemoteAddr is returned, not the one
+// from the proxy header even if the proxy header itself is
+// syntactically correct.
+func (p *Conn) RemoteAddr() net.Addr { + p.once.Do(func() { p.readErr = p.readHeader() }) + if p.header == nil || p.header.Command.IsLocal() || p.readErr != nil { + return p.conn.RemoteAddr() + } + + return p.header.SourceAddr +} + +// Raw returns the underlying connection which can be casted to +// a concrete type, allowing access to specialized functions. +// +// Use this ONLY if you know exactly what you are doing. +func (p *Conn) Raw() net.Conn { + return p.conn +} + +// TCPConn returns the underlying TCP connection, +// allowing access to specialized functions. +// +// Use this ONLY if you know exactly what you are doing. +func (p *Conn) TCPConn() (conn *net.TCPConn, ok bool) { + conn, ok = p.conn.(*net.TCPConn) + return +} + +// UnixConn returns the underlying Unix socket connection, +// allowing access to specialized functions. +// +// Use this ONLY if you know exactly what you are doing. +func (p *Conn) UnixConn() (conn *net.UnixConn, ok bool) { + conn, ok = p.conn.(*net.UnixConn) + return +} + +// UDPConn returns the underlying UDP connection, +// allowing access to specialized functions. +// +// Use this ONLY if you know exactly what you are doing. +func (p *Conn) UDPConn() (conn *net.UDPConn, ok bool) { + conn, ok = p.conn.(*net.UDPConn) + return +} + +// SetDeadline wraps original conn.SetDeadline +func (p *Conn) SetDeadline(t time.Time) error { + p.readDeadline.Store(t) + return p.conn.SetDeadline(t) +} + +// SetReadDeadline wraps original conn.SetReadDeadline +func (p *Conn) SetReadDeadline(t time.Time) error { + // Set a local var that tells us the desired deadline. This is + // needed in order to reset the read deadline to the one that is + // desired by the user, rather than an empty deadline. + p.readDeadline.Store(t) + return p.conn.SetReadDeadline(t) +} + +// SetWriteDeadline wraps original conn.SetWriteDeadline +func (p *Conn) SetWriteDeadline(t time.Time) error { + return p.conn.SetWriteDeadline(t) +} + +func (p *Conn) readHeader() error { + // If the connection's readHeaderTimeout is more than 0, + // push our deadline back to now plus the timeout. This should only + // run on the connection, as we don't want to override the previous + // read deadline the user may have used. + if p.readHeaderTimeout > 0 { + if err := p.conn.SetReadDeadline(time.Now().Add(p.readHeaderTimeout)); err != nil { + return err + } + } + + header, err := Read(p.bufReader) + + // If the connection's readHeaderTimeout is more than 0, undo the change to the + // deadline that we made above. Because we retain the readDeadline as part of our + // SetReadDeadline override, we know the user's desired deadline so we use that. + // Therefore, we check whether the error is a net.Timeout and if it is, we decide + // the proxy proto does not exist and set the error accordingly. + if p.readHeaderTimeout > 0 { + t := p.readDeadline.Load() + if t == nil { + t = time.Time{} + } + if err := p.conn.SetReadDeadline(t.(time.Time)); err != nil { + return err + } + if netErr, ok := err.(net.Error); ok && netErr.Timeout() { + err = ErrNoProxyProtocol + } + } + + // For the purpose of this wrapper shamefully stolen from armon/go-proxyproto + // let's act as if there was no error when PROXY protocol is not present. 
+	if err == ErrNoProxyProtocol {
+		// but not if it is required that the connection has one
+		if p.ProxyHeaderPolicy == REQUIRE {
+			return err
+		}
+
+		return nil
+	}
+
+	// proxy protocol header was found
+	if err == nil && header != nil {
+		switch p.ProxyHeaderPolicy {
+		case REJECT:
+			// this connection is not allowed to send one
+			return ErrSuperfluousProxyHeader
+		case USE, REQUIRE:
+			if p.Validate != nil {
+				err = p.Validate(header)
+				if err != nil {
+					return err
+				}
+			}
+
+			p.header = header
+		}
+	}
+
+	return err
+}
+
+// ReadFrom implements the io.ReaderFrom ReadFrom method
+func (p *Conn) ReadFrom(r io.Reader) (int64, error) {
+	if rf, ok := p.conn.(io.ReaderFrom); ok {
+		return rf.ReadFrom(r)
+	}
+	return io.Copy(p.conn, r)
+}
+
+// WriteTo implements io.WriterTo
+func (p *Conn) WriteTo(w io.Writer) (int64, error) {
+	p.once.Do(func() { p.readErr = p.readHeader() })
+	if p.readErr != nil {
+		return 0, p.readErr
+	}
+	return p.bufReader.WriteTo(w)
+}
diff --git a/vendor/github.com/pires/go-proxyproto/tlv.go b/vendor/github.com/pires/go-proxyproto/tlv.go
new file mode 100644
index 0000000000000..7cc2fb376ed78
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/tlv.go
@@ -0,0 +1,132 @@
+// Type-Length-Value splitting and parsing for proxy protocol V2.
+// See spec https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt sections 2.2 to 2.7.
+
+package proxyproto
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+)
+
+const (
+	// Section 2.2
+	PP2_TYPE_ALPN           PP2Type = 0x01
+	PP2_TYPE_AUTHORITY      PP2Type = 0x02
+	PP2_TYPE_CRC32C         PP2Type = 0x03
+	PP2_TYPE_NOOP           PP2Type = 0x04
+	PP2_TYPE_UNIQUE_ID      PP2Type = 0x05
+	PP2_TYPE_SSL            PP2Type = 0x20
+	PP2_SUBTYPE_SSL_VERSION PP2Type = 0x21
+	PP2_SUBTYPE_SSL_CN      PP2Type = 0x22
+	PP2_SUBTYPE_SSL_CIPHER  PP2Type = 0x23
+	PP2_SUBTYPE_SSL_SIG_ALG PP2Type = 0x24
+	PP2_SUBTYPE_SSL_KEY_ALG PP2Type = 0x25
+	PP2_TYPE_NETNS          PP2Type = 0x30
+
+	// Section 2.2.7, reserved types
+	PP2_TYPE_MIN_CUSTOM     PP2Type = 0xE0
+	PP2_TYPE_MAX_CUSTOM     PP2Type = 0xEF
+	PP2_TYPE_MIN_EXPERIMENT PP2Type = 0xF0
+	PP2_TYPE_MAX_EXPERIMENT PP2Type = 0xF7
+	PP2_TYPE_MIN_FUTURE     PP2Type = 0xF8
+	PP2_TYPE_MAX_FUTURE     PP2Type = 0xFF
+)
+
+var (
+	ErrTruncatedTLV    = errors.New("proxyproto: truncated TLV")
+	ErrMalformedTLV    = errors.New("proxyproto: malformed TLV Value")
+	ErrIncompatibleTLV = errors.New("proxyproto: incompatible TLV type")
+)
+
+// PP2Type is the proxy protocol v2 type
+type PP2Type byte
+
+// TLV is an uninterpreted Type-Length-Value for V2 protocol, see section 2.2
+type TLV struct {
+	Type  PP2Type
+	Value []byte
+}
+
+// SplitTLVs splits the Type-Length-Value vector, returns the vector or an error.
+func SplitTLVs(raw []byte) ([]TLV, error) {
+	var tlvs []TLV
+	for i := 0; i < len(raw); {
+		tlv := TLV{
+			Type: PP2Type(raw[i]),
+		}
+		if len(raw)-i <= 2 {
+			return nil, ErrTruncatedTLV
+		}
+		tlvLen := int(binary.BigEndian.Uint16(raw[i+1 : i+3])) // Max length = 65K
+		i += 3
+		if i+tlvLen > len(raw) {
+			return nil, ErrTruncatedTLV
+		}
+		// Ignore no-op padding
+		if tlv.Type != PP2_TYPE_NOOP {
+			tlv.Value = make([]byte, tlvLen)
+			copy(tlv.Value, raw[i:i+tlvLen])
+		}
+		i += tlvLen
+		tlvs = append(tlvs, tlv)
+	}
+	return tlvs, nil
+}
+
+// JoinTLVs joins multiple Type-Length-Value records.
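+//
+// An illustrative round trip (not from the upstream docs): each record is
+// emitted as a type byte, a big-endian 16-bit length, then the value, so
+// SplitTLVs can recover exactly what JoinTLVs produced:
+//
+//	raw, _ := JoinTLVs([]TLV{{Type: PP2_TYPE_AUTHORITY, Value: []byte("example.org")}})
+//	tlvs, _ := SplitTLVs(raw) // tlvs[0].Value == []byte("example.org")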
+func JoinTLVs(tlvs []TLV) ([]byte, error) {
+	var raw []byte
+	for _, tlv := range tlvs {
+		if len(tlv.Value) > math.MaxUint16 {
+			return nil, fmt.Errorf("proxyproto: cannot format TLV %v with length %d", tlv.Type, len(tlv.Value))
+		}
+		var length [2]byte
+		binary.BigEndian.PutUint16(length[:], uint16(len(tlv.Value)))
+		raw = append(raw, byte(tlv.Type))
+		raw = append(raw, length[:]...)
+		raw = append(raw, tlv.Value...)
+	}
+	return raw, nil
+}
+
+// Registered is true if the type is registered in the spec, see section 2.2
+func (p PP2Type) Registered() bool {
+	switch p {
+	case PP2_TYPE_ALPN,
+		PP2_TYPE_AUTHORITY,
+		PP2_TYPE_CRC32C,
+		PP2_TYPE_NOOP,
+		PP2_TYPE_UNIQUE_ID,
+		PP2_TYPE_SSL,
+		PP2_SUBTYPE_SSL_VERSION,
+		PP2_SUBTYPE_SSL_CN,
+		PP2_SUBTYPE_SSL_CIPHER,
+		PP2_SUBTYPE_SSL_SIG_ALG,
+		PP2_SUBTYPE_SSL_KEY_ALG,
+		PP2_TYPE_NETNS:
+		return true
+	}
+	return false
+}
+
+// App is true if the type is reserved for application specific data, see section 2.2.7
+func (p PP2Type) App() bool {
+	return p >= PP2_TYPE_MIN_CUSTOM && p <= PP2_TYPE_MAX_CUSTOM
+}
+
+// Experiment is true if the type is reserved for temporary experimental use by application developers, see section 2.2.7
+func (p PP2Type) Experiment() bool {
+	return p >= PP2_TYPE_MIN_EXPERIMENT && p <= PP2_TYPE_MAX_EXPERIMENT
+}
+
+// Future is true if the type is reserved for future use, see section 2.2.7
+func (p PP2Type) Future() bool {
+	return p >= PP2_TYPE_MIN_FUTURE
+}
+
+// Spec is true if the type is covered by the spec, see section 2.2 and 2.2.7
+func (p PP2Type) Spec() bool {
+	return p.Registered() || p.App() || p.Experiment() || p.Future()
+}
diff --git a/vendor/github.com/pires/go-proxyproto/v1.go b/vendor/github.com/pires/go-proxyproto/v1.go
new file mode 100644
index 0000000000000..0d34ba5264e58
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/v1.go
@@ -0,0 +1,243 @@
+package proxyproto
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"net"
+	"net/netip"
+	"strconv"
+	"strings"
+)
+
+const (
+	crlf      = "\r\n"
+	separator = " "
+)
+
+func initVersion1() *Header {
+	header := new(Header)
+	header.Version = 1
+	// Command doesn't exist in v1
+	header.Command = PROXY
+	return header
+}
+
+func parseVersion1(reader *bufio.Reader) (*Header, error) {
+	// The header cannot be more than 107 bytes long. Per spec:
+	//
+	//   (...)
+	//   - worst case (optional fields set to 0xff) :
+	//     "PROXY UNKNOWN ffff:f...f:ffff ffff:f...f:ffff 65535 65535\r\n"
+	//     => 5 + 1 + 7 + 1 + 39 + 1 + 39 + 1 + 5 + 1 + 5 + 2 = 107 chars
+	//
+	//   So a 108-byte buffer is always enough to store all the line and a
+	//   trailing zero for string processing.
+	//
+	// It must also be CRLF terminated, as above. The header does not otherwise
+	// contain a CR or LF byte.
+	//
+	// ISSUE #69
+	// We can't use Peek here as it will block trying to fill the buffer, which
+	// will never happen if the header is TCP4 or TCP6 (max. 56 and 104 bytes
+	// respectively) and the server is expected to speak first.
+	//
+	// Similarly, we can't use ReadString or ReadBytes as these will keep reading
+	// until the delimiter is found; an abusive client could easily disrupt a
+	// server by sending a large amount of data that does not contain an LF byte.
+	// Another means of attack would be to start connections and simply not send
+	// data after the initial PROXY signature bytes, accumulating a large
+	// number of blocked goroutines on the server. ReadSlice will also block for
+	// a delimiter when the internal buffer does not fill up.
+ // + // A plain Read is also problematic since we risk reading past the end of the + // header without being able to easily put the excess bytes back into the reader's + // buffer (with the current implementation's design). + // + // So we use a ReadByte loop, which solves the overflow problem and avoids + // reading beyond the end of the header. However, we need one more trick to harden + // against partial header attacks (slow loris) - per spec: + // + // (..) The sender must always ensure that the header is sent at once, so that + // the transport layer maintains atomicity along the path to the receiver. The + // receiver may be tolerant to partial headers or may simply drop the connection + // when receiving a partial header. Recommendation is to be tolerant, but + // implementation constraints may not always easily permit this. + // + // We are subject to such implementation constraints. So we return an error if + // the header cannot be fully extracted with a single read of the underlying + // reader. + buf := make([]byte, 0, 107) + for { + b, err := reader.ReadByte() + if err != nil { + return nil, fmt.Errorf(ErrCantReadVersion1Header.Error()+": %v", err) + } + buf = append(buf, b) + if b == '\n' { + // End of header found + break + } + if len(buf) == 107 { + // No delimiter in first 107 bytes + return nil, ErrVersion1HeaderTooLong + } + if reader.Buffered() == 0 { + // Header was not buffered in a single read. Since we can't + // differentiate between genuine slow writers and DoS agents, + // we abort. On healthy networks, this should never happen. + return nil, ErrCantReadVersion1Header + } + } + + // Check for CR before LF. + if len(buf) < 2 || buf[len(buf)-2] != '\r' { + return nil, ErrLineMustEndWithCrlf + } + + // Check full signature. + tokens := strings.Split(string(buf[:len(buf)-2]), separator) + + // Expect at least 2 tokens: "PROXY" and the transport protocol. + if len(tokens) < 2 { + return nil, ErrCantReadAddressFamilyAndProtocol + } + + // Read address family and protocol + var transportProtocol AddressFamilyAndProtocol + switch tokens[1] { + case "TCP4": + transportProtocol = TCPv4 + case "TCP6": + transportProtocol = TCPv6 + case "UNKNOWN": + transportProtocol = UNSPEC // doesn't exist in v1 but fits UNKNOWN + default: + return nil, ErrCantReadAddressFamilyAndProtocol + } + + // Expect 6 tokens only when UNKNOWN is not present. + if transportProtocol != UNSPEC && len(tokens) < 6 { + return nil, ErrCantReadAddressFamilyAndProtocol + } + + // When a signature is found, allocate a v1 header with Command set to PROXY. + // Command doesn't exist in v1 but set it for other parts of this library + // to rely on it for determining connection details. + header := initVersion1() + + // Transport protocol has been processed already. 
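+	// (For orientation, a complete well-formed v1 line that reaches this
+	// point looks like "PROXY TCP4 192.0.2.1 192.0.2.2 12345 443\r\n";
+	// tokens[2..5] below are the source/destination addresses and ports.
+	// The example addresses are illustrative TEST-NET values.)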
+ header.TransportProtocol = transportProtocol + + // When UNKNOWN, set the command to LOCAL and return early + if header.TransportProtocol == UNSPEC { + header.Command = LOCAL + return header, nil + } + + // Otherwise, continue to read addresses and ports + sourceIP, err := parseV1IPAddress(header.TransportProtocol, tokens[2]) + if err != nil { + return nil, err + } + destIP, err := parseV1IPAddress(header.TransportProtocol, tokens[3]) + if err != nil { + return nil, err + } + sourcePort, err := parseV1PortNumber(tokens[4]) + if err != nil { + return nil, err + } + destPort, err := parseV1PortNumber(tokens[5]) + if err != nil { + return nil, err + } + header.SourceAddr = &net.TCPAddr{ + IP: sourceIP, + Port: sourcePort, + } + header.DestinationAddr = &net.TCPAddr{ + IP: destIP, + Port: destPort, + } + + return header, nil +} + +func (header *Header) formatVersion1() ([]byte, error) { + // As of version 1, only "TCP4" ( \x54 \x43 \x50 \x34 ) for TCP over IPv4, + // and "TCP6" ( \x54 \x43 \x50 \x36 ) for TCP over IPv6 are allowed. + var proto string + switch header.TransportProtocol { + case TCPv4: + proto = "TCP4" + case TCPv6: + proto = "TCP6" + default: + // Unknown connection (short form) + return []byte("PROXY UNKNOWN" + crlf), nil + } + + sourceAddr, sourceOK := header.SourceAddr.(*net.TCPAddr) + destAddr, destOK := header.DestinationAddr.(*net.TCPAddr) + if !sourceOK || !destOK { + return nil, ErrInvalidAddress + } + + sourceIP, destIP := sourceAddr.IP, destAddr.IP + switch header.TransportProtocol { + case TCPv4: + sourceIP = sourceIP.To4() + destIP = destIP.To4() + case TCPv6: + sourceIP = sourceIP.To16() + destIP = destIP.To16() + } + if sourceIP == nil || destIP == nil { + return nil, ErrInvalidAddress + } + + buf := bytes.NewBuffer(make([]byte, 0, 108)) + buf.Write(SIGV1) + buf.WriteString(separator) + buf.WriteString(proto) + buf.WriteString(separator) + buf.WriteString(sourceIP.String()) + buf.WriteString(separator) + buf.WriteString(destIP.String()) + buf.WriteString(separator) + buf.WriteString(strconv.Itoa(sourceAddr.Port)) + buf.WriteString(separator) + buf.WriteString(strconv.Itoa(destAddr.Port)) + buf.WriteString(crlf) + + return buf.Bytes(), nil +} + +func parseV1PortNumber(portStr string) (int, error) { + port, err := strconv.Atoi(portStr) + if err != nil || port < 0 || port > 65535 { + return 0, ErrInvalidPortNumber + } + return port, nil +} + +func parseV1IPAddress(protocol AddressFamilyAndProtocol, addrStr string) (net.IP, error) { + addr, err := netip.ParseAddr(addrStr) + if err != nil { + return nil, ErrInvalidAddress + } + + switch protocol { + case TCPv4: + if addr.Is4() { + return net.IP(addr.AsSlice()), nil + } + case TCPv6: + if addr.Is6() || addr.Is4In6() { + return net.IP(addr.AsSlice()), nil + } + } + + return nil, ErrInvalidAddress +} diff --git a/vendor/github.com/pires/go-proxyproto/v2.go b/vendor/github.com/pires/go-proxyproto/v2.go new file mode 100644 index 0000000000000..74bf3f0771451 --- /dev/null +++ b/vendor/github.com/pires/go-proxyproto/v2.go @@ -0,0 +1,285 @@ +package proxyproto + +import ( + "bufio" + "bytes" + "encoding/binary" + "errors" + "io" + "net" +) + +var ( + lengthUnspec = uint16(0) + lengthV4 = uint16(12) + lengthV6 = uint16(36) + lengthUnix = uint16(216) + lengthUnspecBytes = func() []byte { + a := make([]byte, 2) + binary.BigEndian.PutUint16(a, lengthUnspec) + return a + }() + lengthV4Bytes = func() []byte { + a := make([]byte, 2) + binary.BigEndian.PutUint16(a, lengthV4) + return a + }() + lengthV6Bytes = func() []byte { + a := 
make([]byte, 2) + binary.BigEndian.PutUint16(a, lengthV6) + return a + }() + lengthUnixBytes = func() []byte { + a := make([]byte, 2) + binary.BigEndian.PutUint16(a, lengthUnix) + return a + }() + errUint16Overflow = errors.New("proxyproto: uint16 overflow") +) + +type _ports struct { + SrcPort uint16 + DstPort uint16 +} + +type _addr4 struct { + Src [4]byte + Dst [4]byte + SrcPort uint16 + DstPort uint16 +} + +type _addr6 struct { + Src [16]byte + Dst [16]byte + _ports +} + +type _addrUnix struct { + Src [108]byte + Dst [108]byte +} + +func parseVersion2(reader *bufio.Reader) (header *Header, err error) { + // Skip first 12 bytes (signature) + for i := 0; i < 12; i++ { + if _, err = reader.ReadByte(); err != nil { + return nil, ErrCantReadProtocolVersionAndCommand + } + } + + header = new(Header) + header.Version = 2 + + // Read the 13th byte, protocol version and command + b13, err := reader.ReadByte() + if err != nil { + return nil, ErrCantReadProtocolVersionAndCommand + } + header.Command = ProtocolVersionAndCommand(b13) + if _, ok := supportedCommand[header.Command]; !ok { + return nil, ErrUnsupportedProtocolVersionAndCommand + } + + // Read the 14th byte, address family and protocol + b14, err := reader.ReadByte() + if err != nil { + return nil, ErrCantReadAddressFamilyAndProtocol + } + header.TransportProtocol = AddressFamilyAndProtocol(b14) + // UNSPEC is only supported when LOCAL is set. + if header.TransportProtocol == UNSPEC && header.Command != LOCAL { + return nil, ErrUnsupportedAddressFamilyAndProtocol + } + + // Make sure there are bytes available as specified in length + var length uint16 + if err := binary.Read(io.LimitReader(reader, 2), binary.BigEndian, &length); err != nil { + return nil, ErrCantReadLength + } + if !header.validateLength(length) { + return nil, ErrInvalidLength + } + + // Return early if the length is zero, which means that + // there's no address information and TLVs present for UNSPEC. + if length == 0 { + return header, nil + } + + if _, err := reader.Peek(int(length)); err != nil { + return nil, ErrInvalidLength + } + + // Length-limited reader for payload section + payloadReader := io.LimitReader(reader, int64(length)).(*io.LimitedReader) + + // Read addresses and ports for protocols other than UNSPEC. + // Ignore address information for UNSPEC, and skip straight to read TLVs, + // since the length is greater than zero. 
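+	// (Layout reminder: for TCP/UDP over IPv4 the 12-byte payload is
+	// src IP (4 bytes), dst IP (4 bytes), src port (2), dst port (2),
+	// all big-endian; IPv6 uses 16-byte addresses for a 36-byte payload.
+	// Anything left after the addresses, up to the declared length, is
+	// TLV data.)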
+ if header.TransportProtocol != UNSPEC { + if header.TransportProtocol.IsIPv4() { + var addr _addr4 + if err := binary.Read(payloadReader, binary.BigEndian, &addr); err != nil { + return nil, ErrInvalidAddress + } + header.SourceAddr = newIPAddr(header.TransportProtocol, addr.Src[:], addr.SrcPort) + header.DestinationAddr = newIPAddr(header.TransportProtocol, addr.Dst[:], addr.DstPort) + } else if header.TransportProtocol.IsIPv6() { + var addr _addr6 + if err := binary.Read(payloadReader, binary.BigEndian, &addr); err != nil { + return nil, ErrInvalidAddress + } + header.SourceAddr = newIPAddr(header.TransportProtocol, addr.Src[:], addr.SrcPort) + header.DestinationAddr = newIPAddr(header.TransportProtocol, addr.Dst[:], addr.DstPort) + } else if header.TransportProtocol.IsUnix() { + var addr _addrUnix + if err := binary.Read(payloadReader, binary.BigEndian, &addr); err != nil { + return nil, ErrInvalidAddress + } + + network := "unix" + if header.TransportProtocol.IsDatagram() { + network = "unixgram" + } + + header.SourceAddr = &net.UnixAddr{ + Net: network, + Name: parseUnixName(addr.Src[:]), + } + header.DestinationAddr = &net.UnixAddr{ + Net: network, + Name: parseUnixName(addr.Dst[:]), + } + } + } + + // Copy bytes for optional Type-Length-Value vector + header.rawTLVs = make([]byte, payloadReader.N) // Allocate minimum size slice + if _, err = io.ReadFull(payloadReader, header.rawTLVs); err != nil && err != io.EOF { + return nil, err + } + + return header, nil +} + +func (header *Header) formatVersion2() ([]byte, error) { + var buf bytes.Buffer + buf.Write(SIGV2) + buf.WriteByte(header.Command.toByte()) + buf.WriteByte(header.TransportProtocol.toByte()) + if header.TransportProtocol.IsUnspec() { + // For UNSPEC, write no addresses and ports but only TLVs if they are present + hdrLen, err := addTLVLen(lengthUnspecBytes, len(header.rawTLVs)) + if err != nil { + return nil, err + } + buf.Write(hdrLen) + } else { + var addrSrc, addrDst []byte + if header.TransportProtocol.IsIPv4() { + hdrLen, err := addTLVLen(lengthV4Bytes, len(header.rawTLVs)) + if err != nil { + return nil, err + } + buf.Write(hdrLen) + sourceIP, destIP, _ := header.IPs() + addrSrc = sourceIP.To4() + addrDst = destIP.To4() + } else if header.TransportProtocol.IsIPv6() { + hdrLen, err := addTLVLen(lengthV6Bytes, len(header.rawTLVs)) + if err != nil { + return nil, err + } + buf.Write(hdrLen) + sourceIP, destIP, _ := header.IPs() + addrSrc = sourceIP.To16() + addrDst = destIP.To16() + } else if header.TransportProtocol.IsUnix() { + buf.Write(lengthUnixBytes) + sourceAddr, destAddr, ok := header.UnixAddrs() + if !ok { + return nil, ErrInvalidAddress + } + addrSrc = formatUnixName(sourceAddr.Name) + addrDst = formatUnixName(destAddr.Name) + } + + if addrSrc == nil || addrDst == nil { + return nil, ErrInvalidAddress + } + buf.Write(addrSrc) + buf.Write(addrDst) + + if sourcePort, destPort, ok := header.Ports(); ok { + portBytes := make([]byte, 2) + + binary.BigEndian.PutUint16(portBytes, uint16(sourcePort)) + buf.Write(portBytes) + + binary.BigEndian.PutUint16(portBytes, uint16(destPort)) + buf.Write(portBytes) + } + } + + if len(header.rawTLVs) > 0 { + buf.Write(header.rawTLVs) + } + + return buf.Bytes(), nil +} + +func (header *Header) validateLength(length uint16) bool { + if header.TransportProtocol.IsIPv4() { + return length >= lengthV4 + } else if header.TransportProtocol.IsIPv6() { + return length >= lengthV6 + } else if header.TransportProtocol.IsUnix() { + return length >= lengthUnix + } else if 
header.TransportProtocol.IsUnspec() { + return length >= lengthUnspec + } + return false +} + +// addTLVLen adds the length of the TLV to the header length or errors on uint16 overflow. +func addTLVLen(cur []byte, tlvLen int) ([]byte, error) { + if tlvLen == 0 { + return cur, nil + } + curLen := binary.BigEndian.Uint16(cur) + newLen := int(curLen) + tlvLen + if newLen >= 1<<16 { + return nil, errUint16Overflow + } + a := make([]byte, 2) + binary.BigEndian.PutUint16(a, uint16(newLen)) + return a, nil +} + +func newIPAddr(transport AddressFamilyAndProtocol, ip net.IP, port uint16) net.Addr { + if transport.IsStream() { + return &net.TCPAddr{IP: ip, Port: int(port)} + } else if transport.IsDatagram() { + return &net.UDPAddr{IP: ip, Port: int(port)} + } else { + return nil + } +} + +func parseUnixName(b []byte) string { + i := bytes.IndexByte(b, 0) + if i < 0 { + return string(b) + } + return string(b[:i]) +} + +func formatUnixName(name string) []byte { + n := int(lengthUnix) / 2 + if len(name) >= n { + return []byte(name[:n]) + } + pad := make([]byte, n-len(name)) + return append([]byte(name), pad...) +} diff --git a/vendor/github.com/pires/go-proxyproto/version_cmd.go b/vendor/github.com/pires/go-proxyproto/version_cmd.go new file mode 100644 index 0000000000000..59f20420882a5 --- /dev/null +++ b/vendor/github.com/pires/go-proxyproto/version_cmd.go @@ -0,0 +1,47 @@ +package proxyproto + +// ProtocolVersionAndCommand represents the command in proxy protocol v2. +// Command doesn't exist in v1 but it should be set since other parts of +// this library may rely on it for determining connection details. +type ProtocolVersionAndCommand byte + +const ( + // LOCAL represents the LOCAL command in v2 or UNKNOWN transport in v1, + // in which case no address information is expected. + LOCAL ProtocolVersionAndCommand = '\x20' + // PROXY represents the PROXY command in v2 or transport is not UNKNOWN in v1, + // in which case valid local/remote address and port information is expected. + PROXY ProtocolVersionAndCommand = '\x21' +) + +var supportedCommand = map[ProtocolVersionAndCommand]bool{ + LOCAL: true, + PROXY: true, +} + +// IsLocal returns true if the command in v2 is LOCAL or the transport in v1 is UNKNOWN, +// i.e. when no address information is expected, false otherwise. +func (pvc ProtocolVersionAndCommand) IsLocal() bool { + return LOCAL == pvc +} + +// IsProxy returns true if the command in v2 is PROXY or the transport in v1 is not UNKNOWN, +// i.e. when valid local/remote address and port information is expected, false otherwise. +func (pvc ProtocolVersionAndCommand) IsProxy() bool { + return PROXY == pvc +} + +// IsUnspec returns true if the command is unspecified, false otherwise. 
+func (pvc ProtocolVersionAndCommand) IsUnspec() bool { + return !(pvc.IsLocal() || pvc.IsProxy()) +} + +func (pvc ProtocolVersionAndCommand) toByte() byte { + if pvc.IsLocal() { + return byte(LOCAL) + } else if pvc.IsProxy() { + return byte(PROXY) + } + + return byte(LOCAL) +} diff --git a/vendor/github.com/soheilhy/cmux/.gitignore b/vendor/github.com/soheilhy/cmux/.gitignore deleted file mode 100644 index daf913b1b347a..0000000000000 --- a/vendor/github.com/soheilhy/cmux/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/soheilhy/cmux/.travis.yml b/vendor/github.com/soheilhy/cmux/.travis.yml deleted file mode 100644 index 4d78a519feb62..0000000000000 --- a/vendor/github.com/soheilhy/cmux/.travis.yml +++ /dev/null @@ -1,29 +0,0 @@ -language: go - -go: - - 1.6 - - 1.7 - - 1.8 - - tip - -matrix: - allow_failures: - - go: tip - -gobuild_args: -race - -before_install: - - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then go get -u github.com/kisielk/errcheck; fi - - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then go get -u golang.org/x/lint/golint; fi - -before_script: - - '! gofmt -s -l . | read' - - echo $TRAVIS_GO_VERSION - - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then golint ./...; fi - - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then errcheck ./...; fi - - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then go tool vet .; fi - - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then go tool vet --shadow .; fi - -script: - - go test -bench . -v ./... - - go test -race -bench . -v ./... diff --git a/vendor/github.com/soheilhy/cmux/CONTRIBUTORS b/vendor/github.com/soheilhy/cmux/CONTRIBUTORS deleted file mode 100644 index 49878f228a122..0000000000000 --- a/vendor/github.com/soheilhy/cmux/CONTRIBUTORS +++ /dev/null @@ -1,12 +0,0 @@ -# The list of people who have contributed code to the cmux repository. -# -# Auto-generated with: -# git log --oneline --pretty=format:'%an <%aE>' | sort -u -# -Andreas Jaekle <[email protected]> -Dmitri Shuralyov <[email protected]> -Ethan Mosbaugh <[email protected]> -Soheil Hassas Yeganeh <[email protected]> -Soheil Hassas Yeganeh <[email protected]> -Tamir Duberstein <[email protected]> -Tamir Duberstein <[email protected]> diff --git a/vendor/github.com/soheilhy/cmux/README.md b/vendor/github.com/soheilhy/cmux/README.md deleted file mode 100644 index c4191b70b0035..0000000000000 --- a/vendor/github.com/soheilhy/cmux/README.md +++ /dev/null @@ -1,83 +0,0 @@ -# cmux: Connection Mux ![Travis Build Status](https://api.travis-ci.org/soheilhy/args.svg?branch=master "Travis Build Status") [![GoDoc](https://godoc.org/github.com/soheilhy/cmux?status.svg)](http://godoc.org/github.com/soheilhy/cmux) - -cmux is a generic Go library to multiplex connections based on -their payload. Using cmux, you can serve gRPC, SSH, HTTPS, HTTP, -Go RPC, and pretty much any other protocol on the same TCP listener. - -## How-To -Simply create your main listener, create a cmux for that listener, -and then match connections: -```go -// Create the main listener. -l, err := net.Listen("tcp", ":23456") -if err != nil { - log.Fatal(err) -} - -// Create a cmux. -m := cmux.New(l) - -// Match connections in order: -// First grpc, then HTTP, and otherwise Go RPC/TCP. 
-grpcL := m.Match(cmux.HTTP2HeaderField("content-type", "application/grpc")) -httpL := m.Match(cmux.HTTP1Fast()) -trpcL := m.Match(cmux.Any()) // Any means anything that is not yet matched. - -// Create your protocol servers. -grpcS := grpc.NewServer() -grpchello.RegisterGreeterServer(grpcS, &server{}) - -httpS := &http.Server{ - Handler: &helloHTTP1Handler{}, -} - -trpcS := rpc.NewServer() -trpcS.Register(&ExampleRPCRcvr{}) - -// Use the muxed listeners for your servers. -go grpcS.Serve(grpcL) -go httpS.Serve(httpL) -go trpcS.Accept(trpcL) - -// Start serving! -m.Serve() -``` - -Take a look at [other examples in the GoDoc](http://godoc.org/github.com/soheilhy/cmux/#pkg-examples). - -## Docs -* [GoDocs](https://godoc.org/github.com/soheilhy/cmux) - -## Performance -There is room for improvment but, since we are only matching -the very first bytes of a connection, the performance overheads on -long-lived connections (i.e., RPCs and pipelined HTTP streams) -is negligible. - -*TODO(soheil)*: Add benchmarks. - -## Limitations -* *TLS*: `net/http` uses a type assertion to identify TLS connections; since -cmux's lookahead-implementing connection wraps the underlying TLS connection, -this type assertion fails. -Because of that, you can serve HTTPS using cmux but `http.Request.TLS` -would not be set in your handlers. - -* *Different Protocols on The Same Connection*: `cmux` matches the connection -when it's accepted. For example, one connection can be either gRPC or REST, but -not both. That is, we assume that a client connection is either used for gRPC -or REST. - -* *Java gRPC Clients*: Java gRPC client blocks until it receives a SETTINGS -frame from the server. If you are using the Java client to connect to a cmux'ed -gRPC server please match with writers: -```go -grpcl := m.MatchWithWriters(cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc")) -``` - -# Copyright and License -Copyright 2016 The CMux Authors. All rights reserved. - -See [CONTRIBUTORS](https://github.com/soheilhy/cmux/blob/master/CONTRIBUTORS) -for the CMux Authors. Code is released under -[the Apache 2 license](https://github.com/soheilhy/cmux/blob/master/LICENSE). diff --git a/vendor/github.com/soheilhy/cmux/buffer.go b/vendor/github.com/soheilhy/cmux/buffer.go deleted file mode 100644 index f8cf30a1e66af..0000000000000 --- a/vendor/github.com/soheilhy/cmux/buffer.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2016 The CMux Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the License. - -package cmux - -import ( - "bytes" - "io" -) - -// bufferedReader is an optimized implementation of io.Reader that behaves like -// ``` -// io.MultiReader(bytes.NewReader(buffer.Bytes()), io.TeeReader(source, buffer)) -// ``` -// without allocating. 
-type bufferedReader struct { - source io.Reader - buffer bytes.Buffer - bufferRead int - bufferSize int - sniffing bool - lastErr error -} - -func (s *bufferedReader) Read(p []byte) (int, error) { - if s.bufferSize > s.bufferRead { - // If we have already read something from the buffer before, we return the - // same data and the last error if any. We need to immediately return, - // otherwise we may block for ever, if we try to be smart and call - // source.Read() seeking a little bit of more data. - bn := copy(p, s.buffer.Bytes()[s.bufferRead:s.bufferSize]) - s.bufferRead += bn - return bn, s.lastErr - } else if !s.sniffing && s.buffer.Cap() != 0 { - // We don't need the buffer anymore. - // Reset it to release the internal slice. - s.buffer = bytes.Buffer{} - } - - // If there is nothing more to return in the sniffed buffer, read from the - // source. - sn, sErr := s.source.Read(p) - if sn > 0 && s.sniffing { - s.lastErr = sErr - if wn, wErr := s.buffer.Write(p[:sn]); wErr != nil { - return wn, wErr - } - } - return sn, sErr -} - -func (s *bufferedReader) reset(snif bool) { - s.sniffing = snif - s.bufferRead = 0 - s.bufferSize = s.buffer.Len() -} diff --git a/vendor/github.com/soheilhy/cmux/cmux.go b/vendor/github.com/soheilhy/cmux/cmux.go deleted file mode 100644 index 5ba921e72dc06..0000000000000 --- a/vendor/github.com/soheilhy/cmux/cmux.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2016 The CMux Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the License. - -package cmux - -import ( - "errors" - "fmt" - "io" - "net" - "sync" - "time" -) - -// Matcher matches a connection based on its content. -type Matcher func(io.Reader) bool - -// MatchWriter is a match that can also write response (say to do handshake). -type MatchWriter func(io.Writer, io.Reader) bool - -// ErrorHandler handles an error and returns whether -// the mux should continue serving the listener. -type ErrorHandler func(error) bool - -var _ net.Error = ErrNotMatched{} - -// ErrNotMatched is returned whenever a connection is not matched by any of -// the matchers registered in the multiplexer. -type ErrNotMatched struct { - c net.Conn -} - -func (e ErrNotMatched) Error() string { - return fmt.Sprintf("mux: connection %v not matched by an matcher", - e.c.RemoteAddr()) -} - -// Temporary implements the net.Error interface. -func (e ErrNotMatched) Temporary() bool { return true } - -// Timeout implements the net.Error interface. -func (e ErrNotMatched) Timeout() bool { return false } - -type errListenerClosed string - -func (e errListenerClosed) Error() string { return string(e) } -func (e errListenerClosed) Temporary() bool { return false } -func (e errListenerClosed) Timeout() bool { return false } - -// ErrListenerClosed is returned from muxListener.Accept when the underlying -// listener is closed. -var ErrListenerClosed = errListenerClosed("mux: listener closed") - -// ErrServerClosed is returned from muxListener.Accept when mux server is closed. 
-var ErrServerClosed = errors.New("mux: server closed") - -// for readability of readTimeout -var noTimeout time.Duration - -// New instantiates a new connection multiplexer. -func New(l net.Listener) CMux { - return &cMux{ - root: l, - bufLen: 1024, - errh: func(_ error) bool { return true }, - donec: make(chan struct{}), - readTimeout: noTimeout, - } -} - -// CMux is a multiplexer for network connections. -type CMux interface { - // Match returns a net.Listener that sees (i.e., accepts) only - // the connections matched by at least one of the matcher. - // - // The order used to call Match determines the priority of matchers. - Match(...Matcher) net.Listener - // MatchWithWriters returns a net.Listener that accepts only the - // connections that matched by at least of the matcher writers. - // - // Prefer Matchers over MatchWriters, since the latter can write on the - // connection before the actual handler. - // - // The order used to call Match determines the priority of matchers. - MatchWithWriters(...MatchWriter) net.Listener - // Serve starts multiplexing the listener. Serve blocks and perhaps - // should be invoked concurrently within a go routine. - Serve() error - // Closes cmux server and stops accepting any connections on listener - Close() - // HandleError registers an error handler that handles listener errors. - HandleError(ErrorHandler) - // sets a timeout for the read of matchers - SetReadTimeout(time.Duration) -} - -type matchersListener struct { - ss []MatchWriter - l muxListener -} - -type cMux struct { - root net.Listener - bufLen int - errh ErrorHandler - sls []matchersListener - readTimeout time.Duration - donec chan struct{} - mu sync.Mutex -} - -func matchersToMatchWriters(matchers []Matcher) []MatchWriter { - mws := make([]MatchWriter, 0, len(matchers)) - for _, m := range matchers { - cm := m - mws = append(mws, func(w io.Writer, r io.Reader) bool { - return cm(r) - }) - } - return mws -} - -func (m *cMux) Match(matchers ...Matcher) net.Listener { - mws := matchersToMatchWriters(matchers) - return m.MatchWithWriters(mws...) -} - -func (m *cMux) MatchWithWriters(matchers ...MatchWriter) net.Listener { - ml := muxListener{ - Listener: m.root, - connc: make(chan net.Conn, m.bufLen), - donec: make(chan struct{}), - } - m.sls = append(m.sls, matchersListener{ss: matchers, l: ml}) - return ml -} - -func (m *cMux) SetReadTimeout(t time.Duration) { - m.readTimeout = t -} - -func (m *cMux) Serve() error { - var wg sync.WaitGroup - - defer func() { - m.closeDoneChans() - wg.Wait() - - for _, sl := range m.sls { - close(sl.l.connc) - // Drain the connections enqueued for the listener. 
- for c := range sl.l.connc { - _ = c.Close() - } - } - }() - - for { - c, err := m.root.Accept() - if err != nil { - if !m.handleErr(err) { - return err - } - continue - } - - wg.Add(1) - go m.serve(c, m.donec, &wg) - } -} - -func (m *cMux) serve(c net.Conn, donec <-chan struct{}, wg *sync.WaitGroup) { - defer wg.Done() - - muc := newMuxConn(c) - if m.readTimeout > noTimeout { - _ = c.SetReadDeadline(time.Now().Add(m.readTimeout)) - } - for _, sl := range m.sls { - for _, s := range sl.ss { - matched := s(muc.Conn, muc.startSniffing()) - if matched { - muc.doneSniffing() - if m.readTimeout > noTimeout { - _ = c.SetReadDeadline(time.Time{}) - } - select { - case sl.l.connc <- muc: - case <-donec: - _ = c.Close() - } - return - } - } - } - - _ = c.Close() - err := ErrNotMatched{c: c} - if !m.handleErr(err) { - _ = m.root.Close() - } -} - -func (m *cMux) Close() { - m.closeDoneChans() -} - -func (m *cMux) closeDoneChans() { - m.mu.Lock() - defer m.mu.Unlock() - - select { - case <-m.donec: - // Already closed. Don't close again - default: - close(m.donec) - } - for _, sl := range m.sls { - select { - case <-sl.l.donec: - // Already closed. Don't close again - default: - close(sl.l.donec) - } - } -} - -func (m *cMux) HandleError(h ErrorHandler) { - m.errh = h -} - -func (m *cMux) handleErr(err error) bool { - if !m.errh(err) { - return false - } - - if ne, ok := err.(net.Error); ok { - return ne.Temporary() - } - - return false -} - -type muxListener struct { - net.Listener - connc chan net.Conn - donec chan struct{} -} - -func (l muxListener) Accept() (net.Conn, error) { - select { - case c, ok := <-l.connc: - if !ok { - return nil, ErrListenerClosed - } - return c, nil - case <-l.donec: - return nil, ErrServerClosed - } -} - -// MuxConn wraps a net.Conn and provides transparent sniffing of connection data. -type MuxConn struct { - net.Conn - buf bufferedReader -} - -func newMuxConn(c net.Conn) *MuxConn { - return &MuxConn{ - Conn: c, - buf: bufferedReader{source: c}, - } -} - -// From the io.Reader documentation: -// -// When Read encounters an error or end-of-file condition after -// successfully reading n > 0 bytes, it returns the number of -// bytes read. It may return the (non-nil) error from the same call -// or return the error (and n == 0) from a subsequent call. -// An instance of this general case is that a Reader returning -// a non-zero number of bytes at the end of the input stream may -// return either err == EOF or err == nil. The next Read should -// return 0, EOF. -func (m *MuxConn) Read(p []byte) (int, error) { - return m.buf.Read(p) -} - -func (m *MuxConn) startSniffing() io.Reader { - m.buf.reset(true) - return &m.buf -} - -func (m *MuxConn) doneSniffing() { - m.buf.reset(false) -} diff --git a/vendor/github.com/soheilhy/cmux/doc.go b/vendor/github.com/soheilhy/cmux/doc.go deleted file mode 100644 index aaa8f3158998e..0000000000000 --- a/vendor/github.com/soheilhy/cmux/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2016 The CMux Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. 
See the License for the specific language governing -// permissions and limitations under the License. - -// Package cmux is a library to multiplex network connections based on -// their payload. Using cmux, you can serve different protocols from the -// same listener. -package cmux diff --git a/vendor/github.com/soheilhy/cmux/matchers.go b/vendor/github.com/soheilhy/cmux/matchers.go deleted file mode 100644 index 878ae98cc3cc5..0000000000000 --- a/vendor/github.com/soheilhy/cmux/matchers.go +++ /dev/null @@ -1,267 +0,0 @@ -// Copyright 2016 The CMux Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the License. - -package cmux - -import ( - "bufio" - "crypto/tls" - "io" - "io/ioutil" - "net/http" - "strings" - - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" -) - -// Any is a Matcher that matches any connection. -func Any() Matcher { - return func(r io.Reader) bool { return true } -} - -// PrefixMatcher returns a matcher that matches a connection if it -// starts with any of the strings in strs. -func PrefixMatcher(strs ...string) Matcher { - pt := newPatriciaTreeString(strs...) - return pt.matchPrefix -} - -func prefixByteMatcher(list ...[]byte) Matcher { - pt := newPatriciaTree(list...) - return pt.matchPrefix -} - -var defaultHTTPMethods = []string{ - "OPTIONS", - "GET", - "HEAD", - "POST", - "PUT", - "DELETE", - "TRACE", - "CONNECT", -} - -// HTTP1Fast only matches the methods in the HTTP request. -// -// This matcher is very optimistic: if it returns true, it does not mean that -// the request is a valid HTTP response. If you want a correct but slower HTTP1 -// matcher, use HTTP1 instead. -func HTTP1Fast(extMethods ...string) Matcher { - return PrefixMatcher(append(defaultHTTPMethods, extMethods...)...) -} - -// TLS matches HTTPS requests. -// -// By default, any TLS handshake packet is matched. An optional whitelist -// of versions can be passed in to restrict the matcher, for example: -// TLS(tls.VersionTLS11, tls.VersionTLS12) -func TLS(versions ...int) Matcher { - if len(versions) == 0 { - versions = []int{ - tls.VersionSSL30, - tls.VersionTLS10, - tls.VersionTLS11, - tls.VersionTLS12, - } - } - prefixes := [][]byte{} - for _, v := range versions { - prefixes = append(prefixes, []byte{22, byte(v >> 8 & 0xff), byte(v & 0xff)}) - } - return prefixByteMatcher(prefixes...) -} - -const maxHTTPRead = 4096 - -// HTTP1 parses the first line or upto 4096 bytes of the request to see if -// the conection contains an HTTP request. -func HTTP1() Matcher { - return func(r io.Reader) bool { - br := bufio.NewReader(&io.LimitedReader{R: r, N: maxHTTPRead}) - l, part, err := br.ReadLine() - if err != nil || part { - return false - } - - _, _, proto, ok := parseRequestLine(string(l)) - if !ok { - return false - } - - v, _, ok := http.ParseHTTPVersion(proto) - return ok && v == 1 - } -} - -// grabbed from net/http. 
-func parseRequestLine(line string) (method, uri, proto string, ok bool) { - s1 := strings.Index(line, " ") - s2 := strings.Index(line[s1+1:], " ") - if s1 < 0 || s2 < 0 { - return - } - s2 += s1 + 1 - return line[:s1], line[s1+1 : s2], line[s2+1:], true -} - -// HTTP2 parses the frame header of the first frame to detect whether the -// connection is an HTTP2 connection. -func HTTP2() Matcher { - return hasHTTP2Preface -} - -// HTTP1HeaderField returns a matcher matching the header fields of the first -// request of an HTTP 1 connection. -func HTTP1HeaderField(name, value string) Matcher { - return func(r io.Reader) bool { - return matchHTTP1Field(r, name, func(gotValue string) bool { - return gotValue == value - }) - } -} - -// HTTP1HeaderFieldPrefix returns a matcher matching the header fields of the -// first request of an HTTP 1 connection. If the header with key name has a -// value prefixed with valuePrefix, this will match. -func HTTP1HeaderFieldPrefix(name, valuePrefix string) Matcher { - return func(r io.Reader) bool { - return matchHTTP1Field(r, name, func(gotValue string) bool { - return strings.HasPrefix(gotValue, valuePrefix) - }) - } -} - -// HTTP2HeaderField returns a matcher matching the header fields of the first -// headers frame. -func HTTP2HeaderField(name, value string) Matcher { - return func(r io.Reader) bool { - return matchHTTP2Field(ioutil.Discard, r, name, func(gotValue string) bool { - return gotValue == value - }) - } -} - -// HTTP2HeaderFieldPrefix returns a matcher matching the header fields of the -// first headers frame. If the header with key name has a value prefixed with -// valuePrefix, this will match. -func HTTP2HeaderFieldPrefix(name, valuePrefix string) Matcher { - return func(r io.Reader) bool { - return matchHTTP2Field(ioutil.Discard, r, name, func(gotValue string) bool { - return strings.HasPrefix(gotValue, valuePrefix) - }) - } -} - -// HTTP2MatchHeaderFieldSendSettings matches the header field and writes the -// settings to the server. Prefer HTTP2HeaderField over this one, if the client -// does not block on receiving a SETTING frame. -func HTTP2MatchHeaderFieldSendSettings(name, value string) MatchWriter { - return func(w io.Writer, r io.Reader) bool { - return matchHTTP2Field(w, r, name, func(gotValue string) bool { - return gotValue == value - }) - } -} - -// HTTP2MatchHeaderFieldPrefixSendSettings matches the header field prefix -// and writes the settings to the server. Prefer HTTP2HeaderFieldPrefix over -// this one, if the client does not block on receiving a SETTING frame. 
-func HTTP2MatchHeaderFieldPrefixSendSettings(name, valuePrefix string) MatchWriter { - return func(w io.Writer, r io.Reader) bool { - return matchHTTP2Field(w, r, name, func(gotValue string) bool { - return strings.HasPrefix(gotValue, valuePrefix) - }) - } -} - -func hasHTTP2Preface(r io.Reader) bool { - var b [len(http2.ClientPreface)]byte - last := 0 - - for { - n, err := r.Read(b[last:]) - if err != nil { - return false - } - - last += n - eq := string(b[:last]) == http2.ClientPreface[:last] - if last == len(http2.ClientPreface) { - return eq - } - if !eq { - return false - } - } -} - -func matchHTTP1Field(r io.Reader, name string, matches func(string) bool) (matched bool) { - req, err := http.ReadRequest(bufio.NewReader(r)) - if err != nil { - return false - } - - return matches(req.Header.Get(name)) -} - -func matchHTTP2Field(w io.Writer, r io.Reader, name string, matches func(string) bool) (matched bool) { - if !hasHTTP2Preface(r) { - return false - } - - done := false - framer := http2.NewFramer(w, r) - hdec := hpack.NewDecoder(uint32(4<<10), func(hf hpack.HeaderField) { - if hf.Name == name { - done = true - if matches(hf.Value) { - matched = true - } - } - }) - for { - f, err := framer.ReadFrame() - if err != nil { - return false - } - - switch f := f.(type) { - case *http2.SettingsFrame: - // Sender acknoweldged the SETTINGS frame. No need to write - // SETTINGS again. - if f.IsAck() { - break - } - if err := framer.WriteSettings(); err != nil { - return false - } - case *http2.ContinuationFrame: - if _, err := hdec.Write(f.HeaderBlockFragment()); err != nil { - return false - } - done = done || f.FrameHeader.Flags&http2.FlagHeadersEndHeaders != 0 - case *http2.HeadersFrame: - if _, err := hdec.Write(f.HeaderBlockFragment()); err != nil { - return false - } - done = done || f.FrameHeader.Flags&http2.FlagHeadersEndHeaders != 0 - } - - if done { - return matched - } - } -} diff --git a/vendor/github.com/soheilhy/cmux/patricia.go b/vendor/github.com/soheilhy/cmux/patricia.go deleted file mode 100644 index c3e3d85bdeaf0..0000000000000 --- a/vendor/github.com/soheilhy/cmux/patricia.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2016 The CMux Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the License. - -package cmux - -import ( - "bytes" - "io" -) - -// patriciaTree is a simple patricia tree that handles []byte instead of string -// and cannot be changed after instantiation. -type patriciaTree struct { - root *ptNode - maxDepth int // max depth of the tree. -} - -func newPatriciaTree(bs ...[]byte) *patriciaTree { - max := 0 - for _, b := range bs { - if max < len(b) { - max = len(b) - } - } - return &patriciaTree{ - root: newNode(bs), - maxDepth: max + 1, - } -} - -func newPatriciaTreeString(strs ...string) *patriciaTree { - b := make([][]byte, len(strs)) - for i, s := range strs { - b[i] = []byte(s) - } - return newPatriciaTree(b...) 
-} - -func (t *patriciaTree) matchPrefix(r io.Reader) bool { - buf := make([]byte, t.maxDepth) - n, _ := io.ReadFull(r, buf) - return t.root.match(buf[:n], true) -} - -func (t *patriciaTree) match(r io.Reader) bool { - buf := make([]byte, t.maxDepth) - n, _ := io.ReadFull(r, buf) - return t.root.match(buf[:n], false) -} - -type ptNode struct { - prefix []byte - next map[byte]*ptNode - terminal bool -} - -func newNode(strs [][]byte) *ptNode { - if len(strs) == 0 { - return &ptNode{ - prefix: []byte{}, - terminal: true, - } - } - - if len(strs) == 1 { - return &ptNode{ - prefix: strs[0], - terminal: true, - } - } - - p, strs := splitPrefix(strs) - n := &ptNode{ - prefix: p, - } - - nexts := make(map[byte][][]byte) - for _, s := range strs { - if len(s) == 0 { - n.terminal = true - continue - } - nexts[s[0]] = append(nexts[s[0]], s[1:]) - } - - n.next = make(map[byte]*ptNode) - for first, rests := range nexts { - n.next[first] = newNode(rests) - } - - return n -} - -func splitPrefix(bss [][]byte) (prefix []byte, rest [][]byte) { - if len(bss) == 0 || len(bss[0]) == 0 { - return prefix, bss - } - - if len(bss) == 1 { - return bss[0], [][]byte{{}} - } - - for i := 0; ; i++ { - var cur byte - eq := true - for j, b := range bss { - if len(b) <= i { - eq = false - break - } - - if j == 0 { - cur = b[i] - continue - } - - if cur != b[i] { - eq = false - break - } - } - - if !eq { - break - } - - prefix = append(prefix, cur) - } - - rest = make([][]byte, 0, len(bss)) - for _, b := range bss { - rest = append(rest, b[len(prefix):]) - } - - return prefix, rest -} - -func (n *ptNode) match(b []byte, prefix bool) bool { - l := len(n.prefix) - if l > 0 { - if l > len(b) { - l = len(b) - } - if !bytes.Equal(b[:l], n.prefix) { - return false - } - } - - if n.terminal && (prefix || len(n.prefix) == len(b)) { - return true - } - - if l >= len(b) { - return false - } - - nextN, ok := n.next[b[l]] - if !ok { - return false - } - - if l == len(b) { - b = b[l:l] - } else { - b = b[l+1:] - } - return nextN.match(b, prefix) -} diff --git a/vendor/google.golang.org/grpc/experimental/experimental.go b/vendor/google.golang.org/grpc/experimental/experimental.go new file mode 100644 index 0000000000000..de7f13a2210ef --- /dev/null +++ b/vendor/google.golang.org/grpc/experimental/experimental.go @@ -0,0 +1,65 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package experimental is a collection of experimental features that might +// have some rough edges to them. Housing experimental features in this package +// results in a user accessing these APIs as `experimental.Foo`, thereby making +// it explicit that the feature is experimental and using them in production +// code is at their own risk. +// +// All APIs in this package are experimental. 
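+//
+// A hedged usage sketch (illustrative only; "addr" stands in for a reachable
+// server address, and the insecure credentials are for demonstration):
+//
+//	pool := grpc.NewSharedBufferPool()
+//	conn, err := grpc.Dial(addr,
+//		grpc.WithTransportCredentials(insecure.NewCredentials()),
+//		experimental.WithRecvBufferPool(pool),
+//	)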
+package experimental + +import ( + "google.golang.org/grpc" + "google.golang.org/grpc/internal" +) + +// WithRecvBufferPool returns a grpc.DialOption that configures the use of +// bufferPool for parsing incoming messages on a grpc.ClientConn. Depending on +// the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize +// one, begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the +// following options are used: WithStatsHandler, EnableTracing, or binary +// logging. In such cases, the shared buffer pool will be ignored. +// +// Note: It is not recommended to use the shared buffer pool when compression is +// enabled. +func WithRecvBufferPool(bufferPool grpc.SharedBufferPool) grpc.DialOption { + return internal.WithRecvBufferPool.(func(grpc.SharedBufferPool) grpc.DialOption)(bufferPool) +} + +// RecvBufferPool returns a grpc.ServerOption that configures the server to use +// the provided shared buffer pool for parsing incoming messages. Depending on +// the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize +// one, begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the +// following options are used: StatsHandler, EnableTracing, or binary logging. +// In such cases, the shared buffer pool will be ignored. +// +// Note: It is not recommended to use the shared buffer pool when compression is +// enabled. +func RecvBufferPool(bufferPool grpc.SharedBufferPool) grpc.ServerOption { + return internal.RecvBufferPool.(func(grpc.SharedBufferPool) grpc.ServerOption)(bufferPool) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 904dbc58f4d04..d414bbd3d538b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -927,7 +927,7 @@ github.com/gorilla/websocket # github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 ## explicit; go 1.17 github.com/grafana/cloudflare-go -# github.com/grafana/dskit v0.0.0-20240104111617-ea101a3b86eb +# github.com/grafana/dskit v0.0.0-20240528015923-27d7d41066d3 ## explicit; go 1.20 github.com/grafana/dskit/aws github.com/grafana/dskit/backoff @@ -976,7 +976,7 @@ github.com/grafana/dskit/user # github.com/grafana/go-gelf/v2 v2.0.1 ## explicit; go 1.17 github.com/grafana/go-gelf/v2/gelf -# github.com/grafana/gomemcache v0.0.0-20231204155601-7de47a8c3cb0 +# github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 ## explicit; go 1.18 github.com/grafana/gomemcache/memcache # github.com/grafana/jsonparser v0.0.0-20240209175146-098958973a2d @@ -1297,6 +1297,9 @@ github.com/pierrec/lz4/v4/internal/lz4block github.com/pierrec/lz4/v4/internal/lz4errors github.com/pierrec/lz4/v4/internal/lz4stream github.com/pierrec/lz4/v4/internal/xxh32 +# github.com/pires/go-proxyproto v0.7.0 +## explicit; go 1.18 +github.com/pires/go-proxyproto # github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c ## explicit; go 1.14 github.com/pkg/browser @@ -1468,9 +1471,6 @@ github.com/shurcooL/vfsgen # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus -# github.com/soheilhy/cmux v0.1.5 -## explicit; go 1.11 -github.com/soheilhy/cmux # github.com/sony/gobreaker v0.5.0 ## explicit; go 1.12 github.com/sony/gobreaker @@ -1912,6 +1912,7 @@ google.golang.org/grpc/credentials/tls/certprovider/pemfile 
google.golang.org/grpc/encoding google.golang.org/grpc/encoding/gzip google.golang.org/grpc/encoding/proto +google.golang.org/grpc/experimental google.golang.org/grpc/grpclog google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal
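The deleted cmux matcher in the diff above sniffs HTTP/2 by first checking for the client connection preface and only then parsing SETTINGS/HEADERS frames through an HPACK decoder. Below is a minimal Go sketch of just the preface check; unlike the deleted hasHTTP2Preface, which reads incrementally so it can reject on the first mismatched byte, this version reads the full preface length in one call.

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"golang.org/x/net/http2"
)

// hasHTTP2Preface reports whether r begins with the HTTP/2 client
// connection preface ("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"). It reads at
// most len(http2.ClientPreface) bytes from r.
func hasHTTP2Preface(r io.Reader) bool {
	buf := make([]byte, len(http2.ClientPreface))
	n, err := io.ReadFull(r, buf)
	if err != nil && err != io.ErrUnexpectedEOF {
		return false // read failed before enough bytes arrived
	}
	// A short read (n < len) can never equal the full preface.
	return bytes.Equal(buf[:n], []byte(http2.ClientPreface))
}

func main() {
	fmt.Println(hasHTTP2Preface(strings.NewReader(http2.ClientPreface + "frames..."))) // true
	fmt.Println(hasHTTP2Preface(strings.NewReader("GET / HTTP/1.1\r\n")))              // false
}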
feat
add recalculateOwnedStreams to check stream ownership if the ring is changed (#13103)
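Alongside the cmux removal, this commit vendors grpc-go's experimental receive-buffer-pool API (vendor/google.golang.org/grpc/experimental/experimental.go in the diff above). A minimal sketch of wiring it up on both sides, assuming a grpc-go version that ships both grpc.NewSharedBufferPool and the experimental package; per the vendored doc comments, the pool is ignored when stats handlers, tracing, or binary logging are enabled, and is not recommended with compression.

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/experimental"
)

func main() {
	// One size-bucketed pool shared by client and server to reduce
	// per-message allocations when parsing incoming messages.
	pool := grpc.NewSharedBufferPool()

	// Client side: WithRecvBufferPool yields a grpc.DialOption.
	dialOpts := []grpc.DialOption{experimental.WithRecvBufferPool(pool)}

	// Server side: RecvBufferPool yields a grpc.ServerOption.
	srv := grpc.NewServer(experimental.RecvBufferPool(pool))

	_, _ = dialOpts, srv // pass these to grpc.Dial / srv.Serve in real code
}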
5def37d63d8a4279b9f26d4bc62bd04077e42814
2023-10-25 18:50:10
Periklis Tsirakidis
operator: Prepare community release v0.5.0 (#10996)
false
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md index 6d86c1bcadf77..e907413eeba13 100644 --- a/operator/CHANGELOG.md +++ b/operator/CHANGELOG.md @@ -1,5 +1,7 @@ ## Main +## 0.5.0 (2023-10-24) + - [10924](https://github.com/grafana/loki/pull/10924) **periklis**: Update Loki operand to v2.9.2 - [10874](https://github.com/grafana/loki/pull/10874) **periklis**: Bump deps to address CVE-2023-39325 and CVE-2023-44487 - [10854](https://github.com/grafana/loki/pull/10854) **periklis**: Add missing marker/sweeper panels in retention dashboard diff --git a/operator/Makefile b/operator/Makefile index 88a35068364f5..680b4f509ca3c 100644 --- a/operator/Makefile +++ b/operator/Makefile @@ -21,7 +21,7 @@ LOKI_OPERATOR_NS ?= kubernetes-operators # To re-generate a bundle for another specific version without changing the standard setup, you can: # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) # - use environment variables to overwrite this value (e.g export VERSION=0.0.2) -VERSION ?= 0.4.0 +VERSION ?= 0.5.0 CHANNELS ?= "alpha" DEFAULT_CHANNEL ?= "alpha" SUPPORTED_OCP_VERSIONS="v4.12" diff --git a/operator/bundle/community-openshift/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml b/operator/bundle/community-openshift/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml index 7101ca5af48a5..6f5c94cffea84 100644 --- a/operator/bundle/community-openshift/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml +++ b/operator/bundle/community-openshift/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml @@ -5,11 +5,11 @@ metadata: service.beta.openshift.io/serving-cert-secret-name: loki-operator-metrics creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator-controller-manager-metrics-service spec: ports: diff --git a/operator/bundle/community-openshift/manifests/loki-operator-manager-config_v1_configmap.yaml b/operator/bundle/community-openshift/manifests/loki-operator-manager-config_v1_configmap.yaml index a18569f871c59..d90b7551218d1 100644 --- a/operator/bundle/community-openshift/manifests/loki-operator-manager-config_v1_configmap.yaml +++ b/operator/bundle/community-openshift/manifests/loki-operator-manager-config_v1_configmap.yaml @@ -60,9 +60,9 @@ data: kind: ConfigMap metadata: labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator-manager-config diff --git a/operator/bundle/community-openshift/manifests/loki-operator-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml b/operator/bundle/community-openshift/manifests/loki-operator-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml index 97128e4175d9b..7f5c401e1c073 100644 --- a/operator/bundle/community-openshift/manifests/loki-operator-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml +++ b/operator/bundle/community-openshift/manifests/loki-operator-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml @@ -2,11 
+2,11 @@ apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator name: loki-operator-metrics-monitor spec: diff --git a/operator/bundle/community-openshift/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml b/operator/bundle/community-openshift/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml index 96738fd625cc7..ed266cabf3925 100644 --- a/operator/bundle/community-openshift/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml +++ b/operator/bundle/community-openshift/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -3,11 +3,11 @@ kind: ClusterRole metadata: creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator-metrics-reader rules: - nonResourceURLs: diff --git a/operator/bundle/community-openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml b/operator/bundle/community-openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml index 5472ebb497895..f427ef94b46d0 100644 --- a/operator/bundle/community-openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml +++ b/operator/bundle/community-openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml @@ -6,11 +6,11 @@ metadata: include.release.openshift.io/single-node-developer: "true" creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator-prometheus rules: - apiGroups: diff --git a/operator/bundle/community-openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml b/operator/bundle/community-openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml index d37772946f2d7..ac051dc2b594a 100644 --- a/operator/bundle/community-openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml +++ b/operator/bundle/community-openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml @@ -6,11 +6,11 @@ metadata: include.release.openshift.io/single-node-developer: "true" creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator-prometheus roleRef: apiGroup: rbac.authorization.k8s.io diff --git 
a/operator/bundle/community-openshift/manifests/loki-operator-webhook-service_v1_service.yaml b/operator/bundle/community-openshift/manifests/loki-operator-webhook-service_v1_service.yaml index 5979829a9e38c..94d97b4c3339d 100644 --- a/operator/bundle/community-openshift/manifests/loki-operator-webhook-service_v1_service.yaml +++ b/operator/bundle/community-openshift/manifests/loki-operator-webhook-service_v1_service.yaml @@ -3,11 +3,11 @@ kind: Service metadata: creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator-webhook-service spec: ports: diff --git a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml index 9ea06a23e299b..4d70fb9e082b4 100644 --- a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml @@ -149,8 +149,8 @@ metadata: capabilities: Full Lifecycle categories: OpenShift Optional, Logging & Tracing certified: "false" - containerImage: docker.io/grafana/loki-operator:0.4.0 - createdAt: "2023-10-17T07:40:29Z" + containerImage: docker.io/grafana/loki-operator:0.5.0 + createdAt: "2023-10-23T07:39:01Z" description: The Community Loki Operator provides Kubernetes native deployment and management of Loki and related logging components. operators.operatorframework.io/builder: operator-sdk-unknown @@ -160,7 +160,7 @@ metadata: labels: operatorframework.io/arch.amd64: supported operatorframework.io/arch.arm64: supported - name: loki-operator.v0.4.0 + name: loki-operator.v0.5.0 namespace: placeholder spec: apiservicedefinitions: {} @@ -1623,11 +1623,11 @@ spec: serviceAccountName: default deployments: - label: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 control-plane: controller-manager name: loki-operator-controller-manager spec: @@ -1661,7 +1661,7 @@ spec: value: quay.io/observatorium/api:latest - name: RELATED_IMAGE_OPA value: quay.io/observatorium/opa-openshift:latest - image: docker.io/grafana/loki-operator:0.4.0 + image: docker.io/grafana/loki-operator:0.5.0 imagePullPolicy: IfNotPresent livenessProbe: httpGet: @@ -1785,8 +1785,8 @@ spec: name: gateway - image: quay.io/observatorium/opa-openshift:latest name: opa - replaces: loki-operator.v0.3.0 - version: 0.4.0 + replaces: loki-operator.v0.4.0 + version: 0.5.0 webhookdefinitions: - admissionReviewVersions: - v1 diff --git a/operator/bundle/community-openshift/manifests/loki.grafana.com_alertingrules.yaml b/operator/bundle/community-openshift/manifests/loki.grafana.com_alertingrules.yaml index e8ebdc32528f7..cbff5d853c27b 100644 --- a/operator/bundle/community-openshift/manifests/loki.grafana.com_alertingrules.yaml +++ b/operator/bundle/community-openshift/manifests/loki.grafana.com_alertingrules.yaml @@ -5,11 +5,11 @@ metadata: controller-gen.kubebuilder.io/version: v0.13.0 creationTimestamp: null labels: - 
app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: alertingrules.loki.grafana.com spec: conversion: diff --git a/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml index 86c36c4eb88b4..1a4120613e358 100644 --- a/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml +++ b/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml @@ -5,11 +5,11 @@ metadata: controller-gen.kubebuilder.io/version: v0.13.0 creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: lokistacks.loki.grafana.com spec: conversion: diff --git a/operator/bundle/community-openshift/manifests/loki.grafana.com_recordingrules.yaml b/operator/bundle/community-openshift/manifests/loki.grafana.com_recordingrules.yaml index 2a827b65e0c63..0d157b3359cb6 100644 --- a/operator/bundle/community-openshift/manifests/loki.grafana.com_recordingrules.yaml +++ b/operator/bundle/community-openshift/manifests/loki.grafana.com_recordingrules.yaml @@ -5,11 +5,11 @@ metadata: controller-gen.kubebuilder.io/version: v0.13.0 creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: recordingrules.loki.grafana.com spec: conversion: diff --git a/operator/bundle/community-openshift/manifests/loki.grafana.com_rulerconfigs.yaml b/operator/bundle/community-openshift/manifests/loki.grafana.com_rulerconfigs.yaml index a218c6e076c0e..7c90da29b19bc 100644 --- a/operator/bundle/community-openshift/manifests/loki.grafana.com_rulerconfigs.yaml +++ b/operator/bundle/community-openshift/manifests/loki.grafana.com_rulerconfigs.yaml @@ -5,11 +5,11 @@ metadata: controller-gen.kubebuilder.io/version: v0.13.0 creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: rulerconfigs.loki.grafana.com spec: conversion: diff --git a/operator/bundle/community/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml b/operator/bundle/community/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml index b7f50405c453f..eb601bf525c4d 100644 --- a/operator/bundle/community/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml +++ b/operator/bundle/community/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml @@ -3,11 +3,11 @@ kind: Service metadata: creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + 
app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator-controller-manager-metrics-service spec: ports: diff --git a/operator/bundle/community/manifests/loki-operator-manager-config_v1_configmap.yaml b/operator/bundle/community/manifests/loki-operator-manager-config_v1_configmap.yaml index a5e6538f4f1f7..487a72b6e3ad1 100644 --- a/operator/bundle/community/manifests/loki-operator-manager-config_v1_configmap.yaml +++ b/operator/bundle/community/manifests/loki-operator-manager-config_v1_configmap.yaml @@ -24,9 +24,9 @@ data: kind: ConfigMap metadata: labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator-manager-config diff --git a/operator/bundle/community/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml b/operator/bundle/community/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml index 96738fd625cc7..ed266cabf3925 100644 --- a/operator/bundle/community/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml +++ b/operator/bundle/community/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -3,11 +3,11 @@ kind: ClusterRole metadata: creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator-metrics-reader rules: - nonResourceURLs: diff --git a/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml b/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml index 5472ebb497895..f427ef94b46d0 100644 --- a/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml +++ b/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml @@ -6,11 +6,11 @@ metadata: include.release.openshift.io/single-node-developer: "true" creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator-prometheus rules: - apiGroups: diff --git a/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml b/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml index d37772946f2d7..ac051dc2b594a 100644 --- a/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml +++ b/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml @@ -6,11 +6,11 @@ 
metadata: include.release.openshift.io/single-node-developer: "true" creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator-prometheus roleRef: apiGroup: rbac.authorization.k8s.io diff --git a/operator/bundle/community/manifests/loki-operator-webhook-service_v1_service.yaml b/operator/bundle/community/manifests/loki-operator-webhook-service_v1_service.yaml index 5979829a9e38c..94d97b4c3339d 100644 --- a/operator/bundle/community/manifests/loki-operator-webhook-service_v1_service.yaml +++ b/operator/bundle/community/manifests/loki-operator-webhook-service_v1_service.yaml @@ -3,11 +3,11 @@ kind: Service metadata: creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator-webhook-service spec: ports: diff --git a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml index e65f2a081488f..3b989f498e5ab 100644 --- a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml @@ -149,8 +149,8 @@ metadata: capabilities: Full Lifecycle categories: OpenShift Optional, Logging & Tracing certified: "false" - containerImage: docker.io/grafana/loki-operator:0.4.0 - createdAt: "2023-10-17T07:40:27Z" + containerImage: docker.io/grafana/loki-operator:0.5.0 + createdAt: "2023-10-23T07:38:57Z" description: The Community Loki Operator provides Kubernetes native deployment and management of Loki and related logging components. 
operators.operatorframework.io/builder: operator-sdk-unknown @@ -160,7 +160,7 @@ metadata: labels: operatorframework.io/arch.amd64: supported operatorframework.io/arch.arm64: supported - name: loki-operator.v0.4.0 + name: loki-operator.v0.5.0 namespace: placeholder spec: apiservicedefinitions: {} @@ -1610,11 +1610,11 @@ spec: serviceAccountName: default deployments: - label: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 control-plane: controller-manager name: loki-operator-controller-manager spec: @@ -1648,7 +1648,7 @@ spec: value: quay.io/observatorium/api:latest - name: RELATED_IMAGE_OPA value: quay.io/observatorium/opa-openshift:latest - image: docker.io/grafana/loki-operator:0.4.0 + image: docker.io/grafana/loki-operator:0.5.0 imagePullPolicy: IfNotPresent livenessProbe: httpGet: @@ -1760,8 +1760,8 @@ spec: name: gateway - image: quay.io/observatorium/opa-openshift:latest name: opa - replaces: loki-operator.v0.3.0 - version: 0.4.0 + replaces: loki-operator.v0.4.0 + version: 0.5.0 webhookdefinitions: - admissionReviewVersions: - v1 diff --git a/operator/bundle/community/manifests/loki.grafana.com_alertingrules.yaml b/operator/bundle/community/manifests/loki.grafana.com_alertingrules.yaml index dbb6b869602ee..3d6fdd8edc5a2 100644 --- a/operator/bundle/community/manifests/loki.grafana.com_alertingrules.yaml +++ b/operator/bundle/community/manifests/loki.grafana.com_alertingrules.yaml @@ -5,11 +5,11 @@ metadata: controller-gen.kubebuilder.io/version: v0.13.0 creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: alertingrules.loki.grafana.com spec: conversion: diff --git a/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml index 90ba4f19c5275..3c0f8321ebe60 100644 --- a/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml +++ b/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml @@ -5,11 +5,11 @@ metadata: controller-gen.kubebuilder.io/version: v0.13.0 creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: lokistacks.loki.grafana.com spec: conversion: diff --git a/operator/bundle/community/manifests/loki.grafana.com_recordingrules.yaml b/operator/bundle/community/manifests/loki.grafana.com_recordingrules.yaml index ec5eb9cc61358..ef3be7886f92c 100644 --- a/operator/bundle/community/manifests/loki.grafana.com_recordingrules.yaml +++ b/operator/bundle/community/manifests/loki.grafana.com_recordingrules.yaml @@ -5,11 +5,11 @@ metadata: controller-gen.kubebuilder.io/version: v0.13.0 creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: 
operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: recordingrules.loki.grafana.com spec: conversion: diff --git a/operator/bundle/community/manifests/loki.grafana.com_rulerconfigs.yaml b/operator/bundle/community/manifests/loki.grafana.com_rulerconfigs.yaml index 689d10a5d6ff4..db77d5805247f 100644 --- a/operator/bundle/community/manifests/loki.grafana.com_rulerconfigs.yaml +++ b/operator/bundle/community/manifests/loki.grafana.com_rulerconfigs.yaml @@ -5,11 +5,11 @@ metadata: controller-gen.kubebuilder.io/version: v0.13.0 creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: rulerconfigs.loki.grafana.com spec: conversion: diff --git a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml index f640812464a43..7568b49a76286 100644 --- a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml @@ -150,7 +150,7 @@ metadata: categories: OpenShift Optional, Logging & Tracing certified: "false" containerImage: quay.io/openshift-logging/loki-operator:0.1.0 - createdAt: "2023-10-17T07:40:31Z" + createdAt: "2023-10-23T07:39:06Z" description: | The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging. ## Prerequisites and Requirements diff --git a/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml index 7a4af5c0f016a..91dcd4100b59e 100644 --- a/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml +++ b/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml @@ -6,7 +6,7 @@ metadata: capabilities: Full Lifecycle categories: OpenShift Optional, Logging & Tracing certified: "false" - containerImage: docker.io/grafana/loki-operator:0.4.0 + containerImage: docker.io/grafana/loki-operator:0.5.0 createdAt: "2022-12-22T13:28:40+00:00" description: The Community Loki Operator provides Kubernetes native deployment and management of Loki and related logging components. 
@@ -2218,5 +2218,5 @@ spec: minKubeVersion: 1.21.1 provider: name: Grafana Loki SIG Operator - replaces: loki-operator.v0.3.0 + replaces: loki-operator.v0.4.0 version: 0.0.0 diff --git a/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml index 2d8f27cd01f11..a41a17cba4608 100644 --- a/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml +++ b/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml @@ -6,7 +6,7 @@ metadata: capabilities: Full Lifecycle categories: OpenShift Optional, Logging & Tracing certified: "false" - containerImage: docker.io/grafana/loki-operator:0.4.0 + containerImage: docker.io/grafana/loki-operator:0.5.0 createdAt: "2022-12-22T13:28:40+00:00" description: The Community Loki Operator provides Kubernetes native deployment and management of Loki and related logging components. @@ -2205,5 +2205,5 @@ spec: minKubeVersion: 1.21.1 provider: name: Grafana Loki SIG Operator - replaces: loki-operator.v0.3.0 + replaces: loki-operator.v0.4.0 version: 0.0.0 diff --git a/operator/config/overlays/community-openshift/kustomization.yaml b/operator/config/overlays/community-openshift/kustomization.yaml index 2981d73c458dc..957281a019b73 100644 --- a/operator/config/overlays/community-openshift/kustomization.yaml +++ b/operator/config/overlays/community-openshift/kustomization.yaml @@ -11,8 +11,8 @@ labels: app.kubernetes.io/managed-by: operator-lifecycle-manager includeSelectors: true - pairs: - app.kubernetes.io/instance: loki-operator-v0.4.0 - app.kubernetes.io/version: "0.4.0" + app.kubernetes.io/instance: loki-operator-v0.5.0 + app.kubernetes.io/version: "0.5.0" configMapGenerator: - files: @@ -27,4 +27,4 @@ patchesStrategicMerge: images: - name: controller newName: docker.io/grafana/loki-operator - newTag: 0.4.0 + newTag: 0.5.0 diff --git a/operator/config/overlays/community/kustomization.yaml b/operator/config/overlays/community/kustomization.yaml index 7aa216f1f7166..144da82d5dc65 100644 --- a/operator/config/overlays/community/kustomization.yaml +++ b/operator/config/overlays/community/kustomization.yaml @@ -22,8 +22,8 @@ labels: app.kubernetes.io/managed-by: operator-lifecycle-manager includeSelectors: true - pairs: - app.kubernetes.io/instance: loki-operator-v0.4.0 - app.kubernetes.io/version: "0.4.0" + app.kubernetes.io/instance: loki-operator-v0.5.0 + app.kubernetes.io/version: "0.5.0" generatorOptions: disableNameSuffixHash: true @@ -43,7 +43,7 @@ patchesStrategicMerge: images: - name: controller newName: docker.io/grafana/loki-operator - newTag: 0.4.0 + newTag: 0.5.0 # the following config is for teaching kustomize how to do var substitution vars:
operator
Prepare community release v0.5.0 (#10996)
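The bump itself is mechanical, but it encodes OLM's upgrade-chain convention: every manifest gets new app.kubernetes.io/instance and app.kubernetes.io/version labels, the ClusterServiceVersion is renamed to loki-operator.v0.5.0, and its spec.replaces pointer moves from v0.3.0 to v0.4.0 so the channel upgrades linearly. A small illustrative sketch of that convention; the type and function names here are hypothetical, not part of the operator codebase.

package main

import "fmt"

// bundleMetadata gathers the fields the release bump rewrites in every
// manifest above: the kustomize labels, the CSV name, and the OLM
// `replaces` pointer to the previous release in the channel.
type bundleMetadata struct {
	InstanceLabel string // app.kubernetes.io/instance
	VersionLabel  string // app.kubernetes.io/version
	CSVName       string // metadata.name of the ClusterServiceVersion
	Replaces      string // spec.replaces: previous CSV in the channel
}

func metadataFor(prev, version string) bundleMetadata {
	return bundleMetadata{
		InstanceLabel: "loki-operator-v" + version,
		VersionLabel:  version,
		CSVName:       "loki-operator.v" + version,
		Replaces:      "loki-operator.v" + prev,
	}
}

func main() {
	fmt.Printf("%+v\n", metadataFor("0.4.0", "0.5.0"))
	// CSVName:loki-operator.v0.5.0 Replaces:loki-operator.v0.4.0 ...
}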
e7fdeb974aff62c5775b9f98ebb2228000b28c8d
2024-05-15 21:26:14
Shantanu Alshi
perf: Improve Detected labels API (#12816)
false
diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index ac6a29e81d43f..640c64eee6b63 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -1397,7 +1397,7 @@ func (i *Ingester) GetDetectedLabels(ctx context.Context, req *logproto.Detected } } - labelMap, err := instance.LabelsWithValues(ctx, *req.Start, matchers...) + labelMap, err := instance.LabelsWithValues(ctx, req.Start, matchers...) if err != nil { return nil, err diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index 378123709a067..b31053a5ded17 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -833,8 +833,8 @@ func TestIngester_GetDetectedLabels(t *testing.T) { require.NoError(t, err) res, err := i.GetDetectedLabels(ctx, &logproto.DetectedLabelsRequest{ - Start: &[]time.Time{time.Now().Add(11 * time.Nanosecond)}[0], - End: nil, + Start: []time.Time{time.Now().Add(11 * time.Nanosecond)}[0], + End: []time.Time{time.Now().Add(12 * time.Nanosecond)}[0], Query: "", }) @@ -893,8 +893,8 @@ func TestIngester_GetDetectedLabelsWithQuery(t *testing.T) { require.NoError(t, err) res, err := i.GetDetectedLabels(ctx, &logproto.DetectedLabelsRequest{ - Start: &[]time.Time{time.Now().Add(11 * time.Nanosecond)}[0], - End: nil, + Start: []time.Time{time.Now().Add(11 * time.Nanosecond)}[0], + End: []time.Time{time.Now().Add(11 * time.Nanosecond)}[0], Query: `{foo="bar"}`, }) diff --git a/pkg/loghttp/labels.go b/pkg/loghttp/labels.go index b2c5a343637be..360c750048a5e 100644 --- a/pkg/loghttp/labels.go +++ b/pkg/loghttp/labels.go @@ -1,6 +1,7 @@ package loghttp import ( + "errors" "net/http" "sort" "strconv" @@ -88,14 +89,20 @@ func ParseLabelQuery(r *http.Request) (*logproto.LabelRequest, error) { } func ParseDetectedLabelsQuery(r *http.Request) (*logproto.DetectedLabelsRequest, error) { + var err error + start, end, err := bounds(r) if err != nil { return nil, err } + if end.Before(start) { + return nil, errors.New("end timestamp must not be before or equal to start time") + } + return &logproto.DetectedLabelsRequest{ - Start: &start, - End: &end, + Start: start, + End: end, Query: query(r), }, nil } diff --git a/pkg/logproto/logproto.pb.go b/pkg/logproto/logproto.pb.go index 5ba5e49c1060b..0d0827f5a094c 100644 --- a/pkg/logproto/logproto.pb.go +++ b/pkg/logproto/logproto.pb.go @@ -2890,9 +2890,9 @@ func (m *DetectedField) GetSketch() []byte { } type DetectedLabelsRequest struct { - Start *time.Time `protobuf:"bytes,1,opt,name=start,proto3,stdtime" json:"start,omitempty"` - End *time.Time `protobuf:"bytes,2,opt,name=end,proto3,stdtime" json:"end,omitempty"` - Query string `protobuf:"bytes,3,opt,name=query,proto3" json:"query,omitempty"` + Start time.Time `protobuf:"bytes,1,opt,name=start,proto3,stdtime" json:"start"` + End time.Time `protobuf:"bytes,2,opt,name=end,proto3,stdtime" json:"end"` + Query string `protobuf:"bytes,3,opt,name=query,proto3" json:"query,omitempty"` } func (m *DetectedLabelsRequest) Reset() { *m = DetectedLabelsRequest{} } @@ -2927,18 +2927,18 @@ func (m *DetectedLabelsRequest) XXX_DiscardUnknown() { var xxx_messageInfo_DetectedLabelsRequest proto.InternalMessageInfo -func (m *DetectedLabelsRequest) GetStart() *time.Time { +func (m *DetectedLabelsRequest) GetStart() time.Time { if m != nil { return m.Start } - return nil + return time.Time{} } -func (m *DetectedLabelsRequest) GetEnd() *time.Time { +func (m *DetectedLabelsRequest) GetEnd() time.Time { if m != nil { return m.End } - return nil + return time.Time{} } func (m 
*DetectedLabelsRequest) GetQuery() string { @@ -2994,6 +2994,7 @@ func (m *DetectedLabelsResponse) GetDetectedLabels() []*DetectedLabel { type DetectedLabel struct { Label string `protobuf:"bytes,1,opt,name=label,proto3" json:"label,omitempty"` Cardinality uint64 `protobuf:"varint,2,opt,name=cardinality,proto3" json:"cardinality,omitempty"` + Sketch []byte `protobuf:"bytes,3,opt,name=sketch,proto3" json:"sketch,omitempty"` } func (m *DetectedLabel) Reset() { *m = DetectedLabel{} } @@ -3042,6 +3043,13 @@ func (m *DetectedLabel) GetCardinality() uint64 { return 0 } +func (m *DetectedLabel) GetSketch() []byte { + if m != nil { + return m.Sketch + } + return nil +} + func init() { proto.RegisterEnum("logproto.Direction", Direction_name, Direction_value) proto.RegisterType((*LabelToValuesResponse)(nil), "logproto.LabelToValuesResponse") @@ -3105,174 +3113,174 @@ func init() { func init() { proto.RegisterFile("pkg/logproto/logproto.proto", fileDescriptor_c28a5f14f1f4c79a) } var fileDescriptor_c28a5f14f1f4c79a = []byte{
[gzipped FileDescriptorProto hex dump elided: the old dump ("// 2670 bytes of a gzipped FileDescriptorProto") and its regenerated replacement ("// 2671 bytes of a gzipped FileDescriptorProto") are machine-generated byte literals encoding the compiled .proto descriptor, updated here only to reflect the non-pointer Start/End fields and the new Sketch field; the raw 0x.. literals carry no human-reviewable content]
0xf1, 0x4d, 0x40, 0x5f, 0x8a, 0x53, + 0xa7, 0x78, 0x8e, 0x1e, 0x06, 0x3b, 0x0c, 0xee, 0xe8, 0x54, 0x78, 0x21, 0x39, 0x92, 0x04, 0x77, + 0x88, 0x58, 0xc2, 0xcf, 0x42, 0x81, 0x04, 0x77, 0x50, 0x0b, 0x20, 0x74, 0xfd, 0x3e, 0xbd, 0x11, + 0x17, 0x61, 0x75, 0x62, 0x40, 0x66, 0xe4, 0x1e, 0xeb, 0x70, 0xd8, 0xbc, 0x91, 0x54, 0xf7, 0x1a, + 0x94, 0x5f, 0x1d, 0x9b, 0xe2, 0x6a, 0x64, 0xc4, 0x25, 0x6b, 0x7d, 0x8d, 0xc4, 0x6d, 0x06, 0x12, + 0x38, 0x3a, 0x01, 0x55, 0xe6, 0xde, 0x1c, 0xd0, 0xab, 0x89, 0x9b, 0x4b, 0x00, 0x7c, 0x95, 0xd7, + 0x8f, 0x37, 0x8c, 0x24, 0x2a, 0x01, 0xa0, 0xc7, 0x60, 0x29, 0xb9, 0xf3, 0xb5, 0x90, 0x6e, 0x7b, + 0xef, 0x08, 0x0d, 0xd7, 0xc9, 0x01, 0x38, 0x3a, 0x09, 0x87, 0x12, 0xd8, 0x96, 0x48, 0x56, 0x6c, + 0x81, 0x9a, 0x05, 0x73, 0xd9, 0x08, 0x76, 0x5f, 0xb8, 0x3d, 0x76, 0x07, 0xe2, 0xf1, 0xd5, 0x89, + 0x01, 0xc1, 0x7f, 0xb6, 0xe0, 0xb0, 0x54, 0x35, 0x73, 0xd9, 0x03, 0x69, 0xf5, 0xbf, 0xb2, 0x00, + 0x99, 0x1c, 0x28, 0xd3, 0xfa, 0x7f, 0xb3, 0x97, 0xc4, 0xb3, 0xa1, 0x9a, 0x28, 0x8b, 0x25, 0x28, + 0x69, 0x07, 0x61, 0x28, 0x75, 0x65, 0xcf, 0x4c, 0x34, 0xbf, 0x65, 0xdd, 0x2d, 0x21, 0x44, 0x7d, + 0x23, 0x07, 0x8a, 0x37, 0x77, 0x19, 0x8d, 0x54, 0xd5, 0x2c, 0xda, 0x05, 0x02, 0x40, 0xe4, 0x17, + 0x3f, 0x8b, 0xfa, 0x4c, 0x58, 0x8d, 0x9d, 0x9c, 0xa5, 0x40, 0x44, 0x0f, 0xf0, 0x6f, 0xf3, 0xb0, + 0x70, 0x23, 0x18, 0x8c, 0x93, 0xc0, 0xf8, 0x20, 0x05, 0x8c, 0x54, 0x29, 0x5f, 0xd4, 0xa5, 0x3c, + 0x02, 0x3b, 0x62, 0x74, 0x24, 0x2c, 0xab, 0x40, 0xc4, 0x18, 0x61, 0xa8, 0x33, 0x37, 0xec, 0x53, + 0x26, 0x0b, 0xa4, 0x66, 0x49, 0x64, 0xae, 0x29, 0x18, 0x5a, 0x85, 0x9a, 0xdb, 0xef, 0x87, 0xb4, + 0xef, 0x32, 0xda, 0xd9, 0x6d, 0x96, 0xc5, 0x61, 0x26, 0x08, 0xbf, 0x01, 0x8b, 0x5a, 0x58, 0x4a, + 0xa5, 0x4f, 0x40, 0xf9, 0x6d, 0x01, 0x99, 0xd2, 0x5a, 0x93, 0xa8, 0xca, 0x8d, 0x69, 0xb4, 0xf4, + 0x4f, 0x08, 0xfa, 0xce, 0xf8, 0x0a, 0x94, 0x24, 0x3a, 0x3a, 0x61, 0x96, 0x39, 0x32, 0xd3, 0xe3, + 0x73, 0x55, 0xb3, 0x60, 0x28, 0x49, 0x42, 0x4a, 0xf1, 0xc2, 0x36, 0x24, 0x84, 0xa8, 0x6f, 0xfc, + 0x2f, 0x0b, 0x8e, 0x6c, 0x50, 0x46, 0xbb, 0x8c, 0xf6, 0x2e, 0x7a, 0x74, 0xd0, 0xfb, 0x52, 0x2b, + 0xf0, 0xb8, 0x8f, 0x56, 0x30, 0xfa, 0x68, 0xdc, 0xef, 0x0c, 0x3c, 0x9f, 0x6e, 0x1a, 0x8d, 0x98, + 0x04, 0xc0, 0x3d, 0xc4, 0x36, 0xbf, 0xb8, 0x5c, 0x96, 0xbf, 0xd9, 0x18, 0x90, 0x58, 0xc3, 0xa5, + 0x44, 0xc3, 0xf8, 0x07, 0x16, 0x1c, 0xcd, 0x72, 0xad, 0x94, 0xd4, 0x86, 0x92, 0xd8, 0x3c, 0xa5, + 0x85, 0x9b, 0xda, 0x41, 0x14, 0x1a, 0x3a, 0x97, 0x3a, 0x5f, 0xfc, 0xd6, 0xd3, 0x69, 0xee, 0x4f, + 0x9c, 0x46, 0x02, 0x35, 0xba, 0x04, 0x06, 0x2e, 0xfe, 0x03, 0xaf, 0xa5, 0x4d, 0x9a, 0x42, 0xdf, + 0xdc, 0xbe, 0x94, 0xef, 0x95, 0x13, 0xf4, 0x35, 0xb0, 0xd9, 0xee, 0x48, 0xb9, 0xdc, 0xce, 0x91, + 0xcf, 0x26, 0xce, 0xe1, 0xd4, 0xb6, 0xeb, 0xbb, 0x23, 0x4a, 0x04, 0x0a, 0x37, 0xcb, 0xae, 0x1b, + 0xf6, 0x3c, 0xdf, 0x1d, 0x78, 0x4c, 0x8a, 0xd1, 0x26, 0x26, 0x08, 0x35, 0xa1, 0x3c, 0x72, 0xc3, + 0x48, 0xe7, 0x4d, 0x55, 0xa2, 0xa7, 0xa2, 0xcd, 0x71, 0x8b, 0xb2, 0xee, 0x8e, 0x74, 0xb3, 0xaa, + 0xcd, 0x21, 0x20, 0xa9, 0x36, 0x87, 0x80, 0xe0, 0x5f, 0x18, 0x86, 0x23, 0xdf, 0xc4, 0x57, 0xce, + 0x70, 0xf0, 0x77, 0x12, 0x2d, 0xeb, 0x2b, 0x2a, 0x2d, 0x3f, 0x0f, 0x8b, 0xbd, 0xd4, 0xca, 0x6c, + 0x6d, 0xcb, 0x16, 0x6e, 0x06, 0x1d, 0x8f, 0x13, 0xd5, 0x09, 0xc8, 0x0c, 0xd5, 0x65, 0xf4, 0x91, + 0x3f, 0xa8, 0x8f, 0x44, 0xea, 0x85, 0xbb, 0x4b, 0xfd, 0xb1, 0x47, 0xa0, 0x1a, 0xff, 0x5c, 0x87, + 0x6a, 0x50, 0xbe, 0xf8, 0x0a, 0x79, 0xfd, 0x02, 0xd9, 0x58, 0xca, 0xa1, 0x3a, 0x54, 0x3a, 0x17, + 0xd6, 0x5f, 0x12, 0x33, 0xeb, 0xec, 0x6f, 0x4a, 0x3a, 0x11, 0x08, 0xd1, 0x37, 0xa1, 
0x28, 0xa3, + 0xfb, 0xd1, 0x84, 0x39, 0xf3, 0x97, 0xac, 0x95, 0x63, 0x07, 0xe0, 0x52, 0x4a, 0x38, 0xf7, 0x84, + 0x85, 0xae, 0x42, 0x4d, 0x00, 0x55, 0xaf, 0xf8, 0x44, 0xb6, 0x65, 0x9b, 0xa2, 0xf4, 0xd0, 0x8c, + 0x55, 0x83, 0xde, 0x79, 0x28, 0x4a, 0x81, 0x1d, 0xcd, 0x24, 0x61, 0x53, 0x6e, 0x93, 0xea, 0x9e, + 0xe3, 0x1c, 0x7a, 0x06, 0xec, 0xeb, 0xae, 0x37, 0x40, 0x46, 0x0e, 0x68, 0xb4, 0x78, 0x57, 0x8e, + 0x66, 0xc1, 0xc6, 0xb1, 0xcf, 0xc5, 0x9d, 0xea, 0x63, 0xd9, 0x76, 0x99, 0xde, 0xde, 0x3c, 0xb8, + 0x10, 0x9f, 0xfc, 0x8a, 0xec, 0xa7, 0xea, 0xa6, 0x0d, 0x7a, 0x28, 0x7d, 0x54, 0xa6, 0xc7, 0xb3, + 0xd2, 0x9a, 0xb5, 0x1c, 0x13, 0xdc, 0x84, 0x9a, 0xd1, 0x30, 0x31, 0xc5, 0x7a, 0xb0, 0xdb, 0x63, + 0x8a, 0x75, 0x4a, 0x97, 0x05, 0xe7, 0xd0, 0x25, 0xa8, 0xf0, 0xcc, 0x59, 0xfc, 0xb0, 0x72, 0x3c, + 0x9b, 0x20, 0x1b, 0x89, 0xd1, 0xca, 0x89, 0xe9, 0x8b, 0x31, 0xa1, 0x6f, 0x43, 0xf5, 0x12, 0x65, + 0x2a, 0xba, 0x1c, 0xcb, 0x86, 0xa7, 0x29, 0x92, 0x4a, 0x87, 0x38, 0x9c, 0x43, 0x6f, 0x88, 0x24, + 0x3e, 0xed, 0x5c, 0x91, 0x33, 0xc3, 0x89, 0xc6, 0xf7, 0x5a, 0x9d, 0x8d, 0x10, 0x53, 0x7e, 0x3d, + 0x45, 0x59, 0xc5, 0x61, 0x67, 0xc6, 0x83, 0x8d, 0x29, 0x3b, 0x77, 0xf9, 0xdb, 0x05, 0xce, 0x9d, + 0x7d, 0x53, 0xff, 0xf3, 0x60, 0xc3, 0x65, 0x2e, 0x7a, 0x05, 0x16, 0x85, 0x2c, 0xe3, 0xbf, 0x26, + 0xa4, 0x6c, 0xfe, 0xc0, 0xff, 0x20, 0x52, 0x36, 0x7f, 0xf0, 0xff, 0x10, 0x38, 0xd7, 0x79, 0xf3, + 0x83, 0x8f, 0x5b, 0xb9, 0x0f, 0x3f, 0x6e, 0xe5, 0x3e, 0xfd, 0xb8, 0x65, 0x7d, 0x7f, 0xaf, 0x65, + 0xfd, 0x7a, 0xaf, 0x65, 0xbd, 0xbf, 0xd7, 0xb2, 0x3e, 0xd8, 0x6b, 0x59, 0xff, 0xd8, 0x6b, 0x59, + 0xff, 0xdc, 0x6b, 0xe5, 0x3e, 0xdd, 0x6b, 0x59, 0xef, 0x7e, 0xd2, 0xca, 0x7d, 0xf0, 0x49, 0x2b, + 0xf7, 0xe1, 0x27, 0xad, 0xdc, 0x77, 0x1f, 0xbd, 0x7b, 0xc1, 0x2a, 0xdd, 0x62, 0x49, 0x7c, 0x3d, + 0xf9, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8a, 0xd4, 0xcd, 0x88, 0x1f, 0x23, 0x00, 0x00, } func (x Direction) String() string { @@ -4996,18 +5004,10 @@ func (this *DetectedLabelsRequest) Equal(that interface{}) bool { } else if this == nil { return false } - if that1.Start == nil { - if this.Start != nil { - return false - } - } else if !this.Start.Equal(*that1.Start) { + if !this.Start.Equal(that1.Start) { return false } - if that1.End == nil { - if this.End != nil { - return false - } - } else if !this.End.Equal(*that1.End) { + if !this.End.Equal(that1.End) { return false } if this.Query != that1.Query { @@ -5069,6 +5069,9 @@ func (this *DetectedLabel) Equal(that interface{}) bool { if this.Cardinality != that1.Cardinality { return false } + if !bytes.Equal(this.Sketch, that1.Sketch) { + return false + } return true } func (this *LabelToValuesResponse) GoString() string { @@ -5774,10 +5777,11 @@ func (this *DetectedLabel) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 7) s = append(s, "&logproto.DetectedLabel{") s = append(s, "Label: "+fmt.Sprintf("%#v", this.Label)+",\n") s = append(s, "Cardinality: "+fmt.Sprintf("%#v", this.Cardinality)+",\n") + s = append(s, "Sketch: "+fmt.Sprintf("%#v", this.Sketch)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -8754,26 +8758,22 @@ func (m *DetectedLabelsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x1a } - if m.End != nil { - n26, err26 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.End):]) - if err26 != nil { - return 0, err26 - } - i -= n26 - i = encodeVarintLogproto(dAtA, i, uint64(n26)) - i-- - dAtA[i] = 0x12 + n26, err26 := 
github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):]) + if err26 != nil { + return 0, err26 } - if m.Start != nil { - n27, err27 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Start):]) - if err27 != nil { - return 0, err27 - } - i -= n27 - i = encodeVarintLogproto(dAtA, i, uint64(n27)) - i-- - dAtA[i] = 0xa + i -= n26 + i = encodeVarintLogproto(dAtA, i, uint64(n26)) + i-- + dAtA[i] = 0x12 + n27, err27 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) + if err27 != nil { + return 0, err27 } + i -= n27 + i = encodeVarintLogproto(dAtA, i, uint64(n27)) + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } @@ -8834,6 +8834,13 @@ func (m *DetectedLabel) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Sketch) > 0 { + i -= len(m.Sketch) + copy(dAtA[i:], m.Sketch) + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Sketch))) + i-- + dAtA[i] = 0x1a + } if m.Cardinality != 0 { i = encodeVarintLogproto(dAtA, i, uint64(m.Cardinality)) i-- @@ -9894,14 +9901,10 @@ func (m *DetectedLabelsRequest) Size() (n int) { } var l int _ = l - if m.Start != nil { - l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Start) - n += 1 + l + sovLogproto(uint64(l)) - } - if m.End != nil { - l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.End) - n += 1 + l + sovLogproto(uint64(l)) - } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start) + n += 1 + l + sovLogproto(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.End) + n += 1 + l + sovLogproto(uint64(l)) l = len(m.Query) if l > 0 { n += 1 + l + sovLogproto(uint64(l)) @@ -9937,6 +9940,10 @@ func (m *DetectedLabel) Size() (n int) { if m.Cardinality != 0 { n += 1 + sovLogproto(uint64(m.Cardinality)) } + l = len(m.Sketch) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } return n } @@ -10642,8 +10649,8 @@ func (this *DetectedLabelsRequest) String() string { return "nil" } s := strings.Join([]string{`&DetectedLabelsRequest{`, - `Start:` + strings.Replace(fmt.Sprintf("%v", this.Start), "Timestamp", "types.Timestamp", 1) + `,`, - `End:` + strings.Replace(fmt.Sprintf("%v", this.End), "Timestamp", "types.Timestamp", 1) + `,`, + `Start:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Start), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `End:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.End), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, `Query:` + fmt.Sprintf("%v", this.Query) + `,`, `}`, }, "") @@ -10671,6 +10678,7 @@ func (this *DetectedLabel) String() string { s := strings.Join([]string{`&DetectedLabel{`, `Label:` + fmt.Sprintf("%v", this.Label) + `,`, `Cardinality:` + fmt.Sprintf("%v", this.Cardinality) + `,`, + `Sketch:` + fmt.Sprintf("%v", this.Sketch) + `,`, `}`, }, "") return s @@ -17650,10 +17658,7 @@ func (m *DetectedLabelsRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Start == nil { - m.Start = new(time.Time) - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Start, dAtA[iNdEx:postIndex]); err != nil { + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Start, dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -17686,10 +17691,7 @@ func (m *DetectedLabelsRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.End == nil { - m.End = 
new(time.Time) - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.End, dAtA[iNdEx:postIndex]); err != nil { + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.End, dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -17916,6 +17918,40 @@ func (m *DetectedLabel) Unmarshal(dAtA []byte) error { break } } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sketch", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sketch = append(m.Sketch[:0], dAtA[iNdEx:postIndex]...) + if m.Sketch == nil { + m.Sketch = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipLogproto(dAtA[iNdEx:]) diff --git a/pkg/logproto/logproto.proto b/pkg/logproto/logproto.proto index b9c3cd987c7aa..189cec5d948ca 100644 --- a/pkg/logproto/logproto.proto +++ b/pkg/logproto/logproto.proto @@ -478,11 +478,11 @@ message DetectedField { message DetectedLabelsRequest { google.protobuf.Timestamp start = 1 [ (gogoproto.stdtime) = true, - (gogoproto.nullable) = true + (gogoproto.nullable) = false ]; google.protobuf.Timestamp end = 2 [ (gogoproto.stdtime) = true, - (gogoproto.nullable) = true + (gogoproto.nullable) = false ]; string query = 3; } @@ -494,4 +494,5 @@ message DetectedLabelsResponse { message DetectedLabel { string label = 1; uint64 cardinality = 2; + bytes sketch = 3 [(gogoproto.jsontag) = "sketch,omitempty"]; } diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index bafbe334cdf75..f2fe80566b461 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -5,7 +5,6 @@ import ( "flag" "fmt" "net/http" - "regexp" "sort" "strconv" "time" @@ -14,6 +13,7 @@ import ( "github.com/dustin/go-humanize" "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/google/uuid" "github.com/grafana/dskit/httpgrpc" "github.com/grafana/dskit/tenant" "github.com/opentracing/opentracing-go" @@ -50,14 +50,10 @@ const ( // before checking if a new entry is available (to avoid spinning the CPU in a continuous // check loop) tailerWaitEntryThrottle = time.Second / 2 - - idPattern = `^(?:(?:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})|(?:(?:\{)?[0-9a-fA-F]{8}(?:-?[0-9a-fA-F]{4}){3}-?[0-9a-fA-F]{12}(?:\})?)|(\d+(?:\.\d+)?))$` ) var ( nowFunc = func() time.Time { return time.Now() } - - idRegexp = regexp.MustCompile(idPattern) ) type interval struct { @@ -921,7 +917,6 @@ func (q *SingleTenantQuerier) DetectedLabels(ctx context.Context, req *logproto. if err != nil { return nil, err } - var detectedLabels []*logproto.DetectedLabel staticLabels := map[string]struct{}{"cluster": {}, "namespace": {}, "instance": {}, "pod": {}} // Enforce the query timeout while querying backends @@ -930,24 +925,26 @@ func (q *SingleTenantQuerier) DetectedLabels(ctx context.Context, req *logproto. 
defer cancel() g, ctx := errgroup.WithContext(ctx) - if *req.Start, *req.End, err = validateQueryTimeRangeLimits(ctx, userID, q.limits, *req.Start, *req.End); err != nil { + if req.Start, req.End, err = validateQueryTimeRangeLimits(ctx, userID, q.limits, req.Start, req.End); err != nil { return nil, err } - ingesterQueryInterval, storeQueryInterval := q.buildQueryIntervals(*req.Start, *req.End) + ingesterQueryInterval, storeQueryInterval := q.buildQueryIntervals(req.Start, req.End) + // Fetch labels from ingesters var ingesterLabels *logproto.LabelToValuesResponse if !q.cfg.QueryStoreOnly && ingesterQueryInterval != nil { g.Go(func() error { var err error splitReq := *req - splitReq.Start = &ingesterQueryInterval.start - splitReq.End = &ingesterQueryInterval.end + splitReq.Start = ingesterQueryInterval.start + splitReq.End = ingesterQueryInterval.end ingesterLabels, err = q.ingesterQuerier.DetectedLabel(ctx, &splitReq) return err }) } + // Fetch labels from the store storeLabelsMap := make(map[string][]string) if !q.cfg.QueryIngesterOnly && storeQueryInterval != nil { var matchers []*labels.Matcher @@ -967,9 +964,7 @@ func (q *SingleTenantQuerier) DetectedLabels(ctx context.Context, req *logproto. if err != nil { return err } - if q.isLabelRelevant(label, values, staticLabels) { - storeLabelsMap[label] = values - } + storeLabelsMap[label] = values } return err }) @@ -985,40 +980,58 @@ func (q *SingleTenantQuerier) DetectedLabels(ctx context.Context, req *logproto. }, nil } - if ingesterLabels != nil { - // append static labels before so they are in sorted order - for l := range staticLabels { - if values, present := ingesterLabels.Labels[l]; present { - detectedLabels = append(detectedLabels, &logproto.DetectedLabel{Label: l, Cardinality: uint64(len(values.Values))}) - } - } + return &logproto.DetectedLabelsResponse{ + DetectedLabels: countLabelsAndCardinality(storeLabelsMap, ingesterLabels, staticLabels), + }, nil +} + +func countLabelsAndCardinality(storeLabelsMap map[string][]string, ingesterLabels *logproto.LabelToValuesResponse, staticLabels map[string]struct{}) []*logproto.DetectedLabel { + dlMap := make(map[string]*parsedFields) - for label, values := range ingesterLabels.Labels { - if q.isLabelRelevant(label, values.Values, staticLabels) { - combinedValues := values.Values - storeValues, storeHasLabel := storeLabelsMap[label] - if storeHasLabel { - combinedValues = append(combinedValues, storeValues...) + if ingesterLabels != nil { + for label, val := range ingesterLabels.Labels { + if _, isStatic := staticLabels[label]; isStatic || !containsAllIDTypes(val.Values) { + _, ok := dlMap[label] + if !ok { + dlMap[label] = newParsedLabels() } - slices.Sort(combinedValues) - uniqueValues := slices.Compact(combinedValues) - // TODO(shantanu): There's a bug here. Unique values can go above 50. 
Will need a bit of refactoring - detectedLabels = append(detectedLabels, &logproto.DetectedLabel{Label: label, Cardinality: uint64(len(uniqueValues))}) - delete(storeLabelsMap, label) + parsedFields := dlMap[label] + for _, v := range val.Values { + parsedFields.Insert(v) + } } } } for label, values := range storeLabelsMap { - slices.Sort(values) - uniqueValues := slices.Compact(values) - detectedLabels = append(detectedLabels, &logproto.DetectedLabel{Label: label, Cardinality: uint64(len(uniqueValues))}) + if _, isStatic := staticLabels[label]; isStatic || !containsAllIDTypes(values) { + _, ok := dlMap[label] + if !ok { + dlMap[label] = newParsedLabels() + } + + parsedFields := dlMap[label] + for _, v := range values { + parsedFields.Insert(v) + } + } } - return &logproto.DetectedLabelsResponse{ - DetectedLabels: detectedLabels, - }, nil + var detectedLabels []*logproto.DetectedLabel + for k, v := range dlMap { + sketch, err := v.sketch.MarshalBinary() + if err != nil { + // TODO: add log here + continue + } + detectedLabels = append(detectedLabels, &logproto.DetectedLabel{ + Label: k, + Cardinality: v.Estimate(), + Sketch: sketch, + }) + } + return detectedLabels } type PatterQuerier interface { @@ -1037,24 +1050,15 @@ func (q *SingleTenantQuerier) Patterns(ctx context.Context, req *logproto.QueryP return res, err } -// isLabelRelevant returns if the label is relevant for logs app. A label is relevant if it is not of any numeric, UUID or GUID type -// It is also not relevant to return if the values are less than 1 or beyond 50. -func (q *SingleTenantQuerier) isLabelRelevant(label string, values []string, staticLabels map[string]struct{}) bool { - cardinality := len(values) - _, isStaticLabel := staticLabels[label] - if isStaticLabel || (cardinality < 2 || cardinality > 50) || - containsAllIDTypes(values) { - return false - } - - return true -} - // containsAllIDTypes filters out all UUID, GUID and numeric types. 
Returns false if even one value is not of the type func containsAllIDTypes(values []string) bool { for _, v := range values { - if !idRegexp.MatchString(v) { - return false + _, err := strconv.ParseFloat(v, 64) + if err != nil { + _, err = uuid.Parse(v) + if err != nil { + return false + } } } @@ -1141,6 +1145,13 @@ func newParsedFields(parser *string) *parsedFields { } } +func newParsedLabels() *parsedFields { + return &parsedFields{ + sketch: hyperloglog.New(), + fieldType: logproto.DetectedFieldString, + } +} + func (p *parsedFields) Insert(value string) { p.sketch.Insert([]byte(value)) } diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 66370c34460be..a787616efeeee 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -1379,81 +1379,6 @@ func (d *mockDeleteGettter) GetAllDeleteRequestsForUser(_ context.Context, userI return d.results, nil } -func TestQuerier_isLabelRelevant(t *testing.T) { - for _, tc := range []struct { - name string - label string - values []string - expected bool - }{ - { - label: "uuidv4 values are not relevant", - values: []string{"751e8ee6-b377-4b2e-b7b5-5508fbe980ef", "6b7e2663-8ecb-42e1-8bdc-0c5de70185b3", "2e1e67ff-be4f-47b8-aee1-5d67ff1ddabf", "c95b2d62-74ed-4ed7-a8a1-eb72fc67946e"}, - expected: false, - }, - { - label: "guid values are not relevant", - values: []string{"57808f62-f117-4a22-84a0-bc3282c7f106", "5076e837-cd8d-4dd7-95ff-fecb087dccf6", "2e2a6554-1744-4399-b89a-88ae79c27096", "d3c31248-ec0c-4bc4-b11c-8fb1cfb42e62"}, - expected: false, - }, - { - label: "integer values are not relevant", - values: []string{"1", "2", "3", "4"}, - expected: false, - }, - { - label: "string values are relevant", - values: []string{"ingester", "querier", "query-frontend", "index-gateway"}, - expected: true, - }, - { - label: "guid with braces are not relevant", - values: []string{"{E9550CF7-58D9-48B9-8845-D9800C651AAC}", "{1617921B-1749-4FF0-A058-31AFB5D98149}", "{C119D92E-A4B9-48A3-A92C-6CA8AA8A6CCC}", "{228AAF1D-2DE7-4909-A4E9-246A7FA9D988}"}, - expected: false, - }, - { - label: "float values are not relevant", - values: []string{"1.2", "2.5", "3.3", "4.1"}, - expected: false, - }, - } { - t.Run(tc.name, func(t *testing.T) { - querier := &SingleTenantQuerier{cfg: mockQuerierConfig()} - assert.Equal(t, tc.expected, querier.isLabelRelevant(tc.label, tc.values, map[string]struct{}{"host": {}, "cluster": {}, "namespace": {}, "instance": {}, "pod": {}})) - }) - - } -} - -func TestQuerier_containsAllIDTypes(t *testing.T) { - for _, tc := range []struct { - name string - values []string - expected bool - }{ - { - name: "all uuidv4 values are valid", - values: []string{"751e8ee6-b377-4b2e-b7b5-5508fbe980ef", "6b7e2663-8ecb-42e1-8bdc-0c5de70185b3", "2e1e67ff-be4f-47b8-aee1-5d67ff1ddabf", "c95b2d62-74ed-4ed7-a8a1-eb72fc67946e"}, - expected: true, - }, - { - name: "one uuidv4 values are invalid", - values: []string{"w", "5076e837-cd8d-4dd7-95ff-fecb087dccf6", "2e2a6554-1744-4399-b89a-88ae79c27096", "d3c31248-ec0c-4bc4-b11c-8fb1cfb42e62"}, - expected: false, - }, - { - name: "all uuidv4 values are invalid", - values: []string{"w", "x", "y", "z"}, - expected: false, - }, - } { - t.Run(tc.name, func(t *testing.T) { - assert.Equal(t, tc.expected, containsAllIDTypes(tc.values)) - }) - - } -} - func TestQuerier_DetectedLabels(t *testing.T) { manyValues := []string{} now := time.Now() @@ -1469,8 +1394,8 @@ func TestQuerier_DetectedLabels(t *testing.T) { conf.IngesterQueryStoreMaxLookback = 0 request := logproto.DetectedLabelsRequest{ 
- Start: &now, - End: &now, + Start: now, + End: now, Query: "", } @@ -1507,8 +1432,11 @@ func TestQuerier_DetectedLabels(t *testing.T) { detectedLabels := resp.DetectedLabels assert.Len(t, detectedLabels, 3) - assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "storeLabel", Cardinality: 2}) - assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "ingesterLabel", Cardinality: 3}) + expectedCardinality := map[string]uint64{"storeLabel": 2, "ingesterLabel": 3, "cluster": 1} + for _, d := range detectedLabels { + card := expectedCardinality[d.Label] + assert.Equal(t, d.Cardinality, card, "Expected cardinality mismatch for: ", d.Label) + } }) t.Run("when both store and ingester responses are present, duplicates are removed", func(t *testing.T) { @@ -1547,9 +1475,12 @@ func TestQuerier_DetectedLabels(t *testing.T) { detectedLabels := resp.DetectedLabels assert.Len(t, detectedLabels, 4) - assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "storeLabel", Cardinality: 2}) - assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "ingesterLabel", Cardinality: 3}) - assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "commonLabel", Cardinality: 5}) + + expectedCardinality := map[string]uint64{"storeLabel": 2, "ingesterLabel": 3, "cluster": 1, "commonLabel": 5} + for _, d := range detectedLabels { + card := expectedCardinality[d.Label] + assert.Equal(t, d.Cardinality, card, "Expected cardinality mismatch for: ", d.Label) + } }) t.Run("returns a response when ingester data is empty", func(t *testing.T) { @@ -1579,8 +1510,11 @@ func TestQuerier_DetectedLabels(t *testing.T) { detectedLabels := resp.DetectedLabels assert.Len(t, detectedLabels, 2) - assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "storeLabel1", Cardinality: 2}) - assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "storeLabel2", Cardinality: 2}) + expectedCardinality := map[string]uint64{"storeLabel1": 2, "storeLabel2": 2} + for _, d := range detectedLabels { + card := expectedCardinality[d.Label] + assert.Equal(t, d.Cardinality, card, "Expected cardinality mismatch for: ", d.Label) + } }) t.Run("returns a response when store data is empty", func(t *testing.T) { @@ -1611,8 +1545,11 @@ func TestQuerier_DetectedLabels(t *testing.T) { detectedLabels := resp.DetectedLabels assert.Len(t, detectedLabels, 2) - assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "cluster", Cardinality: 1}) - assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "ingesterLabel", Cardinality: 3}) + expectedCardinality := map[string]uint64{"cluster": 1, "ingesterLabel": 3} + for _, d := range detectedLabels { + card := expectedCardinality[d.Label] + assert.Equal(t, d.Cardinality, card, "Expected cardinality mismatch for: ", d.Label) + } }) t.Run("id types like uuids, guids and numbers are not relevant detected labels", func(t *testing.T) { @@ -1646,36 +1583,6 @@ func TestQuerier_DetectedLabels(t *testing.T) { assert.Len(t, detectedLabels, 0) }) - t.Run("labels with more than required cardinality are not relevant", func(t *testing.T) { - ingesterResponse := logproto.LabelToValuesResponse{Labels: map[string]*logproto.UniqueLabelValues{ - "less-than-m-values": {Values: []string{"val1"}}, - "more-than-n-values": {Values: manyValues}, - }} - - ingesterClient := newQuerierClientMock() - storeClient := newStoreMock() - - ingesterClient.On("GetDetectedLabels", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
- Return(&ingesterResponse, nil) - storeClient.On("LabelNamesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). - Return([]string{}, nil) - - querier, err := newQuerier( - conf, - mockIngesterClientConfig(), - newIngesterClientMockFactory(ingesterClient), - mockReadRingWithOneActiveIngester(), - &mockDeleteGettter{}, - storeClient, limits) - require.NoError(t, err) - - resp, err := querier.DetectedLabels(ctx, &request) - require.NoError(t, err) - - detectedLabels := resp.DetectedLabels - assert.Len(t, detectedLabels, 0) - }) - t.Run("static labels are always returned no matter their cardinality or value types", func(t *testing.T) { ingesterResponse := logproto.LabelToValuesResponse{Labels: map[string]*logproto.UniqueLabelValues{ "cluster": {Values: []string{"val1"}}, @@ -1691,8 +1598,8 @@ func TestQuerier_DetectedLabels(t *testing.T) { storeClient.On("LabelNamesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). Return([]string{}, nil) request := logproto.DetectedLabelsRequest{ - Start: &now, - End: &now, + Start: now, + End: now, Query: "", } @@ -1710,9 +1617,11 @@ func TestQuerier_DetectedLabels(t *testing.T) { detectedLabels := resp.DetectedLabels assert.Len(t, detectedLabels, 3) - assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "cluster", Cardinality: 1}) - assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "pod", Cardinality: 4}) - assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "namespace", Cardinality: 60}) + expectedCardinality := map[string]uint64{"cluster": 1, "pod": 4, "namespace": 60} + for _, d := range detectedLabels { + card := expectedCardinality[d.Label] + assert.Equal(t, d.Cardinality, card, "Expected cardinality mismatch for: ", d.Label) + } }) t.Run("no panics with ingester response is nil", func(t *testing.T) { @@ -1724,8 +1633,8 @@ func TestQuerier_DetectedLabels(t *testing.T) { storeClient.On("LabelNamesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). Return([]string{}, nil) request := logproto.DetectedLabelsRequest{ - Start: &now, - End: &now, + Start: now, + End: now.Add(2 * time.Hour), Query: "", } @@ -1742,3 +1651,49 @@ func TestQuerier_DetectedLabels(t *testing.T) { require.NoError(t, err) }) } + +func BenchmarkQuerierDetectedLabels(b *testing.B) { + now := time.Now() + + limits, _ := validation.NewOverrides(defaultLimitsTestConfig(), nil) + ctx := user.InjectOrgID(context.Background(), "test") + + conf := mockQuerierConfig() + conf.IngesterQueryStoreMaxLookback = 0 + + request := logproto.DetectedLabelsRequest{ + Start: now, + End: now, + Query: "", + } + ingesterResponse := logproto.LabelToValuesResponse{Labels: map[string]*logproto.UniqueLabelValues{ + "cluster": {Values: []string{"ingester"}}, + "ingesterLabel": {Values: []string{"abc", "def", "ghi", "abc"}}, + }} + + ingesterClient := newQuerierClientMock() + storeClient := newStoreMock() + + ingesterClient.On("GetDetectedLabels", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(&ingesterResponse, nil) + storeClient.On("LabelNamesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return([]string{"storeLabel"}, nil). + On("LabelValuesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, "storeLabel", mock.Anything). 
+ Return([]string{"val1", "val2"}, nil) + + querier, _ := newQuerier( + conf, + mockIngesterClientConfig(), + newIngesterClientMockFactory(ingesterClient), + mockReadRingWithOneActiveIngester(), + &mockDeleteGettter{}, + storeClient, limits) + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _, err := querier.DetectedLabels(ctx, &request) + assert.NoError(b, err) + } +} diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go index 6626d08f87753..01ff8772a4c75 100644 --- a/pkg/querier/queryrange/codec.go +++ b/pkg/querier/queryrange/codec.go @@ -272,19 +272,19 @@ func (r *DetectedLabelsRequest) AsProto() *logproto.DetectedLabelsRequest { } func (r *DetectedLabelsRequest) GetEnd() time.Time { - return *r.End + return r.End } func (r *DetectedLabelsRequest) GetEndTs() time.Time { - return *r.End + return r.End } func (r *DetectedLabelsRequest) GetStart() time.Time { - return *r.Start + return r.Start } func (r *DetectedLabelsRequest) GetStartTs() time.Time { - return *r.Start + return r.Start } func (r *DetectedLabelsRequest) GetStep() int64 { @@ -293,8 +293,8 @@ func (r *DetectedLabelsRequest) GetStep() int64 { func (r *DetectedLabelsRequest) WithStartEnd(s, e time.Time) queryrangebase.Request { clone := *r - clone.Start = &s - clone.End = &e + clone.Start = s + clone.End = e return &clone } @@ -1546,6 +1546,25 @@ func (Codec) MergeResponse(responses ...queryrangebase.Response) (queryrangebase }, Headers: headers, }, nil + case *DetectedLabelsResponse: + resp0 := responses[0].(*DetectedLabelsResponse) + headers := resp0.Headers + var labels []*logproto.DetectedLabel + + for _, r := range responses { + labels = append(labels, r.(*DetectedLabelsResponse).Response.DetectedLabels...) + } + mergedLabels, err := detected.MergeLabels(labels) + if err != nil { + return nil, err + } + + return &DetectedLabelsResponse{ + Response: &logproto.DetectedLabelsResponse{ + DetectedLabels: mergedLabels, + }, + Headers: headers, + }, nil default: return nil, fmt.Errorf("unknown response type (%T) in merging responses", responses[0]) } @@ -1744,6 +1763,10 @@ func ParamsFromRequest(req queryrangebase.Request) (logql.Params, error) { return &paramsDetectedFieldsWrapper{ DetectedFieldsRequest: r, }, nil + case *DetectedLabelsRequest: + return &paramsDetectedLabelsWrapper{ + DetectedLabelsRequest: r, + }, nil default: return nil, fmt.Errorf("expected one of the *LokiRequest, *LokiInstantRequest, *LokiSeriesRequest, *LokiLabelNamesRequest, *DetectedFieldsRequest, got (%T)", r) } @@ -1968,6 +1991,51 @@ func (p paramsDetectedFieldsWrapper) Shards() []string { return make([]string, 0) } +type paramsDetectedLabelsWrapper struct { + *DetectedLabelsRequest +} + +func (p paramsDetectedLabelsWrapper) QueryString() string { + return p.GetQuery() +} + +func (p paramsDetectedLabelsWrapper) GetExpression() syntax.Expr { + expr, err := syntax.ParseExpr(p.GetQuery()) + if err != nil { + return nil + } + + return expr +} + +func (p paramsDetectedLabelsWrapper) Start() time.Time { + return p.GetStartTs() +} + +func (p paramsDetectedLabelsWrapper) End() time.Time { + return p.GetEndTs() +} + +func (p paramsDetectedLabelsWrapper) Step() time.Duration { + return time.Duration(p.GetStep() * 1e6) +} + +func (p paramsDetectedLabelsWrapper) Interval() time.Duration { + return 0 +} + +func (p paramsDetectedLabelsWrapper) Direction() logproto.Direction { + return logproto.BACKWARD +} +func (p paramsDetectedLabelsWrapper) Limit() uint32 { return 0 } +func (p paramsDetectedLabelsWrapper) Shards() 
[]string { + return make([]string, 0) +} + +func (p paramsDetectedLabelsWrapper) GetStoreChunks() *logproto.ChunkRefGroup { + return nil +} + func (p paramsDetectedFieldsWrapper) GetStoreChunks() *logproto.ChunkRefGroup { return nil } @@ -2036,6 +2104,10 @@ func NewEmptyResponse(r queryrangebase.Request) (queryrangebase.Response, error) return &VolumeResponse{ Response: &logproto.VolumeResponse{}, }, nil + case *DetectedLabelsRequest: + return &DetectedLabelsResponse{ + Response: &logproto.DetectedLabelsResponse{}, + }, nil default: return nil, fmt.Errorf("unsupported request type %T", req) } diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go index ff7c4ba4dbff1..61da06929fe14 100644 --- a/pkg/querier/queryrange/roundtrip.go +++ b/pkg/querier/queryrange/roundtrip.go @@ -261,8 +261,8 @@ func NewMiddleware( schema, metrics, indexStatsTripperware, - metricsNamespace) - + metricsNamespace, + codec, limits, iqo) if err != nil { return nil, nil, err } @@ -284,16 +284,17 @@ func NewMiddleware( }), StopperWrapper{resultsCache, statsCache, volumeCache}, nil } -func NewDetectedLabelsTripperware(cfg Config, opts logql.EngineOpts, logger log.Logger, l Limits, schema config.SchemaConfig, metrics *Metrics, mw base.Middleware, namespace string) (base.Middleware, error) { +func NewDetectedLabelsTripperware(cfg Config, opts logql.EngineOpts, logger log.Logger, l Limits, schema config.SchemaConfig, metrics *Metrics, mw base.Middleware, namespace string, merger base.Merger, limits Limits, iqo util.IngesterQueryOptions) (base.Middleware, error) { return base.MiddlewareFunc(func(next base.Handler) base.Handler { statsHandler := mw.Wrap(next) + splitter := newDefaultSplitter(limits, iqo) queryRangeMiddleware := []base.Middleware{ StatsCollectorMiddleware(), NewLimitsMiddleware(l), NewQuerySizeLimiterMiddleware(schema.Configs, opts, logger, l, statsHandler), base.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), - } + SplitByIntervalMiddleware(schema.Configs, limits, merger, splitter, metrics.SplitByMetrics)} // The sharding middleware takes care of enforcing this limit for both shardable and non-shardable queries. // If we are not using sharding, we enforce the limit by adding this middleware after time splitting. @@ -307,11 +308,38 @@ func NewDetectedLabelsTripperware(cfg Config, opts logql.EngineOpts, logger log. base.NewRetryMiddleware(logger, cfg.MaxRetries, metrics.RetryMiddlewareMetrics, namespace), ) } - - return NewLimitedRoundTripper(next, l, schema.Configs, queryRangeMiddleware...) + limitedRt := NewLimitedRoundTripper(next, l, schema.Configs, queryRangeMiddleware...) 
+ return NewDetectedLabelsCardinalityFilter(limitedRt) }), nil } +func NewDetectedLabelsCardinalityFilter(rt queryrangebase.Handler) queryrangebase.Handler { + return queryrangebase.HandlerFunc( + func(ctx context.Context, req queryrangebase.Request) (queryrangebase.Response, error) { + res, err := rt.Do(ctx, req) + if err != nil { + return nil, err + } + + resp, ok := res.(*DetectedLabelsResponse) + if !ok { + return res, nil + } + + var result []*logproto.DetectedLabel + + for _, dl := range resp.Response.DetectedLabels { + if dl.Cardinality > 2 && dl.Cardinality < 50 { + result = append(result, &logproto.DetectedLabel{Label: dl.Label, Cardinality: dl.Cardinality}) + } + } + return &DetectedLabelsResponse{ + Response: &logproto.DetectedLabelsResponse{DetectedLabels: result}, + Headers: resp.Headers, + }, nil + }) +} + type roundTripper struct { logger log.Logger @@ -442,7 +470,16 @@ func (r roundTripper) Do(ctx context.Context, req base.Request) (base.Response, ) return r.detectedFields.Do(ctx, req) - // TODO(shantanu): Add DetectedLabels + case *DetectedLabelsRequest: + level.Info(logger).Log( + "msg", "executing query", + "type", "detected_label", + "end", op.End, + "length", op.End.Sub(op.Start), + "query", op.Query, + "start", op.Start, + ) + return r.detectedLabels.Do(ctx, req) default: return r.next.Do(ctx, req) } diff --git a/pkg/querier/queryrange/split_by_interval.go b/pkg/querier/queryrange/split_by_interval.go index fc71742859798..7dfeb729e149a 100644 --- a/pkg/querier/queryrange/split_by_interval.go +++ b/pkg/querier/queryrange/split_by_interval.go @@ -228,7 +228,7 @@ func (h *splitByInterval) Do(ctx context.Context, r queryrangebase.Request) (que for i, j := 0, len(intervals)-1; i < j; i, j = i+1, j-1 { intervals[i], intervals[j] = intervals[j], intervals[i] } - case *LokiSeriesRequest, *LabelRequest, *logproto.IndexStatsRequest, *logproto.VolumeRequest, *logproto.ShardsRequest: + case *LokiSeriesRequest, *LabelRequest, *logproto.IndexStatsRequest, *logproto.VolumeRequest, *logproto.ShardsRequest, *DetectedLabelsRequest: // Set this to 0 since this is not used in Series/Labels/Index Request. 
limit = 0 default: diff --git a/pkg/querier/queryrange/splitters.go b/pkg/querier/queryrange/splitters.go index 42a81f6defd39..fe3453b2ee717 100644 --- a/pkg/querier/queryrange/splitters.go +++ b/pkg/querier/queryrange/splitters.go @@ -1,10 +1,14 @@ package queryrange import ( + "fmt" "time" + "github.com/go-kit/log/level" "github.com/prometheus/common/model" + util_log "github.com/grafana/loki/v3/pkg/util/log" + "github.com/grafana/loki/v3/pkg/logproto" "github.com/grafana/loki/v3/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/v3/pkg/util" @@ -109,7 +113,19 @@ func (s *defaultSplitter) split(execTime time.Time, tenantIDs []string, req quer path: r.path, }) } + case *DetectedLabelsRequest: + factory = func(start, end time.Time) { + reqs = append(reqs, &DetectedLabelsRequest{ + DetectedLabelsRequest: logproto.DetectedLabelsRequest{ + Start: start, + End: end, + Query: r.Query, + }, + path: r.path, + }) + } default: + level.Warn(util_log.Logger).Log("msg", fmt.Sprintf("splitter: unsupported request type: %T", req)) return nil, nil } diff --git a/pkg/querier/queryrange/stats.go b/pkg/querier/queryrange/stats.go index 384ee7ceed53c..67ca803d52964 100644 --- a/pkg/querier/queryrange/stats.go +++ b/pkg/querier/queryrange/stats.go @@ -179,6 +179,10 @@ func StatsCollectorMiddleware() queryrangebase.Middleware { responseStats = &stats.Result{} // TODO: support stats in query patterns totalEntries = len(r.Response.Series) queryType = queryTypeQueryPatterns + case *DetectedLabelsResponse: + responseStats = &stats.Result{} + totalEntries = 1 + queryType = queryTypeDetectedLabels default: level.Warn(logger).Log("msg", fmt.Sprintf("cannot compute stats, unexpected type: %T", resp)) } diff --git a/pkg/storage/detected/labels.go b/pkg/storage/detected/labels.go new file mode 100644 index 0000000000000..66b721a79b800 --- /dev/null +++ b/pkg/storage/detected/labels.go @@ -0,0 +1,64 @@ +package detected + +import ( + "github.com/axiomhq/hyperloglog" + + "github.com/grafana/loki/v3/pkg/logproto" +) + +type UnmarshaledDetectedLabel struct { + Label string + Sketch *hyperloglog.Sketch +} + +func unmarshalDetectedLabel(l *logproto.DetectedLabel) (*UnmarshaledDetectedLabel, error) { + sketch := hyperloglog.New() + err := sketch.UnmarshalBinary(l.Sketch) + if err != nil { + return nil, err + } + return &UnmarshaledDetectedLabel{ + Label: l.Label, + Sketch: sketch, + }, nil +} + +func (m *UnmarshaledDetectedLabel) Merge(dl *logproto.DetectedLabel) error { + sketch := hyperloglog.New() + err := sketch.UnmarshalBinary(dl.Sketch) + if err != nil { + return err + } + return m.Sketch.Merge(sketch) +} + +func MergeLabels(labels []*logproto.DetectedLabel) (result []*logproto.DetectedLabel, err error) { + mergedLabels := make(map[string]*UnmarshaledDetectedLabel) + for _, label := range labels { + l, ok := mergedLabels[label.Label] + if !ok { + unmarshaledLabel, err := unmarshalDetectedLabel(label) + if err != nil { + return nil, err + } + mergedLabels[label.Label] = unmarshaledLabel + } else { + err := l.Merge(label) + if err != nil { + return nil, err + } + } + } + + for _, label := range mergedLabels { + detectedLabel := &logproto.DetectedLabel{ + Label: label.Label, + Cardinality: label.Sketch.Estimate(), + Sketch: nil, + } + + result = append(result, detectedLabel) + } + + return +}
perf
Improve Detected labels API (#12816)
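The diff in the record above replaces exact per-label value counting with HyperLogLog sketches: each split sub-query inserts its observed values into a sketch, serializes it into DetectedLabel.Sketch, and MergeLabels unions the sketches before estimating cardinality. The following is a minimal, self-contained Go sketch of that merge pattern, assuming the github.com/axiomhq/hyperloglog API the diff itself imports (New, Insert, Merge, Estimate, MarshalBinary/UnmarshalBinary); the helper names buildSketch and mergeSerialized are illustrative, not Loki code.

package main

import (
	"fmt"

	"github.com/axiomhq/hyperloglog"
)

// buildSketch inserts every observed label value into a fresh HLL sketch,
// mirroring how the querier's parsedFields.Insert accumulates values.
func buildSketch(values []string) *hyperloglog.Sketch {
	sk := hyperloglog.New()
	for _, v := range values {
		sk.Insert([]byte(v))
	}
	return sk
}

// mergeSerialized round-trips two sketches through their binary encoding and
// merges them, the same shape as MergeLabels uses for DetectedLabel.Sketch.
func mergeSerialized(a, b []byte) (uint64, error) {
	left := hyperloglog.New()
	if err := left.UnmarshalBinary(a); err != nil {
		return 0, err
	}
	right := hyperloglog.New()
	if err := right.UnmarshalBinary(b); err != nil {
		return 0, err
	}
	if err := left.Merge(right); err != nil {
		return 0, err
	}
	return left.Estimate(), nil
}

func main() {
	// Two overlapping result sets, as two time-split sub-queries might return.
	s1 := buildSketch([]string{"ingester", "querier", "querier"})
	s2 := buildSketch([]string{"querier", "query-frontend"})

	b1, _ := s1.MarshalBinary()
	b2, _ := s2.MarshalBinary()

	card, err := mergeSerialized(b1, b2)
	if err != nil {
		panic(err)
	}
	fmt.Println("estimated cardinality:", card) // ~3; duplicates collapse in the union
}

The trade-off is approximate counts in exchange for bounded-size, cheaply mergeable state, which is presumably why NewDetectedLabelsCardinalityFilter in the diff filters on the estimate (cardinality between 2 and 50) rather than on exact value sets.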
547ca708b9b56e2761bd19ebfcfc9f8571d9af2a
2024-09-20 00:57:20
Quentin Bisson
feat: add missing cluster label to mixins (#12870)
false
diff --git a/production/helm/loki/src/alerts.yaml.tpl b/production/helm/loki/src/alerts.yaml.tpl index 144e263f7061f..0aa37b708b523 100644 --- a/production/helm/loki/src/alerts.yaml.tpl +++ b/production/helm/loki/src/alerts.yaml.tpl @@ -52,7 +52,7 @@ groups: message: | {{`{{`}} $labels.cluster {{`}}`}} {{`{{`}} $labels.namespace {{`}}`}} has had {{`{{`}} printf "%.0f" $value {{`}}`}} compactors running for more than 5m. Only one compactor should run at a time. expr: | - sum(loki_boltdb_shipper_compactor_running) by (namespace, cluster) > 1 + sum(loki_boltdb_shipper_compactor_running) by (cluster, namespace) > 1 for: "5m" labels: severity: "warning" diff --git a/production/loki-mixin-compiled-ssd/alerts.yaml b/production/loki-mixin-compiled-ssd/alerts.yaml index 7c0825d8580d6..09b9b6f543412 100644 --- a/production/loki-mixin-compiled-ssd/alerts.yaml +++ b/production/loki-mixin-compiled-ssd/alerts.yaml @@ -4,12 +4,12 @@ groups: - alert: LokiRequestErrors annotations: description: | - {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}% errors. + {{ $labels.cluster }} {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}% errors. summary: Loki request error rate is high. expr: | - 100 * sum(rate(loki_request_duration_seconds_count{status_code=~"5.."}[2m])) by (namespace, job, route) + 100 * sum(rate(loki_request_duration_seconds_count{status_code=~"5.."}[2m])) by (cluster, namespace, job, route) / - sum(rate(loki_request_duration_seconds_count[2m])) by (namespace, job, route) + sum(rate(loki_request_duration_seconds_count[2m])) by (cluster, namespace, job, route) > 10 for: 15m labels: @@ -17,16 +17,16 @@ groups: - alert: LokiRequestPanics annotations: description: | - {{ $labels.job }} is experiencing {{ printf "%.2f" $value }}% increase of panics. + {{ $labels.cluster }} {{ $labels.job }} is experiencing {{ printf "%.2f" $value }}% increase of panics. summary: Loki requests are causing code panics. expr: | - sum(increase(loki_panic_total[10m])) by (namespace, job) > 0 + sum(increase(loki_panic_total[10m])) by (cluster, namespace, job) > 0 labels: severity: critical - alert: LokiRequestLatency annotations: description: | - {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}s 99th percentile latency. + {{ $labels.cluster }} {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}s 99th percentile latency. summary: Loki request error latency is high. expr: | cluster_namespace_job_route:loki_request_duration_seconds:99quantile{route!~"(?i).*tail.*|/schedulerpb.SchedulerForQuerier/QuerierLoop"} > 1 @@ -39,7 +39,7 @@ groups: {{ $labels.cluster }} {{ $labels.namespace }} has had {{ printf "%.0f" $value }} compactors running for more than 5m. Only one compactor should run at a time. summary: Loki deployment is running more than one compactor. expr: | - sum(loki_boltdb_shipper_compactor_running) by (namespace, cluster) > 1 + sum(loki_boltdb_shipper_compactor_running) by (cluster, namespace) > 1 for: 5m labels: severity: warning diff --git a/production/loki-mixin-compiled/alerts.yaml b/production/loki-mixin-compiled/alerts.yaml index 7c0825d8580d6..09b9b6f543412 100644 --- a/production/loki-mixin-compiled/alerts.yaml +++ b/production/loki-mixin-compiled/alerts.yaml @@ -4,12 +4,12 @@ groups: - alert: LokiRequestErrors annotations: description: | - {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}% errors. 
+ {{ $labels.cluster }} {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}% errors. summary: Loki request error rate is high. expr: | - 100 * sum(rate(loki_request_duration_seconds_count{status_code=~"5.."}[2m])) by (namespace, job, route) + 100 * sum(rate(loki_request_duration_seconds_count{status_code=~"5.."}[2m])) by (cluster, namespace, job, route) / - sum(rate(loki_request_duration_seconds_count[2m])) by (namespace, job, route) + sum(rate(loki_request_duration_seconds_count[2m])) by (cluster, namespace, job, route) > 10 for: 15m labels: @@ -17,16 +17,16 @@ groups: - alert: LokiRequestPanics annotations: description: | - {{ $labels.job }} is experiencing {{ printf "%.2f" $value }}% increase of panics. + {{ $labels.cluster }} {{ $labels.job }} is experiencing {{ printf "%.2f" $value }}% increase of panics. summary: Loki requests are causing code panics. expr: | - sum(increase(loki_panic_total[10m])) by (namespace, job) > 0 + sum(increase(loki_panic_total[10m])) by (cluster, namespace, job) > 0 labels: severity: critical - alert: LokiRequestLatency annotations: description: | - {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}s 99th percentile latency. + {{ $labels.cluster }} {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}s 99th percentile latency. summary: Loki request error latency is high. expr: | cluster_namespace_job_route:loki_request_duration_seconds:99quantile{route!~"(?i).*tail.*|/schedulerpb.SchedulerForQuerier/QuerierLoop"} > 1 @@ -39,7 +39,7 @@ groups: {{ $labels.cluster }} {{ $labels.namespace }} has had {{ printf "%.0f" $value }} compactors running for more than 5m. Only one compactor should run at a time. summary: Loki deployment is running more than one compactor. expr: | - sum(loki_boltdb_shipper_compactor_running) by (namespace, cluster) > 1 + sum(loki_boltdb_shipper_compactor_running) by (cluster, namespace) > 1 for: 5m labels: severity: warning diff --git a/production/loki-mixin/alerts.libsonnet b/production/loki-mixin/alerts.libsonnet index 5bff18e72c6e5..9261dbccecf99 100644 --- a/production/loki-mixin/alerts.libsonnet +++ b/production/loki-mixin/alerts.libsonnet @@ -6,36 +6,36 @@ rules: [ { alert: 'LokiRequestErrors', - expr: ||| - 100 * sum(rate(loki_request_duration_seconds_count{status_code=~"5.."}[2m])) by (namespace, job, route) + expr: std.strReplace(||| + 100 * sum(rate(loki_request_duration_seconds_count{status_code=~"5.."}[2m])) by (cluster, namespace, job, route) / - sum(rate(loki_request_duration_seconds_count[2m])) by (namespace, job, route) + sum(rate(loki_request_duration_seconds_count[2m])) by (cluster, namespace, job, route) > 10 - |||, + |||, 'cluster', $._config.per_cluster_label), 'for': '15m', labels: { severity: 'critical', }, annotations: { summary: 'Loki request error rate is high.', - description: ||| - {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}% errors. - |||, + description: std.strReplace(||| + {{ $labels.cluster }} {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}% errors. 
+ |||, 'cluster', $._config.per_cluster_label), }, }, { alert: 'LokiRequestPanics', expr: ||| - sum(increase(loki_panic_total[10m])) by (namespace, job) > 0 - |||, + sum(increase(loki_panic_total[10m])) by (%s, namespace, job) > 0 + ||| % $._config.per_cluster_label, labels: { severity: 'critical', }, annotations: { summary: 'Loki requests are causing code panics.', - description: ||| - {{ $labels.job }} is experiencing {{ printf "%.2f" $value }}% increase of panics. - |||, + description: std.strReplace(||| + {{ $labels.cluster }} {{ $labels.job }} is experiencing {{ printf "%.2f" $value }}% increase of panics. + |||, 'cluster', $._config.per_cluster_label), }, }, { @@ -49,15 +49,15 @@ }, annotations: { summary: 'Loki request error latency is high.', - description: ||| - {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}s 99th percentile latency. - |||, + description: std.strReplace(||| + {{ $labels.cluster }} {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}s 99th percentile latency. + |||, 'cluster', $._config.per_cluster_label), }, }, { alert: 'LokiTooManyCompactorsRunning', expr: ||| - sum(loki_boltdb_shipper_compactor_running) by (namespace, %s) > 1 + sum(loki_boltdb_shipper_compactor_running) by (%s, namespace) > 1 ||| % $._config.per_cluster_label, 'for': '5m', labels: {
feat
add missing cluster label to mixins (#12870)
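The mixins commit above threads a configurable per-cluster label through every alert by rewriting the literal "cluster" token at render time (std.strReplace against $._config.per_cluster_label in the jsonnet). As a rough Go illustration of that same render-time substitution idea (hypothetical helper and label name, not Loki code):

package main

import (
	"fmt"
	"strings"
)

// renderAlertExpr swaps the generic "cluster" token for a deployment-specific
// label name, the substitution std.strReplace performs in the jsonnet mixin.
func renderAlertExpr(template, perClusterLabel string) string {
	return strings.ReplaceAll(template, "cluster", perClusterLabel)
}

func main() {
	expr := `sum(loki_boltdb_shipper_compactor_running) by (cluster, namespace) > 1`
	fmt.Println(renderAlertExpr(expr, "k8s_cluster"))
	// sum(loki_boltdb_shipper_compactor_running) by (k8s_cluster, namespace) > 1
}

Parameterizing the label name keeps the compiled alerts portable across deployments that call the cluster dimension something else, while the compiled yaml in this commit simply bakes in the default "cluster".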
7db62bfbc2e062d513e34afa63cbb2782e669624
2024-12-05 23:10:16
renovate[bot]
chore(deps): update grafana/grafana docker tag to v11.3.2 (#15268)
false
diff --git a/production/docker/docker-compose.yaml b/production/docker/docker-compose.yaml index eb0d5a7b0c1ca..b575d22b98e08 100644 --- a/production/docker/docker-compose.yaml +++ b/production/docker/docker-compose.yaml @@ -24,7 +24,7 @@ services: - loki grafana: - image: grafana/grafana:11.3.1 + image: grafana/grafana:11.3.2 ports: - "3000:3000" environment:
chore
update grafana/grafana docker tag to v11.3.2 (#15268)
f571282ffc1fb29220218ee92f2ba2d3a6cbb22c
2022-09-29 18:47:48
Dylan Guedes
tests: Improve several tests behavior (#7283)
false
diff --git a/clients/pkg/promtail/targets/file/filetarget_test.go b/clients/pkg/promtail/targets/file/filetarget_test.go index 7b55c977c17d6..30e9272a5e753 100644 --- a/clients/pkg/promtail/targets/file/filetarget_test.go +++ b/clients/pkg/promtail/targets/file/filetarget_test.go @@ -295,6 +295,10 @@ func TestFileTargetPathExclusion(t *testing.T) { return receivedStopWatch.Load() == 1 }, time.Second*10, time.Millisecond*1, "Expected received stopping watch event to be 1 at this point in the test...") + require.NoError(t, os.RemoveAll(logDir2)) + require.NoError(t, os.RemoveAll(logDir3)) + require.NoError(t, target.sync()) + target.Stop() ps.Stop() } diff --git a/clients/pkg/promtail/targets/gcplog/pull_target.go b/clients/pkg/promtail/targets/gcplog/pull_target.go index fa6ea17e7884e..490b0a2100e75 100644 --- a/clients/pkg/promtail/targets/gcplog/pull_target.go +++ b/clients/pkg/promtail/targets/gcplog/pull_target.go @@ -147,5 +147,6 @@ func (t *pullTarget) Stop() error { t.cancel() t.wg.Wait() t.handler.Stop() + t.ps.Close() return nil } diff --git a/clients/pkg/promtail/targets/gcplog/pull_target_test.go b/clients/pkg/promtail/targets/gcplog/pull_target_test.go index 8f840e0fbea58..a85eecbd77d48 100644 --- a/clients/pkg/promtail/targets/gcplog/pull_target_test.go +++ b/clients/pkg/promtail/targets/gcplog/pull_target_test.go @@ -138,7 +138,7 @@ func testPullTarget(ctx context.Context, t *testing.T) (*pullTarget, *fake.Clien handler: handler, relabelConfig: nil, config: testConfig, - jobName: "job-test-gcplogtarget", + jobName: t.Name() + "job-test-gcplogtarget", ctx: ctx, cancel: cancel, ps: mockpubsubClient, @@ -150,6 +150,7 @@ func testPullTarget(ctx context.Context, t *testing.T) (*pullTarget, *fake.Clien cancel() conn.Close() mockSvr.Close() + mockpubsubClient.Close() } } diff --git a/clients/pkg/promtail/targets/gcplog/push_target.go b/clients/pkg/promtail/targets/gcplog/push_target.go index 0a91938038346..7c94c605312bb 100644 --- a/clients/pkg/promtail/targets/gcplog/push_target.go +++ b/clients/pkg/promtail/targets/gcplog/push_target.go @@ -159,6 +159,7 @@ func (h *pushTarget) Details() interface{} { func (h *pushTarget) Stop() error { level.Info(h.logger).Log("msg", "stopping gcp push target", "job", h.jobName) + h.server.Stop() h.server.Shutdown() h.handler.Stop() return nil diff --git a/clients/pkg/promtail/targets/gcplog/push_target_test.go b/clients/pkg/promtail/targets/gcplog/push_target_test.go index fa61ece4925e8..f9e4735d22fc9 100644 --- a/clients/pkg/promtail/targets/gcplog/push_target_test.go +++ b/clients/pkg/promtail/targets/gcplog/push_target_test.go @@ -153,6 +153,7 @@ func TestPushTarget(t *testing.T) { }, } for name, tc := range cases { + outerName := t.Name() t.Run(name, func(t *testing.T) { // Create fake promtail client eh := fake.New(func() {}) @@ -169,7 +170,7 @@ func TestPushTarget(t *testing.T) { prometheus.DefaultRegisterer = prometheus.NewRegistry() metrics := gcplog.NewMetrics(prometheus.DefaultRegisterer) - pt, err := gcplog.NewGCPLogTarget(metrics, logger, eh, tc.args.RelabelConfigs, "test_job", config) + pt, err := gcplog.NewGCPLogTarget(metrics, logger, eh, tc.args.RelabelConfigs, outerName+"_test_job", config) require.NoError(t, err) defer func() { _ = pt.Stop() @@ -231,7 +232,7 @@ func TestPushTarget_UseIncomingTimestamp(t *testing.T) { prometheus.DefaultRegisterer = prometheus.NewRegistry() metrics := gcplog.NewMetrics(prometheus.DefaultRegisterer) - pt, err := gcplog.NewGCPLogTarget(metrics, logger, eh, nil, "test_job", config) + pt, err := 
gcplog.NewGCPLogTarget(metrics, logger, eh, nil, t.Name()+"_test_job", config) require.NoError(t, err) defer func() { _ = pt.Stop() @@ -284,7 +285,7 @@ func TestPushTarget_UseTenantIDHeaderIfPresent(t *testing.T) { Action: relabel.Replace, }, } - pt, err := gcplog.NewGCPLogTarget(metrics, logger, eh, tenantIDRelabelConfig, "test_job", config) + pt, err := gcplog.NewGCPLogTarget(metrics, logger, eh, tenantIDRelabelConfig, t.Name()+"_test_job", config) require.NoError(t, err) defer func() { _ = pt.Stop() @@ -327,7 +328,7 @@ func TestPushTarget_ErroneousPayloadsAreRejected(t *testing.T) { prometheus.DefaultRegisterer = prometheus.NewRegistry() metrics := gcplog.NewMetrics(prometheus.DefaultRegisterer) - pt, err := gcplog.NewGCPLogTarget(metrics, logger, eh, nil, "test_job", config) + pt, err := gcplog.NewGCPLogTarget(metrics, logger, eh, nil, t.Name()+"_test_job", config) require.NoError(t, err) defer func() { _ = pt.Stop() @@ -363,6 +364,7 @@ func TestPushTarget_ErroneousPayloadsAreRejected(t *testing.T) { req, err := makeGCPPushRequest(fmt.Sprintf("http://%s:%d", localhost, port), testPayload) require.NoError(t, err, "expected request to be created successfully") res, err := http.DefaultClient.Do(req) + res.Request.Body.Close() require.NoError(t, err) require.Equal(t, http.StatusBadRequest, res.StatusCode, "expected bad request status code") }) diff --git a/clients/pkg/promtail/targets/gcplog/target_test.go b/clients/pkg/promtail/targets/gcplog/target_test.go index c3b2fab01c49a..a1482a6d7562f 100644 --- a/clients/pkg/promtail/targets/gcplog/target_test.go +++ b/clients/pkg/promtail/targets/gcplog/target_test.go @@ -47,7 +47,7 @@ func TestNewGCPLogTarget(t *testing.T) { logger: logger, handler: eh, relabel: nil, - jobName: "test_job", + jobName: "test_job_defaults_to_pull_target", config: &scrapeconfig.GcplogTargetConfig{ SubscriptionType: "", }, @@ -62,7 +62,7 @@ func TestNewGCPLogTarget(t *testing.T) { logger: logger, handler: eh, relabel: nil, - jobName: "test_job", + jobName: "test_job_pull_subscriptiontype_creates_new", config: &scrapeconfig.GcplogTargetConfig{ SubscriptionType: "pull", }, @@ -77,7 +77,7 @@ func TestNewGCPLogTarget(t *testing.T) { logger: logger, handler: eh, relabel: nil, - jobName: "test_job", + jobName: "test_job_push_subscription_creates_new", config: &scrapeconfig.GcplogTargetConfig{ SubscriptionType: "push", }, @@ -92,7 +92,7 @@ func TestNewGCPLogTarget(t *testing.T) { logger: logger, handler: eh, relabel: nil, - jobName: "test_job", + jobName: "test_job_unknown_substype_fails_to_create_target", config: &scrapeconfig.GcplogTargetConfig{ SubscriptionType: "magic", },
tests
Improve several tests behavior (#7283)
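A recurring pattern in the diff above is deriving per-test job names from `t.Name()` so tests that share global state (a `prometheus.Registerer`, pub/sub topics) stop colliding. A minimal sketch of the idea, assuming a hypothetical `uniqueJobName` helper and metric name:

```go
package gcplog_test

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// uniqueJobName embeds t.Name() in the job label so registrations made by
// different tests (or subtests) stay distinct.
func uniqueJobName(t *testing.T, suffix string) string {
	return t.Name() + "_" + suffix
}

func TestNoMetricCollision(t *testing.T) {
	// A fresh registry per test is the other half of the fix seen in the
	// diff (prometheus.DefaultRegisterer = prometheus.NewRegistry()).
	reg := prometheus.NewRegistry()
	entries := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
		Name: "test_entries_total",
		Help: "Entries observed by a fake target.",
	}, []string{"job"})
	entries.WithLabelValues(uniqueJobName(t, "test_job")).Inc()
}
```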
7c63dbf7b0e061f5a8e2d8fe3a38e2323be0da72
2018-12-12 20:26:34
Daniel Lee
readme: remove unnecessary flag in docker run
false
diff --git a/README.md b/README.md
index a094e2922ea54..b312961fb4adc 100644
--- a/README.md
+++ b/README.md
@@ -48,7 +48,7 @@ To test locally using `docker run`:
 3. Then start the Promtail agent. The default config polls the contents of your `/var/log` directory.

    ```bash
-   docker run --name promtail --network=loki --volume "$PWD/docs:/etc/promtail" --volume "/var/log:/var/log" --network="container:loki" grafana/promtail:master -config.file=/etc/promtail/promtail-local-config.yaml
+   docker run --name promtail --network=loki --volume "$PWD/docs:/etc/promtail" --volume "/var/log:/var/log" grafana/promtail:master -config.file=/etc/promtail/promtail-local-config.yaml
    ```

 4. If you also want to run Grafana in docker:
readme
remove unnecessary flag in docker run
3fa6cc9fded401be1ed1febbea08612042fb243e
2022-04-21 17:21:10
Danny Kopping
querier: prevent unnecessary calls to ingesters (#5984)
false
diff --git a/CHANGELOG.md b/CHANGELOG.md index a1799c097baf2..ae18a678b012e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,5 @@ ## Main +* [5984](https://github.com/grafana/loki/pull/5984) **dannykopping** and **salvacorts**: Querier: prevent unnecessary calls to ingesters. * [5899](https://github.com/grafana/loki/pull/5899) **simonswine**: Update go image to 1.17.9. * [5888](https://github.com/grafana/loki/pull/5888) **Papawy** Fix common config net interface name overwritten by ring common config * [5799](https://github.com/grafana/loki/pull/5799) **cyriltovena** Fix deduping issues when multiple entries with the same timestamp exist. diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 6cf307120ac18..9207381a30ca8 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -237,19 +237,42 @@ func (q *SingleTenantQuerier) deletesForUser(ctx context.Context, startT, endT t return deletes, nil } +func (q *SingleTenantQuerier) isWithinIngesterMaxLookbackPeriod(maxLookback time.Duration, queryEnd time.Time) bool { + // if no lookback limits are configured, always consider this within the range of the lookback period + if maxLookback <= 0 { + return true + } + + // find the first instance that we would want to query the ingester from... + ingesterOldestStartTime := time.Now().Add(-maxLookback) + + // ...and if the query range ends before that, don't query the ingester + return queryEnd.After(ingesterOldestStartTime) +} + +func (q *SingleTenantQuerier) calculateIngesterMaxLookbackPeriod() time.Duration { + mlb := time.Duration(-1) + if q.cfg.IngesterQueryStoreMaxLookback != 0 { + // IngesterQueryStoreMaxLookback takes the precedence over QueryIngestersWithin while also limiting the store query range. + mlb = q.cfg.IngesterQueryStoreMaxLookback + } else if q.cfg.QueryIngestersWithin != 0 { + mlb = q.cfg.QueryIngestersWithin + } + + return mlb +} + func (q *SingleTenantQuerier) buildQueryIntervals(queryStart, queryEnd time.Time) (*interval, *interval) { // limitQueryInterval is a flag for whether store queries should be limited to start time of ingester queries. limitQueryInterval := false // ingesterMLB having -1 means query ingester for whole duration. - ingesterMLB := time.Duration(-1) if q.cfg.IngesterQueryStoreMaxLookback != 0 { // IngesterQueryStoreMaxLookback takes the precedence over QueryIngestersWithin while also limiting the store query range. limitQueryInterval = true - ingesterMLB = q.cfg.IngesterQueryStoreMaxLookback - } else if q.cfg.QueryIngestersWithin != 0 { - ingesterMLB = q.cfg.QueryIngestersWithin } + ingesterMLB := q.calculateIngesterMaxLookbackPeriod() + // query ingester for whole duration. if ingesterMLB == -1 { i := &interval{ @@ -266,15 +289,18 @@ func (q *SingleTenantQuerier) buildQueryIntervals(queryStart, queryEnd time.Time return i, i } + ingesterQueryWithinRange := q.isWithinIngesterMaxLookbackPeriod(ingesterMLB, queryEnd) + // see if there is an overlap between ingester query interval and actual query interval, if not just do the store query. - ingesterOldestStartTime := time.Now().Add(-ingesterMLB) - if queryEnd.Before(ingesterOldestStartTime) { + if !ingesterQueryWithinRange { return nil, &interval{ start: queryStart, end: queryEnd, } } + ingesterOldestStartTime := time.Now().Add(-ingesterMLB) + // if there is an overlap and we are not limiting the query interval then do both store and ingester query for whole query interval. 
if !limitQueryInterval { i := &interval{ @@ -327,17 +353,25 @@ func (q *SingleTenantQuerier) Label(ctx context.Context, req *logproto.LabelRequ ctx, cancel := context.WithDeadline(ctx, time.Now().Add(q.cfg.QueryTimeout)) defer cancel() + ingesterQueryInterval, storeQueryInterval := q.buildQueryIntervals(*req.Start, *req.End) + var ingesterValues [][]string - if !q.cfg.QueryStoreOnly { - ingesterValues, err = q.ingesterQuerier.Label(ctx, req) + if !q.cfg.QueryStoreOnly && ingesterQueryInterval != nil { + timeFramedReq := *req + timeFramedReq.Start = &ingesterQueryInterval.start + timeFramedReq.End = &ingesterQueryInterval.end + + ingesterValues, err = q.ingesterQuerier.Label(ctx, &timeFramedReq) if err != nil { return nil, err } } var storeValues []string - if !q.cfg.QueryIngesterOnly { - from, through := model.TimeFromUnixNano(req.Start.UnixNano()), model.TimeFromUnixNano(req.End.UnixNano()) + if !q.cfg.QueryIngesterOnly && storeQueryInterval != nil { + from := model.TimeFromUnixNano(storeQueryInterval.start.UnixNano()) + through := model.TimeFromUnixNano(storeQueryInterval.end.UnixNano()) + if req.Values { storeValues, err = q.store.LabelValuesForMetricName(ctx, userID, from, through, "logs", req.Name) if err != nil { @@ -446,13 +480,17 @@ func (q *SingleTenantQuerier) awaitSeries(ctx context.Context, req *logproto.Ser series := make(chan [][]logproto.SeriesIdentifier, 2) errs := make(chan error, 2) + ingesterQueryInterval, storeQueryInterval := q.buildQueryIntervals(req.Start, req.End) + // fetch series from ingesters and store concurrently - if q.cfg.QueryStoreOnly { - series <- [][]logproto.SeriesIdentifier{} - } else { + if !q.cfg.QueryStoreOnly && ingesterQueryInterval != nil { + timeFramedReq := *req + timeFramedReq.Start = ingesterQueryInterval.start + timeFramedReq.End = ingesterQueryInterval.end + go func() { // fetch series identifiers from ingesters - resps, err := q.ingesterQuerier.Series(ctx, req) + resps, err := q.ingesterQuerier.Series(ctx, &timeFramedReq) if err != nil { errs <- err return @@ -460,17 +498,24 @@ func (q *SingleTenantQuerier) awaitSeries(ctx context.Context, req *logproto.Ser series <- resps }() + } else { + // If only queriying the store or the query range does not overlap with the ingester max lookback period (defined by `query_ingesters_within`) + // then don't call out to the ingesters, and send an empty result back to the channel + series <- [][]logproto.SeriesIdentifier{} } - if !q.cfg.QueryIngesterOnly { + if !q.cfg.QueryIngesterOnly && storeQueryInterval != nil { go func() { - storeValues, err := q.seriesForMatchers(ctx, req.Start, req.End, req.GetGroups(), req.Shards) + storeValues, err := q.seriesForMatchers(ctx, storeQueryInterval.start, storeQueryInterval.end, req.GetGroups(), req.Shards) if err != nil { errs <- err return } series <- [][]logproto.SeriesIdentifier{storeValues} }() + } else { + // If we are not querying the store, send an empty result back to the channel + series <- [][]logproto.SeriesIdentifier{} } var sets [][]logproto.SeriesIdentifier diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index c9a080ea032ca..3411c5f9b7975 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -686,6 +686,328 @@ func TestQuerier_buildQueryIntervals(t *testing.T) { } } +func TestQuerier_calculateIngesterMaxLookbackPeriod(t *testing.T) { + for _, tc := range []struct { + name string + ingesterQueryStoreMaxLookback time.Duration + queryIngestersWithin time.Duration + expected time.Duration + }{ + { + name: 
"defaults are set; infinite lookback period if no values are set", + expected: -1, + }, + { + name: "only setting ingesterQueryStoreMaxLookback", + ingesterQueryStoreMaxLookback: time.Hour, + expected: time.Hour, + }, + { + name: "setting both ingesterQueryStoreMaxLookback and queryIngestersWithin; ingesterQueryStoreMaxLookback takes precedence", + ingesterQueryStoreMaxLookback: time.Hour, + queryIngestersWithin: time.Minute, + expected: time.Hour, + }, + { + name: "only setting queryIngestersWithin", + queryIngestersWithin: time.Minute, + expected: time.Minute, + }, + } { + t.Run(tc.name, func(t *testing.T) { + querier := SingleTenantQuerier{cfg: Config{ + IngesterQueryStoreMaxLookback: tc.ingesterQueryStoreMaxLookback, + QueryIngestersWithin: tc.queryIngestersWithin, + }} + + assert.Equal(t, tc.expected, querier.calculateIngesterMaxLookbackPeriod()) + }) + } +} + +func TestQuerier_isWithinIngesterMaxLookbackPeriod(t *testing.T) { + overlappingQuery := interval{ + start: time.Now().Add(-6 * time.Hour), + end: time.Now(), + } + + nonOverlappingQuery := interval{ + start: time.Now().Add(-24 * time.Hour), + end: time.Now().Add(-12 * time.Hour), + } + + for _, tc := range []struct { + name string + ingesterQueryStoreMaxLookback time.Duration + queryIngestersWithin time.Duration + overlappingWithinRange bool + nonOverlappingWithinRange bool + }{ + { + name: "default values, query ingesters and store for whole duration", + overlappingWithinRange: true, + nonOverlappingWithinRange: true, + }, + { + name: "ingesterQueryStoreMaxLookback set to 1h", + ingesterQueryStoreMaxLookback: time.Hour, + overlappingWithinRange: true, + nonOverlappingWithinRange: false, + }, + { + name: "ingesterQueryStoreMaxLookback set to 10h", + ingesterQueryStoreMaxLookback: 10 * time.Hour, + overlappingWithinRange: true, + nonOverlappingWithinRange: false, + }, + { + name: "ingesterQueryStoreMaxLookback set to 1h and queryIngestersWithin set to 16h, ingesterQueryStoreMaxLookback takes precedence", + ingesterQueryStoreMaxLookback: time.Hour, + queryIngestersWithin: 16 * time.Hour, // if used, this would put the nonOverlapping query in range + overlappingWithinRange: true, + nonOverlappingWithinRange: false, + }, + { + name: "ingesterQueryStoreMaxLookback set to -1, query just ingesters", + ingesterQueryStoreMaxLookback: -1, + overlappingWithinRange: true, + nonOverlappingWithinRange: true, + }, + { + name: "queryIngestersWithin set to 1h", + queryIngestersWithin: time.Hour, + overlappingWithinRange: true, + nonOverlappingWithinRange: false, + }, + { + name: "queryIngestersWithin set to 10h", + queryIngestersWithin: 10 * time.Hour, + overlappingWithinRange: true, + nonOverlappingWithinRange: false, + }, + } { + t.Run(tc.name, func(t *testing.T) { + querier := SingleTenantQuerier{cfg: Config{ + IngesterQueryStoreMaxLookback: tc.ingesterQueryStoreMaxLookback, + QueryIngestersWithin: tc.queryIngestersWithin, + }} + + lookbackPeriod := querier.calculateIngesterMaxLookbackPeriod() + assert.Equal(t, tc.overlappingWithinRange, querier.isWithinIngesterMaxLookbackPeriod(lookbackPeriod, overlappingQuery.end)) + assert.Equal(t, tc.nonOverlappingWithinRange, querier.isWithinIngesterMaxLookbackPeriod(lookbackPeriod, nonOverlappingQuery.end)) + }) + } +} + +func TestQuerier_RequestingIngesters(t *testing.T) { + ctx := user.InjectOrgID(context.Background(), "test") + + requestMapping := map[string]struct { + ingesterMethod string + storeMethod string + }{ + "SelectLogs": { + ingesterMethod: "Query", + storeMethod: "SelectLogs", + }, + 
"SelectSamples": { + ingesterMethod: "QuerySample", + storeMethod: "SelectSamples", + }, + "LabelValuesForMetricName": { + ingesterMethod: "Label", + storeMethod: "LabelValuesForMetricName", + }, + "LabelNamesForMetricName": { + ingesterMethod: "Label", + storeMethod: "LabelNamesForMetricName", + }, + "Series": { + ingesterMethod: "Series", + storeMethod: "Series", + }, + } + + tests := []struct { + desc string + start, end time.Time + setIngesterQueryStoreMaxLookback bool + expectedCallsStore int + expectedCallsIngesters int + }{ + { + desc: "Data in storage and ingesters", + start: time.Now().Add(-time.Hour * 2), + end: time.Now(), + expectedCallsStore: 1, + expectedCallsIngesters: 1, + }, + { + desc: "Data in ingesters (IngesterQueryStoreMaxLookback not set)", + start: time.Now().Add(-time.Minute * 15), + end: time.Now(), + expectedCallsStore: 1, + expectedCallsIngesters: 1, + }, + { + desc: "Data only in storage", + start: time.Now().Add(-time.Hour * 2), + end: time.Now().Add(-time.Hour * 1), + expectedCallsStore: 1, + expectedCallsIngesters: 0, + }, + { + desc: "Data in ingesters (IngesterQueryStoreMaxLookback set)", + start: time.Now().Add(-time.Minute * 15), + end: time.Now(), + setIngesterQueryStoreMaxLookback: true, + expectedCallsStore: 0, + expectedCallsIngesters: 1, + }, + } + + requests := []struct { + name string + do func(querier *SingleTenantQuerier, start, end time.Time) error + }{ + { + name: "SelectLogs", + do: func(querier *SingleTenantQuerier, start, end time.Time) error { + _, err := querier.SelectLogs(ctx, logql.SelectLogParams{ + QueryRequest: &logproto.QueryRequest{ + Selector: "{type=\"test\", fail=\"yes\"} |= \"foo\"", + Limit: 10, + Start: start, + End: end, + Direction: logproto.FORWARD, + }, + }) + + return err + }, + }, + { + name: "SelectSamples", + do: func(querier *SingleTenantQuerier, start, end time.Time) error { + _, err := querier.SelectSamples(ctx, logql.SelectSampleParams{ + SampleQueryRequest: &logproto.SampleQueryRequest{ + Selector: "count_over_time({foo=\"bar\"}[5m])", + Start: start, + End: end, + }, + }) + return err + }, + }, + { + name: "LabelValuesForMetricName", + do: func(querier *SingleTenantQuerier, start, end time.Time) error { + _, err := querier.Label(ctx, &logproto.LabelRequest{ + Name: "type", + Values: true, + Start: &start, + End: &end, + }) + return err + }, + }, + { + name: "LabelNamesForMetricName", + do: func(querier *SingleTenantQuerier, start, end time.Time) error { + _, err := querier.Label(ctx, &logproto.LabelRequest{ + Values: false, + Start: &start, + End: &end, + }) + return err + }, + }, + { + name: "Series", + do: func(querier *SingleTenantQuerier, start, end time.Time) error { + _, err := querier.Series(ctx, &logproto.SeriesRequest{ + Start: start, + End: end, + }) + return err + }, + }, + } + + for _, tc := range tests { + t.Run(tc.desc, func(t *testing.T) { + + conf := mockQuerierConfig() + conf.QueryIngestersWithin = time.Minute * 30 + if tc.setIngesterQueryStoreMaxLookback { + conf.IngesterQueryStoreMaxLookback = conf.QueryIngestersWithin + } + + limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) + require.NoError(t, err) + + for _, request := range requests { + t.Run(request.name, func(t *testing.T) { + ingesterClient, store, querier, err := setupIngesterQuerierMocks(conf, limits) + require.NoError(t, err) + + err = request.do(querier, tc.start, tc.end) + require.NoError(t, err) + + callsIngesters := ingesterClient.GetMockedCallsByMethod(requestMapping[request.name].ingesterMethod) + 
assert.Equal(t, tc.expectedCallsIngesters, len(callsIngesters)) + + callsStore := store.GetMockedCallsByMethod(requestMapping[request.name].storeMethod) + assert.Equal(t, tc.expectedCallsStore, len(callsStore)) + }) + } + }) + } +} + +func setupIngesterQuerierMocks(conf Config, limits *validation.Overrides) (*querierClientMock, *storeMock, *SingleTenantQuerier, error) { + queryClient := newQueryClientMock() + queryClient.On("Recv").Return(mockQueryResponse([]logproto.Stream{mockStream(1, 1)}), nil) + + querySampleClient := newQuerySampleClientMock() + querySampleClient.On("Recv").Return(mockQueryResponse([]logproto.Stream{mockStream(1, 1)}), nil) + + ingesterClient := newQuerierClientMock() + ingesterClient.On("Query", mock.Anything, mock.Anything, mock.Anything).Return(queryClient, nil) + ingesterClient.On("QuerySample", mock.Anything, mock.Anything, mock.Anything).Return(querySampleClient, nil) + ingesterClient.On("Label", mock.Anything, mock.Anything, mock.Anything).Return(mockLabelResponse([]string{"bar"}), nil) + ingesterClient.On("Series", mock.Anything, mock.Anything, mock.Anything).Return(&logproto.SeriesResponse{ + Series: []logproto.SeriesIdentifier{ + { + Labels: map[string]string{"bar": "1"}, + }, + }, + }, nil) + + store := newStoreMock() + store.On("SelectLogs", mock.Anything, mock.Anything).Return(mockStreamIterator(0, 1), nil) + store.On("SelectSamples", mock.Anything, mock.Anything).Return(mockSampleIterator(querySampleClient), nil) + store.On("LabelValuesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]string{"1", "2", "3"}, nil) + store.On("LabelNamesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]string{"foo"}, nil) + store.On("Series", mock.Anything, mock.Anything).Return([]logproto.SeriesIdentifier{ + {Labels: map[string]string{"foo": "1"}}, + }, nil) + + querier, err := newQuerier( + conf, + mockIngesterClientConfig(), + newIngesterClientMockFactory(ingesterClient), + mockReadRingWithOneActiveIngester(), + &mockDeleteGettter{}, + store, limits) + + if err != nil { + return nil, nil, nil, err + } + + return ingesterClient, store, querier, nil +} + type fakeTimeLimits struct { maxQueryLookback time.Duration maxQueryLength time.Duration
querier
prevent unnecessary calls to ingesters (#5984)
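The core of this change is the extracted lookback arithmetic: `IngesterQueryStoreMaxLookback` takes precedence over `QueryIngestersWithin`, a non-positive value means "no limit", and the ingester path is skipped entirely when the query's end time predates the lookback window. A standalone sketch of that logic, with hypothetical function names mirroring the methods in the diff:

```go
package main

import (
	"fmt"
	"time"
)

// calculateMaxLookback mirrors the precedence rule from the diff:
// IngesterQueryStoreMaxLookback wins over QueryIngestersWithin; -1 (or any
// non-positive result) means "query ingesters for the whole range".
func calculateMaxLookback(ingesterQueryStoreMaxLookback, queryIngestersWithin time.Duration) time.Duration {
	if ingesterQueryStoreMaxLookback != 0 {
		return ingesterQueryStoreMaxLookback
	}
	if queryIngestersWithin != 0 {
		return queryIngestersWithin
	}
	return -1
}

// withinLookback reports whether a query ending at queryEnd still overlaps
// the window served by ingesters.
func withinLookback(maxLookback time.Duration, queryEnd time.Time) bool {
	if maxLookback <= 0 {
		return true // no limit configured: always ask the ingesters
	}
	oldest := time.Now().Add(-maxLookback)
	return queryEnd.After(oldest)
}

func main() {
	lb := calculateMaxLookback(0, 30*time.Minute)
	fmt.Println(withinLookback(lb, time.Now().Add(-2*time.Hour))) // false: store only
	fmt.Println(withinLookback(lb, time.Now()))                   // true: ingesters too
}
```

This is why the tests above expect zero ingester calls for the "Data only in storage" case: the decision is made once, before any RPC is issued.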
f5b0fb6b998dc0a49cd36c0968862340c7e517bf
2024-10-31 15:58:50
George Robinson
feat: add gauge loki_ingest_storage_reader_phase (#14679)
false
diff --git a/pkg/kafka/partition/reader.go b/pkg/kafka/partition/reader.go
index d90b028e8af81..a1008d88c8596 100644
--- a/pkg/kafka/partition/reader.go
+++ b/pkg/kafka/partition/reader.go
@@ -100,6 +100,9 @@ func (p *Reader) start(ctx context.Context) error {
 		return errors.Wrap(err, "creating kafka reader client")
 	}

+	p.metrics.phase.WithLabelValues("starting").Set(1)
+	p.metrics.phase.WithLabelValues("running").Set(0)
+
 	// We manage our commits manually, so we must fetch the last offset for our consumer group to find out where to read from.
 	lastCommittedOffset := p.fetchLastCommittedOffset(ctx)
 	p.client.AddConsumePartitions(map[string]map[int32]kgo.Offset{
@@ -141,6 +144,9 @@
 // data from Kafka, and send it to the consumer.
 func (p *Reader) run(ctx context.Context) error {
 	level.Info(p.logger).Log("msg", "starting partition reader", "partition", p.partitionID, "consumer_group", p.consumerGroup)
+	p.metrics.phase.WithLabelValues("starting").Set(0)
+	p.metrics.phase.WithLabelValues("running").Set(1)
+
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()

@@ -513,6 +519,7 @@ func isErrFetch(fetch kgo.Fetch) bool {
 }

 type readerMetrics struct {
+	phase                    *prometheus.GaugeVec
 	receiveDelayWhenStarting prometheus.Observer
 	receiveDelayWhenRunning  prometheus.Observer
 	recordsPerFetch          prometheus.Histogram
@@ -538,6 +545,10 @@
 	}, []string{"phase"})

 	return readerMetrics{
+		phase: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
+			Name: "loki_ingest_storage_reader_phase",
+			Help: "The current phase of the consumer.",
+		}, []string{"phase"}),
 		receiveDelayWhenStarting: receiveDelay.WithLabelValues("starting"),
 		receiveDelayWhenRunning:  receiveDelay.WithLabelValues("running"),
 		kprom:                    client.NewReaderClientMetrics("partition-reader", reg),
feat
add gauge loki_ingest_storage_reader_phase (#14679)
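The new gauge uses a one-hot encoding over a `phase` label: the reader sets `starting` to 1 on start-up and flips to `running` once the consume loop begins. A minimal sketch of the pattern (the `setPhase` helper is hypothetical; the diff sets the two series inline):

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// newPhaseGauge mirrors the metric added in the diff: one series per phase,
// with exactly one series held at 1 at any time.
func newPhaseGauge(reg prometheus.Registerer) *prometheus.GaugeVec {
	return promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
		Name: "loki_ingest_storage_reader_phase",
		Help: "The current phase of the consumer.",
	}, []string{"phase"})
}

// setPhase flips the one-hot encoding, so a dashboard can read the current
// state without knowing which phase was previously active.
func setPhase(g *prometheus.GaugeVec, current string) {
	for _, phase := range []string{"starting", "running"} {
		value := 0.0
		if phase == current {
			value = 1.0
		}
		g.WithLabelValues(phase).Set(value)
	}
}

func main() {
	g := newPhaseGauge(prometheus.NewRegistry())
	setPhase(g, "starting") // during start(): starting=1, running=0
	setPhase(g, "running")  // once run() begins: starting=0, running=1
}
```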
defba23526e73cb71d40a71b1a1790789d47ded8
2022-09-07 21:00:06
Mohamed-Amine Bouqsimi
operator: Configure kube-rbac-proxy sidecar to use Intermediate TLS security profile in OCP (#7092)
false
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index 1651ee9f1a755..d49daea3f446d 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,5 +1,6 @@
 ## Main

+- [7092](https://github.com/grafana/loki/pull/7092) **aminesnow**: Configure kube-rbac-proxy sidecar to use Intermediate TLS security profile in OCP
 - [6870](https://github.com/grafana/loki/pull/6870) **aminesnow**: Configure gateway to honor the global tlsSecurityProfile on Openshift
 - [6999](https://github.com/grafana/loki/pull/6999) **Red-GV**: Adding LokiStack Gateway alerts
 - [7000](https://github.com/grafana/loki/pull/7000) **xperimental**: Configure default node affinity for all pods
diff --git a/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml
index 43f298c64a223..46ea9c32bba29 100644
--- a/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml
@@ -1231,6 +1231,8 @@ spec:
                 - --logtostderr=true
                 - --tls-cert-file=/var/run/secrets/serving-cert/tls.crt
                 - --tls-private-key-file=/var/run/secrets/serving-cert/tls.key
+                - --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA256
+                - --tls-min-version=VersionTLS12
                 - --v=0
                 image: quay.io/openshift/origin-kube-rbac-proxy:latest
                 name: kube-rbac-proxy
diff --git a/operator/config/overlays/openshift/manager_auth_proxy_patch.yaml b/operator/config/overlays/openshift/manager_auth_proxy_patch.yaml
index 261cbfd4ccffc..0746eba8736fc 100644
--- a/operator/config/overlays/openshift/manager_auth_proxy_patch.yaml
+++ b/operator/config/overlays/openshift/manager_auth_proxy_patch.yaml
@@ -14,6 +14,8 @@ spec:
         - "--logtostderr=true"
         - "--tls-cert-file=/var/run/secrets/serving-cert/tls.crt"
         - "--tls-private-key-file=/var/run/secrets/serving-cert/tls.key"
+        - "--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA256"
+        - "--tls-min-version=VersionTLS12"
        - "--v=0"
         ports:
         - containerPort: 8443
operator
Configure kube-rbac-proxy sidecar to use Intermediate TLS security profile in OCP (#7092)
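The added flags pin kube-rbac-proxy to TLS 1.2 or newer with an explicit cipher list. For orientation, here is roughly what the same policy looks like as a Go `crypto/tls` server config; the suites below are an illustrative subset of the flag values above, not the full OpenShift Intermediate profile:

```go
package main

import "crypto/tls"

// intermediateTLSConfig approximates --tls-min-version=VersionTLS12 plus an
// explicit cipher list as a crypto/tls config. Note that Go ignores
// CipherSuites for TLS 1.3 connections, which always use the 1.3 suites.
func intermediateTLSConfig() *tls.Config {
	return &tls.Config{
		MinVersion: tls.VersionTLS12,
		CipherSuites: []uint16{
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
			tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
		},
	}
}

func main() {
	_ = intermediateTLSConfig()
}
```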
9f06bc008329a80c59064157678341ba8300f161
2022-08-01 20:22:47
Kaviraj Kanagaraj
chore(groupcache): Groupcache doesn't need global server handler anymore (#6820)
false
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index 2994494b1a2c4..186a98a186dfe 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -161,7 +161,7 @@ func (t *Loki) initGroupcache() (_ services.Service, err error) {
 	t.groupcacheRingManager = rm
 	t.Server.HTTP.Path("/groupcache/ring").Methods("GET", "POST").Handler(t.groupcacheRingManager)

-	gc, err := cache.NewGroupCache(rm, t.Cfg.Common.GroupCacheConfig, t.Server, util_log.Logger, prometheus.DefaultRegisterer)
+	gc, err := cache.NewGroupCache(rm, t.Cfg.Common.GroupCacheConfig, util_log.Logger, prometheus.DefaultRegisterer)
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/storage/chunk/cache/groupcache.go b/pkg/storage/chunk/cache/groupcache.go
index 79954ece36a06..315edaffddd99 100644
--- a/pkg/storage/chunk/cache/groupcache.go
+++ b/pkg/storage/chunk/cache/groupcache.go
@@ -21,7 +21,6 @@ import (
 	"github.com/mailgun/groupcache/v2"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/weaveworks/common/server"

 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
@@ -89,7 +88,7 @@ type ringManager interface {
 	Ring() ring.ReadRing
 }

-func NewGroupCache(rm ringManager, config GroupCacheConfig, server *server.Server, logger log.Logger, reg prometheus.Registerer) (*GroupCache, error) {
+func NewGroupCache(rm ringManager, config GroupCacheConfig, logger log.Logger, reg prometheus.Registerer) (*GroupCache, error) {
 	addr := fmt.Sprintf("http://%s", rm.Addr())
 	level.Info(logger).Log("msg", "groupcache local address set to", "addr", addr)

diff --git a/pkg/storage/chunk/cache/groupcache_test.go b/pkg/storage/chunk/cache/groupcache_test.go
index 199a0dcd3274c..c75d2a727d0b5 100644
--- a/pkg/storage/chunk/cache/groupcache_test.go
+++ b/pkg/storage/chunk/cache/groupcache_test.go
@@ -5,8 +5,6 @@ import (
 	"testing"

 	"github.com/go-kit/log"
-	"github.com/gorilla/mux"
-	"github.com/weaveworks/common/server"

 	"github.com/grafana/dskit/ring"
 	"github.com/stretchr/testify/assert"
@@ -64,7 +62,7 @@ func setupGroupCache() (*GroupCache, error) {
 	return NewGroupCache(&mockRingManager{}, GroupCacheConfig{
 		Enabled:    true,
 		CapacityMB: 1,
-	}, &server.Server{HTTP: mux.NewRouter()}, log.NewNopLogger(), nil)
+	}, log.NewNopLogger(), nil)
 }

 type mockRingManager struct{}
chore
Groupcache doesn't need global server handler anymore (#6820)
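Dropping the `*server.Server` parameter leaves `NewGroupCache` depending only on the small `ringManager` interface, which is what lets the test above pass a bare mock. A hypothetical same-package stub making that surface explicit (a sketch, not code from the commit):

```go
package cache

import "github.com/grafana/dskit/ring"

// stubRingManager is a stand-in for the unexported ringManager interface in
// groupcache.go. With the server dependency removed, this two-method surface
// is all that NewGroupCache needs from its callers.
type stubRingManager struct{}

func (stubRingManager) Addr() string        { return "localhost:9095" }
func (stubRingManager) Ring() ring.ReadRing { return nil }

// Compile-time check against the interface (valid here because the stub
// lives in the same package as the unexported interface).
var _ ringManager = stubRingManager{}
```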
6f2aa5fb68a756681caa647a900731abfb8a6082
2023-03-29 10:09:06
Mohamed-Amine Bouqsimi
operator: Update LokiStack annotation on RulerConfig delete (#8911)
false
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md index 9eab3b3c92eb7..6e86d2e91bbbf 100644 --- a/operator/CHANGELOG.md +++ b/operator/CHANGELOG.md @@ -1,5 +1,8 @@ ## Main + +- [8911](https://github.com/grafana/loki/pull/8911) **aminesnow**: Update LokiStack annotaion on RulerConfig delete + ## 0.2.0 (2023-03-27) - [8651](https://github.com/grafana/loki/pull/8651) **periklis**: Prepare Community Loki Operator release v0.2.0 diff --git a/operator/controllers/loki/internal/lokistack/ruler_config_discovery.go b/operator/controllers/loki/internal/lokistack/ruler_config_discovery.go index ce7195b36893a..40213c0db8c95 100644 --- a/operator/controllers/loki/internal/lokistack/ruler_config_discovery.go +++ b/operator/controllers/loki/internal/lokistack/ruler_config_discovery.go @@ -19,19 +19,12 @@ const ( // to the named Lokistack in the same namespace of the RulerConfig. If no LokiStack is found, then // skip reconciliation. func AnnotateForRulerConfig(ctx context.Context, k k8s.Client, name, namespace string) error { - var s lokiv1.LokiStack key := client.ObjectKey{Name: name, Namespace: namespace} - - if err := k.Get(ctx, key, &s); err != nil { - if apierrors.IsNotFound(err) { - // Do nothing - return nil - } - - return kverrors.Wrap(err, "failed to get lokistack", "key", key) + ss, err := getLokiStack(ctx, k, key) + if ss == nil || err != nil { + return err } - ss := s.DeepCopy() timeStamp := time.Now().UTC().Format(time.RFC3339) if err := updateAnnotation(ctx, k, ss, annotationRulerConfigDiscoveredAt, timeStamp); err != nil { return kverrors.Wrap(err, "failed to update lokistack `rulerConfigDiscoveredAt` annotation", "key", key) @@ -39,3 +32,35 @@ func AnnotateForRulerConfig(ctx context.Context, k k8s.Client, name, namespace s return nil } + +// RemoveRulerConfigAnnotation removes the `loki.grafana.com/rulerConfigDiscoveredAt` annotation +// from the named Lokistack in the same namespace of the RulerConfig. If no LokiStack is found, then +// skip reconciliation. 
+func RemoveRulerConfigAnnotation(ctx context.Context, k k8s.Client, name, namespace string) error { + key := client.ObjectKey{Name: name, Namespace: namespace} + ss, err := getLokiStack(ctx, k, key) + if ss == nil || err != nil { + return err + } + + if err := removeAnnotation(ctx, k, ss, annotationRulerConfigDiscoveredAt); err != nil { + return kverrors.Wrap(err, "failed to update lokistack `rulerConfigDiscoveredAt` annotation", "key", key) + } + + return nil +} + +func getLokiStack(ctx context.Context, k k8s.Client, key client.ObjectKey) (*lokiv1.LokiStack, error) { + var s lokiv1.LokiStack + + if err := k.Get(ctx, key, &s); err != nil { + if apierrors.IsNotFound(err) { + // Do nothing + return nil, nil + } + + return nil, kverrors.Wrap(err, "failed to get lokistack", "key", key) + } + + return s.DeepCopy(), nil +} diff --git a/operator/controllers/loki/internal/lokistack/update.go b/operator/controllers/loki/internal/lokistack/update.go index aca04ab855499..311f6d9b15b00 100644 --- a/operator/controllers/loki/internal/lokistack/update.go +++ b/operator/controllers/loki/internal/lokistack/update.go @@ -41,3 +41,35 @@ func updateAnnotation(ctx context.Context, k k8s.Client, stack *lokiv1.LokiStack return k.Update(ctx, stack) }) } + +func removeAnnotation(ctx context.Context, k k8s.Client, stack *lokiv1.LokiStack, key string) error { + if stack.Annotations == nil { + return nil + } + delete(stack.Annotations, key) + + err := k.Update(ctx, stack) + switch { + case err == nil: + return nil + case errors.IsConflict(err): + // break into retry logic below on conflict + break + case err != nil: + return err + } + + objectKey := client.ObjectKeyFromObject(stack) + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + if err := k.Get(ctx, objectKey, stack); err != nil { + return err + } + + if stack.Annotations == nil { + return nil + } + delete(stack.Annotations, key) + + return k.Update(ctx, stack) + }) +} diff --git a/operator/controllers/loki/rulerconfig_controller.go b/operator/controllers/loki/rulerconfig_controller.go index c3311dad999aa..5e827b54e2ffa 100644 --- a/operator/controllers/loki/rulerconfig_controller.go +++ b/operator/controllers/loki/rulerconfig_controller.go @@ -32,6 +32,12 @@ func (r *RulerConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) key := client.ObjectKey{Name: req.Name, Namespace: req.Namespace} if err := r.Get(ctx, key, &rc); err != nil { if errors.IsNotFound(err) { + // RulerConfig not found, remove annotation from LokiStack. + err = lokistack.RemoveRulerConfigAnnotation(ctx, r.Client, req.Name, req.Namespace) + if err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{}, nil }
operator
Update LokiStack annotation on RulerConfig delete (#8911)
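The `removeAnnotation` helper above follows the standard Kubernetes optimistic-concurrency pattern: attempt the update once, and on a conflict re-read the object and retry via `retry.RetryOnConflict`. A condensed sketch of the same pattern against any `client.Object`; the function name is hypothetical and, unlike the commit's version, every attempt re-reads first:

```go
package lokistack

import (
	"context"

	"k8s.io/client-go/util/retry"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// removeAnnotationKey deletes one annotation key, retrying on update
// conflicts with a fresh read of the object before each attempt.
func removeAnnotationKey(ctx context.Context, k client.Client, obj client.Object, key string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		if err := k.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil {
			return err
		}
		annotations := obj.GetAnnotations()
		if annotations == nil {
			return nil // nothing to remove
		}
		delete(annotations, key)
		obj.SetAnnotations(annotations)
		return k.Update(ctx, obj)
	})
}
```

The trade-off versus the commit's two-step flow is one extra GET on the happy path in exchange for a single, uniform code path.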
e0ac0d0efd02b9af607fdb3de682619bdf7de80d
2024-11-21 01:27:02
renovate[bot]
fix(deps): update module github.com/grpc-ecosystem/go-grpc-middleware to v2 (#15045)
false
diff --git a/go.mod b/go.mod index 9a559000e6254..15bf63b859d42 100644 --- a/go.mod +++ b/go.mod @@ -56,7 +56,7 @@ require ( github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc github.com/grafana/tail v0.0.0-20230510142333-77b18831edf0 - github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 github.com/hashicorp/consul/api v1.30.0 github.com/hashicorp/golang-lru/v2 v2.0.7 diff --git a/go.sum b/go.sum index dc26891818b9f..b50d624279b46 100644 --- a/go.sum +++ b/go.sum @@ -1776,8 +1776,8 @@ github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:Fecb github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= @@ -2813,7 +2813,6 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= go4.org/netipx v0.0.0-20230125063823-8449b0a6169f h1:ketMxHg+vWm3yccyYiq+uK8D3fRmna2Fcj+awpQp84s= @@ -3208,7 +3207,6 @@ golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/pkg/ruler/evaluator_remote.go b/pkg/ruler/evaluator_remote.go index 8ef0fb88e4152..63aeebfa9d4f5 100644 --- a/pkg/ruler/evaluator_remote.go +++ 
b/pkg/ruler/evaluator_remote.go @@ -25,7 +25,6 @@ import ( "github.com/grafana/dskit/instrument" "github.com/grafana/dskit/middleware" "github.com/grafana/dskit/user" - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" otgrpc "github.com/opentracing-contrib/go-grpc" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" @@ -182,11 +181,9 @@ func DialQueryFrontend(cfg *QueryFrontendConfig) (httpgrpc.HTTPClient, error) { PermitWithoutStream: true, }, ), - grpc.WithUnaryInterceptor( - grpc_middleware.ChainUnaryClient( - otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()), - middleware.ClientUserHeaderInterceptor, - ), + grpc.WithChainUnaryInterceptor( + otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()), + middleware.ClientUserHeaderInterceptor, ), grpc.WithDefaultServiceConfig(serviceConfig), }, diff --git a/pkg/util/server/recovery.go b/pkg/util/server/recovery.go index ce3ad109512b7..58eac396dd750 100644 --- a/pkg/util/server/recovery.go +++ b/pkg/util/server/recovery.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/dskit/httpgrpc" "github.com/grafana/dskit/middleware" - grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" + grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.gitignore b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.gitignore deleted file mode 100644 index 826caa3902bb8..0000000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.gitignore +++ /dev/null @@ -1,204 +0,0 @@ -# Created by .ignore support plugin (hsz.mobi) -### Go template -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof -### Windows template -# Windows image file caches -Thumbs.db -ehthumbs.db - -# Folder config file -Desktop.ini - -# Recycle Bin used on file shares -$RECYCLE.BIN/ - -# Windows Installer files -*.cab -*.msi -*.msm -*.msp - -# Windows shortcuts -*.lnk -### Kate template -# Swap Files # -.*.kate-swp -.swp.* -### SublimeText template -# cache files for sublime text -*.tmlanguage.cache -*.tmPreferences.cache -*.stTheme.cache - -# workspace files are user-specific -*.sublime-workspace - -# project files should be checked into the repository, unless a significant -# proportion of contributors will probably not be using SublimeText -# *.sublime-project - -# sftp configuration file -sftp-config.json -### Linux template -*~ - -# temporary files which can be created if a process still has a handle open of a deleted file -.fuse_hidden* - -# KDE directory preferences -.directory - -# Linux trash folder which might appear on any partition or disk -.Trash-* -### JetBrains template -# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm -# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 - -# User-specific stuff: -.idea -.idea/tasks.xml -.idea/dictionaries -.idea/vcs.xml -.idea/jsLibraryMappings.xml - -# Sensitive or high-churn files: -.idea/dataSources.ids -.idea/dataSources.xml -.idea/dataSources.local.xml -.idea/sqlDataSources.xml -.idea/dynamic.xml -.idea/uiDesigner.xml - -# 
Gradle: -.idea/gradle.xml -.idea/libraries - -# Mongo Explorer plugin: -.idea/mongoSettings.xml - -## File-based project format: -*.iws - -## Plugin-specific files: - -# IntelliJ -/out/ - -# mpeltonen/sbt-idea plugin -.idea_modules/ - -# JIRA plugin -atlassian-ide-plugin.xml - -# Crashlytics plugin (for Android Studio and IntelliJ) -com_crashlytics_export_strings.xml -crashlytics.properties -crashlytics-build.properties -fabric.properties -### Xcode template -# Xcode -# -# gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore - -## Build generated -build/ -DerivedData/ - -## Various settings -*.pbxuser -!default.pbxuser -*.mode1v3 -!default.mode1v3 -*.mode2v3 -!default.mode2v3 -*.perspectivev3 -!default.perspectivev3 -xcuserdata/ - -## Other -*.moved-aside -*.xccheckout -*.xcscmblueprint -### Eclipse template - -.metadata -bin/ -tmp/ -*.tmp -*.bak -*.swp -*~.nib -local.properties -.settings/ -.loadpath -.recommenders - -# Eclipse Core -.project - -# External tool builders -.externalToolBuilders/ - -# Locally stored "Eclipse launch configurations" -*.launch - -# PyDev specific (Python IDE for Eclipse) -*.pydevproject - -# CDT-specific (C/C++ Development Tooling) -.cproject - -# JDT-specific (Eclipse Java Development Tools) -.classpath - -# Java annotation processor (APT) -.factorypath - -# PDT-specific (PHP Development Tools) -.buildpath - -# sbteclipse plugin -.target - -# Tern plugin -.tern-project - -# TeXlipse plugin -.texlipse - -# STS (Spring Tool Suite) -.springBeans - -# Code Recommenders -.recommenders/ - - -coverage.txt - -#vendor -vendor/ - -.envrc \ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CONTRIBUTING.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CONTRIBUTING.md deleted file mode 100644 index dd52ab8938e05..0000000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CONTRIBUTING.md +++ /dev/null @@ -1,20 +0,0 @@ -# Contributing - -We would love to have people submit pull requests and help make `grpc-ecosystem/go-grpc-middleware` even better 👍. - -Fork, then clone the repo: - -```bash -git clone [email protected]:your-username/go-grpc-middleware.git -``` - -Before checking in please run the following: - -```bash -make all -``` - -This will `vet`, `fmt`, regenerate documentation and run all tests. - - -Push to your fork and open a pull request. 
\ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md deleted file mode 100644 index a12b40904bc26..0000000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md +++ /dev/null @@ -1,93 +0,0 @@ -# Go gRPC Middleware - -[![Travis Build](https://travis-ci.org/grpc-ecosystem/go-grpc-middleware.svg?branch=master)](https://travis-ci.org/grpc-ecosystem/go-grpc-middleware) -[![Go Report Card](https://goreportcard.com/badge/github.com/grpc-ecosystem/go-grpc-middleware)](https://goreportcard.com/report/github.com/grpc-ecosystem/go-grpc-middleware) -[![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware) -[![SourceGraph](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-middleware/-/badge.svg)](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-middleware/?badge) -[![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware) -[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE) -[![quality: production](https://img.shields.io/badge/quality-production-orange.svg)](#status) -[![Slack](https://img.shields.io/badge/slack-%23grpc--middleware-brightgreen)](https://gophers.slack.com/archives/CNJL30P4P) - -[gRPC Go](https://github.com/grpc/grpc-go) Middleware: interceptors, helpers, utilities. - -## ⚠️ Status - -Version [v2](https://github.com/grpc-ecosystem/go-grpc-middleware/tree/v2) is about to be released, with migration guide, which will replace v1. Try v2 and give us feedback! - -Version v1 is currently in deprecation mode, which means only critical and safety bug fixes will be merged. - - -## Middleware - -[gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for -Interceptors, i.e. [middleware](https://medium.com/@matryer/writing-middleware-in-golang-and-how-go-makes-it-so-much-fun-4375c1246e81#.gv7tdlghs) -that is executed either on the gRPC Server before the request is passed onto the user's application logic, or on the gRPC client around the user call. It is a perfect way to implement -common patterns: auth, logging, message, validation, retries, or monitoring. - -These are generic building blocks that make it easy to build multiple microservices easily. -The purpose of this repository is to act as a go-to point for such reusable functionality. It contains -some of them itself, but also will link to useful external repos. 
- -`grpc_middleware` itself provides support for chaining interceptors, here's an example: - -```go -import "github.com/grpc-ecosystem/go-grpc-middleware" - -myServer := grpc.NewServer( - grpc.StreamInterceptor(grpc_middleware.ChainStreamServer( - grpc_ctxtags.StreamServerInterceptor(), - grpc_opentracing.StreamServerInterceptor(), - grpc_prometheus.StreamServerInterceptor, - grpc_zap.StreamServerInterceptor(zapLogger), - grpc_auth.StreamServerInterceptor(myAuthFunction), - grpc_recovery.StreamServerInterceptor(), - )), - grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( - grpc_ctxtags.UnaryServerInterceptor(), - grpc_opentracing.UnaryServerInterceptor(), - grpc_prometheus.UnaryServerInterceptor, - grpc_zap.UnaryServerInterceptor(zapLogger), - grpc_auth.UnaryServerInterceptor(myAuthFunction), - grpc_recovery.UnaryServerInterceptor(), - )), -) -``` - -## Interceptors - -_Please send a PR to add new interceptors or middleware to this list_ - -#### Auth - -- [`grpc_auth`](auth) - a customizable (via `AuthFunc`) piece of auth middleware - -#### Logging - -- [`grpc_ctxtags`](tags/) - a library that adds a `Tag` map to context, with data populated from request body -- [`grpc_zap`](logging/zap/) - integration of [zap](https://github.com/uber-go/zap) logging library into gRPC handlers. -- [`grpc_logrus`](logging/logrus/) - integration of [logrus](https://github.com/sirupsen/logrus) logging library into gRPC handlers. -- [`grpc_kit`](logging/kit/) - integration of [go-kit/log](https://github.com/go-kit/log) logging library into gRPC handlers. -- [`grpc_grpc_logsettable`](logging/settable/) - a wrapper around `grpclog.LoggerV2` that allows to replace loggers in runtime (thread-safe). - -#### Monitoring - -- [`grpc_prometheus`⚡](https://github.com/grpc-ecosystem/go-grpc-prometheus) - Prometheus client-side and server-side monitoring middleware -- [`otgrpc`⚡](https://github.com/grpc-ecosystem/grpc-opentracing/tree/master/go/otgrpc) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors -- [`grpc_opentracing`](tracing/opentracing) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors with support for streaming and handler-returned tags -- [`otelgrpc`](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation/google.golang.org/grpc/otelgrpc) - [OpenTelemetry](https://opentelemetry.io/) client-side and server-side interceptors - -#### Client - -- [`grpc_retry`](retry/) - a generic gRPC response code retry mechanism, client-side middleware - -#### Server - -- [`grpc_validator`](validator/) - codegen inbound message validation from `.proto` options -- [`grpc_recovery`](recovery/) - turn panics into gRPC errors -- [`ratelimit`](ratelimit/) - grpc rate limiting by your own limiter - - -## License - -`go-grpc-middleware` is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details. diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go deleted file mode 100644 index 407d9332c9b00..0000000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -// gRPC Server Interceptor chaining middleware. - -package grpc_middleware - -import ( - "context" - - "google.golang.org/grpc" -) - -// ChainUnaryServer creates a single interceptor out of a chain of many interceptors. 
-// -// Execution is done in left-to-right order, including passing of context. -// For example ChainUnaryServer(one, two, three) will execute one before two before three, and three -// will see context changes of one and two. -// -// While this can be useful in some scenarios, it is generally advisable to use google.golang.org/grpc.ChainUnaryInterceptor directly. -func ChainUnaryServer(interceptors ...grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor { - n := len(interceptors) - - // Dummy interceptor maintained for backward compatibility to avoid returning nil. - if n == 0 { - return func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - return handler(ctx, req) - } - } - - // The degenerate case, just return the single wrapped interceptor directly. - if n == 1 { - return interceptors[0] - } - - // Return a function which satisfies the interceptor interface, and which is - // a closure over the given list of interceptors to be chained. - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - currHandler := handler - // Iterate backwards through all interceptors except the first (outermost). - // Wrap each one in a function which satisfies the handler interface, but - // is also a closure over the `info` and `handler` parameters. Then pass - // each pseudo-handler to the next outer interceptor as the handler to be called. - for i := n - 1; i > 0; i-- { - // Rebind to loop-local vars so they can be closed over. - innerHandler, i := currHandler, i - currHandler = func(currentCtx context.Context, currentReq interface{}) (interface{}, error) { - return interceptors[i](currentCtx, currentReq, info, innerHandler) - } - } - // Finally return the result of calling the outermost interceptor with the - // outermost pseudo-handler created above as its handler. - return interceptors[0](ctx, req, info, currHandler) - } -} - -// ChainStreamServer creates a single interceptor out of a chain of many interceptors. -// -// Execution is done in left-to-right order, including passing of context. -// For example ChainUnaryServer(one, two, three) will execute one before two before three. -// If you want to pass context between interceptors, use WrapServerStream. -// -// While this can be useful in some scenarios, it is generally advisable to use google.golang.org/grpc.ChainStreamInterceptor directly. -func ChainStreamServer(interceptors ...grpc.StreamServerInterceptor) grpc.StreamServerInterceptor { - n := len(interceptors) - - // Dummy interceptor maintained for backward compatibility to avoid returning nil. - if n == 0 { - return func(srv interface{}, stream grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - return handler(srv, stream) - } - } - - if n == 1 { - return interceptors[0] - } - - return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - currHandler := handler - for i := n - 1; i > 0; i-- { - innerHandler, i := currHandler, i - currHandler = func(currentSrv interface{}, currentStream grpc.ServerStream) error { - return interceptors[i](currentSrv, currentStream, info, innerHandler) - } - } - return interceptors[0](srv, stream, info, currHandler) - } -} - -// ChainUnaryClient creates a single interceptor out of a chain of many interceptors. -// -// Execution is done in left-to-right order, including passing of context. 
-// For example ChainUnaryClient(one, two, three) will execute one before two before three. -func ChainUnaryClient(interceptors ...grpc.UnaryClientInterceptor) grpc.UnaryClientInterceptor { - n := len(interceptors) - - // Dummy interceptor maintained for backward compatibility to avoid returning nil. - if n == 0 { - return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - return invoker(ctx, method, req, reply, cc, opts...) - } - } - - if n == 1 { - return interceptors[0] - } - - return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - currInvoker := invoker - for i := n - 1; i > 0; i-- { - innerInvoker, i := currInvoker, i - currInvoker = func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error { - return interceptors[i](currentCtx, currentMethod, currentReq, currentRepl, currentConn, innerInvoker, currentOpts...) - } - } - return interceptors[0](ctx, method, req, reply, cc, currInvoker, opts...) - } -} - -// ChainStreamClient creates a single interceptor out of a chain of many interceptors. -// -// Execution is done in left-to-right order, including passing of context. -// For example ChainStreamClient(one, two, three) will execute one before two before three. -func ChainStreamClient(interceptors ...grpc.StreamClientInterceptor) grpc.StreamClientInterceptor { - n := len(interceptors) - - // Dummy interceptor maintained for backward compatibility to avoid returning nil. - if n == 0 { - return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { - return streamer(ctx, desc, cc, method, opts...) - } - } - - if n == 1 { - return interceptors[0] - } - - return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { - currStreamer := streamer - for i := n - 1; i > 0; i-- { - innerStreamer, i := currStreamer, i - currStreamer = func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) { - return interceptors[i](currentCtx, currentDesc, currentConn, currentMethod, innerStreamer, currentOpts...) - } - } - return interceptors[0](ctx, desc, cc, method, currStreamer, opts...) - } -} - -// Chain creates a single interceptor out of a chain of many interceptors. -// -// WithUnaryServerChain is a grpc.Server config option that accepts multiple unary interceptors. -// Basically syntactic sugar. -// -// Deprecated: use google.golang.org/grpc.ChainUnaryInterceptor instead. -func WithUnaryServerChain(interceptors ...grpc.UnaryServerInterceptor) grpc.ServerOption { - return grpc.ChainUnaryInterceptor(interceptors...) -} - -// WithStreamServerChain is a grpc.Server config option that accepts multiple stream interceptors. -// Basically syntactic sugar. -// -// Deprecated: use google.golang.org/grpc.ChainStreamInterceptor instead. -func WithStreamServerChain(interceptors ...grpc.StreamServerInterceptor) grpc.ServerOption { - return grpc.ChainStreamInterceptor(interceptors...) 
-} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go deleted file mode 100644 index 718e10046a224..0000000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -/* -`grpc_middleware` is a collection of gRPC middleware packages: interceptors, helpers and tools. - -Middleware - -gRPC is a fantastic RPC middleware, which sees a lot of adoption in the Golang world. However, the -upstream gRPC codebase is relatively bare bones. - -This package, and most of its child packages, provides commonly needed middleware for gRPC: -client-side interceptors for retries, server-side interceptors for input validation and auth, -functions for chaining said interceptors, metadata convenience methods and more. - -Chaining - -By default, gRPC doesn't allow one to have more than one interceptor on either the client or the -server side. `grpc_middleware` provides convenient chaining methods - -a simple way of turning multiple interceptors into a single interceptor. Here's an example for -server chaining: - - myServer := grpc.NewServer( - grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(loggingStream, monitoringStream, authStream)), - grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(loggingUnary, monitoringUnary, authUnary)), - ) - -These interceptors will be executed from left to right: logging, monitoring and auth. - -Here's an example for client side chaining: - - clientConn, err = grpc.Dial( - address, - grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(monitoringClientUnary, retryUnary)), - grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(monitoringClientStream, retryStream)), - ) - client = pb_testproto.NewTestServiceClient(clientConn) - resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"}) - -These interceptors will be executed from left to right: monitoring and then retry logic. - -The retry interceptor will call every interceptor that follows it whenever a retry happens. - -Writing Your Own - -Implementing your own interceptor is pretty trivial: there are interfaces for that. But the interesting -bit is exposing common data to handlers (and other middleware), similarly to HTTP middleware design. -For example, you may want to pass the identity of the caller from the auth interceptor all the way -to the handling function. - -For example, a server-side interceptor for auth looks like: - - func FakeAuthUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - newCtx := context.WithValue(ctx, "user_id", "[email protected]") - return handler(newCtx, req) - } - -Unfortunately, it's not as easy for streaming RPCs. These have the `context.Context` embedded within -the `grpc.ServerStream` object. To pass values through context, a wrapper (`WrappedServerStream`) is -needed. 
For example: - - func FakeAuthStreamingInterceptor(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - newStream := grpc_middleware.WrapServerStream(stream) - newStream.WrappedContext = context.WithValue(ctx, "user_id", "[email protected]") - return handler(srv, newStream) - } -*/ -package grpc_middleware diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/makefile b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/makefile deleted file mode 100644 index b18d2d2bb1199..0000000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/makefile +++ /dev/null @@ -1,17 +0,0 @@ -SHELL=/bin/bash - -GOFILES_NOVENDOR = $(shell go list ./... | grep -v /vendor/) - -all: vet fmt test - -fmt: - go fmt $(GOFILES_NOVENDOR) - -vet: - # do not check lostcancel, they are intentional. - go vet -lostcancel=false $(GOFILES_NOVENDOR) - -test: vet - ./scripts/test_all.sh - -.PHONY: all test diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/recovery/interceptors.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/recovery/interceptors.go deleted file mode 100644 index 15ee54da651bf..0000000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/recovery/interceptors.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2017 David Ackroyd. All Rights Reserved. -// See LICENSE for licensing terms. - -package grpc_recovery - -import ( - "context" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// RecoveryHandlerFunc is a function that recovers from the panic `p` by returning an `error`. -type RecoveryHandlerFunc func(p interface{}) (err error) - -// RecoveryHandlerFuncContext is a function that recovers from the panic `p` by returning an `error`. -// The context can be used to extract request scoped metadata and context values. -type RecoveryHandlerFuncContext func(ctx context.Context, p interface{}) (err error) - -// UnaryServerInterceptor returns a new unary server interceptor for panic recovery. -func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { - o := evaluateOptions(opts) - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (_ interface{}, err error) { - panicked := true - - defer func() { - if r := recover(); r != nil || panicked { - err = recoverFrom(ctx, r, o.recoveryHandlerFunc) - } - }() - - resp, err := handler(ctx, req) - panicked = false - return resp, err - } -} - -// StreamServerInterceptor returns a new streaming server interceptor for panic recovery. 
-func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { - o := evaluateOptions(opts) - return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) (err error) { - panicked := true - - defer func() { - if r := recover(); r != nil || panicked { - err = recoverFrom(stream.Context(), r, o.recoveryHandlerFunc) - } - }() - - err = handler(srv, stream) - panicked = false - return err - } -} - -func recoverFrom(ctx context.Context, p interface{}, r RecoveryHandlerFuncContext) error { - if r == nil { - return status.Errorf(codes.Internal, "%v", p) - } - return r(ctx, p) -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/slack.png b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/slack.png deleted file mode 100644 index cc8f9a68a9368..0000000000000 Binary files a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/slack.png and /dev/null differ diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/COPYRIGHT b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/COPYRIGHT new file mode 100644 index 0000000000000..3b13627cdbb65 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/COPYRIGHT @@ -0,0 +1,2 @@ +Copyright (c) The go-grpc-middleware Authors. +Licensed under the Apache License 2.0. diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/LICENSE similarity index 100% rename from vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE rename to vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/LICENSE diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/recovery/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery/doc.go similarity index 50% rename from vendor/github.com/grpc-ecosystem/go-grpc-middleware/recovery/doc.go rename to vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery/doc.go index 2806de5e28fef..78021207902a5 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/recovery/doc.go +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery/doc.go @@ -1,10 +1,15 @@ +// Copyright (c) The go-grpc-middleware Authors. +// Licensed under the Apache License 2.0. + // Copyright 2017 David Ackroyd. All Rights Reserved. // See LICENSE for licensing terms. /* -`grpc_recovery` are interceptors that recover from gRPC handler panics. +Package recovery is a middleware that recovers from panics and logs the panic message. + +`recovery` are interceptors that recover from gRPC handler panics. -Server Side Recovery Middleware +# Server Side Recovery Middleware By default a panic will be converted into a gRPC error with `code.Internal`. @@ -12,4 +17,4 @@ Handling can be customised by providing an alternate recovery function. Please see examples for simple examples of use. */ -package grpc_recovery +package recovery diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery/interceptors.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery/interceptors.go new file mode 100644 index 0000000000000..6f0c8ffcd2169 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery/interceptors.go @@ -0,0 +1,68 @@ +// Copyright (c) The go-grpc-middleware Authors. +// Licensed under the Apache License 2.0. + +// Copyright 2017 David Ackroyd. All Rights Reserved. +// See LICENSE for licensing terms. 
+ +package recovery + +import ( + "context" + "fmt" + "runtime" + + "google.golang.org/grpc" +) + +// RecoveryHandlerFunc is a function that recovers from the panic `p` by returning an `error`. +type RecoveryHandlerFunc func(p any) (err error) + +// RecoveryHandlerFuncContext is a function that recovers from the panic `p` by returning an `error`. +// The context can be used to extract request scoped metadata and context values. +type RecoveryHandlerFuncContext func(ctx context.Context, p any) (err error) + +// UnaryServerInterceptor returns a new unary server interceptor for panic recovery. +func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { + o := evaluateOptions(opts) + return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (_ any, err error) { + defer func() { + if r := recover(); r != nil { + err = recoverFrom(ctx, r, o.recoveryHandlerFunc) + } + }() + + return handler(ctx, req) + } +} + +// StreamServerInterceptor returns a new streaming server interceptor for panic recovery. +func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { + o := evaluateOptions(opts) + return func(srv any, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) (err error) { + defer func() { + if r := recover(); r != nil { + err = recoverFrom(stream.Context(), r, o.recoveryHandlerFunc) + } + }() + + return handler(srv, stream) + } +} + +func recoverFrom(ctx context.Context, p any, r RecoveryHandlerFuncContext) error { + if r != nil { + return r(ctx, p) + } + stack := make([]byte, 64<<10) + stack = stack[:runtime.Stack(stack, false)] + return &PanicError{Panic: p, Stack: stack} +} + +type PanicError struct { + Panic any + Stack []byte +} + +func (e *PanicError) Error() string { + return fmt.Sprintf("panic caught: %v\n\n%s", e.Panic, e.Stack) +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/recovery/options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery/options.go similarity index 86% rename from vendor/github.com/grpc-ecosystem/go-grpc-middleware/recovery/options.go rename to vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery/options.go index ffe9c64c7d85e..2c55d68cc4fdd 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/recovery/options.go +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery/options.go @@ -1,7 +1,10 @@ +// Copyright (c) The go-grpc-middleware Authors. +// Licensed under the Apache License 2.0. + // Copyright 2017 David Ackroyd. All Rights Reserved. // See LICENSE for licensing terms. -package grpc_recovery +package recovery import "context" @@ -29,7 +32,7 @@ type Option func(*options) // WithRecoveryHandler customizes the function for recovering from a panic. func WithRecoveryHandler(f RecoveryHandlerFunc) Option { return func(o *options) { - o.recoveryHandlerFunc = RecoveryHandlerFuncContext(func(ctx context.Context, p interface{}) error { + o.recoveryHandlerFunc = RecoveryHandlerFuncContext(func(ctx context.Context, p any) error { return f(p) }) } diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go deleted file mode 100644 index 05ccfb3f24a79..0000000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. 
- -package grpc_middleware - -import ( - "context" - - "google.golang.org/grpc" -) - -// WrappedServerStream is a thin wrapper around grpc.ServerStream that allows modifying context. -type WrappedServerStream struct { - grpc.ServerStream - // WrappedContext is the wrapper's own Context. You can assign it. - WrappedContext context.Context -} - -// Context returns the wrapper's WrappedContext, overwriting the nested grpc.ServerStream.Context() -func (w *WrappedServerStream) Context() context.Context { - return w.WrappedContext -} - -// WrapServerStream returns a ServerStream that has the ability to overwrite context. -func WrapServerStream(stream grpc.ServerStream) *WrappedServerStream { - if existing, ok := stream.(*WrappedServerStream); ok { - return existing - } - return &WrappedServerStream{ServerStream: stream, WrappedContext: stream.Context()} -} diff --git a/vendor/modules.txt b/vendor/modules.txt index e40624a1e319e..d4483761ccbfa 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1036,10 +1036,9 @@ github.com/grafana/tail/ratelimiter github.com/grafana/tail/util github.com/grafana/tail/watch github.com/grafana/tail/winfile -# github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 -## explicit; go 1.14 -github.com/grpc-ecosystem/go-grpc-middleware -github.com/grpc-ecosystem/go-grpc-middleware/recovery +# github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 +## explicit; go 1.19 +github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery # github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 ## explicit github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc
fix
update module github.com/grpc-ecosystem/go-grpc-middleware to v2 (#15045)
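The v2 migration above removes the v1 chaining helpers (their deprecation notes point at grpc.ChainUnaryInterceptor / grpc.ChainStreamInterceptor) and moves the recovery package to v2/interceptors/recovery. A minimal sketch of wiring the new package into a server, using only the APIs visible in this diff; the handler body is illustrative:

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// The removed v1 helpers (grpc_middleware.ChainUnaryServer and friends)
	// are superseded by grpc's own chaining options.
	srv := grpc.NewServer(
		grpc.ChainUnaryInterceptor(recovery.UnaryServerInterceptor(
			// Convert a recovered panic into a gRPC Internal error.
			recovery.WithRecoveryHandler(func(p any) error {
				return status.Errorf(codes.Internal, "panic: %v", p)
			}),
		)),
		grpc.ChainStreamInterceptor(recovery.StreamServerInterceptor()),
	)
	fmt.Printf("server configured: %T\n", srv)
}
```

Without WithRecoveryHandler, the v2 interceptor instead returns a *recovery.PanicError carrying the panic value and a captured stack, as the new interceptors.go above shows.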
50dedb1c3dd731f7c1f600a0cdbd75c9c0db7874
2024-12-03 09:39:57
Owen Diehl
chore(blockbuilder): updates comments (#15220)
false
diff --git a/pkg/blockbuilder/types/grpc_transport.go b/pkg/blockbuilder/types/grpc_transport.go index 675eb92413ac7..0d132e2d07bd7 100644 --- a/pkg/blockbuilder/types/grpc_transport.go +++ b/pkg/blockbuilder/types/grpc_transport.go @@ -22,7 +22,7 @@ var _ Transport = &GRPCTransport{} type GRPCTransportConfig struct { Address string `yaml:"address,omitempty"` - // GRPCClientConfig configures the gRPC connection between the Bloom Gateway client and the server. + // GRPCClientConfig configures the gRPC connection between the block-builder and its scheduler. GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config"` }
chore
updates comments (#15220)
68ef8f4dabe9ce4bc6bd8c44781c5074cc82c553
2021-09-22 18:36:45
Karen Miller
docs: Remove wording like "As of version 1.6, you can..." (#4363)
false
diff --git a/docs/sources/best-practices/_index.md b/docs/sources/best-practices/_index.md index 50c84797d1a4b..6632d636711b9 100644 --- a/docs/sources/best-practices/_index.md +++ b/docs/sources/best-practices/_index.md @@ -107,20 +107,20 @@ It's also worth noting that the batching nature of the Loki push API can lead to ## Use `chunk_target_size` -This was added earlier in the [Loki v1.3.0](https://grafana.com/blog/2020/01/22/loki-1.3.0-released/) release, and we've been experimenting with it for several months. We have `chunk_target_size: 1536000` in all our environments now. This instructs Loki to try to fill all chunks to a target _compressed_ size of 1.5MB. These larger chunks are more efficient for Loki to process. +Using `chunk_target_size` instructs Loki to try to fill all chunks to a target _compressed_ size of 1.5MB. These larger chunks are more efficient for Loki to process. -A couple other config variables affect how full a chunk can get. Loki has a default `max_chunk_age` of 1h and `chunk_idle_period` of 30m to limit the amount of memory used as well as the exposure of lost logs if the process crashes. +Other configuration variables affect how full a chunk can get. Loki has a default `max_chunk_age` of 1h and `chunk_idle_period` of 30m to limit the amount of memory used as well as the exposure of lost logs if the process crashes. Depending on the compression used (we have been using snappy which has less compressibility but faster performance), you need 5-10x or 7.5-10MB of raw log data to fill a 1.5MB chunk. Remembering that a chunk is per stream, the more streams you break up your log files into, the more chunks that sit in memory, and the higher likelihood they get flushed by hitting one of those timeouts mentioned above before they are filled. -Lots of small, unfilled chunks are currently kryptonite for Loki. We are always working to improve this and may consider a compactor to improve this in some situations. But, in general, the guidance should stay about the same: Try your best to fill chunks! +Lots of small, unfilled chunks negatively affect Loki. We are always working to improve this and may consider a compactor to improve this in some situations. But, in general, the guidance should stay about the same: try your best to fill chunks. If you have an application that can log fast enough to fill these chunks quickly (much less than `max_chunk_age`), then it becomes more reasonable to use dynamic labels to break that up into separate streams. ## Use `-print-config-stderr` or `-log-config-reverse-order` -Starting in version 1.6.0 Loki and Promtail have flags which will dump the entire config object to stderr, or the log file, when they start. +Loki and Promtail have flags which will dump the entire config object to stderr or the log file when they start. -`-print-config-stderr` is nice when running loki directly e.g. `./loki ` as you can get a quick output of the entire Loki config. +`-print-config-stderr` works well when invoking Loki from the command line, as you can get a quick output of the entire Loki configuration. -`-log-config-reverse-order` is the flag we run Loki with in all our environments, the config entries are reversed so that the order of configs reads correctly top to bottom when viewed in Grafana's Explore. +`-log-config-reverse-order` is the flag we run Loki with in all our environments. The configuration entries are reversed, so that the order of the configuration reads correctly top to bottom when viewed in Grafana's Explore. 
diff --git a/docs/sources/getting-started/grafana.md b/docs/sources/getting-started/grafana.md index 4ea2346666fe1..6903b57d605ba 100644 --- a/docs/sources/getting-started/grafana.md +++ b/docs/sources/getting-started/grafana.md @@ -3,10 +3,10 @@ title: Loki in Grafana --- # Loki in Grafana -Grafana ships with built-in support for Loki for versions greater than -[6.0](https://grafana.com/grafana/download/6.0.0). Using -[6.3](https://grafana.com/grafana/download/6.3.0) or later is highly -recommended to take advantage of new [LogQL]({{< relref "../logql/_index.md" >}}) functionality. +[Grafana 6.0](https://grafana.com/grafana/download/6.0.0) and more recent +versions have built-in support for Loki. +Use [Grafana 6.3](https://grafana.com/grafana/download/6.3.0) or a more +recent version to take advantage of [LogQL]({{< relref "../logql/_index.md" >}}) functionality. 1. Log into your Grafana instance. If this is your first time running Grafana, the username and password are both defaulted to `admin`. @@ -27,7 +27,6 @@ Read more about Grafana's Explore feature in the [Grafana documentation](http://docs.grafana.org/features/explore) and on how to search and filter for logs with Loki. -> To configure the datasource via provisioning, see [Configuring Grafana via -> Provisioning](http://docs.grafana.org/features/datasources/loki/#configure-the-datasource-with-provisioning) -> in the Grafana documentation and make sure to adjust the URL similarly as -> shown above. +To configure Loki as a datasource via provisioning, see [Configuring Grafana via +Provisioning](http://docs.grafana.org/features/datasources/loki/#configure-the-datasource-with-provisioning). +Set the URL in the provisioning. diff --git a/docs/sources/getting-started/troubleshooting.md b/docs/sources/getting-started/troubleshooting.md index c224e0acaab61..3e246781e87c3 100644 --- a/docs/sources/getting-started/troubleshooting.md +++ b/docs/sources/getting-started/troubleshooting.md @@ -63,11 +63,15 @@ $ kubectl port-forward loki-promtail-jrfg7 9080 ## Debug output -Both Loki and Promtail support a log level flag on the command-line: +Both Loki and Promtail support a log level flag with the addition of +a command-line option: ```bash -$ loki -log.level=debug -$ promtail -log.level=debug +loki -log.level=debug +``` + +```bash +promtail -log.level=debug ``` ## Failed to create target, `ioutil.ReadDir: readdirent: not a directory` diff --git a/docs/sources/installation/docker.md b/docs/sources/installation/docker.md index a61d8ff1f5439..d17c5d88a167e 100644 --- a/docs/sources/installation/docker.md +++ b/docs/sources/installation/docker.md @@ -28,7 +28,7 @@ When finished, `loki-config.yaml` and `promtail-config.yaml` are downloaded in t Navigate to http://localhost:3100/metrics to view the metrics and http://localhost:3100/ready for readiness. -As of v1.6.0, image is configured to run by default as user loki with UID `10001` and GID `10001`. You can use a different user, specially if you are using bind mounts, by specifying the UID with a `docker run` command and using `--user=UID` with numeric UID suited to your needs. +The image is configured to run by default as user loki with UID `10001` and GID `10001`. You can use a different user, specially if you are using bind mounts, by specifying the UID with a `docker run` command and using `--user=UID` with numeric UID suited to your needs. 
**Windows** diff --git a/docs/sources/installation/helm.md b/docs/sources/installation/helm.md index 35eb33bc3ae1f..6d9654195d353 100644 --- a/docs/sources/installation/helm.md +++ b/docs/sources/installation/helm.md @@ -9,9 +9,6 @@ Make sure you have Helm [installed](https://helm.sh/docs/using_helm/#installing- Add [Loki's chart repository](https://github.com/grafana/helm-charts) to Helm: -> **PLEASE NOTE** On 2020/12/11 Loki's Helm charts were moved from their initial location within the -Loki repo and hosted at https://grafana.github.io/loki/charts to their new location at https://github.com/grafana/helm-charts which are hosted at https://grafana.github.io/helm-charts - ```bash helm repo add grafana https://grafana.github.io/helm-charts ``` @@ -24,7 +21,7 @@ helm repo update ## Deploy Loki to your cluster -### Deploy with default config +### Deploy with default configuration ```bash helm upgrade --install loki grafana/loki-stack @@ -36,7 +33,7 @@ helm upgrade --install loki grafana/loki-stack helm upgrade --install loki --namespace=loki grafana/loki ``` -### Deploy with custom config +### Deploy with custom configuration ```bash helm upgrade --install loki grafana/loki --set "key1=val1,key2=val2,..." @@ -87,11 +84,11 @@ output above. Then follow the [instructions for adding the Loki Data Source](../ ## Run Loki behind HTTPS ingress -If Loki and Promtail are deployed on different clusters you can add an Ingress -in front of Loki. By adding a certificate you create an HTTPS endpoint. For -extra security you can also enable Basic Authentication on the Ingress. +If Loki and Promtail are deployed on different clusters, you can add an Ingress +in front of Loki. By adding a certificate, you create an HTTPS endpoint. For +extra security you can also enable Basic Authentication on Ingress. -In Promtail, set the following values to communicate using HTTPS and basic authentication: +In the Promtail configuration, set the following values to communicate using HTTPS and basic authentication: ```yaml loki: @@ -127,11 +124,11 @@ spec: ## Run Promtail with syslog support -In order to receive and process syslog message into Promtail, the following changes will be necessary: +In order to receive and process syslog messages in Promtail, the following changes will be necessary: * Review the [Promtail syslog-receiver configuration documentation](/docs/clients/promtail/scraping.md#syslog-receiver) -* Configure the Promtail helm chart with the syslog configuration added to the `extraScrapeConfigs` section and associated service definition to listen for syslog messages. For example: +* Configure the Promtail Helm chart with the syslog configuration added to the `extraScrapeConfigs` section and associated service definition to listen for syslog messages. For example: ```yaml extraScrapeConfigs: @@ -155,7 +152,7 @@ In order to receive and process syslog message into Promtail, the following chan * Review the [Promtail systemd-journal configuration documentation](/docs/clients/promtail/scraping.md#journal-scraping-linux-only) -* Configure the Promtail helm chart with the systemd-journal configuration added to the `extraScrapeConfigs` section and volume mounts for the Promtail pods to access the log files. For example: +* Configure the Promtail Helm chart with the systemd-journal configuration added to the `extraScrapeConfigs` section and volume mounts for the Promtail pods to access the log files. 
For example: ```yaml # Add additional scrape config diff --git a/docs/sources/installation/local.md b/docs/sources/installation/local.md index 91aa699ea63d5..ba970c6b1d03b 100644 --- a/docs/sources/installation/local.md +++ b/docs/sources/installation/local.md @@ -15,26 +15,28 @@ In order to log events with Loki, you must download and install both Promtail an **Note:** Do not download LogCLI or Loki Canary at this time. [LogCLI](../../getting-started/logcli/) allows you to run Loki queries in a command line interface. [Loki Canary](../../operations/loki-canary/) is a tool to audit Loki performance. 4. Unzip the package contents into the same directory. This is where the two programs will run. 5. In the command line, change directory (`cd` on most systems) to the directory with Loki and Promtail. Copy and paste the commands below into your command line to download generic configuration files: -``` -wget https://raw.githubusercontent.com/grafana/loki/master/cmd/loki/loki-local-config.yaml -wget https://raw.githubusercontent.com/grafana/loki/main/clients/cmd/promtail/promtail-local-config.yaml -``` + + ``` + wget https://raw.githubusercontent.com/grafana/loki/master/cmd/loki/loki-local-config.yaml + wget https://raw.githubusercontent.com/grafana/loki/main/clients/cmd/promtail/promtail-local-config.yaml + ``` 6. Enter the following command to start Loki: -**Windows** + **Windows** -``` -.\loki-windows-amd64.exe --config.file=loki-local-config.yaml -``` + ``` + .\loki-windows-amd64.exe --config.file=loki-local-config.yaml + ``` -**Linux** -``` -./loki-linux-amd64 -config.file=loki-local-config.yaml -``` + **Linux** + ``` + ./loki-linux-amd64 -config.file=loki-local-config.yaml + ``` Loki runs and displays Loki logs in your command line and on http://localhost:3100/metrics. -Congratulations, Loki is installed and running! Next, you might want edit the Promtail config file to [get logs into Loki](../../getting-started/get-logs-into-loki/). +The next step will be running an agent to send logs to Loki. +To do so with Promtail, refer to [get logs into Loki](../../getting-started/get-logs-into-loki/). ## Release binaries - openSUSE Linux only
docs
Remove wording like "As of version 1.6, you can..." (#4363)
00e686ddcea0be6158c4de71467db86a0e05a7c4
2024-08-08 15:10:16
benclive
chore: Add metastore client as dep for rf1 querier & ignore auth for ListBlocks (#13786)
false
diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go index 14c7290bfa94b..e20283b33696a 100644 --- a/pkg/loki/loki.go +++ b/pkg/loki/loki.go @@ -418,6 +418,7 @@ func (t *Loki) setupAuthMiddleware() { "/grpc.health.v1.Health/Check", "/grpc.health.v1.Health/Watch", "/metastorepb.MetastoreService/AddBlock", + "/metastorepb.MetastoreService/ListBlocksForQuery", "/logproto.StreamData/GetStreamRates", "/frontend.Frontend/Process", "/frontend.Frontend/NotifyClientShutdown", @@ -724,7 +725,7 @@ func (t *Loki) setupModuleManager() error { Store: {Overrides, IndexGatewayRing}, IngesterRF1: {Store, Server, MemberlistKV, TenantConfigs, MetastoreClient, Analytics}, Ingester: {Store, Server, MemberlistKV, TenantConfigs, Analytics}, - Querier: {Store, Ring, Server, IngesterQuerier, PatternRingClient, Overrides, Analytics, CacheGenerationLoader, QuerySchedulerRing}, + Querier: {Store, Ring, Server, IngesterQuerier, PatternRingClient, MetastoreClient, Overrides, Analytics, CacheGenerationLoader, QuerySchedulerRing}, QueryFrontendTripperware: {Server, Overrides, TenantConfigs}, QueryFrontend: {QueryFrontendTripperware, Analytics, CacheGenerationLoader, QuerySchedulerRing}, QueryScheduler: {Server, Overrides, MemberlistKV, Analytics, QuerySchedulerRing}, diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index d823f5cedb5cd..898249682b02d 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -1829,12 +1829,16 @@ func (t *Loki) initMetastore() (services.Service, error) { if err != nil { return nil, err } + // Service methods have tenant auth disabled in the fakeauth.SetupAuthMiddleware call since this is a shared service metastorepb.RegisterMetastoreServiceServer(t.Server.GRPC, m) return m, nil } func (t *Loki) initMetastoreClient() (services.Service, error) { + if !t.Cfg.IngesterRF1.Enabled && !t.Cfg.QuerierRF1.Enabled { + return nil, nil + } mc, err := metastoreclient.New(t.Cfg.MetastoreClient, prometheus.DefaultRegisterer) if err != nil { return nil, err
chore
Add metastore client as dep for rf1 querier & ignore auth for ListBlocks (#13786)
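The guard added to initMetastoreClient returns a nil service when neither RF1 component is enabled, so the shared dependency is constructed only on demand. A self-contained sketch of that pattern with hypothetical stand-in types (Service, Config, and clientService are not the real dskit/Loki types):

```go
package main

import "fmt"

// Service stands in for dskit's services.Service in this sketch.
type Service interface{ Run() }

type clientService struct{}

func (clientService) Run() { fmt.Println("metastore client running") }

// Config mirrors the two feature flags consulted by the diff's guard.
type Config struct {
	IngesterRF1Enabled bool
	QuerierRF1Enabled  bool
}

// initMetastoreClient returns a nil service when no RF1 component needs
// it, which tells the module manager there is nothing to start or stop.
func initMetastoreClient(cfg Config) (Service, error) {
	if !cfg.IngesterRF1Enabled && !cfg.QuerierRF1Enabled {
		return nil, nil
	}
	return clientService{}, nil
}

func main() {
	svc, _ := initMetastoreClient(Config{QuerierRF1Enabled: true})
	if svc != nil {
		svc.Run()
	}
}
```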
c74d0eb1076c2f13cc7728286b41b58b14a2d1cd
2024-03-26 01:46:56
Christian Haudum
fix(bloom-gateway): Close all block queriers in case of error (#12338)
false
diff --git a/pkg/bloomgateway/processor.go b/pkg/bloomgateway/processor.go index 9fc6aca57dc11..e49679fe61ea8 100644 --- a/pkg/bloomgateway/processor.go +++ b/pkg/bloomgateway/processor.go @@ -97,13 +97,21 @@ func (p *processor) processBlocks(ctx context.Context, data []blockWithTasks) er return err } + defer func() { + for i := range bqs { + if bqs[i] == nil { + continue + } + bqs[i].Close() + } + }() + return concurrency.ForEachJob(ctx, len(bqs), p.concurrency, func(ctx context.Context, i int) error { bq := bqs[i] if bq == nil { // TODO(chaudum): Add metric for skipped blocks return nil } - defer bq.Close() block := data[i] level.Debug(p.logger).Log(
fix
Close all block queriers in case of error (#12338)
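The fix moves cleanup out of the per-job closure and into a single deferred loop, so an error from one job inside concurrency.ForEachJob can no longer leak the remaining block queriers. A stand-alone sketch of the same pattern (the closer type is hypothetical):

```go
package main

import "errors"

type closer struct{ closed bool }

func (c *closer) Close() { c.closed = true }

// process acquires a batch of closers up front and guarantees every
// non-nil one is released, even when a job in the middle fails.
func process(bqs []*closer) error {
	defer func() {
		for i := range bqs {
			if bqs[i] == nil {
				continue // skipped slots have nothing to close
			}
			bqs[i].Close()
		}
	}()
	for i, bq := range bqs {
		if bq == nil {
			continue
		}
		if i == 1 {
			return errors.New("job failed") // later closers are still released
		}
	}
	return nil
}

func main() {
	bqs := []*closer{{}, {}, nil, {}}
	_ = process(bqs)
}
```

Compared with the replaced per-job `defer bq.Close()`, the single deferred loop also covers queriers whose job never runs because the batch aborted early.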
75e031d8e81ef201647333e2b808a8e8f949e9ba
2024-03-22 15:15:21
BiKangNing
chore: fix function names in doc strings (#12313)
false
diff --git a/pkg/logql/evaluator.go b/pkg/logql/evaluator.go index 2d6837ef6a78a..903c4a5555b2d 100644 --- a/pkg/logql/evaluator.go +++ b/pkg/logql/evaluator.go @@ -646,7 +646,7 @@ func (r AbsentRangeVectorEvaluator) Error() error { return r.iter.Error() } -// binOpExpr explicitly does not handle when both legs are literals as +// newBinOpStepEvaluator explicitly does not handle when both legs are literals as // it makes the type system simpler and these are reduced in mustNewBinOpExpr func newBinOpStepEvaluator( ctx context.Context, diff --git a/pkg/logqlmodel/stats/context.go b/pkg/logqlmodel/stats/context.go index 65bbb4fde8750..31344b01c585d 100644 --- a/pkg/logqlmodel/stats/context.go +++ b/pkg/logqlmodel/stats/context.go @@ -153,7 +153,7 @@ func JoinResults(ctx context.Context, res Result) { stats.result.Merge(res) } -// JoinIngesterResult joins the ingester result statistics in a concurrency-safe manner. +// JoinIngesters joins the ingester result statistics in a concurrency-safe manner. func JoinIngesters(ctx context.Context, inc Ingester) { stats := FromContext(ctx) stats.mtx.Lock() diff --git a/pkg/storage/chunk/cache/cache_gen.go b/pkg/storage/chunk/cache/cache_gen.go index 5bd7038aa4016..689e165e75d01 100644 --- a/pkg/storage/chunk/cache/cache_gen.go +++ b/pkg/storage/chunk/cache/cache_gen.go @@ -55,7 +55,7 @@ func InjectCacheGenNumber(ctx context.Context, cacheGen string) context.Context return context.WithValue(ctx, interface{}(cacheGenContextKey), cacheGen) } -// ExtractCacheGenNumbersFromHeaders gets the cache gen from the context. +// ExtractCacheGenNumber gets the cache gen from the context. func ExtractCacheGenNumber(ctx context.Context) string { cacheGenNumber, ok := ctx.Value(cacheGenContextKey).(string) if !ok { diff --git a/pkg/storage/chunk/client/aws/dynamodb_storage_client.go b/pkg/storage/chunk/client/aws/dynamodb_storage_client.go index d27985ce3ecf5..c48bf518cc23a 100644 --- a/pkg/storage/chunk/client/aws/dynamodb_storage_client.go +++ b/pkg/storage/chunk/client/aws/dynamodb_storage_client.go @@ -538,7 +538,7 @@ func processChunkResponse(response *dynamodb.BatchGetItemOutput, chunksByKey map return result, nil } -// PutChunkAndIndex implements chunk.ObjectAndIndexClient +// PutChunksAndIndex implements chunk.ObjectAndIndexClient // Combine both sets of writes before sending to DynamoDB, for performance func (a dynamoDBStorageClient) PutChunksAndIndex(ctx context.Context, chunks []chunk.Chunk, index index.WriteBatch) error { dynamoDBWrites, err := a.writesForChunks(chunks) diff --git a/pkg/util/metrics_helper.go b/pkg/util/metrics_helper.go index 5e7b908922e4f..58733c1c1e9ea 100644 --- a/pkg/util/metrics_helper.go +++ b/pkg/util/metrics_helper.go @@ -759,7 +759,7 @@ func GetSumOfHistogramSampleCount(families []*dto.MetricFamily, metricName strin return sum } -// GetLables returns list of label combinations used by this collector at the time of call. +// GetLabels returns list of label combinations used by this collector at the time of call. // This can be used to find and delete unused metrics. func GetLabels(c prometheus.Collector, filter map[string]string) ([]labels.Labels, error) { ch := make(chan prometheus.Metric, 16)
chore
fix function names in doc strings (#12313)
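All four hunks apply the same Go documentation convention: a doc comment must begin with the name of the identifier it documents, otherwise godoc renders it confusingly and linters such as revive flag the mismatch. A two-line illustration (Sum is a made-up function):

```go
package docstyle

// Sum returns the sum of a and b. The comment begins with "Sum", the
// function's own name; this is the form the hunks above restore for
// newBinOpStepEvaluator, JoinIngesters, ExtractCacheGenNumber,
// PutChunksAndIndex, and GetLabels.
func Sum(a, b int) int { return a + b }
```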
50c4216d00cf5a0350bd8f389fd9d68c15708c46
2023-07-18 22:27:29
Joao Marcal
operator: add mTLS authentication to tenants (#9906)
false
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md index caf214ae2c618..de3787a56ba19 100644 --- a/operator/CHANGELOG.md +++ b/operator/CHANGELOG.md @@ -1,5 +1,6 @@ ## Main +- [9906](https://github.com/grafana/loki/pull/9906) **JoaoBraveCoding**: Add mTLS authentication to tenants - [9963](https://github.com/grafana/loki/pull/9963) **xperimental**: Fix application tenant alertmanager configuration - [9795](https://github.com/grafana/loki/pull/9795) **JoaoBraveCoding**: Add initContainer to zone aware components to gatekeep them from starting without the AZ annotation - [9503](https://github.com/grafana/loki/pull/9503) **shwetaap**: Add Pod annotations with node topology labels to support zone aware scheduling diff --git a/operator/apis/loki/v1/lokistack_types.go b/operator/apis/loki/v1/lokistack_types.go index 9811e0f93af5c..298e668996d63 100644 --- a/operator/apis/loki/v1/lokistack_types.go +++ b/operator/apis/loki/v1/lokistack_types.go @@ -183,6 +183,16 @@ type OIDCSpec struct { UsernameClaim string `json:"usernameClaim,omitempty"` } +// MTLSSpec specifies mTLS configuration parameters. +type MTLSSpec struct { + // CA defines the spec for the custom CA for tenant's authentication. + // + // +required + // +kubebuilder:validation:Required + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="CA ConfigMap" + CA *CASpec `json:"ca"` +} + // AuthenticationSpec defines the oidc configuration per tenant for lokiStack Gateway component. type AuthenticationSpec struct { // TenantName defines the name of the tenant. @@ -199,10 +209,15 @@ type AuthenticationSpec struct { TenantID string `json:"tenantId"` // OIDC defines the spec for the OIDC tenant's authentication. // - // +required - // +kubebuilder:validation:Required + // +optional // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="OIDC Configuration" - OIDC *OIDCSpec `json:"oidc"` + OIDC *OIDCSpec `json:"oidc,omitempty"` + + // TLSConfig defines the spec for the mTLS tenant's authentication. + // + // +optional + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="mTLS Configuration" + MTLS *MTLSSpec `json:"mTLS,omitempty"` } // ModeType is the authentication/authorization mode in which LokiStack Gateway will be configured. @@ -414,8 +429,7 @@ type HashRingSpec struct { MemberList *MemberListSpec `json:"memberlist,omitempty"` } -// ObjectStorageTLSSpec is the TLS configuration for reaching the object storage endpoint. -type ObjectStorageTLSSpec struct { +type CASpec struct { // Key is the data key of a ConfigMap containing a CA certificate. // It needs to be in the same namespace as the LokiStack custom resource. // If empty, it defaults to "service-ca.crt". @@ -433,6 +447,11 @@ type ObjectStorageTLSSpec struct { CA string `json:"caName"` } +// ObjectStorageTLSSpec is the TLS configuration for reaching the object storage endpoint. +type ObjectStorageTLSSpec struct { + CASpec `json:",inline"` +} + // ObjectStorageSecretType defines the type of storage which can be used with the Loki cluster. // // +kubebuilder:validation:Enum=azure;gcs;s3;swift;alibabacloud; @@ -926,8 +945,15 @@ const ( // ReasonMissingGatewayTenantSecret when the required tenant secret // for authentication is missing. ReasonMissingGatewayTenantSecret LokiStackConditionReason = "MissingGatewayTenantSecret" + // ReasonMissingGatewayTenantConfigMap when the required tenant configmap + // for authentication is missing. 
+ ReasonMissingGatewayTenantConfigMap LokiStackConditionReason = "MissingGatewayTenantConfigMap" // ReasonInvalidGatewayTenantSecret when the format of the secret is invalid. ReasonInvalidGatewayTenantSecret LokiStackConditionReason = "InvalidGatewayTenantSecret" + // ReasonInvalidGatewayTenantConfigMap when the format of the configmap is invalid. + ReasonInvalidGatewayTenantConfigMap LokiStackConditionReason = "InvalidGatewayTenantConfigMap" + // ReasonMissingGatewayAuthenticationConfig when the config for when a tenant is missing authentication config + ReasonMissingGatewayAuthenticationConfig LokiStackConditionReason = "MissingGatewayTenantAuthenticationConfig" // ReasonInvalidTenantsConfiguration when the tenant configuration provided is invalid. ReasonInvalidTenantsConfiguration LokiStackConditionReason = "InvalidTenantsConfiguration" // ReasonMissingGatewayOpenShiftBaseDomain when the reconciler cannot lookup the OpenShift DNS base domain. diff --git a/operator/apis/loki/v1/zz_generated.deepcopy.go b/operator/apis/loki/v1/zz_generated.deepcopy.go index ced43973e2c21..41af4bf5ef3ee 100644 --- a/operator/apis/loki/v1/zz_generated.deepcopy.go +++ b/operator/apis/loki/v1/zz_generated.deepcopy.go @@ -380,6 +380,11 @@ func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) { *out = new(OIDCSpec) (*in).DeepCopyInto(*out) } + if in.MTLS != nil { + in, out := &in.MTLS, &out.MTLS + *out = new(MTLSSpec) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationSpec. @@ -426,6 +431,21 @@ func (in *AuthorizationSpec) DeepCopy() *AuthorizationSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CASpec) DeepCopyInto(out *CASpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CASpec. +func (in *CASpec) DeepCopy() *CASpec { + if in == nil { + return nil + } + out := new(CASpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterProxy) DeepCopyInto(out *ClusterProxy) { *out = *in @@ -911,6 +931,26 @@ func (in *LokiTemplateSpec) DeepCopy() *LokiTemplateSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MTLSSpec) DeepCopyInto(out *MTLSSpec) { + *out = *in + if in.CA != nil { + in, out := &in.CA, &out.CA + *out = new(CASpec) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MTLSSpec. +func (in *MTLSSpec) DeepCopy() *MTLSSpec { + if in == nil { + return nil + } + out := new(MTLSSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MemberListSpec) DeepCopyInto(out *MemberListSpec) { *out = *in @@ -1020,6 +1060,7 @@ func (in *ObjectStorageSpec) DeepCopy() *ObjectStorageSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ObjectStorageTLSSpec) DeepCopyInto(out *ObjectStorageTLSSpec) { *out = *in + out.CASpec = in.CASpec } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageTLSSpec. 
diff --git a/operator/apis/loki/v1beta1/lokistack_types.go b/operator/apis/loki/v1beta1/lokistack_types.go index 5a3b147ea2b21..28f59785280aa 100644 --- a/operator/apis/loki/v1beta1/lokistack_types.go +++ b/operator/apis/loki/v1beta1/lokistack_types.go @@ -866,7 +866,9 @@ func (src *LokiStack) ConvertTo(dstRaw conversion.Hub) error { var storageTLS *v1.ObjectStorageTLSSpec if src.Spec.Storage.TLS != nil { storageTLS = &v1.ObjectStorageTLSSpec{ - CA: src.Spec.Storage.TLS.CA, + CASpec: v1.CASpec{ + CA: src.Spec.Storage.TLS.CA, + }, } } diff --git a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml index e4aaa3911f026..d91aee8bf9e39 100644 --- a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml @@ -150,7 +150,7 @@ metadata: categories: OpenShift Optional, Logging & Tracing certified: "false" containerImage: docker.io/grafana/loki-operator:main-ac1c1fd - createdAt: "2023-07-04T17:17:17Z" + createdAt: "2023-07-17T16:04:46Z" description: The Community Loki Operator provides Kubernetes native deployment and management of Loki and related logging components. operators.operatorframework.io/builder: operator-sdk-unknown @@ -698,6 +698,23 @@ spec: configuration spec per tenant. displayName: Authentication path: tenants.authentication + - description: TLSConfig defines the spec for the mTLS tenant's authentication. + displayName: mTLS Configuration + path: tenants.authentication[0].mTLS + - description: CA defines the spec for the custom CA for tenant's authentication. + displayName: CA ConfigMap + path: tenants.authentication[0].mTLS.ca + - description: Key is the data key of a ConfigMap containing a CA certificate. + It needs to be in the same namespace as the LokiStack custom resource. If + empty, it defaults to "service-ca.crt". + displayName: CA ConfigMap Key + path: tenants.authentication[0].mTLS.ca.caKey + - description: CA is the name of a ConfigMap containing a CA certificate. It + needs to be in the same namespace as the LokiStack custom resource. + displayName: CA ConfigMap Name + path: tenants.authentication[0].mTLS.ca.caName + x-descriptors: + - urn:alm:descriptor:io.kubernetes:ConfigMap - description: OIDC defines the spec for the OIDC tenant's authentication. displayName: OIDC Configuration path: tenants.authentication[0].oidc @@ -1359,14 +1376,6 @@ spec: - get - list - watch - - apiGroups: - - "" - resources: - - secrets - verbs: - - get - - list - - watch - apiGroups: - apps resources: diff --git a/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml index 9debe361c0ee1..c194829794ab7 100644 --- a/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml +++ b/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml @@ -3659,6 +3659,31 @@ spec: description: AuthenticationSpec defines the oidc configuration per tenant for lokiStack Gateway component. properties: + mTLS: + description: TLSConfig defines the spec for the mTLS tenant's + authentication. + properties: + ca: + description: CA defines the spec for the custom CA for + tenant's authentication. + properties: + caKey: + description: Key is the data key of a ConfigMap + containing a CA certificate. 
It needs to be in + the same namespace as the LokiStack custom resource. + If empty, it defaults to "service-ca.crt". + type: string + caName: + description: CA is the name of a ConfigMap containing + a CA certificate. It needs to be in the same namespace + as the LokiStack custom resource. + type: string + required: + - caName + type: object + required: + - ca + type: object oidc: description: OIDC defines the spec for the OIDC tenant's authentication. @@ -3697,7 +3722,6 @@ spec: description: TenantName defines the name of the tenant. type: string required: - - oidc - tenantId - tenantName type: object diff --git a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml index 9407609184ed1..92d0c0fa961f7 100644 --- a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml @@ -150,7 +150,7 @@ metadata: categories: OpenShift Optional, Logging & Tracing certified: "false" containerImage: docker.io/grafana/loki-operator:main-ac1c1fd - createdAt: "2023-07-04T17:17:12Z" + createdAt: "2023-07-17T16:04:44Z" description: The Community Loki Operator provides Kubernetes native deployment and management of Loki and related logging components. operators.operatorframework.io/builder: operator-sdk-unknown @@ -698,6 +698,23 @@ spec: configuration spec per tenant. displayName: Authentication path: tenants.authentication + - description: TLSConfig defines the spec for the mTLS tenant's authentication. + displayName: mTLS Configuration + path: tenants.authentication[0].mTLS + - description: CA defines the spec for the custom CA for tenant's authentication. + displayName: CA ConfigMap + path: tenants.authentication[0].mTLS.ca + - description: Key is the data key of a ConfigMap containing a CA certificate. + It needs to be in the same namespace as the LokiStack custom resource. If + empty, it defaults to "service-ca.crt". + displayName: CA ConfigMap Key + path: tenants.authentication[0].mTLS.ca.caKey + - description: CA is the name of a ConfigMap containing a CA certificate. It + needs to be in the same namespace as the LokiStack custom resource. + displayName: CA ConfigMap Name + path: tenants.authentication[0].mTLS.ca.caName + x-descriptors: + - urn:alm:descriptor:io.kubernetes:ConfigMap - description: OIDC defines the spec for the OIDC tenant's authentication. displayName: OIDC Configuration path: tenants.authentication[0].oidc @@ -1346,14 +1363,6 @@ spec: - get - list - watch - - apiGroups: - - "" - resources: - - secrets - verbs: - - get - - list - - watch - apiGroups: - apps resources: diff --git a/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml index fe851a38a93ca..c0c4097d53063 100644 --- a/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml +++ b/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml @@ -3659,6 +3659,31 @@ spec: description: AuthenticationSpec defines the oidc configuration per tenant for lokiStack Gateway component. properties: + mTLS: + description: TLSConfig defines the spec for the mTLS tenant's + authentication. + properties: + ca: + description: CA defines the spec for the custom CA for + tenant's authentication. + properties: + caKey: + description: Key is the data key of a ConfigMap + containing a CA certificate. 
It needs to be in + the same namespace as the LokiStack custom resource. + If empty, it defaults to "service-ca.crt". + type: string + caName: + description: CA is the name of a ConfigMap containing + a CA certificate. It needs to be in the same namespace + as the LokiStack custom resource. + type: string + required: + - caName + type: object + required: + - ca + type: object oidc: description: OIDC defines the spec for the OIDC tenant's authentication. @@ -3697,7 +3722,6 @@ spec: description: TenantName defines the name of the tenant. type: string required: - - oidc - tenantId - tenantName type: object diff --git a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml index 451ad2714b0e1..f7ac804c93c96 100644 --- a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml @@ -150,7 +150,7 @@ metadata: categories: OpenShift Optional, Logging & Tracing certified: "false" containerImage: quay.io/openshift-logging/loki-operator:v0.1.0 - createdAt: "2023-07-04T17:17:21Z" + createdAt: "2023-07-17T16:04:47Z" description: | The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging. ## Prerequisites and Requirements @@ -711,6 +711,23 @@ spec: configuration spec per tenant. displayName: Authentication path: tenants.authentication + - description: TLSConfig defines the spec for the mTLS tenant's authentication. + displayName: mTLS Configuration + path: tenants.authentication[0].mTLS + - description: CA defines the spec for the custom CA for tenant's authentication. + displayName: CA ConfigMap + path: tenants.authentication[0].mTLS.ca + - description: Key is the data key of a ConfigMap containing a CA certificate. + It needs to be in the same namespace as the LokiStack custom resource. If + empty, it defaults to "service-ca.crt". + displayName: CA ConfigMap Key + path: tenants.authentication[0].mTLS.ca.caKey + - description: CA is the name of a ConfigMap containing a CA certificate. It + needs to be in the same namespace as the LokiStack custom resource. + displayName: CA ConfigMap Name + path: tenants.authentication[0].mTLS.ca.caName + x-descriptors: + - urn:alm:descriptor:io.kubernetes:ConfigMap - description: OIDC defines the spec for the OIDC tenant's authentication. displayName: OIDC Configuration path: tenants.authentication[0].oidc @@ -1344,14 +1361,6 @@ spec: - get - list - watch - - apiGroups: - - "" - resources: - - secrets - verbs: - - get - - list - - watch - apiGroups: - apps resources: diff --git a/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml index 4267ea1ad3312..54a2d5527e2cb 100644 --- a/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml +++ b/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml @@ -3659,6 +3659,31 @@ spec: description: AuthenticationSpec defines the oidc configuration per tenant for lokiStack Gateway component. properties: + mTLS: + description: TLSConfig defines the spec for the mTLS tenant's + authentication. + properties: + ca: + description: CA defines the spec for the custom CA for + tenant's authentication. + properties: + caKey: + description: Key is the data key of a ConfigMap + containing a CA certificate. It needs to be in + the same namespace as the LokiStack custom resource. 
+ If empty, it defaults to "service-ca.crt". + type: string + caName: + description: CA is the name of a ConfigMap containing + a CA certificate. It needs to be in the same namespace + as the LokiStack custom resource. + type: string + required: + - caName + type: object + required: + - ca + type: object oidc: description: OIDC defines the spec for the OIDC tenant's authentication. @@ -3697,7 +3722,6 @@ spec: description: TenantName defines the name of the tenant. type: string required: - - oidc - tenantId - tenantName type: object diff --git a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml index babe7ef7ac123..bb419cee885e3 100644 --- a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml +++ b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml @@ -3642,6 +3642,31 @@ spec: description: AuthenticationSpec defines the oidc configuration per tenant for lokiStack Gateway component. properties: + mTLS: + description: TLSConfig defines the spec for the mTLS tenant's + authentication. + properties: + ca: + description: CA defines the spec for the custom CA for + tenant's authentication. + properties: + caKey: + description: Key is the data key of a ConfigMap + containing a CA certificate. It needs to be in + the same namespace as the LokiStack custom resource. + If empty, it defaults to "service-ca.crt". + type: string + caName: + description: CA is the name of a ConfigMap containing + a CA certificate. It needs to be in the same namespace + as the LokiStack custom resource. + type: string + required: + - caName + type: object + required: + - ca + type: object oidc: description: OIDC defines the spec for the OIDC tenant's authentication. @@ -3680,7 +3705,6 @@ spec: description: TenantName defines the name of the tenant. type: string required: - - oidc - tenantId - tenantName type: object diff --git a/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml index 48aa49ee12b1c..a5a2d2f10a523 100644 --- a/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml +++ b/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml @@ -611,6 +611,23 @@ spec: configuration spec per tenant. displayName: Authentication path: tenants.authentication + - description: TLSConfig defines the spec for the mTLS tenant's authentication. + displayName: mTLS Configuration + path: tenants.authentication[0].mTLS + - description: CA defines the spec for the custom CA for tenant's authentication. + displayName: CA ConfigMap + path: tenants.authentication[0].mTLS.ca + - description: Key is the data key of a ConfigMap containing a CA certificate. + It needs to be in the same namespace as the LokiStack custom resource. If + empty, it defaults to "service-ca.crt". + displayName: CA ConfigMap Key + path: tenants.authentication[0].mTLS.ca.caKey + - description: CA is the name of a ConfigMap containing a CA certificate. It + needs to be in the same namespace as the LokiStack custom resource. + displayName: CA ConfigMap Name + path: tenants.authentication[0].mTLS.ca.caName + x-descriptors: + - urn:alm:descriptor:io.kubernetes:ConfigMap - description: OIDC defines the spec for the OIDC tenant's authentication. 
displayName: OIDC Configuration path: tenants.authentication[0].oidc diff --git a/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml index 02ca02678a709..cd6cf3cb516ad 100644 --- a/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml +++ b/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml @@ -611,6 +611,23 @@ spec: configuration spec per tenant. displayName: Authentication path: tenants.authentication + - description: TLSConfig defines the spec for the mTLS tenant's authentication. + displayName: mTLS Configuration + path: tenants.authentication[0].mTLS + - description: CA defines the spec for the custom CA for tenant's authentication. + displayName: CA ConfigMap + path: tenants.authentication[0].mTLS.ca + - description: Key is the data key of a ConfigMap containing a CA certificate. + It needs to be in the same namespace as the LokiStack custom resource. If + empty, it defaults to "service-ca.crt". + displayName: CA ConfigMap Key + path: tenants.authentication[0].mTLS.ca.caKey + - description: CA is the name of a ConfigMap containing a CA certificate. It + needs to be in the same namespace as the LokiStack custom resource. + displayName: CA ConfigMap Name + path: tenants.authentication[0].mTLS.ca.caName + x-descriptors: + - urn:alm:descriptor:io.kubernetes:ConfigMap - description: OIDC defines the spec for the OIDC tenant's authentication. displayName: OIDC Configuration path: tenants.authentication[0].oidc diff --git a/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml index bd788d64d3e63..68ecb43802a04 100644 --- a/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml +++ b/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml @@ -623,6 +623,23 @@ spec: configuration spec per tenant. displayName: Authentication path: tenants.authentication + - description: TLSConfig defines the spec for the mTLS tenant's authentication. + displayName: mTLS Configuration + path: tenants.authentication[0].mTLS + - description: CA defines the spec for the custom CA for tenant's authentication. + displayName: CA ConfigMap + path: tenants.authentication[0].mTLS.ca + - description: Key is the data key of a ConfigMap containing a CA certificate. + It needs to be in the same namespace as the LokiStack custom resource. If + empty, it defaults to "service-ca.crt". + displayName: CA ConfigMap Key + path: tenants.authentication[0].mTLS.ca.caKey + - description: CA is the name of a ConfigMap containing a CA certificate. It + needs to be in the same namespace as the LokiStack custom resource. + displayName: CA ConfigMap Name + path: tenants.authentication[0].mTLS.ca.caName + x-descriptors: + - urn:alm:descriptor:io.kubernetes:ConfigMap - description: OIDC defines the spec for the OIDC tenant's authentication. 
displayName: OIDC Configuration path: tenants.authentication[0].oidc diff --git a/operator/config/rbac/role.yaml b/operator/config/rbac/role.yaml index 509c03861a4f6..f2df6c8a2cfe0 100644 --- a/operator/config/rbac/role.yaml +++ b/operator/config/rbac/role.yaml @@ -35,14 +35,6 @@ rules: - get - list - watch -- apiGroups: - - "" - resources: - - secrets - verbs: - - get - - list - - watch - apiGroups: - apps resources: diff --git a/operator/controllers/loki/lokistack_controller.go b/operator/controllers/loki/lokistack_controller.go index f9f2a541c2f24..f085cac5a060f 100644 --- a/operator/controllers/loki/lokistack_controller.go +++ b/operator/controllers/loki/lokistack_controller.go @@ -122,7 +122,6 @@ type LokiStackReconciler struct { // +kubebuilder:rbac:groups=loki.grafana.com,resources=lokistacks/status,verbs=get;update;patch // +kubebuilder:rbac:groups=loki.grafana.com,resources=lokistacks/finalizers,verbs=update // +kubebuilder:rbac:groups="",resources=pods;nodes;services;endpoints;configmaps;secrets;serviceaccounts,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch // +kubebuilder:rbac:groups=apps,resources=deployments;statefulsets,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterrolebindings;clusterroles;roles;rolebindings,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors;prometheusrules,verbs=get;list;watch;create;update;delete diff --git a/operator/internal/handlers/internal/gateway/tenant_secrets.go b/operator/internal/handlers/internal/gateway/tenant_secrets.go index c1bad8b930b2f..8901d50caebb9 100644 --- a/operator/internal/handlers/internal/gateway/tenant_secrets.go +++ b/operator/internal/handlers/internal/gateway/tenant_secrets.go @@ -30,39 +30,83 @@ func GetTenantSecrets( var ( tenantSecrets []*manifests.TenantSecrets gatewaySecret corev1.Secret + caConfigMap corev1.ConfigMap ) for _, tenant := range stack.Spec.Tenants.Authentication { - key := client.ObjectKey{Name: tenant.OIDC.Secret.Name, Namespace: req.Namespace} - if err := k.Get(ctx, key, &gatewaySecret); err != nil { - if apierrors.IsNotFound(err) { + switch { + case tenant.OIDC != nil: + key := client.ObjectKey{Name: tenant.OIDC.Secret.Name, Namespace: req.Namespace} + if err := k.Get(ctx, key, &gatewaySecret); err != nil { + if apierrors.IsNotFound(err) { + return nil, &status.DegradedError{ + Message: fmt.Sprintf("Missing secrets for tenant %s", tenant.TenantName), + Reason: lokiv1.ReasonMissingGatewayTenantSecret, + Requeue: true, + } + } + return nil, kverrors.Wrap(err, "failed to lookup lokistack gateway tenant secret", + "name", key) + } + + oidcSecret, err := extractOIDCSecret(&gatewaySecret) + if err != nil { return nil, &status.DegradedError{ - Message: fmt.Sprintf("Missing secrets for tenant %s", tenant.TenantName), - Reason: lokiv1.ReasonMissingGatewayTenantSecret, + Message: "Invalid gateway tenant secret contents", + Reason: lokiv1.ReasonInvalidGatewayTenantSecret, Requeue: true, } } - return nil, kverrors.Wrap(err, "failed to lookup lokistack gateway tenant secret", - "name", key) - } - - var ts *manifests.TenantSecrets - ts, err := extractSecret(&gatewaySecret, tenant.TenantName) - if err != nil { + tenantSecrets = append(tenantSecrets, &manifests.TenantSecrets{ + TenantName: tenant.TenantName, + OIDCSecret: oidcSecret, + }) + case tenant.MTLS != nil: + key := client.ObjectKey{Name: 
tenant.MTLS.CA.CA, Namespace: req.Namespace} + if err := k.Get(ctx, key, &caConfigMap); err != nil { + if apierrors.IsNotFound(err) { + return nil, &status.DegradedError{ + Message: fmt.Sprintf("Missing configmap for tenant %s", tenant.TenantName), + Reason: lokiv1.ReasonMissingGatewayTenantConfigMap, + Requeue: true, + } + } + return nil, kverrors.Wrap(err, "failed to lookup lokistack gateway tenant configMap", + "name", key) + } + // Default key if the user doesn't specify it + cmKey := "service-ca.crt" + if tenant.MTLS.CA.CAKey != "" { + cmKey = tenant.MTLS.CA.CAKey + } + err := checkKeyIsPresent(&caConfigMap, cmKey) + if err != nil { + return nil, &status.DegradedError{ + Message: "Invalid gateway tenant configmap contents", + Reason: lokiv1.ReasonInvalidGatewayTenantConfigMap, + Requeue: true, + } + } + tenantSecrets = append(tenantSecrets, &manifests.TenantSecrets{ + TenantName: tenant.TenantName, + MTLSSecret: &manifests.MTLSSecret{ + CAPath: manifests.TenantMTLSCAPath(tenant.TenantName, cmKey), + }, + }) + default: return nil, &status.DegradedError{ - Message: "Invalid gateway tenant secret contents", + Message: "No gateway tenant authentication method provided", Reason: lokiv1.ReasonInvalidGatewayTenantSecret, Requeue: true, } } - tenantSecrets = append(tenantSecrets, ts) } return tenantSecrets, nil } -// extractSecret reads a k8s secret into a manifest tenant secret struct if valid. -func extractSecret(s *corev1.Secret, tenantName string) (*manifests.TenantSecrets, error) { +// extractOIDCSecret reads a k8s secret into a manifest tenant secret struct if valid. +func extractOIDCSecret(s *corev1.Secret) (*manifests.OIDCSecret, error) { // Extract and validate mandatory fields clientID := s.Data["clientID"] if len(clientID) == 0 { @@ -71,10 +115,18 @@ func extractSecret(s *corev1.Secret, tenantName string) (*manifests.TenantSecret clientSecret := s.Data["clientSecret"] issuerCAPath := s.Data["issuerCAPath"] - return &manifests.TenantSecrets{ - TenantName: tenantName, + return &manifests.OIDCSecret{ ClientID: string(clientID), ClientSecret: string(clientSecret), IssuerCAPath: string(issuerCAPath), }, nil } + +// checkKeyIsPresent checks if key is present in the configmap +func checkKeyIsPresent(cm *corev1.ConfigMap, key string) error { + ca := cm.Data[key] + if len(ca) == 0 { + return kverrors.New(fmt.Sprintf("missing %s field", key), "field", key) + } + return nil +} diff --git a/operator/internal/handlers/internal/gateway/tenant_secrets_test.go b/operator/internal/handlers/internal/gateway/tenant_secrets_test.go index 354c301ca6913..3c5eaf0dc7728 100644 --- a/operator/internal/handlers/internal/gateway/tenant_secrets_test.go +++ b/operator/internal/handlers/internal/gateway/tenant_secrets_test.go @@ -2,6 +2,7 @@ package gateway import ( "context" + "strings" "testing" "github.com/stretchr/testify/require" @@ -17,24 +18,17 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -func TestGetTenantSecrets_StaticMode(t *testing.T) { - k := &k8sfakes.FakeClient{} - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - s := &lokiv1.LokiStack{ - ObjectMeta: metav1.ObjectMeta{ - Name: "mystack", - Namespace: "some-ns", - }, - Spec: lokiv1.LokiStackSpec{ - Tenants: &lokiv1.TenantsSpec{ - Mode: lokiv1.Static, - Authentication: []lokiv1.AuthenticationSpec{ +func TestGetTenantSecrets(t *testing.T) { + for _, mode := range []lokiv1.ModeType{lokiv1.Static, lokiv1.Dynamic} { + for _, tc := range []struct { + name string + authNSpec 
[]lokiv1.AuthenticationSpec + object client.Object + expected []*manifests.TenantSecrets + }{ + { + name: "oidc", + authNSpec: []lokiv1.AuthenticationSpec{ { TenantName: "test", TenantID: "test", @@ -45,105 +39,98 @@ func TestGetTenantSecrets_StaticMode(t *testing.T) { }, }, }, - }, - }, - } - - k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { - if name.Name == "test" && name.Namespace == "some-ns" { - k.SetClientObject(object, &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "some-ns", + object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "some-ns", + }, + Data: map[string][]byte{ + "clientID": []byte("test"), + "clientSecret": []byte("test"), + "issuerCAPath": []byte("/path/to/ca/file"), + }, }, - Data: map[string][]byte{ - "clientID": []byte("test"), - "clientSecret": []byte("test"), - "issuerCAPath": []byte("/path/to/ca/file"), + expected: []*manifests.TenantSecrets{ + { + TenantName: "test", + OIDCSecret: &manifests.OIDCSecret{ + ClientID: "test", + ClientSecret: "test", + IssuerCAPath: "/path/to/ca/file", + }, + }, }, - }) - } - return nil - } - - ts, err := GetTenantSecrets(context.TODO(), k, r, s) - require.NoError(t, err) - - expected := []*manifests.TenantSecrets{ - { - TenantName: "test", - ClientID: "test", - ClientSecret: "test", - IssuerCAPath: "/path/to/ca/file", - }, - } - require.ElementsMatch(t, ts, expected) -} - -func TestGetTenantSecrets_DynamicMode(t *testing.T) { - k := &k8sfakes.FakeClient{} - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - s := &lokiv1.LokiStack{ - ObjectMeta: metav1.ObjectMeta{ - Name: "mystack", - Namespace: "some-ns", - }, - Spec: lokiv1.LokiStackSpec{ - Tenants: &lokiv1.TenantsSpec{ - Mode: lokiv1.Dynamic, - Authentication: []lokiv1.AuthenticationSpec{ + }, + { + name: "mTLS", + authNSpec: []lokiv1.AuthenticationSpec{ { TenantName: "test", TenantID: "test", - OIDC: &lokiv1.OIDCSpec{ - Secret: &lokiv1.TenantSecretSpec{ - Name: "test", + MTLS: &lokiv1.MTLSSpec{ + CA: &lokiv1.CASpec{ + CA: "test", + CAKey: "special-ca.crt", }, }, }, }, - }, - }, - } - - k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { - if name.Name == "test" && name.Namespace == "some-ns" { - k.SetClientObject(object, &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "some-ns", + object: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "some-ns", + }, + Data: map[string]string{ + "special-ca.crt": "my-specila-ca", + }, }, - Data: map[string][]byte{ - "clientID": []byte("test"), - "clientSecret": []byte("test"), - "issuerCAPath": []byte("/path/to/ca/file"), + expected: []*manifests.TenantSecrets{ + { + TenantName: "test", + MTLSSecret: &manifests.MTLSSecret{ + CAPath: "/var/run/tls/tenants/test/special-ca.crt", + }, + }, }, + }, + } { + t.Run(strings.Join([]string{string(mode), tc.name}, "_"), func(t *testing.T) { + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + s := &lokiv1.LokiStack{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mystack", + Namespace: "some-ns", + }, + Spec: lokiv1.LokiStackSpec{ + Tenants: &lokiv1.TenantsSpec{ + Mode: mode, + Authentication: tc.authNSpec, + }, + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object 
client.Object, _ ...client.GetOption) error { + if name.Name == "test" && name.Namespace == "some-ns" { + k.SetClientObject(object, tc.object) + } + return nil + } + ts, err := GetTenantSecrets(context.TODO(), k, r, s) + require.NoError(t, err) + require.ElementsMatch(t, ts, tc.expected) }) } - return nil } - - ts, err := GetTenantSecrets(context.TODO(), k, r, s) - require.NoError(t, err) - - expected := []*manifests.TenantSecrets{ - { - TenantName: "test", - ClientID: "test", - ClientSecret: "test", - IssuerCAPath: "/path/to/ca/file", - }, - } - require.ElementsMatch(t, ts, expected) } -func TestExtractSecret(t *testing.T) { +func TestExtractOIDCSecret(t *testing.T) { type test struct { name string tenantName string @@ -174,7 +161,47 @@ func TestExtractSecret(t *testing.T) { t.Run(tst.name, func(t *testing.T) { t.Parallel() - _, err := extractSecret(tst.secret, tst.tenantName) + _, err := extractOIDCSecret(tst.secret) + if !tst.wantErr { + require.NoError(t, err) + } + if tst.wantErr { + require.NotNil(t, err) + } + }) + } +} + +func TestCheckKeyIsPresent(t *testing.T) { + type test struct { + name string + tenantName string + configMap *corev1.ConfigMap + wantErr bool + } + table := []test{ + { + name: "missing key", + tenantName: "tenant-a", + configMap: &corev1.ConfigMap{}, + wantErr: true, + }, + { + name: "all set", + tenantName: "tenant-a", + configMap: &corev1.ConfigMap{ + Data: map[string]string{ + "test": "test", + }, + }, + }, + } + for _, tst := range table { + tst := tst + t.Run(tst.name, func(t *testing.T) { + t.Parallel() + + err := checkKeyIsPresent(tst.configMap, "test") if !tst.wantErr { require.NoError(t, err) } diff --git a/operator/internal/handlers/lokistack_create_or_update_test.go b/operator/internal/handlers/lokistack_create_or_update_test.go index 7c5a099d30805..179f5fad9914a 100644 --- a/operator/internal/handlers/lokistack_create_or_update_test.go +++ b/operator/internal/handlers/lokistack_create_or_update_test.go @@ -944,7 +944,9 @@ func TestCreateOrUpdateLokiStack_WhenMissingCAConfigMap_SetDegraded(t *testing.T Type: lokiv1.ObjectStorageSecretS3, }, TLS: &lokiv1.ObjectStorageTLSSpec{ - CA: "not-existing", + lokiv1.CASpec{ + CA: "not-existing", + }, }, }, }, @@ -1014,7 +1016,9 @@ func TestCreateOrUpdateLokiStack_WhenInvalidCAConfigMap_SetDegraded(t *testing.T Type: lokiv1.ObjectStorageSecretS3, }, TLS: &lokiv1.ObjectStorageTLSSpec{ - CA: invalidCAConfigMap.Name, + lokiv1.CASpec{ + CA: invalidCAConfigMap.Name, + }, }, }, }, diff --git a/operator/internal/manifests/gateway.go b/operator/internal/manifests/gateway.go index 1e1d3bffe89d0..16942d30ee853 100644 --- a/operator/internal/manifests/gateway.go +++ b/operator/internal/manifests/gateway.go @@ -76,12 +76,11 @@ func BuildGateway(opts Options) ([]client.Object, error) { } if opts.Stack.Tenants != nil { - mode := opts.Stack.Tenants.Mode - if err := configureGatewayDeploymentForMode(dpl, mode, opts.Gates, minTLSVersion, ciphers); err != nil { + if err := configureGatewayDeploymentForMode(dpl, opts.Stack.Tenants, opts.Gates, minTLSVersion, ciphers); err != nil { return nil, err } - if err := configureGatewayServiceForMode(&svc.Spec, mode); err != nil { + if err := configureGatewayServiceForMode(&svc.Spec, opts.Stack.Tenants.Mode); err != nil { return nil, err } @@ -441,13 +440,25 @@ func gatewayConfigObjs(opt Options) (*corev1.ConfigMap, *corev1.Secret, string, // gatewayConfigOptions converts Options to gateway.Options func gatewayConfigOptions(opt Options) gateway.Options { - var gatewaySecrets 
[]*gateway.Secret + var ( + gatewaySecrets []*gateway.Secret + gatewaySecret *gateway.Secret + ) for _, secret := range opt.Tenants.Secrets { - gatewaySecret := &gateway.Secret{ - TenantName: secret.TenantName, - ClientID: secret.ClientID, - ClientSecret: secret.ClientSecret, - IssuerCAPath: secret.IssuerCAPath, + gatewaySecret = &gateway.Secret{ + TenantName: secret.TenantName, + } + switch { + case secret.OIDCSecret != nil: + gatewaySecret.OIDC = &gateway.OIDC{ + ClientID: secret.OIDCSecret.ClientID, + ClientSecret: secret.OIDCSecret.ClientSecret, + IssuerCAPath: secret.OIDCSecret.IssuerCAPath, + } + case secret.MTLSSecret != nil: + gatewaySecret.MTLS = &gateway.MTLS{ + CAPath: secret.MTLSSecret.CAPath, + } } gatewaySecrets = append(gatewaySecrets, gatewaySecret) } diff --git a/operator/internal/manifests/gateway_tenants.go b/operator/internal/manifests/gateway_tenants.go index 76dd2d5df149b..83e9e7d367995 100644 --- a/operator/internal/manifests/gateway_tenants.go +++ b/operator/internal/manifests/gateway_tenants.go @@ -1,6 +1,8 @@ package manifests import ( + "strings" + "github.com/ViaQ/logerr/v2/kverrors" "github.com/imdario/mergo" @@ -11,6 +13,7 @@ import ( "github.com/grafana/loki/operator/internal/manifests/internal/config" "github.com/grafana/loki/operator/internal/manifests/openshift" + routev1 "github.com/openshift/api/route/v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" @@ -62,13 +65,16 @@ func ApplyGatewayDefaultOptions(opts *Options) error { return nil } -func configureGatewayDeploymentForMode(d *appsv1.Deployment, mode lokiv1.ModeType, fg configv1.FeatureGates, minTLSVersion string, ciphers string) error { - switch mode { +func configureGatewayDeploymentForMode(d *appsv1.Deployment, tenants *lokiv1.TenantsSpec, fg configv1.FeatureGates, minTLSVersion string, ciphers string) error { + switch tenants.Mode { case lokiv1.Static, lokiv1.Dynamic: - return nil // nothing to configure + if tenants != nil { + return configureMTLS(d, tenants) + } + return nil case lokiv1.OpenshiftLogging, lokiv1.OpenshiftNetwork: tlsDir := gatewayServerHTTPTLSDir() - return openshift.ConfigureGatewayDeployment(d, mode, tlsSecretVolume, tlsDir, minTLSVersion, ciphers, fg.HTTPEncryption) + return openshift.ConfigureGatewayDeployment(d, tenants.Mode, tlsSecretVolume, tlsDir, minTLSVersion, ciphers, fg.HTTPEncryption) } return nil @@ -120,7 +126,19 @@ func configureGatewayObjsForMode(objs []client.Object, opts Options) []client.Ob switch opts.Stack.Tenants.Mode { case lokiv1.Static, lokiv1.Dynamic: - // nothing to configure + // If a single tenant configure mTLS change Route termination policy + // to Passthrough + for _, o := range objs { + switch r := o.(type) { + case *routev1.Route: + for _, secret := range opts.Tenants.Secrets { + if secret.MTLSSecret != nil { + r.Spec.TLS.Termination = routev1.TLSTerminationPassthrough + break + } + } + } + } case lokiv1.OpenshiftLogging, lokiv1.OpenshiftNetwork: for _, o := range objs { switch sa := o.(type) { @@ -175,3 +193,64 @@ func ConfigureOptionsForMode(cfg *config.Options, opt Options) error { return nil } + +// configureMTLS will mount CA bundles and fix CLI arguments for the gateway container +// if any tenant configured mTLS authentication +func configureMTLS(d *appsv1.Deployment, tenants *lokiv1.TenantsSpec) error { + var gwIndex int + for i, c := range d.Spec.Template.Spec.Containers { + if c.Name == gatewayContainerName { + gwIndex = i + break + } + } + + gwContainer := 
d.Spec.Template.Spec.Containers[gwIndex].DeepCopy() + gwArgs := gwContainer.Args + gwVolumes := d.Spec.Template.Spec.Volumes + + mTLS := false + for _, tenant := range tenants.Authentication { + if tenant.MTLS != nil { + gwContainer.VolumeMounts = append(gwContainer.VolumeMounts, corev1.VolumeMount{ + Name: tenantMTLSVolumeName(tenant.TenantName), + MountPath: tenantMTLSCADir(tenant.TenantName), + }) + gwVolumes = append(gwVolumes, corev1.Volume{ + Name: tenantMTLSVolumeName(tenant.TenantName), + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: tenant.MTLS.CA.CA, + }, + }, + }, + }) + mTLS = true + } + } + if !mTLS { + return nil // nothing to configure + } + + // Remove old tls.client-auth-type + for i, arg := range gwArgs { + if strings.HasPrefix(arg, "--tls.client-auth-type=") { + gwArgs = append(gwArgs[:i], gwArgs[i+1:]...) + break + } + } + gwArgs = append(gwArgs, "--tls.client-auth-type=RequestClientCert") + + gwContainer.Args = gwArgs + p := corev1.PodSpec{ + Containers: []corev1.Container{ + *gwContainer, + }, + Volumes: gwVolumes, + } + if err := mergo.Merge(&d.Spec.Template.Spec, p, mergo.WithOverride); err != nil { + return kverrors.Wrap(err, "failed to merge server pki into container spec ") + } + return nil +} diff --git a/operator/internal/manifests/gateway_tenants_test.go b/operator/internal/manifests/gateway_tenants_test.go index 843194954a01b..e26beb2cd16c4 100644 --- a/operator/internal/manifests/gateway_tenants_test.go +++ b/operator/internal/manifests/gateway_tenants_test.go @@ -18,6 +18,25 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" ) +func defaultGatewayDeployment() *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-ns", + }, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: gatewayContainerName, + }, + }, + }, + }, + }, + } +} + func TestApplyGatewayDefaultsOptions(t *testing.T) { type tt struct { desc string @@ -397,32 +416,50 @@ func TestApplyGatewayDefaultsOptions(t *testing.T) { func TestConfigureDeploymentForMode(t *testing.T) { type tt struct { desc string - mode lokiv1.ModeType stackName string stackNs string featureGates configv1.FeatureGates + tenants *lokiv1.TenantsSpec dpl *appsv1.Deployment want *appsv1.Deployment } tc := []tt{ { - desc: "static mode", - mode: lokiv1.Static, - dpl: &appsv1.Deployment{}, - want: &appsv1.Deployment{}, + desc: "static mode without tenants", + tenants: &lokiv1.TenantsSpec{ + Mode: lokiv1.Static, + }, + dpl: defaultGatewayDeployment(), + want: defaultGatewayDeployment(), }, { desc: "dynamic mode", - mode: lokiv1.Dynamic, - dpl: &appsv1.Deployment{}, - want: &appsv1.Deployment{}, + tenants: &lokiv1.TenantsSpec{ + Mode: lokiv1.Dynamic, + }, + dpl: defaultGatewayDeployment(), + want: defaultGatewayDeployment(), }, { - desc: "openshift-logging mode", - mode: lokiv1.OpenshiftLogging, + desc: "static mode with mTLS tenant configured", stackName: "test", stackNs: "test-ns", + tenants: &lokiv1.TenantsSpec{ + Mode: lokiv1.Static, + Authentication: []lokiv1.AuthenticationSpec{ + { + TenantName: "test-a", + TenantID: "a", + MTLS: &lokiv1.MTLSSpec{ + CA: &lokiv1.CASpec{ + CA: "my-ca", + CAKey: "my-ca-key", + }, + }, + }, + }, + }, dpl: &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Namespace: "test-ns", @@ -433,12 +470,113 @@ func TestConfigureDeploymentForMode(t *testing.T) { Containers: 
[]corev1.Container{ { Name: gatewayContainerName, + Args: []string{"--tls.client-auth-type=NoClientCert"}, + }, + }, + }, + }, + }, + }, + want: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-ns", + }, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: gatewayContainerName, + Args: []string{"--tls.client-auth-type=RequestClientCert"}, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "test-a-ca-bundle", + MountPath: "/var/run/tls/tenants/test-a", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "test-a-ca-bundle", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "my-ca", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + desc: "dynamic mode with mTLS tenant configured", + stackName: "test", + stackNs: "test-ns", + tenants: &lokiv1.TenantsSpec{ + Mode: lokiv1.Dynamic, + Authentication: []lokiv1.AuthenticationSpec{ + { + TenantName: "test-a", + TenantID: "a", + MTLS: &lokiv1.MTLSSpec{ + CA: &lokiv1.CASpec{ + CA: "my-ca", + CAKey: "my-ca-key", + }, + }, + }, + }, + }, + dpl: defaultGatewayDeployment(), + want: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-ns", + }, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: gatewayContainerName, + Args: []string{"--tls.client-auth-type=RequestClientCert"}, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "test-a-ca-bundle", + MountPath: "/var/run/tls/tenants/test-a", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "test-a-ca-bundle", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "my-ca", + }, + }, + }, }, }, }, }, }, }, + }, + { + desc: "openshift-logging mode", + tenants: &lokiv1.TenantsSpec{ + Mode: lokiv1.OpenshiftLogging, + }, + stackName: "test", + stackNs: "test-ns", + dpl: defaultGatewayDeployment(), want: &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Namespace: "test-ns", @@ -510,8 +648,10 @@ func TestConfigureDeploymentForMode(t *testing.T) { }, }, { - desc: "openshift-logging mode with http encryption", - mode: lokiv1.OpenshiftLogging, + desc: "openshift-logging mode with http encryption", + tenants: &lokiv1.TenantsSpec{ + Mode: lokiv1.OpenshiftLogging, + }, stackName: "test", stackNs: "test-ns", featureGates: configv1.FeatureGates{ @@ -629,8 +769,10 @@ func TestConfigureDeploymentForMode(t *testing.T) { }, }, { - desc: "openshift-network mode", - mode: lokiv1.OpenshiftNetwork, + desc: "openshift-network mode", + tenants: &lokiv1.TenantsSpec{ + Mode: lokiv1.OpenshiftNetwork, + }, stackName: "test", stackNs: "test-ns", dpl: &appsv1.Deployment{ @@ -729,8 +871,10 @@ func TestConfigureDeploymentForMode(t *testing.T) { }, }, { - desc: "openshift-network mode with http encryption", - mode: lokiv1.OpenshiftNetwork, + desc: "openshift-network mode with http encryption", + tenants: &lokiv1.TenantsSpec{ + Mode: lokiv1.OpenshiftNetwork, + }, stackName: "test", stackNs: "test-ns", featureGates: configv1.FeatureGates{ @@ -849,7 +993,7 @@ func TestConfigureDeploymentForMode(t *testing.T) { tc := tc t.Run(tc.desc, func(t *testing.T) { t.Parallel() - err := configureGatewayDeploymentForMode(tc.dpl, tc.mode, tc.featureGates, "min-version", "cipher1,cipher2") + err := configureGatewayDeploymentForMode(tc.dpl, tc.tenants, 
tc.featureGates, "min-version", "cipher1,cipher2") require.NoError(t, err) require.Equal(t, tc.want, tc.dpl) }) diff --git a/operator/internal/manifests/gateway_test.go b/operator/internal/manifests/gateway_test.go index 00e2c0b93b6ff..720b72f5b2fdf 100644 --- a/operator/internal/manifests/gateway_test.go +++ b/operator/internal/manifests/gateway_test.go @@ -194,10 +194,12 @@ func TestGatewayConfigMap_ReturnsSHA1OfBinaryContents(t *testing.T) { Tenants: Tenants{ Secrets: []*TenantSecrets{ { - TenantName: "test", - ClientID: "test", - ClientSecret: "test", - IssuerCAPath: "/tmp/test", + TenantName: "test", + OIDCSecret: &OIDCSecret{ + ClientID: "test", + ClientSecret: "test", + IssuerCAPath: "/tmp/test", + }, }, }, }, diff --git a/operator/internal/manifests/internal/gateway/build.go b/operator/internal/manifests/internal/gateway/build.go index 1e1a4cb0d97b6..659e4e32c6acb 100644 --- a/operator/internal/manifests/internal/gateway/build.go +++ b/operator/internal/manifests/internal/gateway/build.go @@ -34,7 +34,9 @@ var ( lokiGatewayRbacYAMLTmpl = template.Must(template.ParseFS(lokiGatewayRbacYAMLTmplFile, "gateway-rbac.yaml")) - lokiGatewayTenantsYAMLTmpl = template.Must(template.ParseFS(lokiGatewayTenantsYAMLTmplFile, "gateway-tenants.yaml")) + lokiGatewayTenantsYAMLTmpl = template.Must(template.New("gateway-tenants.yaml").Funcs(template.FuncMap{ + "make_array": func(els ...any) []any { return els }, + }).ParseFS(lokiGatewayTenantsYAMLTmplFile, "gateway-tenants.yaml")) lokiStackGatewayRegoTmpl = template.Must(template.ParseFS(lokiStackGatewayRegoTmplFile, "lokistack-gateway.rego")) ) diff --git a/operator/internal/manifests/internal/gateway/build_test.go b/operator/internal/manifests/internal/gateway/build_test.go index 7922a0e499fb7..3ee2e6e4ce82f 100644 --- a/operator/internal/manifests/internal/gateway/build_test.go +++ b/operator/internal/manifests/internal/gateway/build_test.go @@ -9,7 +9,64 @@ import ( ) func TestBuild_StaticMode(t *testing.T) { - expTntCfg := ` + for _, tc := range []struct { + name string + authNSpec []lokiv1.AuthenticationSpec + tenantSecrets []*Secret + authZSpec *lokiv1.AuthorizationSpec + expTntCfg string + expRbacCfg string + }{ + { + name: "oidc", + authNSpec: []lokiv1.AuthenticationSpec{ + { + TenantName: "test-a", + TenantID: "test", + OIDC: &lokiv1.OIDCSpec{ + Secret: &lokiv1.TenantSecretSpec{ + Name: "test", + }, + IssuerURL: "https://127.0.0.1:5556/dex", + RedirectURL: "https://localhost:8443/oidc/test-a/callback", + GroupClaim: "test", + UsernameClaim: "test", + }, + }, + }, + tenantSecrets: []*Secret{ + { + TenantName: "test-a", + OIDC: &OIDC{ + ClientID: "test", + ClientSecret: "test123", + IssuerCAPath: "/tmp/ca/path", + }, + }, + }, + authZSpec: &lokiv1.AuthorizationSpec{ + Roles: []lokiv1.RoleSpec{ + { + Name: "some-name", + Resources: []string{"metrics"}, + Tenants: []string{"test-a"}, + Permissions: []lokiv1.PermissionType{"read"}, + }, + }, + RoleBindings: []lokiv1.RoleBindingsSpec{ + { + Name: "test-a", + Subjects: []lokiv1.Subject{ + { + Name: "[email protected]", + Kind: "user", + }, + }, + Roles: []string{"read-write"}, + }, + }, + }, + expTntCfg: ` tenants: - name: test-a id: test @@ -26,8 +83,8 @@ tenants: paths: - /etc/lokistack-gateway/rbac.yaml - /etc/lokistack-gateway/lokistack-gateway.rego -` - expRbacCfg := ` +`, + expRbacCfg: ` roleBindings: - name: test-a roles: @@ -43,70 +100,140 @@ roles: - metrics tenants: - test-a -` - opts := Options{ - Stack: lokiv1.LokiStackSpec{ - Tenants: &lokiv1.TenantsSpec{ - Mode: lokiv1.Static, - 
Authentication: []lokiv1.AuthenticationSpec{ - { - TenantName: "test-a", - TenantID: "test", - OIDC: &lokiv1.OIDCSpec{ - Secret: &lokiv1.TenantSecretSpec{ - Name: "test", - }, - IssuerURL: "https://127.0.0.1:5556/dex", - RedirectURL: "https://localhost:8443/oidc/test-a/callback", - GroupClaim: "test", - UsernameClaim: "test", +`, + }, + { + name: "mTLS", + authNSpec: []lokiv1.AuthenticationSpec{ + { + TenantName: "test-a", + TenantID: "test", + MTLS: &lokiv1.MTLSSpec{ + CA: &lokiv1.CASpec{ + CA: "my-custom-ca", + CAKey: "special-ca.crt", }, }, }, - Authorization: &lokiv1.AuthorizationSpec{ - Roles: []lokiv1.RoleSpec{ - { - Name: "some-name", - Resources: []string{"metrics"}, - Tenants: []string{"test-a"}, - Permissions: []lokiv1.PermissionType{"read"}, - }, + }, + tenantSecrets: []*Secret{ + { + TenantName: "test-a", + MTLS: &MTLS{ + CAPath: "/var/run/tls/tenants/test-a/special-ca.crt", + }, + }, + }, + authZSpec: &lokiv1.AuthorizationSpec{ + Roles: []lokiv1.RoleSpec{ + { + Name: "some-name", + Resources: []string{"metrics"}, + Tenants: []string{"test-a"}, + Permissions: []lokiv1.PermissionType{"read"}, }, - RoleBindings: []lokiv1.RoleBindingsSpec{ - { - Name: "test-a", - Subjects: []lokiv1.Subject{ - { - Name: "[email protected]", - Kind: "user", - }, + }, + RoleBindings: []lokiv1.RoleBindingsSpec{ + { + Name: "test-a", + Subjects: []lokiv1.Subject{ + { + Name: "[email protected]", + Kind: "user", }, - Roles: []string{"read-write"}, }, + Roles: []string{"read-write"}, }, }, }, + expTntCfg: ` +tenants: +- name: test-a + id: test + mTLS: + caPath: /var/run/tls/tenants/test-a/special-ca.crt + opa: + query: data.lokistack.allow + paths: + - /etc/lokistack-gateway/rbac.yaml + - /etc/lokistack-gateway/lokistack-gateway.rego +`, + expRbacCfg: ` +roleBindings: +- name: test-a + roles: + - read-write + subjects: + - kind: user + name: [email protected] +roles: +- name: some-name + permissions: + - read + resources: + - metrics + tenants: + - test-a +`, }, - Namespace: "test-ns", - Name: "test", - TenantSecrets: []*Secret{ - { - TenantName: "test-a", - ClientID: "test", - ClientSecret: "test123", - IssuerCAPath: "/tmp/ca/path", - }, - }, + } { + t.Run(tc.name, func(t *testing.T) { + opts := Options{ + Stack: lokiv1.LokiStackSpec{ + Tenants: &lokiv1.TenantsSpec{ + Mode: lokiv1.Static, + Authentication: tc.authNSpec, + Authorization: tc.authZSpec, + }, + }, + Namespace: "test-ns", + Name: "test", + TenantSecrets: tc.tenantSecrets, + } + rbacConfig, tenantsConfig, regoCfg, err := Build(opts) + require.NoError(t, err) + require.YAMLEq(t, tc.expTntCfg, string(tenantsConfig)) + require.YAMLEq(t, tc.expRbacCfg, string(rbacConfig)) + require.NotEmpty(t, regoCfg) + }) } - rbacConfig, tenantsConfig, regoCfg, err := Build(opts) - require.NoError(t, err) - require.YAMLEq(t, expTntCfg, string(tenantsConfig)) - require.YAMLEq(t, expRbacCfg, string(rbacConfig)) - require.NotEmpty(t, regoCfg) } func TestBuild_DynamicMode(t *testing.T) { - expTntCfg := ` + for _, tc := range []struct { + name string + authNSpec []lokiv1.AuthenticationSpec + tenantSecrets []*Secret + expTntCfg string + }{ + { + name: "oidc", + authNSpec: []lokiv1.AuthenticationSpec{ + { + TenantName: "test-a", + TenantID: "test", + OIDC: &lokiv1.OIDCSpec{ + Secret: &lokiv1.TenantSecretSpec{ + Name: "test", + }, + IssuerURL: "https://127.0.0.1:5556/dex", + RedirectURL: "https://localhost:8443/oidc/test-a/callback", + GroupClaim: "test", + UsernameClaim: "test", + }, + }, + }, + tenantSecrets: []*Secret{ + { + TenantName: "test-a", + OIDC: &OIDC{ + 
ClientID: "test", + ClientSecret: "test123", + IssuerCAPath: "/tmp/ca/path", + }, + }, + }, + expTntCfg: ` tenants: - name: test-a id: test @@ -120,49 +247,65 @@ tenants: groupClaim: test opa: url: http://127.0.0.1:8181/v1/data/observatorium/allow -` - opts := Options{ - Stack: lokiv1.LokiStackSpec{ - Tenants: &lokiv1.TenantsSpec{ - Mode: lokiv1.Dynamic, - Authentication: []lokiv1.AuthenticationSpec{ - { - TenantName: "test-a", - TenantID: "test", - OIDC: &lokiv1.OIDCSpec{ - Secret: &lokiv1.TenantSecretSpec{ - Name: "test", - }, - IssuerURL: "https://127.0.0.1:5556/dex", - RedirectURL: "https://localhost:8443/oidc/test-a/callback", - GroupClaim: "test", - UsernameClaim: "test", +`, + }, + { + name: "mTLS", + authNSpec: []lokiv1.AuthenticationSpec{ + { + TenantName: "test-a", + TenantID: "test", + MTLS: &lokiv1.MTLSSpec{ + CA: &lokiv1.CASpec{ + CA: "my-custom-ca", + CAKey: "special-ca.crt", }, }, }, - Authorization: &lokiv1.AuthorizationSpec{ - OPA: &lokiv1.OPASpec{ - URL: "http://127.0.0.1:8181/v1/data/observatorium/allow", + }, + tenantSecrets: []*Secret{ + { + TenantName: "test-a", + MTLS: &MTLS{ + CAPath: "/var/run/tls/tenants/test-a/special-ca.crt", }, }, }, + expTntCfg: ` +tenants: +- name: test-a + id: test + mTLS: + caPath: /var/run/tls/tenants/test-a/special-ca.crt + opa: + url: http://127.0.0.1:8181/v1/data/observatorium/allow +`, }, - Namespace: "test-ns", - Name: "test", - TenantSecrets: []*Secret{ - { - TenantName: "test-a", - ClientID: "test", - ClientSecret: "test123", - IssuerCAPath: "/tmp/ca/path", - }, - }, + } { + t.Run(tc.name, func(t *testing.T) { + opts := Options{ + Stack: lokiv1.LokiStackSpec{ + Tenants: &lokiv1.TenantsSpec{ + Mode: lokiv1.Dynamic, + Authentication: tc.authNSpec, + Authorization: &lokiv1.AuthorizationSpec{ + OPA: &lokiv1.OPASpec{ + URL: "http://127.0.0.1:8181/v1/data/observatorium/allow", + }, + }, + }, + }, + Namespace: "test-ns", + Name: "test", + TenantSecrets: tc.tenantSecrets, + } + rbacConfig, tenantsConfig, regoCfg, err := Build(opts) + require.NoError(t, err) + require.YAMLEq(t, tc.expTntCfg, string(tenantsConfig)) + require.Empty(t, rbacConfig) + require.Empty(t, regoCfg) + }) } - rbacConfig, tenantsConfig, regoCfg, err := Build(opts) - require.NoError(t, err) - require.YAMLEq(t, expTntCfg, string(tenantsConfig)) - require.Empty(t, rbacConfig) - require.Empty(t, regoCfg) } func TestBuild_OpenshiftLoggingMode(t *testing.T) { @@ -234,22 +377,28 @@ tenants: Name: "test", TenantSecrets: []*Secret{ { - TenantName: "application", - ClientID: "test", - ClientSecret: "ZXhhbXBsZS1hcHAtc2VjcmV0", - IssuerCAPath: "./tmp/certs/ca.pem", + TenantName: "application", + OIDC: &OIDC{ + ClientID: "test", + ClientSecret: "ZXhhbXBsZS1hcHAtc2VjcmV0", + IssuerCAPath: "./tmp/certs/ca.pem", + }, }, { - TenantName: "infrastructure", - ClientID: "test", - ClientSecret: "ZXhhbXBsZS1hcHAtc2VjcmV0", - IssuerCAPath: "./tmp/certs/ca.pem", + TenantName: "infrastructure", + OIDC: &OIDC{ + ClientID: "test", + ClientSecret: "ZXhhbXBsZS1hcHAtc2VjcmV0", + IssuerCAPath: "./tmp/certs/ca.pem", + }, }, { - TenantName: "audit", - ClientID: "test", - ClientSecret: "ZXhhbXBsZS1hcHAtc2VjcmV0", - IssuerCAPath: "./tmp/certs/ca.pem", + TenantName: "audit", + OIDC: &OIDC{ + ClientID: "test", + ClientSecret: "ZXhhbXBsZS1hcHAtc2VjcmV0", + IssuerCAPath: "./tmp/certs/ca.pem", + }, }, }, } @@ -298,10 +447,12 @@ tenants: Name: "test", TenantSecrets: []*Secret{ { - TenantName: "network", - ClientID: "test", - ClientSecret: "ZXhhbXBsZS1hcHAtc2VjcmV0", - IssuerCAPath: "./tmp/certs/ca.pem", + 
TenantName: "network", + OIDC: &OIDC{ + ClientID: "test", + ClientSecret: "ZXhhbXBsZS1hcHAtc2VjcmV0", + IssuerCAPath: "./tmp/certs/ca.pem", + }, }, }, } diff --git a/operator/internal/manifests/internal/gateway/gateway-tenants.yaml b/operator/internal/manifests/internal/gateway/gateway-tenants.yaml index a248e3c443974..aed6870231f60 100644 --- a/operator/internal/manifests/internal/gateway/gateway-tenants.yaml +++ b/operator/internal/manifests/internal/gateway/gateway-tenants.yaml @@ -1,25 +1,26 @@ -tenants: -{{- if $l := . -}} -{{- if eq $l.Stack.Tenants.Mode "static" -}} -{{- range $spec := $l.Stack.Tenants.Authentication }} -- name: {{ $spec.TenantName }} - id: {{ $spec.TenantID }} - oidc: - {{- range $secret := $l.TenantSecrets }} - {{- if eq $secret.TenantName $spec.TenantName -}} - {{ if $secret.ClientID }} - clientID: {{ $secret.ClientID }} +{{- define "mTLS" }} + {{- $secret := index . 0 -}} + mTLS: + {{ if $secret.MTLS.CAPath }} + caPath: {{ $secret.MTLS.CAPath }} {{- end -}} - {{ if $secret.ClientSecret }} - clientSecret: {{ $secret.ClientSecret }} - {{- end -}} - {{ if $secret.IssuerCAPath }} - issuerCAPath: {{ $secret.IssuerCAPath }} +{{- end }} + +{{- define "oidc" }} + {{- $secret := index . 0 -}} + {{- $spec := index . 1 -}} + oidc: + {{- if $secret.OIDC.ClientID }} + clientID: {{ $secret.OIDC.ClientID }} {{- end -}} + {{ if $secret.OIDC.ClientSecret }} + clientSecret: {{ $secret.OIDC.ClientSecret }} {{- end -}} + {{ if $secret.OIDC.IssuerCAPath }} + issuerCAPath: {{ $secret.OIDC.IssuerCAPath }} {{- end }} issuerURL: {{ $spec.OIDC.IssuerURL }} - {{ if $spec.OIDC.RedirectURL }} + {{- if $spec.OIDC.RedirectURL }} redirectURL: {{ $spec.OIDC.RedirectURL }} {{- end -}} {{ if $spec.OIDC.UsernameClaim }} @@ -28,6 +29,29 @@ tenants: {{- if $spec.OIDC.GroupClaim }} groupClaim: {{ $spec.OIDC.GroupClaim }} {{- end }} +{{- end }} + +{{- define "authorization" }} + {{- $l := index . 0 -}} + {{- $spec := index . 1 -}} + {{- range $secret := $l.TenantSecrets }} + {{- if eq $secret.TenantName $spec.TenantName }} + {{- if $secret.OIDC }} + {{ template "oidc" (make_array $secret $spec) }} + {{- else if $secret.MTLS }} + {{ template "mTLS" (make_array $secret) }} + {{- end -}} + {{- end -}} + {{- end }} +{{- end }} + +tenants: +{{- if $l := . 
-}} +{{- if eq $l.Stack.Tenants.Mode "static" -}} +{{- range $spec := $l.Stack.Tenants.Authentication }} +- name: {{ $spec.TenantName }} + id: {{ $spec.TenantID }} + {{ template "authorization" (make_array $l $spec) }} opa: query: data.lokistack.allow paths: @@ -39,28 +63,7 @@ tenants: {{- range $spec := $tenant.Authentication }} - name: {{ $spec.TenantName }} id: {{ $spec.TenantID }} - oidc: - {{- range $secret := $l.TenantSecrets }} - {{- if eq $secret.TenantName $spec.TenantName -}} - {{ if $secret.ClientID }} - clientID: {{ $secret.ClientID }} - {{- end -}} - {{ if $secret.ClientSecret }} - clientSecret: {{ $secret.ClientSecret }} - {{- end -}} - {{ if $secret.IssuerCAPath }} - issuerCAPath: {{ $secret.IssuerCAPath }} - {{- end -}} - {{- end -}} - {{- end }} - issuerURL: {{ $spec.OIDC.IssuerURL }} - redirectURL: {{ $spec.OIDC.RedirectURL }} - {{- if $spec.OIDC.UsernameClaim }} - usernameClaim: {{ $spec.OIDC.UsernameClaim }} - {{- end -}} - {{- if $spec.OIDC.GroupClaim }} - groupClaim: {{ $spec.OIDC.GroupClaim }} - {{- end }} + {{ template "authorization" (make_array $l $spec) }} opa: url: {{ $tenant.Authorization.OPA.URL }} {{- end -}} diff --git a/operator/internal/manifests/internal/gateway/options.go b/operator/internal/manifests/internal/gateway/options.go index f3f5d1769ee04..eefefedcfbca6 100644 --- a/operator/internal/manifests/internal/gateway/options.go +++ b/operator/internal/manifests/internal/gateway/options.go @@ -17,10 +17,21 @@ type Options struct { TenantSecrets []*Secret } -// Secret for clientID, clientSecret and issuerCAPath for tenant's authentication. +// Secret for tenant's authentication. type Secret struct { - TenantName string + TenantName string + OIDC *OIDC + MTLS *MTLS +} + +// OIDC secret for tenant's authentication. +type OIDC struct { ClientID string ClientSecret string IssuerCAPath string } + +// MTLS config for tenant's authentication. +type MTLS struct { + CAPath string +} diff --git a/operator/internal/manifests/options.go b/operator/internal/manifests/options.go index 4f4e32f947dbe..1e419d60852dd 100644 --- a/operator/internal/manifests/options.go +++ b/operator/internal/manifests/options.go @@ -65,14 +65,23 @@ type Tenants struct { Configs map[string]TenantConfig } -// TenantSecrets for clientID, clientSecret and issuerCAPath for tenant's authentication. +// TenantSecrets for tenant's authentication. 
type TenantSecrets struct { - TenantName string + TenantName string + OIDCSecret *OIDCSecret + MTLSSecret *MTLSSecret +} + +type OIDCSecret struct { ClientID string ClientSecret string IssuerCAPath string } +type MTLSSecret struct { + CAPath string +} + // TenantConfig for tenant authorizationconfig type TenantConfig struct { OIDC *TenantOIDCSpec diff --git a/operator/internal/manifests/var.go b/operator/internal/manifests/var.go index 140f4ec14a6c4..71a26d257078e 100644 --- a/operator/internal/manifests/var.go +++ b/operator/internal/manifests/var.go @@ -97,6 +97,8 @@ const ( httpTLSDir = "/var/run/tls/http" // grpcTLSDir is the path that is mounted from the secret for TLS grpcTLSDir = "/var/run/tls/grpc" + // tenantMTLSDir is the path that is mounted from the configmaps for mTLS + tenantMTLSDir = "/var/run/tls/tenants" // LokiStackCABundleDir is the path that is mounted from the configmap for TLS caBundleDir = "/var/run/ca" // caFile is the file name of the certificate authority file @@ -279,6 +281,18 @@ func gatewayUpstreamHTTPTLSKey() string { return path.Join(gatewayUpstreamHTTPTLSDir(), corev1.TLSPrivateKeyKey) } +func tenantMTLSVolumeName(tenantName string) string { + return fmt.Sprintf("%s-ca-bundle", tenantName) +} + +func tenantMTLSCADir(tennantName string) string { + return path.Join(tenantMTLSDir, tennantName) +} + +func TenantMTLSCAPath(tennantName, key string) string { + return path.Join(tenantMTLSDir, tennantName, key) +} + func gatewayClientSecretName(stackName string) string { return fmt.Sprintf("%s-gateway-client-http", stackName) }
operator
add mTLS authentication to tenants (#9906)
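Note: the mTLS commit above derives every CA mount path from the shared tenantMTLSDir root via the small helpers added to operator/internal/manifests/var.go. A minimal standalone sketch, assuming only the constants and helpers shown in that diff, reproduces the exact values the commit's tests assert:

package main

import (
	"fmt"
	"path"
)

// Mirrors the additions to operator/internal/manifests/var.go in the diff above
// (parameter names normalized; the recorded commit spells them "tennantName").
const tenantMTLSDir = "/var/run/tls/tenants"

func tenantMTLSVolumeName(tenantName string) string {
	return fmt.Sprintf("%s-ca-bundle", tenantName)
}

func tenantMTLSCADir(tenantName string) string {
	return path.Join(tenantMTLSDir, tenantName)
}

func TenantMTLSCAPath(tenantName, key string) string {
	return path.Join(tenantMTLSDir, tenantName, key)
}

func main() {
	// These match the expectations in gateway_tenants_test.go and
	// tenant_secrets_test.go from the diff above.
	fmt.Println(tenantMTLSVolumeName("test-a"))               // test-a-ca-bundle
	fmt.Println(tenantMTLSCADir("test-a"))                    // /var/run/tls/tenants/test-a
	fmt.Println(TenantMTLSCAPath("test-a", "special-ca.crt")) // /var/run/tls/tenants/test-a/special-ca.crt
}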
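The same commit rewrites gateway-tenants.yaml around a "make_array" FuncMap helper (registered in build.go) because a text/template named template receives only a single pipeline value; packing several values into a slice and unpacking them with index is the conventional workaround. A self-contained sketch of that pattern — the template text and data here are illustrative stand-ins, not the operator's actual config:

package main

import (
	"os"
	"text/template"
)

const tmpl = `
{{- define "oidc" -}}
	{{- $secret := index . 0 -}}
	{{- $spec := index . 1 -}}
clientID: {{ $secret.ClientID }}, issuerURL: {{ $spec.IssuerURL }}
{{- end -}}
{{ template "oidc" (make_array .Secret .Spec) }}
`

func main() {
	t := template.Must(template.New("tenants").Funcs(template.FuncMap{
		// Variadic helper: lets a named template receive several values
		// through the single dot argument it is allowed.
		"make_array": func(els ...any) []any { return els },
	}).Parse(tmpl))

	data := map[string]any{
		"Secret": map[string]string{"ClientID": "test"},
		"Spec":   map[string]string{"IssuerURL": "https://127.0.0.1:5556/dex"},
	}
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
	// Output: clientID: test, issuerURL: https://127.0.0.1:5556/dex
}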
0f3583058fd4bd389ec0cd210e20acb2d29372fe
2025-01-24 02:28:37
renovate[bot]
fix(deps): update module github.com/aws/aws-sdk-go-v2/service/s3 to v1.74.0 (main) (#15923)
false
diff --git a/tools/lambda-promtail/go.mod b/tools/lambda-promtail/go.mod index c70c14249caf8..b269facf70db6 100644 --- a/tools/lambda-promtail/go.mod +++ b/tools/lambda-promtail/go.mod @@ -6,7 +6,7 @@ require ( github.com/aws/aws-lambda-go v1.47.0 github.com/aws/aws-sdk-go-v2 v1.33.0 github.com/aws/aws-sdk-go-v2/config v1.29.1 - github.com/aws/aws-sdk-go-v2/service/s3 v1.73.2 + github.com/aws/aws-sdk-go-v2/service/s3 v1.74.0 github.com/go-kit/log v0.2.1 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 diff --git a/tools/lambda-promtail/go.sum b/tools/lambda-promtail/go.sum index cefba43f6b738..f9e9fac4fa81c 100644 --- a/tools/lambda-promtail/go.sum +++ b/tools/lambda-promtail/go.sum @@ -74,8 +74,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.9 h1:TQmKDyETF github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.9/go.mod h1:HVLPK2iHQBUx7HfZeOQSEu3v2ubZaAY2YPbAm5/WUyY= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.9 h1:2aInXbh02XsbO0KobPGMNXyv2QP73VDKsWPNJARj/+4= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.9/go.mod h1:dgXS1i+HgWnYkPXqNoPIPKeUsUUYHaUbThC90aDnNiE= -github.com/aws/aws-sdk-go-v2/service/s3 v1.73.2 h1:F3h8VYq9ZLBXYurmwrT8W0SPhgCcU0q+0WZJfT1dFt0= -github.com/aws/aws-sdk-go-v2/service/s3 v1.73.2/go.mod h1:jGJ/v7FIi7Ys9t54tmEFnrxuaWeJLpwNgKp2DXAVhOU= +github.com/aws/aws-sdk-go-v2/service/s3 v1.74.0 h1:ncCHiFU9Eq4qnKCNlzMZXfFmvb9R8OVNfU8SFOskxdI= +github.com/aws/aws-sdk-go-v2/service/s3 v1.74.0/go.mod h1:jGJ/v7FIi7Ys9t54tmEFnrxuaWeJLpwNgKp2DXAVhOU= github.com/aws/aws-sdk-go-v2/service/sso v1.24.11 h1:kuIyu4fTT38Kj7YCC7ouNbVZSSpqkZ+LzIfhCr6Dg+I= github.com/aws/aws-sdk-go-v2/service/sso v1.24.11/go.mod h1:Ro744S4fKiCCuZECXgOi760TiYylUM8ZBf6OGiZzJtY= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.10 h1:l+dgv/64iVlQ3WsBbnn+JSbkj01jIi+SM0wYsj3y/hY=
fix
update module github.com/aws/aws-sdk-go-v2/service/s3 to v1.74.0 (main) (#15923)
869f7b5a41fbb4e2dc1a1fa22932504e2cfbe490
2024-11-07 20:08:10
George Robinson
chore: delete old kafka ingester code (#14819)
false
diff --git a/pkg/kafka/ingester/consumer.go b/pkg/kafka/ingester/consumer.go deleted file mode 100644 index 57abb2b00ff3f..0000000000000 --- a/pkg/kafka/ingester/consumer.go +++ /dev/null @@ -1,303 +0,0 @@ -package ingester - -import ( - "bytes" - "context" - "crypto/rand" - "fmt" - "io" - "math" - "sync" - "time" - - "github.com/dustin/go-humanize" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/oklog/ulid" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "google.golang.org/grpc" - - "github.com/grafana/dskit/backoff" - - "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/metastorepb" - "github.com/grafana/loki/v3/pkg/kafka" - "github.com/grafana/loki/v3/pkg/kafka/partition" - "github.com/grafana/loki/v3/pkg/logproto" - "github.com/grafana/loki/v3/pkg/storage/wal" -) - -// ObjectStorage defines an interface for object storage operations -type ObjectStorage interface { - PutObject(ctx context.Context, objectKey string, object io.Reader) error -} - -// MetadataStore defines an interface for metadata storage operations -type MetadataStore interface { - AddBlock(ctx context.Context, in *metastorepb.AddBlockRequest, opts ...grpc.CallOption) (*metastorepb.AddBlockResponse, error) -} - -// consumer represents a Kafka consumer that processes and stores log entries -type consumer struct { - metastoreClient MetadataStore - storage ObjectStorage - writer *wal.SegmentWriter - committer partition.Committer - flushInterval time.Duration - maxFlushSize int64 - lastOffset int64 - - flushBuf *bytes.Buffer - decoder *kafka.Decoder - toStore []*logproto.Entry - - metrics *consumerMetrics - logger log.Logger -} - -// NewConsumerFactory creates and initializes a new consumer instance -func NewConsumerFactory( - metastoreClient MetadataStore, - storage ObjectStorage, - flushInterval time.Duration, - maxFlushSize int64, - logger log.Logger, - reg prometheus.Registerer, -) partition.ConsumerFactory { - return func(committer partition.Committer) (partition.Consumer, error) { - writer, err := wal.NewWalSegmentWriter() - if err != nil { - return nil, err - } - decoder, err := kafka.NewDecoder() - if err != nil { - return nil, err - } - return &consumer{ - logger: logger, - metastoreClient: metastoreClient, - storage: storage, - writer: writer, - metrics: newConsumerMetrics(reg), - flushBuf: bytes.NewBuffer(make([]byte, 0, 10<<20)), // 10 MB - decoder: decoder, - committer: committer, - flushInterval: flushInterval, - maxFlushSize: maxFlushSize, - lastOffset: -1, - }, nil - } -} - -// Start starts the consumer and returns a function to wait for it to finish -// It consumes records from the recordsChan, and flushes them to storage periodically. 
-func (c *consumer) Start(ctx context.Context, recordsChan <-chan []partition.Record) func() { - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - flushTicker := time.NewTicker(c.flushInterval) - defer flushTicker.Stop() - for { - select { - case <-flushTicker.C: - level.Info(c.logger).Log("msg", "flushing block") - c.Flush() - case <-ctx.Done(): - level.Info(c.logger).Log("msg", "shutting down consumer") - c.Flush() - return - case records := <-recordsChan: - if err := c.consume(records); err != nil { - level.Error(c.logger).Log("msg", "failed to consume records", "error", err) - return - } - if c.writer.InputSize() > c.maxFlushSize { - level.Info(c.logger).Log("msg", "flushing block due to size limit", "size", humanize.Bytes(uint64(c.writer.InputSize()))) - c.Flush() - } - } - } - }() - return wg.Wait -} - -// consume processes a batch of Kafka records, decoding and storing them -func (c *consumer) consume(records []partition.Record) error { - if len(records) == 0 { - return nil - } - var ( - minOffset = int64(math.MaxInt64) - maxOffset = int64(0) - ) - for _, record := range records { - minOffset = min(minOffset, record.Offset) - maxOffset = max(maxOffset, record.Offset) - } - level.Debug(c.logger).Log("msg", "consuming records", "min_offset", minOffset, "max_offset", maxOffset) - return c.retryWithBackoff(context.Background(), backoff.Config{ - MinBackoff: 250 * time.Millisecond, - MaxBackoff: 2 * time.Second, - MaxRetries: 0, // retry forever - }, func(boff *backoff.Backoff) error { - consumeStart := time.Now() - if err := c.appendRecords(records); err != nil { - level.Error(c.logger).Log( - "msg", "encountered error while ingesting data from Kafka; should retry", - "err", err, - "record_min_offset", minOffset, - "record_max_offset", maxOffset, - "num_retries", boff.NumRetries(), - ) - return err - } - c.lastOffset = maxOffset - c.metrics.currentOffset.Set(float64(c.lastOffset)) - c.metrics.consumeLatency.Observe(time.Since(consumeStart).Seconds()) - return nil - }) -} - -func (c *consumer) appendRecords(records []partition.Record) error { - for _, record := range records { - stream, labels, err := c.decoder.Decode(record.Content) - if err != nil { - return fmt.Errorf("failed to decode record: %w", err) - } - if len(stream.Entries) == 0 { - continue - } - if len(c.toStore) == 0 { - c.toStore = make([]*logproto.Entry, 0, len(stream.Entries)) - } - c.toStore = c.toStore[:0] - for _, entry := range stream.Entries { - c.toStore = append(c.toStore, &logproto.Entry{ - Timestamp: entry.Timestamp, - Line: entry.Line, - StructuredMetadata: entry.StructuredMetadata, - Parsed: entry.Parsed, - }) - } - c.writer.Append(record.TenantID, stream.Labels, labels, c.toStore, time.Now()) - } - return nil -} - -// Flush writes the accumulated data to storage and updates the metadata store -func (c *consumer) Flush() { - if c.writer.InputSize() == 0 { - return - } - if c.lastOffset == -1 { - return - } - if err := c.retryWithBackoff(context.Background(), backoff.Config{ - MinBackoff: 250 * time.Millisecond, - MaxBackoff: 10 * time.Second, - MaxRetries: 0, // retry forever - }, func(boff *backoff.Backoff) error { - start := time.Now() - c.metrics.flushesTotal.Add(1) - defer func() { c.metrics.flushDuration.Observe(time.Since(start).Seconds()) }() - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - if err := c.flush(ctx); err != nil { - c.metrics.flushFailuresTotal.Inc() - level.Error(c.logger).Log( - "msg", "failed to flush block", - "error", err, - 
"num_retries", boff.NumRetries(), - ) - return err - } - c.lastOffset = -1 - return nil - }); err != nil { - level.Error(c.logger).Log("msg", "failed to flush block", "error", err) - } -} - -func (c *consumer) retryWithBackoff(ctx context.Context, cfg backoff.Config, fn func(boff *backoff.Backoff) error) error { - boff := backoff.New(ctx, cfg) - var err error - for boff.Ongoing() { - err = fn(boff) - if err == nil { - return nil - } - boff.Wait() - } - if err != nil { - return err - } - return boff.ErrCause() -} - -func (c *consumer) flush(ctx context.Context) error { - defer c.flushBuf.Reset() - if _, err := c.writer.WriteTo(c.flushBuf); err != nil { - return err - } - - stats := wal.GetSegmentStats(c.writer, time.Now()) - wal.ReportSegmentStats(stats, c.metrics.segmentMetrics) - - id := ulid.MustNew(ulid.Timestamp(time.Now()), rand.Reader).String() - if err := c.storage.PutObject(ctx, wal.Dir+id, c.flushBuf); err != nil { - return fmt.Errorf("failed to put object to object storage: %w", err) - } - - if _, err := c.metastoreClient.AddBlock(ctx, &metastorepb.AddBlockRequest{ - Block: c.writer.Meta(id), - }); err != nil { - return fmt.Errorf("failed to add block to metastore: %w", err) - } - c.writer.Reset() - if err := c.committer.Commit(ctx, c.lastOffset); err != nil { - return fmt.Errorf("failed to commit offset: %w", err) - } - - return nil -} - -// consumerMetrics holds various Prometheus metrics for monitoring consumer operations -type consumerMetrics struct { - flushesTotal prometheus.Counter - flushFailuresTotal prometheus.Counter - flushDuration prometheus.Histogram - segmentMetrics *wal.SegmentMetrics - consumeLatency prometheus.Histogram - currentOffset prometheus.Gauge -} - -// newConsumerMetrics initializes and returns a new consumerMetrics instance -func newConsumerMetrics(reg prometheus.Registerer) *consumerMetrics { - return &consumerMetrics{ - flushesTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "loki_kafka_ingester_flushes_total", - Help: "The total number of flushes.", - }), - flushFailuresTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "loki_kafka_ingester_flush_failures_total", - Help: "The total number of failed flushes.", - }), - flushDuration: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ - Name: "loki_kafka_ingester_flush_duration_seconds", - Help: "The flush duration (in seconds).", - Buckets: prometheus.ExponentialBuckets(0.001, 4, 8), - NativeHistogramBucketFactor: 1.1, - }), - consumeLatency: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ - Name: "loki_ingest_storage_reader_records_batch_process_duration_seconds", - Help: "How long a consumer spent processing a batch of records from Kafka.", - NativeHistogramBucketFactor: 1.1, - }), - segmentMetrics: wal.NewSegmentMetrics(reg), - currentOffset: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ - Name: "loki_kafka_ingester_current_offset", - Help: "The current offset of the Kafka consumer.", - }), - } -} diff --git a/pkg/kafka/ingester/consumer_test.go b/pkg/kafka/ingester/consumer_test.go deleted file mode 100644 index a0baa92ba86a7..0000000000000 --- a/pkg/kafka/ingester/consumer_test.go +++ /dev/null @@ -1,198 +0,0 @@ -package ingester - -import ( - "context" - "os" - "strings" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" - - "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/metastorepb" - "github.com/grafana/loki/v3/pkg/ingester-rf1/objstore" - 
"github.com/grafana/loki/v3/pkg/kafka" - "github.com/grafana/loki/v3/pkg/kafka/partition" - "github.com/grafana/loki/v3/pkg/logproto" -) - -type mockCommitter struct { - committed int64 -} - -func newMockCommitter() *mockCommitter { - return &mockCommitter{ - committed: -1, - } -} - -func (m *mockCommitter) Commit(_ context.Context, offset int64) error { - m.committed = offset - return nil -} - -func (m *mockCommitter) EnqueueOffset(offset int64) { - // For testing purposes, we'll just set the committed offset directly - m.committed = offset -} - -func TestConsumer_PeriodicFlush(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - storage, err := objstore.NewTestStorage(t) - require.NoError(t, err) - - metastore := NewTestMetastore() - reg := prometheus.NewRegistry() - - flushInterval := 100 * time.Millisecond - maxFlushSize := int64(1000) - - committer := newMockCommitter() - consumerFactory := NewConsumerFactory(metastore, storage, flushInterval, maxFlushSize, log.NewLogfmtLogger(os.Stdout), reg) - consumer, err := consumerFactory(committer) - require.NoError(t, err) - - recordsChan := make(chan []partition.Record) - _ = consumer.Start(ctx, recordsChan) - - stream := logproto.Stream{ - Labels: `{__name__="test_metric", label="value1"}`, - Entries: []logproto.Entry{ - {Timestamp: time.Unix(0, 1000), Line: "10.5"}, - }, - } - - encodedRecords, err := kafka.Encode(0, "tenant1", stream, 10<<20) - require.NoError(t, err) - - records := []partition.Record{{ - TenantID: "tenant1", - Content: encodedRecords[0].Value, - Offset: 0, - }} - - recordsChan <- records - - require.Eventually(t, func() bool { - blocks, err := metastore.ListBlocksForQuery(ctx, &metastorepb.ListBlocksForQueryRequest{ - TenantId: "tenant1", - StartTime: 0, - EndTime: 100000, - }) - require.NoError(t, err) - return len(blocks.Blocks) == 1 - }, 5*time.Second, 100*time.Millisecond) - - // Verify committed offset - require.Equal(t, int64(0), committer.committed) -} - -func TestConsumer_ShutdownFlush(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - storage, err := objstore.NewTestStorage(t) - require.NoError(t, err) - - metastore := NewTestMetastore() - reg := prometheus.NewRegistry() - - flushInterval := 1 * time.Hour - maxFlushSize := int64(1000) - - committer := newMockCommitter() - consumerFactory := NewConsumerFactory(metastore, storage, flushInterval, maxFlushSize, log.NewLogfmtLogger(os.Stdout), reg) - consumer, err := consumerFactory(committer) - require.NoError(t, err) - - recordsChan := make(chan []partition.Record) - wait := consumer.Start(ctx, recordsChan) - - stream := logproto.Stream{ - Labels: `{__name__="test_metric", label="value1"}`, - Entries: []logproto.Entry{ - {Timestamp: time.Unix(0, 1000), Line: "10.5"}, - }, - } - - encodedRecords, err := kafka.Encode(0, "tenant1", stream, 10<<20) - require.NoError(t, err) - - records := []partition.Record{{ - TenantID: "tenant1", - Content: encodedRecords[0].Value, - Offset: 0, - }} - - recordsChan <- records - - cancel() - wait() - - blocks, err := metastore.ListBlocksForQuery(ctx, &metastorepb.ListBlocksForQueryRequest{ - TenantId: "tenant1", - StartTime: 0, - EndTime: 100000, - }) - require.NoError(t, err) - require.Equal(t, 1, len(blocks.Blocks)) - - // Verify committed offset - require.Equal(t, int64(0), committer.committed) -} - -func TestConsumer_MaxFlushSize(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - storage, err := 
objstore.NewTestStorage(t) - require.NoError(t, err) - - metastore := NewTestMetastore() - reg := prometheus.NewRegistry() - - flushInterval := 1 * time.Hour - maxFlushSize := int64(10) - - committer := newMockCommitter() - consumerFactory := NewConsumerFactory(metastore, storage, flushInterval, maxFlushSize, log.NewLogfmtLogger(os.Stdout), reg) - consumer, err := consumerFactory(committer) - require.NoError(t, err) - - recordsChan := make(chan []partition.Record) - _ = consumer.Start(ctx, recordsChan) - - stream := logproto.Stream{ - Labels: `{__name__="test_metric", label="value1"}`, - Entries: []logproto.Entry{ - {Timestamp: time.Unix(0, 1000), Line: strings.Repeat("a", 100)}, - }, - } - - encodedRecords, err := kafka.Encode(0, "tenant1", stream, 10<<20) - require.NoError(t, err) - - records := []partition.Record{{ - TenantID: "tenant1", - Content: encodedRecords[0].Value, - Offset: 0, - }} - - recordsChan <- records - - require.Eventually(t, func() bool { - blocks, err := metastore.ListBlocksForQuery(ctx, &metastorepb.ListBlocksForQueryRequest{ - TenantId: "tenant1", - StartTime: 0, - EndTime: 100000, - }) - require.NoError(t, err) - return len(blocks.Blocks) == 1 - }, 5*time.Second, 100*time.Millisecond) - - require.Equal(t, int64(0), committer.committed) -} diff --git a/pkg/kafka/ingester/ingester.go b/pkg/kafka/ingester/ingester.go deleted file mode 100644 index 39595df142ba7..0000000000000 --- a/pkg/kafka/ingester/ingester.go +++ /dev/null @@ -1,383 +0,0 @@ -package ingester - -import ( - "context" - "errors" - "flag" - "fmt" - "net/http" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/grafana/dskit/kv" - "github.com/grafana/dskit/ring" - "github.com/grafana/dskit/services" - "github.com/prometheus/client_golang/prometheus" - "google.golang.org/grpc/health/grpc_health_v1" - - "github.com/grafana/loki/v3/pkg/kafka" - "github.com/grafana/loki/v3/pkg/kafka/ingester/shutdownmarker" - "github.com/grafana/loki/v3/pkg/kafka/partition" - "github.com/grafana/loki/v3/pkg/kafka/partitionring" - util_log "github.com/grafana/loki/v3/pkg/util/log" - - "github.com/grafana/loki/v3/pkg/util" -) - -const ( - RingName = "kafka-ingester" - PartitionRingName = "kafka-partition" -) - -var ( - defaultFlushInterval = 15 * time.Second - defaultFlushSize int64 = 300 << 20 // 300 MB -) - -// Config for an ingester. -type Config struct { - Enabled bool `yaml:"enabled" doc:"description=Whether the kafka ingester is enabled."` - LifecyclerConfig ring.LifecyclerConfig `yaml:"lifecycler,omitempty" doc:"description=Configures how the lifecycle of the ingester will operate and where it will register for discovery."` - ShutdownMarkerPath string `yaml:"shutdown_marker_path"` - FlushInterval time.Duration `yaml:"flush_interval" doc:"description=The interval at which the ingester will flush and commit offsets to Kafka. If not set, the default flush interval will be used."` - FlushSize int64 `yaml:"flush_size" doc:"description=The size at which the ingester will flush and commit offsets to Kafka. If not set, the default flush size will be used."` - PartitionRingConfig partitionring.Config `yaml:"partition_ring" category:"experimental"` - KafkaConfig kafka.Config `yaml:"-"` -} - -// RegisterFlags registers the flags. 
-func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.LifecyclerConfig.RegisterFlagsWithPrefix("kafka-ingester.", f, util_log.Logger) - cfg.PartitionRingConfig.RegisterFlagsWithPrefix("kafka-ingester.", f) - f.StringVar(&cfg.ShutdownMarkerPath, "kafka-ingester.shutdown-marker-path", "", "Path where the shutdown marker file is stored. If not set and common.path_prefix is set then common.path_prefix will be used.") - f.BoolVar(&cfg.Enabled, "kafka-ingester.enabled", false, "Whether the Kafka-based ingester path is enabled") - f.DurationVar(&cfg.FlushInterval, "kafka-ingester.flush-interval", defaultFlushInterval, "The interval at which the ingester will flush and commit offsets to Kafka. If not set, the default flush interval will be used.") - f.Int64Var(&cfg.FlushSize, "kafka-ingester.flush-size", defaultFlushSize, "The size at which the ingester will flush and commit offsets to Kafka. If not set, the default flush size will be used.") -} - -func (cfg *Config) Validate() error { - if !cfg.Enabled { - return nil - } - if cfg.FlushInterval <= 0 { - return errors.New("kafka-ingester.flush-interval must be greater than 0") - } - if cfg.LifecyclerConfig.RingConfig.ReplicationFactor != 1 { - cfg.LifecyclerConfig.RingConfig.ReplicationFactor = 1 - level.Warn(util_log.Logger).Log("msg", "kafka-ingester.lifecycler.replication-factor has been set to 1. This is the only supported replication factor for the kafka-ingester.") - } - return nil -} - -type Wrapper interface { - Wrap(wrapped Interface) Interface -} - -// Interface is an interface for the Ingester -type Interface interface { - services.Service - http.Handler - CheckReady(ctx context.Context) error - FlushHandler(w http.ResponseWriter, _ *http.Request) -} - -// Ingester builds chunks for incoming log streams. -type Ingester struct { - services.Service - - cfg Config - logger log.Logger - - metrics *ingesterMetrics - - lifecycler *ring.Lifecycler - lifecyclerWatcher *services.FailureWatcher - ingesterPartitionID int32 - partitionRingLifecycler *ring.PartitionInstanceLifecycler - partitionReader *partition.Reader -} - -// New makes a new Ingester. 
-func New(cfg Config, - consumerFactory partition.ConsumerFactory, - logger log.Logger, - metricsNamespace string, - registerer prometheus.Registerer, -) (*Ingester, error) { - metrics := newIngesterMetrics(registerer) - - ingesterPartitionID, err := partitionring.ExtractIngesterPartitionID(cfg.LifecyclerConfig.ID) - if err != nil { - return nil, fmt.Errorf("calculating ingester partition ID: %w", err) - } - - partitionRingKV := cfg.PartitionRingConfig.KVStore.Mock - if partitionRingKV == nil { - partitionRingKV, err = kv.NewClient(cfg.PartitionRingConfig.KVStore, ring.GetPartitionRingCodec(), kv.RegistererWithKVName(registerer, PartitionRingName+"-lifecycler"), logger) - if err != nil { - return nil, fmt.Errorf("creating KV store for ingester partition ring: %w", err) - } - } - - partitionRingLifecycler := ring.NewPartitionInstanceLifecycler( - cfg.PartitionRingConfig.ToLifecyclerConfig(ingesterPartitionID, cfg.LifecyclerConfig.ID), - PartitionRingName, - PartitionRingName+"-key", - partitionRingKV, - logger, - prometheus.WrapRegistererWithPrefix("loki_", registerer)) - i := &Ingester{ - cfg: cfg, - logger: logger, - ingesterPartitionID: ingesterPartitionID, - partitionRingLifecycler: partitionRingLifecycler, - metrics: metrics, - } - - i.lifecycler, err = ring.NewLifecycler(cfg.LifecyclerConfig, i, RingName, RingName+"-ring", true, logger, prometheus.WrapRegistererWithPrefix(metricsNamespace+"_", registerer)) - if err != nil { - return nil, err - } - i.partitionReader, err = partition.NewReader(cfg.KafkaConfig, ingesterPartitionID, cfg.LifecyclerConfig.ID, consumerFactory, logger, registerer) - if err != nil { - return nil, err - } - - i.lifecyclerWatcher = services.NewFailureWatcher() - i.lifecyclerWatcher.WatchService(i.lifecycler) - i.lifecyclerWatcher.WatchService(i.partitionRingLifecycler) - i.lifecyclerWatcher.WatchService(i.partitionReader) - - i.Service = services.NewBasicService(i.starting, i.running, i.stopping) - - return i, nil -} - -// ServeHTTP implements the pattern ring status page. -func (i *Ingester) ServeHTTP(w http.ResponseWriter, r *http.Request) { - i.lifecycler.ServeHTTP(w, r) -} - -func (i *Ingester) starting(ctx context.Context) (err error) { - defer func() { - if err != nil { - // if starting() fails for any reason (e.g., context canceled), - // the lifecycler must be stopped. - _ = services.StopAndAwaitTerminated(context.Background(), i.lifecycler) - } - }() - - // First of all we have to check if the shutdown marker is set. This needs to be done - // as first thing because, if found, it may change the behaviour of the ingester startup. 
- if exists, err := shutdownmarker.Exists(shutdownmarker.GetPath(i.cfg.ShutdownMarkerPath)); err != nil { - return fmt.Errorf("failed to check ingester shutdown marker: %w", err) - } else if exists { - level.Info(i.logger).Log("msg", "detected existing shutdown marker, setting unregister and flush on shutdown", "path", shutdownmarker.GetPath(i.cfg.ShutdownMarkerPath)) - i.setPrepareShutdown() - } - - // pass new context to lifecycler, so that it doesn't stop automatically when Ingester's service context is done - err = i.lifecycler.StartAsync(context.Background()) - if err != nil { - return err - } - - err = i.lifecycler.AwaitRunning(ctx) - if err != nil { - return err - } - - err = i.partitionRingLifecycler.StartAsync(context.Background()) - if err != nil { - return err - } - err = i.partitionRingLifecycler.AwaitRunning(ctx) - if err != nil { - return err - } - err = i.partitionReader.StartAsync(context.Background()) - if err != nil { - return err - } - err = i.partitionReader.AwaitRunning(ctx) - if err != nil { - return err - } - - return nil -} - -func (i *Ingester) running(ctx context.Context) error { - var serviceError error - select { - // wait until service is asked to stop - case <-ctx.Done(): - // stop - case err := <-i.lifecyclerWatcher.Chan(): - serviceError = fmt.Errorf("lifecycler failed: %w", err) - } - - return serviceError -} - -// stopping is called when Ingester transitions to Stopping state. -// -// At this point, loop no longer runs, but flushers are still running. -func (i *Ingester) stopping(_ error) error { - var errs util.MultiError - - errs.Add(services.StopAndAwaitTerminated(context.Background(), i.partitionReader)) - errs.Add(services.StopAndAwaitTerminated(context.Background(), i.lifecycler)) - errs.Add(services.StopAndAwaitTerminated(context.Background(), i.partitionRingLifecycler)) - // Remove the shutdown marker if it exists since we are shutting down - shutdownMarkerPath := shutdownmarker.GetPath(i.cfg.ShutdownMarkerPath) - exist, err := shutdownmarker.Exists(shutdownMarkerPath) - if err != nil { - level.Warn(i.logger).Log("msg", "failed to check for prepare-shutdown marker file", "path", shutdownMarkerPath, "err", err) - } else if exist { - if err := shutdownmarker.Remove(shutdownMarkerPath); err != nil { - level.Warn(i.logger).Log("msg", "failed to remove shutdown marker", "path", shutdownMarkerPath, "err", err) - } - } - return errs.Err() -} - -// Watch implements grpc_health_v1.HealthCheck. -func (*Ingester) Watch(*grpc_health_v1.HealthCheckRequest, grpc_health_v1.Health_WatchServer) error { - return nil -} - -func (i *Ingester) PreparePartitionDownscaleHandler(w http.ResponseWriter, r *http.Request) { - logger := log.With(i.logger, "partition", i.ingesterPartitionID) - // Don't allow callers to change the shutdown configuration while we're in the middle - // of starting or shutting down. - if i.State() != services.Running { - w.WriteHeader(http.StatusServiceUnavailable) - return - } - - shutdownMarkerPath := shutdownmarker.GetPath(i.cfg.ShutdownMarkerPath) - exists, err := shutdownmarker.Exists(shutdownMarkerPath) - if err != nil { - level.Error(i.logger).Log("msg", "unable to check for prepare-shutdown marker file", "path", shutdownMarkerPath, "err", err) - w.WriteHeader(http.StatusInternalServerError) - return - } - switch r.Method { - case http.MethodPost: - // It's not allowed to prepare the downscale while in PENDING state. Why? 
Because if the downscale - // will be later cancelled, we don't know if it was requested in PENDING or ACTIVE state, so we - // don't know which state to revert back to. Given a partition is expected to stay in PENDING state - // for a short period, we simply don't allow this case. state, _, err := i.partitionRingLifecycler.GetPartitionState(r.Context()) - if err != nil { - level.Error(logger).Log("msg", "failed to check partition state in the ring", "err", err) - w.WriteHeader(http.StatusInternalServerError) - return - } - - if state == ring.PartitionPending { - level.Warn(logger).Log("msg", "received a request to prepare partition for shutdown, but the request can't be satisfied because the partition is in PENDING state") - w.WriteHeader(http.StatusConflict) - return - } - - if err := i.partitionRingLifecycler.ChangePartitionState(r.Context(), ring.PartitionInactive); err != nil { - level.Error(logger).Log("msg", "failed to change partition state to inactive", "err", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - if !exists { - if err := shutdownmarker.Create(shutdownMarkerPath); err != nil { - level.Error(i.logger).Log("msg", "unable to create prepare-shutdown marker file", "path", shutdownMarkerPath, "err", err) - w.WriteHeader(http.StatusInternalServerError) - return - } - } - i.setPrepareShutdown() - - case http.MethodDelete: - state, _, err := i.partitionRingLifecycler.GetPartitionState(r.Context()) - if err != nil { - level.Error(logger).Log("msg", "failed to check partition state in the ring", "err", err) - w.WriteHeader(http.StatusInternalServerError) - return - } - - // If partition is inactive, make it active. We ignore other states, Active and especially Pending. - if state == ring.PartitionInactive { - - // We don't switch it back to PENDING state if there are not enough owners because we want to guarantee consistency - // in the read path. If the partition is within the lookback period we need to guarantee that partition will be queried. - // Moving back to PENDING will cause us to lose consistency, because PENDING partitions are not queried by design. - // We could move back to PENDING if there are not enough owners and the partition moved to INACTIVE more than - // "lookback period" ago, but since we delete inactive partitions with no owners that moved to inactive longer - // than "lookback period" ago, it looks to be an edge case not worth addressing.
- if err := i.partitionRingLifecycler.ChangePartitionState(r.Context(), ring.PartitionActive); err != nil { - level.Error(logger).Log("msg", "failed to change partition state to active", "err", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - if exists { - if err := shutdownmarker.Remove(shutdownMarkerPath); err != nil { - level.Error(i.logger).Log("msg", "unable to remove prepare-shutdown marker file", "path", shutdownMarkerPath, "err", err) - w.WriteHeader(http.StatusInternalServerError) - return - } - } - i.unsetPrepareShutdown() - } - } - - state, stateTimestamp, err := i.partitionRingLifecycler.GetPartitionState(r.Context()) - if err != nil { - level.Error(logger).Log("msg", "failed to check partition state in the ring", "err", err) - w.WriteHeader(http.StatusInternalServerError) - return - } - - if state == ring.PartitionInactive { - util.WriteJSONResponse(w, map[string]any{"timestamp": stateTimestamp.Unix()}) - } else { - util.WriteJSONResponse(w, map[string]any{"timestamp": 0}) - } -} - -// setPrepareShutdown toggles ingester lifecycler config to prepare for shutdown -func (i *Ingester) setPrepareShutdown() { - i.lifecycler.SetUnregisterOnShutdown(true) - i.lifecycler.SetFlushOnShutdown(true) - i.partitionRingLifecycler.SetCreatePartitionOnStartup(false) - i.partitionRingLifecycler.SetRemoveOwnerOnShutdown(true) - i.metrics.shutdownMarker.Set(1) -} - -func (i *Ingester) unsetPrepareShutdown() { - i.lifecycler.SetUnregisterOnShutdown(i.cfg.LifecyclerConfig.UnregisterOnShutdown) - i.lifecycler.SetFlushOnShutdown(true) - i.partitionRingLifecycler.SetCreatePartitionOnStartup(true) - i.partitionRingLifecycler.SetRemoveOwnerOnShutdown(false) - i.metrics.shutdownMarker.Set(0) -} - -// ReadinessHandler is used to indicate to k8s when the ingesters are ready for -// the addition or removal of another ingester. Returns 204 when the ingester is -// ready, 500 otherwise. -func (i *Ingester) CheckReady(ctx context.Context) error { - // todo. - if s := i.State(); s != services.Running && s != services.Stopping { - return fmt.Errorf("ingester not ready: %v", s) - } - return i.lifecycler.CheckReady(ctx) -} - -// Flush implements ring.FlushTransferer -// Flush triggers a flush of all the chunks and closes the flush queues. -// Called from the Lifecycler as part of the ingester shutdown. -func (i *Ingester) Flush() { -} - -func (i *Ingester) TransferOut(_ context.Context) error { - return nil -} diff --git a/pkg/kafka/ingester/ingester_test.go b/pkg/kafka/ingester/ingester_test.go deleted file mode 100644 index c7d62b9593a4c..0000000000000 --- a/pkg/kafka/ingester/ingester_test.go +++ /dev/null @@ -1,126 +0,0 @@ -package ingester - -import ( - "context" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/grafana/dskit/flagext" - "github.com/grafana/dskit/kv/consul" - "github.com/grafana/dskit/ring" - "github.com/grafana/dskit/services" - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - - "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/metastorepb" - "github.com/grafana/loki/v3/pkg/ingester-rf1/objstore" - "github.com/grafana/loki/v3/pkg/util/test" -) - -func TestPreparePartitionDownscaleHandler(t *testing.T) { - cfg := defaultIngesterTestConfig(t) - // start ingester.
- storage, err := objstore.NewTestStorage(t) - require.NoError(t, err) - ing, err := New(cfg, - NewConsumerFactory(NewTestMetastore(), storage, cfg.FlushInterval, cfg.FlushSize, log.NewNopLogger(), prometheus.NewRegistry()), - log.NewNopLogger(), "test", prometheus.NewRegistry()) - require.NoError(t, err) - err = services.StartAndAwaitRunning(context.Background(), ing) - require.NoError(t, err) - - t.Run("get state", func(t *testing.T) { - w := httptest.NewRecorder() - ing.PreparePartitionDownscaleHandler(w, httptest.NewRequest("GET", "/", nil)) - require.Equal(t, http.StatusOK, w.Code) - require.Equal(t, "{\"timestamp\":0}", w.Body.String()) - }) - t.Run("prepare shutdown pending", func(t *testing.T) { - w := httptest.NewRecorder() - ing.PreparePartitionDownscaleHandler(w, httptest.NewRequest("POST", "/", nil)) - require.Equal(t, http.StatusConflict, w.Code) - }) - t.Run("prepare shutdown and cancel", func(t *testing.T) { - w := httptest.NewRecorder() - test.Poll(t, 5*time.Second, ring.PartitionActive, func() interface{} { - return getState(t, cfg) - }) - ing.PreparePartitionDownscaleHandler(w, httptest.NewRequest("POST", "/", nil)) - require.Equal(t, http.StatusOK, w.Code) - test.Poll(t, 5*time.Second, ring.PartitionInactive, func() interface{} { - return getState(t, cfg) - }) - w2 := httptest.NewRecorder() - ing.PreparePartitionDownscaleHandler(w2, httptest.NewRequest("DELETE", "/", nil)) - require.Equal(t, http.StatusOK, w.Code) - test.Poll(t, 5*time.Second, ring.PartitionActive, func() interface{} { - return getState(t, cfg) - }) - }) - require.NoError(t, services.StopAndAwaitTerminated(context.Background(), ing)) -} - -func getState(t *testing.T, cfg Config) ring.PartitionState { - get, err := cfg.PartitionRingConfig.KVStore.Mock.Get(context.Background(), PartitionRingName+"-key") - require.NoError(t, err) - - ringDesc := ring.GetOrCreatePartitionRingDesc(get) - return ringDesc.Partitions[0].State -} - -// nolint -func defaultIngesterTestConfig(t testing.TB) Config { - kvRing, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) - t.Cleanup(func() { require.NoError(t, closer.Close()) }) - - kvPartitionRing, closerPartitionRing := consul.NewInMemoryClient(ring.GetPartitionRingCodec(), log.NewNopLogger(), nil) - t.Cleanup(func() { require.NoError(t, closerPartitionRing.Close()) }) - - cfg := Config{} - flagext.DefaultValues(&cfg) - - cfg.LifecyclerConfig.RingConfig.KVStore.Mock = kvRing - cfg.PartitionRingConfig.KVStore.Mock = kvPartitionRing - cfg.PartitionRingConfig.MinOwnersCount = 1 - cfg.PartitionRingConfig.MinOwnersDuration = 0 - cfg.LifecyclerConfig.RingConfig.ReplicationFactor = 1 - cfg.LifecyclerConfig.NumTokens = 1 - cfg.LifecyclerConfig.ListenPort = 0 - cfg.LifecyclerConfig.Addr = "localhost" - cfg.LifecyclerConfig.ID = "localhost" - cfg.LifecyclerConfig.FinalSleep = 0 - cfg.LifecyclerConfig.MinReadyDuration = 0 - - return cfg -} - -// TestMetastore is a simple in-memory metastore for testing -type TestMetastore struct { - blocks map[string][]*metastorepb.BlockMeta -} - -func NewTestMetastore() *TestMetastore { - return &TestMetastore{blocks: make(map[string][]*metastorepb.BlockMeta)} -} - -func (m *TestMetastore) ListBlocksForQuery(_ context.Context, req *metastorepb.ListBlocksForQueryRequest, _ ...grpc.CallOption) (*metastorepb.ListBlocksForQueryResponse, error) { - blocks := m.blocks[req.TenantId] - var result []*metastorepb.BlockMeta - for _, block := range blocks { - if block.MinTime <= req.EndTime && block.MaxTime >= req.StartTime { - result = 
append(result, block) - } - } - return &metastorepb.ListBlocksForQueryResponse{Blocks: result}, nil -} - -func (m *TestMetastore) AddBlock(_ context.Context, in *metastorepb.AddBlockRequest, _ ...grpc.CallOption) (*metastorepb.AddBlockResponse, error) { - for _, stream := range in.Block.TenantStreams { - m.blocks[stream.TenantId] = append(m.blocks[stream.TenantId], in.Block) - } - return &metastorepb.AddBlockResponse{}, nil -} diff --git a/pkg/kafka/ingester/metrics.go b/pkg/kafka/ingester/metrics.go deleted file mode 100644 index e73ee08095c8e..0000000000000 --- a/pkg/kafka/ingester/metrics.go +++ /dev/null @@ -1,20 +0,0 @@ -package ingester - -import ( - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" -) - -type ingesterMetrics struct { - // Shutdown marker for ingester scale down - shutdownMarker prometheus.Gauge -} - -func newIngesterMetrics(r prometheus.Registerer) *ingesterMetrics { - return &ingesterMetrics{ - shutdownMarker: promauto.With(r).NewGauge(prometheus.GaugeOpts{ - Name: "loki_ingester_prepare_shutdown_requested", - Help: "1 if the ingester has been requested to prepare for shutdown via endpoint or marker file.", - }), - } -} diff --git a/pkg/kafka/ingester/shutdownmarker/shutdown_marker.go b/pkg/kafka/ingester/shutdownmarker/shutdown_marker.go deleted file mode 100644 index 7d1a4ec2f353f..0000000000000 --- a/pkg/kafka/ingester/shutdownmarker/shutdown_marker.go +++ /dev/null @@ -1,60 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only - -package shutdownmarker - -import ( - "os" - "path" - "strings" - "time" - - "github.com/grafana/dskit/multierror" - - "github.com/grafana/loki/v3/pkg/util/atomicfs" -) - -const shutdownMarkerFilename = "shutdown-requested.txt" - -// Create writes a marker file on the given path to indicate that a component is -// going to be scaled down in the future. The presence of this file means that a component -// should perform some operations specified by the component itself before being shutdown. -func Create(p string) error { - return atomicfs.CreateFile(p, strings.NewReader(time.Now().UTC().Format(time.RFC3339))) -} - -// Remove removes the shutdown marker file on the given path if it exists. 
-func Remove(p string) error { - err := os.Remove(p) - if err != nil && !os.IsNotExist(err) { - return err - } - - dir, err := os.OpenFile(path.Dir(p), os.O_RDONLY, 0o777) - if err != nil { - return err - } - - merr := multierror.New() - merr.Add(dir.Sync()) - merr.Add(dir.Close()) - return merr.Err() -} - -// Exists returns true if the shutdown marker file exists on the given path, false otherwise -func Exists(p string) (bool, error) { - s, err := os.Stat(p) - if err != nil && os.IsNotExist(err) { - return false, nil - } - - if err != nil { - return false, err - } - - return s.Mode().IsRegular(), nil -} - -// GetPath returns the absolute path of the shutdown marker file -func GetPath(dirPath string) string { - return path.Join(dirPath, shutdownMarkerFilename) -} diff --git a/pkg/kafka/ingester/shutdownmarker/shutdown_marker_test.go b/pkg/kafka/ingester/shutdownmarker/shutdown_marker_test.go deleted file mode 100644 index c8e0b851be4e1..0000000000000 --- a/pkg/kafka/ingester/shutdownmarker/shutdown_marker_test.go +++ /dev/null @@ -1,49 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only - -package shutdownmarker - -import ( - "path/filepath" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestShutdownMarker_GetPath(t *testing.T) { - dir := "/a/b/c" - expectedPath := filepath.Join(dir, shutdownMarkerFilename) - require.Equal(t, expectedPath, GetPath(dir)) -} - -func TestShutdownMarker_Create(t *testing.T) { - dir := t.TempDir() - shutdownMarkerPath := GetPath(dir) - exists, err := Exists(shutdownMarkerPath) - require.NoError(t, err) - require.False(t, exists) - - err = Create(shutdownMarkerPath) - require.NoError(t, err) - - exists, err = Exists(shutdownMarkerPath) - require.NoError(t, err) - require.True(t, exists) -} - -func TestShutdownMarker_Remove(t *testing.T) { - dir := t.TempDir() - shutdownMarkerPath := GetPath(dir) - exists, err := Exists(shutdownMarkerPath) - require.NoError(t, err) - require.False(t, exists) - - require.Nil(t, Create(shutdownMarkerPath)) - exists, err = Exists(shutdownMarkerPath) - require.NoError(t, err) - require.True(t, exists) - - require.Nil(t, Remove(shutdownMarkerPath)) - exists, err = Exists(shutdownMarkerPath) - require.NoError(t, err) - require.False(t, exists) -} diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go index 1418f01e21e4a..faf15069e02e5 100644 --- a/pkg/loki/loki.go +++ b/pkg/loki/loki.go @@ -43,7 +43,6 @@ import ( metastoreclient "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/client" ingester_client "github.com/grafana/loki/v3/pkg/ingester/client" "github.com/grafana/loki/v3/pkg/kafka" - ingester_kafka "github.com/grafana/loki/v3/pkg/kafka/ingester" "github.com/grafana/loki/v3/pkg/loghttp/push" "github.com/grafana/loki/v3/pkg/loki/common" "github.com/grafana/loki/v3/pkg/lokifrontend" @@ -380,7 +379,6 @@ type Loki struct { MetastoreClient *metastoreclient.Client partitionRingWatcher *ring.PartitionRingWatcher partitionRing *ring.PartitionInstanceRing - kafkaIngester *ingester_kafka.Ingester ClientMetrics storage.ClientMetrics deleteClientMetrics *deletion.DeleteRequestClientMetrics
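Note on the diff above: the deleted consumer flushed each in-memory segment inside a retry loop. flush writes the segment to object storage, registers the block with the metastore, and commits the Kafka offset, while retryWithBackoff retries the whole operation with exponential backoff until it succeeds or the context is cancelled. Below is a minimal, stdlib-only Go sketch of that retry pattern; the names retryWithBackoff and flushOnce and the backoff parameters are illustrative assumptions, not the removed implementation.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retryWithBackoff retries fn until it succeeds, the context ends, or
// maxRetries attempts have failed. Delays double between attempts and are
// capped at maxBackoff, mirroring the shape of the deleted consumer loop.
func retryWithBackoff(ctx context.Context, minBackoff, maxBackoff time.Duration, maxRetries int, fn func() error) error {
	delay := minBackoff
	var err error
	for attempt := 0; attempt < maxRetries; attempt++ {
		if err = fn(); err == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			return errors.Join(err, ctx.Err())
		case <-time.After(delay):
		}
		if delay *= 2; delay > maxBackoff {
			delay = maxBackoff
		}
	}
	return err
}

func main() {
	attempts := 0
	// flushOnce is a hypothetical stand-in for "write segment to object
	// storage, add block to metastore, commit offset"; it fails twice to
	// exercise the retries.
	flushOnce := func() error {
		attempts++
		if attempts < 3 {
			return fmt.Errorf("flush failed (attempt %d)", attempts)
		}
		return nil
	}
	err := retryWithBackoff(context.Background(), 100*time.Millisecond, time.Second, 5, flushOnce)
	fmt.Println(attempts, err) // 3 <nil>
}

Retrying the flush as a single unit keeps the offset commit last, so a partially failed flush is simply redone rather than leaving committed offsets ahead of persisted data.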
chore
delete old kafka ingester code (#14819)
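For context on the shutdownmarker package removed in this commit: it implements a small on-disk protocol in which the prepare-partition-downscale handler's POST creates a marker file, startup checks for it to decide whether to unregister and flush on shutdown, and a DELETE (or a clean shutdown) removes it. The following stdlib-only sketch keeps the same semantics; the removed package wrote the file atomically (temp file plus rename) and fsynced the parent directory on removal, which this simplified version skips, so treat it as an approximation rather than the removed code.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

const shutdownMarkerFilename = "shutdown-requested.txt"

// markerPath mirrors the deleted GetPath helper: the marker lives under the
// configured shutdown-marker directory.
func markerPath(dir string) string {
	return filepath.Join(dir, shutdownMarkerFilename)
}

// createMarker records when the downscale was requested. The removed package
// wrote this atomically via a temp file and rename; os.WriteFile is a
// simplification for this sketch.
func createMarker(p string) error {
	return os.WriteFile(p, []byte(time.Now().UTC().Format(time.RFC3339)), 0o644)
}

// markerExists reports whether a regular marker file is present, treating a
// missing file as "no marker" rather than an error.
func markerExists(p string) (bool, error) {
	s, err := os.Stat(p)
	if os.IsNotExist(err) {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return s.Mode().IsRegular(), nil
}

// removeMarker tolerates a missing file so the DELETE handler stays idempotent.
func removeMarker(p string) error {
	if err := os.Remove(p); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}

func main() {
	p := markerPath(os.TempDir())
	if err := createMarker(p); err != nil {
		panic(err)
	}
	found, _ := markerExists(p) // on startup: true => unregister and flush on shutdown
	fmt.Println("marker present:", found)
	_ = removeMarker(p) // on DELETE or on clean shutdown
}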
3c0e3e2c13591e3af44ce4826245043c81bb66c3
2024-08-12 21:30:51
renovate[bot]
fix(deps): update module github.com/baidubce/bce-sdk-go to v0.9.186 (#13864)
false
diff --git a/go.mod b/go.mod index 9fd5e63be7d07..c55d847b7120a 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/alicebob/miniredis/v2 v2.30.4 github.com/aliyun/aliyun-oss-go-sdk v2.2.10+incompatible github.com/aws/aws-sdk-go v1.54.19 - github.com/baidubce/bce-sdk-go v0.9.141 + github.com/baidubce/bce-sdk-go v0.9.186 github.com/bmatcuk/doublestar v1.3.4 github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500 github.com/cespare/xxhash v1.1.0 diff --git a/go.sum b/go.sum index e664c656f1e14..39d0ec92e4640 100644 --- a/go.sum +++ b/go.sum @@ -385,8 +385,8 @@ github.com/aws/smithy-go v1.11.1 h1:IQ+lPZVkSM3FRtyaDox41R8YS6iwPMYIreejOgPW49g= github.com/aws/smithy-go v1.11.1/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= github.com/axiomhq/hyperloglog v0.0.0-20240507144631-af9851f82b27 h1:60m4tnanN1ctzIu4V3bfCNJ39BiOPSm1gHFlFjTkRE0= github.com/axiomhq/hyperloglog v0.0.0-20240507144631-af9851f82b27/go.mod h1:k08r+Yj1PRAmuayFiRK6MYuR5Ve4IuZtTfxErMIh0+c= -github.com/baidubce/bce-sdk-go v0.9.141 h1:EV5BH5lfymIGPSmYDo9xYdsVlvWAW6nFeiA6t929zBE= -github.com/baidubce/bce-sdk-go v0.9.141/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg= +github.com/baidubce/bce-sdk-go v0.9.186 h1:GVJTwH34UhsY0Plh9avlNw1V3F2N20p7hzcrRTf2zpk= +github.com/baidubce/bce-sdk-go v0.9.186/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= diff --git a/vendor/github.com/baidubce/bce-sdk-go/bce/client.go b/vendor/github.com/baidubce/bce-sdk-go/bce/client.go index 28eccd0f3b8bb..d223a0987fc66 100644 --- a/vendor/github.com/baidubce/bce-sdk-go/bce/client.go +++ b/vendor/github.com/baidubce/bce-sdk-go/bce/client.go @@ -155,6 +155,11 @@ func (c *BceClient) SendRequest(req *BceRequest, resp *BceResponse) error { log.Infof("receive http response: status: %s, debugId: %s, requestId: %s, elapsed: %v", resp.StatusText(), resp.DebugId(), resp.RequestId(), resp.ElapsedTime()) + + if resp.ElapsedTime().Milliseconds() > DEFAULT_WARN_LOG_TIMEOUT_IN_MILLS { + log.Warnf("request time more than 5 second, debugId: %s, requestId: %s, elapsed: %v", + resp.DebugId(), resp.RequestId(), resp.ElapsedTime()) + } for k, v := range resp.Headers() { log.Debugf("%s=%s", k, v) } diff --git a/vendor/github.com/baidubce/bce-sdk-go/bce/config.go b/vendor/github.com/baidubce/bce-sdk-go/bce/config.go index 346b06cddf5dc..c55090aae342f 100644 --- a/vendor/github.com/baidubce/bce-sdk-go/bce/config.go +++ b/vendor/github.com/baidubce/bce-sdk-go/bce/config.go @@ -26,13 +26,15 @@ import ( // Constants and default values for the package bce const ( - SDK_VERSION = "0.9.141" + SDK_VERSION = "0.9.186" URI_PREFIX = "/" // now support uri without prefix "v1" so just set root path DEFAULT_DOMAIN = "baidubce.com" DEFAULT_PROTOCOL = "http" + HTTPS_PROTOCAL = "https" DEFAULT_REGION = "bj" DEFAULT_CONTENT_TYPE = "application/json;charset=utf-8" DEFAULT_CONNECTION_TIMEOUT_IN_MILLIS = 1200 * 1000 + DEFAULT_WARN_LOG_TIMEOUT_IN_MILLS = 5 * 1000 ) var ( diff --git a/vendor/github.com/baidubce/bce-sdk-go/http/client.go b/vendor/github.com/baidubce/bce-sdk-go/http/client.go index b036a40350f87..fe0fa4dbac111 100644 --- a/vendor/github.com/baidubce/bce-sdk-go/http/client.go +++ 
b/vendor/github.com/baidubce/bce-sdk-go/http/client.go @@ -172,3 +172,22 @@ func Execute(request *Request) (*Response, error) { response := &Response{httpResponse, end.Sub(start)} return response, nil } +func SetResponseHeaderTimeout(t int) { + transport = &http.Transport{ + MaxIdleConnsPerHost: defaultMaxIdleConnsPerHost, + ResponseHeaderTimeout: time.Duration(t) * time.Second, + Dial: func(network, address string) (net.Conn, error) { + conn, err := net.DialTimeout(network, address, defaultDialTimeout) + if err != nil { + return nil, err + } + tc := &timeoutConn{conn, defaultSmallInterval, defaultLargeInterval} + err = tc.SetReadDeadline(time.Now().Add(defaultLargeInterval)) + if err != nil { + return nil, err + } + return tc, nil + }, + } + httpClient.Transport = transport +} diff --git a/vendor/github.com/baidubce/bce-sdk-go/http/constants.go b/vendor/github.com/baidubce/bce-sdk-go/http/constants.go index 2f9a1e185082a..f362a19ddd0ba 100644 --- a/vendor/github.com/baidubce/bce-sdk-go/http/constants.go +++ b/vendor/github.com/baidubce/bce-sdk-go/http/constants.go @@ -61,6 +61,7 @@ const ( BCE_USER_METADATA_PREFIX = "x-bce-meta-" BCE_SECURITY_TOKEN = "x-bce-security-token" BCE_DATE = "x-bce-date" + BCE_TAG = "x-bce-tag-list" // BOS HTTP Headers BCE_COPY_METADATA_DIRECTIVE = "x-bce-metadata-directive" @@ -80,5 +81,8 @@ const ( BCE_RESTORE = "x-bce-restore" BCE_FORBID_OVERWRITE = "x-bce-forbid-overwrite" BCE_SYMLINK_TARGET = "x-bce-symlink-target" - BCE_TRAFFIC_LIMIT = "x-bce-traffic-limit" + BCE_SYMLINK_BUCKET = "x-bce-symlink-bucket" + BCE_TRAFFIC_LIMIT = "x-bce-traffic-limit" + BCE_BUCKET_TYPE = "x-bce-bucket-type" + BCE_OBJECT_TAGGING = "x-bce-tagging" ) diff --git a/vendor/github.com/baidubce/bce-sdk-go/http/request.go b/vendor/github.com/baidubce/bce-sdk-go/http/request.go index 7fecfb99a460e..964c57010a3a0 100644 --- a/vendor/github.com/baidubce/bce-sdk-go/http/request.go +++ b/vendor/github.com/baidubce/bce-sdk-go/http/request.go @@ -149,7 +149,11 @@ func (r *Request) SetParam(key, value string) { func (r *Request) QueryString() string { buf := make([]string, 0, len(r.params)) for k, v := range r.params { - buf = append(buf, util.UriEncode(k, true)+"="+util.UriEncode(v, true)) + if len(v) == 0 { + buf = append(buf, util.UriEncode(k, true)) + } else { + buf = append(buf, util.UriEncode(k, true)+"="+util.UriEncode(v, true)) + } } return strings.Join(buf, "&") } diff --git a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/bucket.go b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/bucket.go index 9046364c394f0..ea79f00322078 100644 --- a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/bucket.go +++ b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/bucket.go @@ -32,11 +32,11 @@ import ( // RETURNS: // - *ListBucketsResult: the result bucket list structure // - error: nil if ok otherwise the specific error -func ListBuckets(cli bce.Client) (*ListBucketsResult, error) { +func ListBuckets(cli bce.Client, ctx *BosContext) (*ListBucketsResult, error) { req := &bce.BceRequest{} req.SetMethod(http.GET) resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { @@ -59,11 +59,11 @@ func ListBuckets(cli bce.Client) (*ListBucketsResult, error) { // - *ListObjectsResult: the result object list structure // - error: nil if ok otherwise the specific error func ListObjects(cli bce.Client, bucket string, - args *ListObjectsArgs) (*ListObjectsResult, error) { 
+ args *ListObjectsArgs, ctx *BosContext) (*ListObjectsResult, error) { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.GET) - + ctx.Bucket = bucket // Optional arguments settings if args != nil { if len(args.Delimiter) != 0 { @@ -85,7 +85,7 @@ func ListObjects(cli bce.Client, bucket string, // Send the request and get result resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { @@ -106,19 +106,20 @@ func ListObjects(cli bce.Client, bucket string, // - bucket: the bucket name // RETURNS: // - error: nil if exists and have authority otherwise the specific error -func HeadBucket(cli bce.Client, bucket string) error { +func HeadBucket(cli bce.Client, bucket string, ctx *BosContext) (error, *bce.BceResponse) { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.HEAD) + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { - return err + if err := SendRequest(cli, req, resp, ctx); err != nil { + return err, resp } if resp.IsFail() { - return resp.ServiceError() + return resp.ServiceError(), resp } defer func() { resp.Body().Close() }() - return nil + return nil, resp } // PutBucket - create a new bucket with the given name @@ -129,12 +130,18 @@ func HeadBucket(cli bce.Client, bucket string) error { // RETURNS: // - string: the location of the new bucket if create success // - error: nil if create success otherwise the specific error -func PutBucket(cli bce.Client, bucket string) (string, error) { +func PutBucket(cli bce.Client, bucket string, args *PutBucketArgs, ctx *BosContext) (string, error) { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.PUT) + ctx.Bucket = bucket + if args != nil { + if len(args.TagList) != 0 { + req.SetHeader(http.BCE_TAG, args.TagList) + } + } resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return "", err } if resp.IsFail() { @@ -151,12 +158,13 @@ func PutBucket(cli bce.Client, bucket string) (string, error) { // - bucket: the bucket name to be deleted // RETURNS: // - error: nil if delete success otherwise the specific error -func DeleteBucket(cli bce.Client, bucket string) error { +func DeleteBucket(cli bce.Client, bucket string, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.DELETE) + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -174,13 +182,14 @@ func DeleteBucket(cli bce.Client, bucket string) error { // RETURNS: // - string: the location of the bucket // - error: nil if delete success otherwise the specific error -func GetBucketLocation(cli bce.Client, bucket string) (string, error) { +func GetBucketLocation(cli bce.Client, bucket string, ctx *BosContext) (string, error) { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.GET) req.SetParam("location", "") + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return "", err } if resp.IsFail() { @@ -203,12 +212,12 @@ func GetBucketLocation(cli bce.Client, bucket string) (string, error) { // - aclBody: the acl file body // RETURNS: // - error: 
nil if delete success otherwise the specific error -func PutBucketAcl(cli bce.Client, bucket, cannedAcl string, aclBody *bce.Body) error { +func PutBucketAcl(cli bce.Client, bucket, cannedAcl string, aclBody *bce.Body, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.PUT) req.SetParam("acl", "") - + ctx.Bucket = bucket // The acl setting if len(cannedAcl) != 0 && aclBody != nil { return bce.NewBceClientError("BOS does not support cannedAcl and acl file at the same time") @@ -222,7 +231,7 @@ func PutBucketAcl(cli bce.Client, bucket, cannedAcl string, aclBody *bce.Body) e } resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -240,14 +249,14 @@ func PutBucketAcl(cli bce.Client, bucket, cannedAcl string, aclBody *bce.Body) e // RETURNS: // - *GetBucketAclResult: the result of the bucket acl // - error: nil if success otherwise the specific error -func GetBucketAcl(cli bce.Client, bucket string) (*GetBucketAclResult, error) { +func GetBucketAcl(cli bce.Client, bucket string, ctx *BosContext) (*GetBucketAclResult, error) { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.GET) req.SetParam("acl", "") - + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { @@ -269,14 +278,15 @@ func GetBucketAcl(cli bce.Client, bucket string) (*GetBucketAclResult, error) { // - logging: the logging prefix json string body // RETURNS: // - error: nil if success otherwise the specific error -func PutBucketLogging(cli bce.Client, bucket string, logging *bce.Body) error { +func PutBucketLogging(cli bce.Client, bucket string, logging *bce.Body, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.PUT) req.SetParam("logging", "") req.SetBody(logging) + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -294,14 +304,14 @@ func PutBucketLogging(cli bce.Client, bucket string, logging *bce.Body) error { // RETURNS: // - *GetBucketLoggingResult: the logging setting of the bucket // - error: nil if success otherwise the specific error -func GetBucketLogging(cli bce.Client, bucket string) (*GetBucketLoggingResult, error) { +func GetBucketLogging(cli bce.Client, bucket string, ctx *BosContext) (*GetBucketLoggingResult, error) { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.GET) req.SetParam("logging", "") - + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { @@ -321,13 +331,14 @@ func GetBucketLogging(cli bce.Client, bucket string) (*GetBucketLoggingResult, e // - bucket: the bucket name // RETURNS: // - error: nil if success otherwise the specific error -func DeleteBucketLogging(cli bce.Client, bucket string) error { +func DeleteBucketLogging(cli bce.Client, bucket string, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.DELETE) req.SetParam("logging", "") + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := 
SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -345,14 +356,15 @@ func DeleteBucketLogging(cli bce.Client, bucket string) error { // - lifecycle: the lifecycle rule json string body // RETURNS: // - error: nil if success otherwise the specific error -func PutBucketLifecycle(cli bce.Client, bucket string, lifecycle *bce.Body) error { +func PutBucketLifecycle(cli bce.Client, bucket string, lifecycle *bce.Body, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.PUT) req.SetParam("lifecycle", "") req.SetBody(lifecycle) + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -370,14 +382,14 @@ func PutBucketLifecycle(cli bce.Client, bucket string, lifecycle *bce.Body) erro // RETURNS: // - *GetBucketLifecycleResult: the lifecycle rule of the bucket // - error: nil if success otherwise the specific error -func GetBucketLifecycle(cli bce.Client, bucket string) (*GetBucketLifecycleResult, error) { +func GetBucketLifecycle(cli bce.Client, bucket string, ctx *BosContext) (*GetBucketLifecycleResult, error) { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.GET) req.SetParam("lifecycle", "") - + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { @@ -397,13 +409,14 @@ func GetBucketLifecycle(cli bce.Client, bucket string) (*GetBucketLifecycleResul // - bucket: the bucket name // RETURNS: // - error: nil if success otherwise the specific error -func DeleteBucketLifecycle(cli bce.Client, bucket string) error { +func DeleteBucketLifecycle(cli bce.Client, bucket string, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.DELETE) req.SetParam("lifecycle", "") + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -421,12 +434,12 @@ func DeleteBucketLifecycle(cli bce.Client, bucket string) error { // - storageClass: the storage class string // RETURNS: // - error: nil if success otherwise the specific error -func PutBucketStorageclass(cli bce.Client, bucket, storageClass string) error { +func PutBucketStorageclass(cli bce.Client, bucket, storageClass string, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.PUT) req.SetParam("storageClass", "") - + ctx.Bucket = bucket obj := &StorageClassType{storageClass} jsonBytes, jsonErr := json.Marshal(obj) if jsonErr != nil { @@ -439,7 +452,7 @@ func PutBucketStorageclass(cli bce.Client, bucket, storageClass string) error { req.SetBody(body) resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -457,14 +470,14 @@ func PutBucketStorageclass(cli bce.Client, bucket, storageClass string) error { // RETURNS: // - string: the storage class of the bucket // - error: nil if success otherwise the specific error -func GetBucketStorageclass(cli bce.Client, bucket string) (string, error) { +func GetBucketStorageclass(cli bce.Client, bucket string, ctx *BosContext) (string, error) { req := &bce.BceRequest{} 
req.SetUri(getBucketUri(bucket)) req.SetMethod(http.GET) req.SetParam("storageClass", "") - + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return "", err } if resp.IsFail() { @@ -486,11 +499,12 @@ func GetBucketStorageclass(cli bce.Client, bucket string) (string, error) { // - replicationRuleId: the replication rule id composed of [0-9 A-Z a-z _ -] // RETURNS: // - error: nil if success otherwise the specific error -func PutBucketReplication(cli bce.Client, bucket string, replicationConf *bce.Body, replicationRuleId string) error { +func PutBucketReplication(cli bce.Client, bucket string, replicationConf *bce.Body, replicationRuleId string, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.PUT) req.SetParam("replication", "") + ctx.Bucket = bucket if len(replicationRuleId) > 0 { req.SetParam("id", replicationRuleId) } @@ -501,7 +515,7 @@ func PutBucketReplication(cli bce.Client, bucket string, replicationConf *bce.Bo } resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -520,17 +534,18 @@ func PutBucketReplication(cli bce.Client, bucket string, replicationConf *bce.Bo // RETURNS: // - *GetBucketReplicationResult: the result of the bucket replication config // - error: nil if success otherwise the specific error -func GetBucketReplication(cli bce.Client, bucket string, replicationRuleId string) (*GetBucketReplicationResult, error) { +func GetBucketReplication(cli bce.Client, bucket string, replicationRuleId string, ctx *BosContext) (*GetBucketReplicationResult, error) { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.GET) req.SetParam("replication", "") + ctx.Bucket = bucket if len(replicationRuleId) > 0 { req.SetParam("id", replicationRuleId) } resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { @@ -550,12 +565,13 @@ func GetBucketReplication(cli bce.Client, bucket string, replicationRuleId strin // - bucket: the bucket name // RETURNS: // - error: nil if success otherwise the specific error -func ListBucketReplication(cli bce.Client, bucket string) (*ListBucketReplicationResult, error) { +func ListBucketReplication(cli bce.Client, bucket string, ctx *BosContext) (*ListBucketReplicationResult, error) { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.GET) req.SetParam("replication", "") req.SetParam("list", "") + ctx.Bucket = bucket resp := &bce.BceResponse{} if err := cli.SendRequest(req, resp); err != nil { return nil, err @@ -578,16 +594,17 @@ func ListBucketReplication(cli bce.Client, bucket string) (*ListBucketReplicatio // - replicationRuleId: the replication rule id composed of [0-9 A-Z a-z _ -] // RETURNS: // - error: nil if success otherwise the specific error -func DeleteBucketReplication(cli bce.Client, bucket string, replicationRuleId string) error { +func DeleteBucketReplication(cli bce.Client, bucket string, replicationRuleId string, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.DELETE) req.SetParam("replication", "") + ctx.Bucket = bucket if len(replicationRuleId) > 0 { req.SetParam("id", replicationRuleId) } resp := &bce.BceResponse{} - if err := 
SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -606,18 +623,19 @@ func DeleteBucketReplication(cli bce.Client, bucket string, replicationRuleId st // RETURNS: // - *GetBucketReplicationProgressResult: the result of the bucket replication process // - error: nil if success otherwise the specific error -func GetBucketReplicationProgress(cli bce.Client, bucket string, replicationRuleId string) ( +func GetBucketReplicationProgress(cli bce.Client, bucket string, replicationRuleId string, ctx *BosContext) ( *GetBucketReplicationProgressResult, error) { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.GET) req.SetParam("replicationProgress", "") + ctx.Bucket = bucket if len(replicationRuleId) > 0 { req.SetParam("id", replicationRuleId) } resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { @@ -638,12 +656,12 @@ func GetBucketReplicationProgress(cli bce.Client, bucket string, replicationRule // - algorithm: the encryption algorithm // RETURNS: // - error: nil if success otherwise the specific error -func PutBucketEncryption(cli bce.Client, bucket, algorithm string) error { +func PutBucketEncryption(cli bce.Client, bucket, algorithm string, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.PUT) req.SetParam("encryption", "") - + ctx.Bucket = bucket obj := &BucketEncryptionType{algorithm} jsonBytes, jsonErr := json.Marshal(obj) if jsonErr != nil { @@ -657,7 +675,7 @@ func PutBucketEncryption(cli bce.Client, bucket, algorithm string) error { req.SetBody(body) resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -675,14 +693,14 @@ func PutBucketEncryption(cli bce.Client, bucket, algorithm string) error { // RETURNS: // - algorithm: the bucket encryption algorithm // - error: nil if success otherwise the specific error -func GetBucketEncryption(cli bce.Client, bucket string) (string, error) { +func GetBucketEncryption(cli bce.Client, bucket string, ctx *BosContext) (string, error) { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.GET) req.SetParam("encryption", "") - + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return "", err } if resp.IsFail() { @@ -702,13 +720,14 @@ func GetBucketEncryption(cli bce.Client, bucket string) (string, error) { // - bucket: the bucket name // RETURNS: // - error: nil if success otherwise the specific error -func DeleteBucketEncryption(cli bce.Client, bucket string) error { +func DeleteBucketEncryption(cli bce.Client, bucket string, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.DELETE) req.SetParam("encryption", "") + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -726,18 +745,19 @@ func DeleteBucketEncryption(cli bce.Client, bucket string) error { // - confBody: the static website config body stream // RETURNS: // - error: nil if success otherwise the specific error -func PutBucketStaticWebsite(cli bce.Client, bucket 
string, confBody *bce.Body) error { +func PutBucketStaticWebsite(cli bce.Client, bucket string, confBody *bce.Body, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.PUT) req.SetParam("website", "") + ctx.Bucket = bucket if confBody != nil { req.SetHeader(http.CONTENT_TYPE, bce.DEFAULT_CONTENT_TYPE) req.SetBody(confBody) } resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -755,15 +775,15 @@ func PutBucketStaticWebsite(cli bce.Client, bucket string, confBody *bce.Body) e // RETURNS: // - result: the bucket static website config result object // - error: nil if success otherwise the specific error -func GetBucketStaticWebsite(cli bce.Client, bucket string) ( +func GetBucketStaticWebsite(cli bce.Client, bucket string, ctx *BosContext) ( *GetBucketStaticWebsiteResult, error) { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.GET) req.SetParam("website", "") - + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { @@ -783,13 +803,14 @@ func GetBucketStaticWebsite(cli bce.Client, bucket string) ( // - bucket: the bucket name // RETURNS: // - error: nil if success otherwise the specific error -func DeleteBucketStaticWebsite(cli bce.Client, bucket string) error { +func DeleteBucketStaticWebsite(cli bce.Client, bucket string, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.DELETE) req.SetParam("website", "") + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -807,18 +828,19 @@ func DeleteBucketStaticWebsite(cli bce.Client, bucket string) error { // - confBody: the CORS config body stream // RETURNS: // - error: nil if success otherwise the specific error -func PutBucketCors(cli bce.Client, bucket string, confBody *bce.Body) error { +func PutBucketCors(cli bce.Client, bucket string, confBody *bce.Body, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.PUT) req.SetParam("cors", "") + ctx.Bucket = bucket if confBody != nil { req.SetHeader(http.CONTENT_TYPE, bce.DEFAULT_CONTENT_TYPE) req.SetBody(confBody) } resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -836,15 +858,15 @@ func PutBucketCors(cli bce.Client, bucket string, confBody *bce.Body) error { // RETURNS: // - result: the bucket CORS config result object // - error: nil if success otherwise the specific error -func GetBucketCors(cli bce.Client, bucket string) ( +func GetBucketCors(cli bce.Client, bucket string, ctx *BosContext) ( *GetBucketCorsResult, error) { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.GET) req.SetParam("cors", "") - + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { @@ -864,13 +886,14 @@ func GetBucketCors(cli bce.Client, bucket string) ( // - bucket: the bucket name // RETURNS: // - error: nil if success otherwise the specific error 
-func DeleteBucketCors(cli bce.Client, bucket string) error { +func DeleteBucketCors(cli bce.Client, bucket string, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.DELETE) req.SetParam("cors", "") + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -888,11 +911,12 @@ func DeleteBucketCors(cli bce.Client, bucket string) error { // - resources: the resource items in the bucket to be protected // RETURNS: // - error: nil if success otherwise the specific error -func PutBucketCopyrightProtection(cli bce.Client, bucket string, resources ...string) error { +func PutBucketCopyrightProtection(cli bce.Client, ctx *BosContext, bucket string, resources ...string) error { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.PUT) req.SetParam("copyrightProtection", "") + ctx.Bucket = bucket if len(resources) == 0 { return bce.NewBceClientError("the resource to set copyright protection is empty") } @@ -909,7 +933,7 @@ func PutBucketCopyrightProtection(cli bce.Client, bucket string, resources ...st req.SetBody(body) resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -927,14 +951,14 @@ func PutBucketCopyrightProtection(cli bce.Client, bucket string, resources ...st // RETURNS: // - result: the bucket copyright protection resources array // - error: nil if success otherwise the specific error -func GetBucketCopyrightProtection(cli bce.Client, bucket string) ([]string, error) { +func GetBucketCopyrightProtection(cli bce.Client, bucket string, ctx *BosContext) ([]string, error) { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.GET) req.SetParam("copyrightProtection", "") - + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { @@ -954,13 +978,14 @@ func GetBucketCopyrightProtection(cli bce.Client, bucket string) ([]string, erro // - bucket: the bucket name // RETURNS: // - error: nil if success otherwise the specific error -func DeleteBucketCopyrightProtection(cli bce.Client, bucket string) error { +func DeleteBucketCopyrightProtection(cli bce.Client, bucket string, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.DELETE) req.SetParam("copyrightProtection", "") + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -978,11 +1003,12 @@ func DeleteBucketCopyrightProtection(cli bce.Client, bucket string) error { // - trashDir: the trash dir name // RETURNS: // - error: nil if success otherwise the specific error -func PutBucketTrash(cli bce.Client, bucket string, trashReq PutBucketTrashReq) error { +func PutBucketTrash(cli bce.Client, bucket string, trashReq PutBucketTrashReq, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.PUT) req.SetParam("trash", "") + ctx.Bucket = bucket reqByte, _ := json.Marshal(trashReq) body, err := bce.NewBodyFromString(string(reqByte)) if err != nil { @@ -990,7 +1016,7 @@ func PutBucketTrash(cli bce.Client, bucket string, 
trashReq PutBucketTrashReq) e } req.SetBody(body) resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -1000,13 +1026,14 @@ func PutBucketTrash(cli bce.Client, bucket string, trashReq PutBucketTrashReq) e return nil } -func GetBucketTrash(cli bce.Client, bucket string) (*GetBucketTrashResult, error) { +func GetBucketTrash(cli bce.Client, bucket string, ctx *BosContext) (*GetBucketTrashResult, error) { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.GET) req.SetParam("trash", "") + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { @@ -1019,13 +1046,14 @@ func GetBucketTrash(cli bce.Client, bucket string) (*GetBucketTrashResult, error return result, nil } -func DeleteBucketTrash(cli bce.Client, bucket string) error { +func DeleteBucketTrash(cli bce.Client, bucket string, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.DELETE) req.SetParam("trash", "") + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -1035,11 +1063,12 @@ func DeleteBucketTrash(cli bce.Client, bucket string) error { return nil } -func PutBucketNotification(cli bce.Client, bucket string, putBucketNotificationReq PutBucketNotificationReq) error { +func PutBucketNotification(cli bce.Client, bucket string, putBucketNotificationReq PutBucketNotificationReq, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.PUT) req.SetParam("notification", "") + ctx.Bucket = bucket reqByte, _ := json.Marshal(putBucketNotificationReq) body, err := bce.NewBodyFromString(string(reqByte)) if err != nil { @@ -1047,7 +1076,7 @@ func PutBucketNotification(cli bce.Client, bucket string, putBucketNotificationR } req.SetBody(body) resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -1057,13 +1086,14 @@ func PutBucketNotification(cli bce.Client, bucket string, putBucketNotificationR return nil } -func GetBucketNotification(cli bce.Client, bucket string) (*PutBucketNotificationReq, error) { +func GetBucketNotification(cli bce.Client, bucket string, ctx *BosContext) (*PutBucketNotificationReq, error) { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.GET) req.SetParam("notification", "") + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { @@ -1076,13 +1106,134 @@ func GetBucketNotification(cli bce.Client, bucket string) (*PutBucketNotificatio return result, nil } -func DeleteBucketNotification(cli bce.Client, bucket string) error { +func DeleteBucketNotification(cli bce.Client, bucket string, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.DELETE) req.SetParam("notification", "") + ctx.Bucket = bucket + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp, ctx); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { 
resp.Body().Close() }() + return nil +} + +func PutBucketMirror(cli bce.Client, bucket string, putBucketMirrorArgs *PutBucketMirrorArgs, ctx *BosContext) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.PUT) + req.SetParam("mirroring", "") + ctx.Bucket = bucket + reqByte, _ := json.Marshal(putBucketMirrorArgs) + body, err := bce.NewBodyFromString(string(reqByte)) + if err != nil { + return err + } + req.SetBody(body) + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp, ctx); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +func GetBucketMirror(cli bce.Client, bucket string, ctx *BosContext) (*PutBucketMirrorArgs, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("mirroring", "") + ctx.Bucket = bucket + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp, ctx); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &PutBucketMirrorArgs{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +func DeleteBucketMirror(cli bce.Client, bucket string, ctx *BosContext) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.DELETE) + req.SetParam("mirroring", "") + ctx.Bucket = bucket + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp, ctx); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +func PutBucketTag(cli bce.Client, bucket string, putBucketTagArgs *PutBucketTagArgs, ctx *BosContext) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.PUT) + req.SetParam("tagging", "") + ctx.Bucket = bucket + reqByte, _ := json.Marshal(putBucketTagArgs) + body, err := bce.NewBodyFromString(string(reqByte)) + if err != nil { + return err + } + req.SetBody(body) + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp, ctx); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +func GetBucketTag(cli bce.Client, bucket string, ctx *BosContext) (*GetBucketTagResult, error) { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.GET) + req.SetParam("tagging", "") + ctx.Bucket = bucket + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp, ctx); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + result := &GetBucketTagResult{} + if err := resp.ParseJsonBody(result); err != nil { + return nil, err + } + return result, nil +} + +func DeleteBucketTag(cli bce.Client, bucket string, ctx *BosContext) error { + req := &bce.BceRequest{} + req.SetUri(getBucketUri(bucket)) + req.SetMethod(http.DELETE) + req.SetParam("tagging", "") + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { diff --git a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/model.go b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/model.go index 30fd7f731f38d..914fa21d16d4b 100644 --- a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/model.go +++ 
b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/model.go @@ -58,6 +58,10 @@ type PrefixType struct { Prefix string `json:"prefix"` } +type PutBucketArgs struct { + TagList string +} + // ListObjectsResult defines the result structure of ListObjects api. type ListObjectsResult struct { Name string `json:"name"` @@ -172,18 +176,20 @@ type StorageClassType struct { type BucketReplicationDescriptor struct { Bucket string `json:"bucket,omitempty"` StorageClass string `json:"storageClass,omitempty"` + Prefix string `json:"prefix,omitempty"` } // BucketReplicationType defines the data structure for Put and Get of bucket replication type BucketReplicationType struct { - Id string `json:"id"` - Status string `json:"status"` - Resource []string `json:"resource"` - ReplicateDeletes string `json:"replicateDeletes"` - Destination *BucketReplicationDescriptor `json:"destination,omitempty"` - ReplicateHistory *BucketReplicationDescriptor `json:"replicateHistory,omitempty"` - CreateTime int64 `json:"createTime"` - DestRegion string `json:"destRegion"` + Id string `json:"id"` + Status string `json:"status"` + Resource []string `json:"resource"` + NotIncludeResource []string `json:"notIncludeResource,omitempty"` + ReplicateDeletes string `json:"replicateDeletes"` + Destination *BucketReplicationDescriptor `json:"destination,omitempty"` + ReplicateHistory *BucketReplicationDescriptor `json:"replicateHistory,omitempty"` + CreateTime int64 `json:"createTime"` + DestRegion string `json:"destRegion"` } type PutBucketReplicationArgs BucketReplicationType @@ -259,6 +265,8 @@ type PutObjectArgs struct { ContentCrc32 string StorageClass string Process string + CannedAcl string + ObjectTagging string TrafficLimit int64 } @@ -270,13 +278,22 @@ type CopyObjectArgs struct { IfNoneMatch string IfModifiedSince string IfUnmodifiedSince string - TrafficLimit int64 + TrafficLimit int64 + CannedAcl string } type MultiCopyObjectArgs struct { StorageClass string } +type CallbackResult struct { + Result string `json:"result"` +} + +type PutObjectResult struct { + Callback CallbackResult `json:"callback"` +} + // CopyObjectResult defines the result json structure for the copy object api. type CopyObjectResult struct { LastModified string `json:"lastModified"` @@ -462,13 +479,14 @@ type UploadPartCopyArgs struct { IfNoneMatch string IfModifiedSince string IfUnmodifiedSince string - TrafficLimit int64 + TrafficLimit int64 } type PutSymlinkArgs struct { ForbidOverwrite string StorageClass string UserMeta map[string]string + SymlinkBucket string } // UploadInfoType defines an uploaded part info structure. 
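// [Editorial aside, not part of the upstream diff] A hedged sketch combining
// the tagging model types defined above with the PutBucketTag/GetBucketTag
// calls added earlier in this diff; bucket and tag values are placeholders.
// Note the asymmetry baked into the model: PutBucketTagArgs marshals
// camelCase keys (tagKey/tagValue) while GetBucketTagResult expects
// snake_case (tag_key/tag_value) under "tag".
func exampleBucketTagging(cli bce.Client) error {
	ctx := &api.BosContext{}
	put := &api.PutBucketTagArgs{
		Tags: []api.Tag{{TagKey: "team", TagValue: "storage"}},
	}
	if err := api.PutBucketTag(cli, "my-bucket", put, ctx); err != nil {
		return err
	}
	got, err := api.GetBucketTag(cli, "my-bucket", ctx)
	if err != nil {
		return err
	}
	for _, t := range got.Tags {
		fmt.Printf("%s=%s\n", t.TagKey, t.TagValue) // import "fmt"
	}
	return nil
}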
@@ -583,3 +601,65 @@ type PutBucketNotificationAppsSt struct { EventUrl string `json:"eventUrl"` XVars string `json:"xVars"` } + +type MirrorConfigurationRule struct { + Prefix string `json:"prefix,omitempty"` + SourceUrl string `json:"sourceUrl"` + PassQueryString bool `json:"passQuerystring"` + Mode string `json:"mode"` + StorageClass string `json:"storageClass"` + PassHeaders []string `json:"passHeaders"` + IgnoreHeaders []string `json:"ignoreHeaders"` + CustomHeaders []HeaderPair `json:"customHeaders"` + BackSourceUrl string `json:"backSourceUrl"` + Resource string `json:"resource"` + Suffix string `json:"suffix"` + FixedKey string `json:"fixedKey"` + PrefixReplace string `json:"prefixReplace"` + Version string `json:"version"` +} + +type HeaderPair struct { + HeaderName string `json:"headerName"` + HeaderValue string `json:"headerValue"` +} + +type PutBucketMirrorArgs struct { + BucketMirroringConfiguration []MirrorConfigurationRule `json:"bucketMirroringConfiguration"` +} + +type PutBucketTagArgs struct { + Tags []Tag `json:"tags"` +} + +type Tag struct { + TagKey string `json:"tagKey"` + TagValue string `json:"tagValue"` +} + +type GetBucketTagResult struct { + Tags []BucketTag `json:"tag"` +} + +type BucketTag struct { + TagKey string `json:"tag_key"` + TagValue string `json:"tag_value"` +} + +type BosContext struct { + Bucket string + PathStyleEnable bool +} + +type PutObjectTagArgs struct { + ObjectTags []ObjectTags `json:"tagSet"` +} + +type ObjectTags struct { + TagInfo []ObjectTag `json:"tagInfo"` +} + +type ObjectTag struct { + Key string `json:"key"` + Value string `json:"value"` +} diff --git a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/multipart.go b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/multipart.go index 806017b39eff8..0f86fa6404515 100644 --- a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/multipart.go +++ b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/multipart.go @@ -39,11 +39,12 @@ import ( // - *InitiateMultipartUploadResult: the result data structure // - error: nil if ok otherwise the specific error func InitiateMultipartUpload(cli bce.Client, bucket, object, contentType string, - args *InitiateMultipartUploadArgs) (*InitiateMultipartUploadResult, error) { + args *InitiateMultipartUploadArgs, ctx *BosContext) (*InitiateMultipartUploadResult, error) { req := &bce.BceRequest{} req.SetUri(getObjectUri(bucket, object)) req.SetMethod(http.POST) req.SetParam("uploads", "") + ctx.Bucket = bucket if len(contentType) == 0 { contentType = RAW_CONTENT_TYPE } @@ -69,7 +70,7 @@ func InitiateMultipartUpload(cli bce.Client, bucket, object, contentType string, // Send request and get the result resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { @@ -97,12 +98,13 @@ func InitiateMultipartUpload(cli bce.Client, bucket, object, contentType string, // - string: the etag of the uploaded part // - error: nil if ok otherwise the specific error func UploadPart(cli bce.Client, bucket, object, uploadId string, partNumber int, - content *bce.Body, args *UploadPartArgs) (string, error) { + content *bce.Body, args *UploadPartArgs, ctx *BosContext) (string, error) { req := &bce.BceRequest{} req.SetUri(getObjectUri(bucket, object)) req.SetMethod(http.PUT) req.SetParam("uploadId", uploadId) req.SetParam("partNumber", fmt.Sprintf("%d", partNumber)) + ctx.Bucket = bucket if content == nil { return "", 
bce.NewBceClientError("upload part content should not be empty") } @@ -130,7 +132,7 @@ func UploadPart(cli bce.Client, bucket, object, uploadId string, partNumber int, // Send request and get the result resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return "", err } if resp.IsFail() { @@ -154,12 +156,13 @@ func UploadPart(cli bce.Client, bucket, object, uploadId string, partNumber int, // - string: the etag of the uploaded part // - error: nil if ok otherwise the specific error func UploadPartFromBytes(cli bce.Client, bucket, object, uploadId string, partNumber int, - content []byte, args *UploadPartArgs) (string, error) { + content []byte, args *UploadPartArgs, ctx *BosContext) (string, error) { req := &bce.BceRequest{} req.SetUri(getObjectUri(bucket, object)) req.SetMethod(http.PUT) req.SetParam("uploadId", uploadId) req.SetParam("partNumber", fmt.Sprintf("%d", partNumber)) + ctx.Bucket = bucket if content == nil { return "", bce.NewBceClientError("upload part content should not be empty") } @@ -188,6 +191,14 @@ func UploadPartFromBytes(cli bce.Client, bucket, object, uploadId string, partNu http.BCE_CONTENT_SHA256: args.ContentSha256, http.BCE_CONTENT_CRC32: args.ContentCrc32, }) + //set traffic-limit + if args.TrafficLimit > 0 { + if args.TrafficLimit > TRAFFIC_LIMIT_MAX || args.TrafficLimit < TRAFFIC_LIMIT_MIN { + return "", bce.NewBceClientError(fmt.Sprintf("TrafficLimit must between %d ~ %d, current value:%d", + TRAFFIC_LIMIT_MIN, TRAFFIC_LIMIT_MAX, args.TrafficLimit)) + } + req.SetHeader(http.BCE_TRAFFIC_LIMIT, fmt.Sprintf("%d", args.TrafficLimit)) + } } // Send request and get the result resp := &bce.BceResponse{} @@ -215,12 +226,13 @@ func UploadPartFromBytes(cli bce.Client, bucket, object, uploadId string, partNu // - *CopyObjectResult: the lastModified and eTag of the part // - error: nil if ok otherwise the specific error func UploadPartCopy(cli bce.Client, bucket, object, source, uploadId string, partNumber int, - args *UploadPartCopyArgs) (*CopyObjectResult, error) { + args *UploadPartCopyArgs, ctx *BosContext) (*CopyObjectResult, error) { req := &bce.BceRequest{} req.SetUri(getObjectUri(bucket, object)) req.SetMethod(http.PUT) req.SetParam("uploadId", uploadId) req.SetParam("partNumber", fmt.Sprintf("%d", partNumber)) + ctx.Bucket = bucket if len(source) == 0 { return nil, bce.NewBceClientError("upload part copy source should not be empty") } @@ -246,7 +258,7 @@ func UploadPartCopy(cli bce.Client, bucket, object, source, uploadId string, par // Send request and get the result resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { @@ -272,11 +284,12 @@ func UploadPartCopy(cli bce.Client, bucket, object, source, uploadId string, par // - *CompleteMultipartUploadResult: the result data // - error: nil if ok otherwise the specific error func CompleteMultipartUpload(cli bce.Client, bucket, object, uploadId string, - body *bce.Body, args *CompleteMultipartUploadArgs) (*CompleteMultipartUploadResult, error) { + body *bce.Body, args *CompleteMultipartUploadArgs, ctx *BosContext) (*CompleteMultipartUploadResult, error) { req := &bce.BceRequest{} req.SetUri(getObjectUri(bucket, object)) req.SetMethod(http.POST) req.SetParam("uploadId", uploadId) + ctx.Bucket = bucket if body == nil { return nil, bce.NewBceClientError("upload body info should not be emtpy") } @@ -300,7 
+313,7 @@ func CompleteMultipartUpload(cli bce.Client, bucket, object, uploadId string, // Send request and get the result resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { @@ -326,14 +339,14 @@ func CompleteMultipartUpload(cli bce.Client, bucket, object, uploadId string, // - uploadId: the multipart upload id // RETURNS: // - error: nil if ok otherwise the specific error -func AbortMultipartUpload(cli bce.Client, bucket, object, uploadId string) error { +func AbortMultipartUpload(cli bce.Client, bucket, object, uploadId string, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getObjectUri(bucket, object)) req.SetMethod(http.DELETE) req.SetParam("uploadId", uploadId) - + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -357,12 +370,12 @@ func AbortMultipartUpload(cli bce.Client, bucket, object, uploadId string) error // - *ListPartsResult: the uploaded parts info result // - error: nil if ok otherwise the specific error func ListParts(cli bce.Client, bucket, object, uploadId string, - args *ListPartsArgs) (*ListPartsResult, error) { + args *ListPartsArgs, ctx *BosContext) (*ListPartsResult, error) { req := &bce.BceRequest{} req.SetUri(getObjectUri(bucket, object)) req.SetMethod(http.GET) req.SetParam("uploadId", uploadId) - + ctx.Bucket = bucket // Optional arguments settings if args != nil { if len(args.PartNumberMarker) > 0 { @@ -375,7 +388,7 @@ func ListParts(cli bce.Client, bucket, object, uploadId string, // Send request and get the result resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { @@ -398,11 +411,12 @@ func ListParts(cli bce.Client, bucket, object, uploadId string, // - *ListMultipartUploadsResult: the unfinished uploaded parts info result // - error: nil if ok otherwise the specific error func ListMultipartUploads(cli bce.Client, bucket string, - args *ListMultipartUploadsArgs) (*ListMultipartUploadsResult, error) { + args *ListMultipartUploadsArgs, ctx *BosContext) (*ListMultipartUploadsResult, error) { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.GET) req.SetParam("uploads", "") + ctx.Bucket = bucket // Optional arguments settings if args != nil { @@ -422,7 +436,7 @@ func ListMultipartUploads(cli bce.Client, bucket string, // Send request and get the result resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { diff --git a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/object.go b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/object.go index c8f9a6acd54d7..b4f2269b9ffb8 100644 --- a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/object.go +++ b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/object.go @@ -19,6 +19,7 @@ package api import ( "encoding/json" "fmt" + "io/ioutil" "net" "strconv" "strings" @@ -41,12 +42,14 @@ import ( // - string: the etag of the object // - error: nil if ok otherwise the specific error func PutObject(cli bce.Client, bucket, object string, body *bce.Body, - args *PutObjectArgs) (string, error) { + args *PutObjectArgs, ctx *BosContext) (string, 
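// [Editorial aside, not part of the upstream diff] The multipart flow after
// this change: one *BosContext is threaded through every step so endpoint
// resolution stays consistent across Initiate/Upload/Abort (and Complete,
// whose part-list body is omitted here for brevity). Assumes the result's
// UploadId field, which this diff does not touch; names are placeholders.
func exampleMultipart(cli bce.Client, part *bce.Body) error {
	ctx := &api.BosContext{}
	init, err := api.InitiateMultipartUpload(cli, "my-bucket", "big.bin", "", nil, ctx)
	if err != nil {
		return err
	}
	etag, err := api.UploadPart(cli, "my-bucket", "big.bin", init.UploadId, 1, part, nil, ctx)
	if err != nil {
		_ = api.AbortMultipartUpload(cli, "my-bucket", "big.bin", init.UploadId, ctx)
		return err
	}
	_ = etag // would feed the part list passed to CompleteMultipartUpload
	return nil
}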
*PutObjectResult, error) { req := &bce.BceRequest{} + NeedReturnCallback := false req.SetUri(getObjectUri(bucket, object)) req.SetMethod(http.PUT) + ctx.Bucket = bucket if body == nil { - return "", bce.NewBceClientError("PutObject body should not be emtpy") + return "", nil, bce.NewBceClientError("PutObject body should not be emtpy") } if body.Size() >= THRESHOLD_100_CONTINUE { req.SetHeader("Expect", "100-continue") @@ -68,11 +71,11 @@ func PutObject(cli bce.Client, bucket, object string, body *bce.Body, // be reset. The `net/http.Client' does not support the Content-Length bigger than the // body size. if args.ContentLength > body.Size() { - return "", bce.NewBceClientError(fmt.Sprintf("ContentLength %d is bigger than body size %d", args.ContentLength, body.Size())) + return "", nil, bce.NewBceClientError(fmt.Sprintf("ContentLength %d is bigger than body size %d", args.ContentLength, body.Size())) } body, err := bce.NewBodyFromSizedReader(body.Stream(), args.ContentLength) if err != nil { - return "", bce.NewBceClientError(err.Error()) + return "", nil, bce.NewBceClientError(err.Error()) } req.SetHeader(http.CONTENT_LENGTH, fmt.Sprintf("%d", args.ContentLength)) req.SetBody(body) // re-assign body @@ -81,7 +84,7 @@ func PutObject(cli bce.Client, bucket, object string, body *bce.Body, //set traffic-limit if args.TrafficLimit > 0 { if args.TrafficLimit > TRAFFIC_LIMIT_MAX || args.TrafficLimit < TRAFFIC_LIMIT_MIN { - return "", bce.NewBceClientError(fmt.Sprintf("TrafficLimit must between %d ~ %d, current value:%d", TRAFFIC_LIMIT_MIN, TRAFFIC_LIMIT_MAX, args.TrafficLimit)) + return "", nil, bce.NewBceClientError(fmt.Sprintf("TrafficLimit must between %d ~ %d, current value:%d", TRAFFIC_LIMIT_MIN, TRAFFIC_LIMIT_MAX, args.TrafficLimit)) } req.SetHeader(http.BCE_TRAFFIC_LIMIT, fmt.Sprintf("%d", args.TrafficLimit)) } @@ -95,17 +98,30 @@ func PutObject(cli bce.Client, bucket, object string, body *bce.Body, req.SetHeader(http.BCE_STORAGE_CLASS, args.StorageClass) } else { if len(args.StorageClass) != 0 { - return "", bce.NewBceClientError("invalid storage class value: " + + return "", nil, bce.NewBceClientError("invalid storage class value: " + args.StorageClass) } } if err := setUserMetadata(req, args.UserMeta); err != nil { - return "", err + return "", nil, err } if len(args.Process) != 0 { req.SetHeader(http.BCE_PROCESS, args.Process) + if strings.HasPrefix(args.Process, "callback") { + NeedReturnCallback = true + } + } + if len(args.CannedAcl) != 0 { + if validCannedAcl(args.CannedAcl) { + req.SetHeader(http.BCE_ACL, args.CannedAcl) + } + } + if len(args.ObjectTagging) != 0 { + if ok, encodeTagging := validObjectTagging(args.ObjectTagging); ok { + req.SetHeader(http.BCE_OBJECT_TAGGING, encodeTagging) + } } } // add content-type if not assigned by user @@ -114,14 +130,21 @@ func PutObject(cli bce.Client, bucket, object string, body *bce.Body, } resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { - return "", err + if err := SendRequest(cli, req, resp, ctx); err != nil { + return "", nil, err } if resp.IsFail() { - return "", resp.ServiceError() + return "", nil, resp.ServiceError() } defer func() { resp.Body().Close() }() - return strings.Trim(resp.Header(http.ETAG), "\""), nil + if NeedReturnCallback { + jsonBody := &PutObjectResult{} + if err := resp.ParseJsonBody(jsonBody); err != nil { + return "", nil, err + } + return strings.Trim(resp.Header(http.ETAG), "\""), jsonBody, nil + } + return strings.Trim(resp.Header(http.ETAG), "\""), nil, nil } // CopyObject - 
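// [Editorial aside, not part of the upstream diff] PutObject's return grew
// from (etag, error) to (etag, *PutObjectResult, error): the middle value is
// non-nil only when args.Process starts with "callback", in which case the
// response body is parsed into CallbackResult. Hedged sketch; the exact
// callback directive syntax is service-defined, and "callback" below is a
// stand-in, not a documented value.
func examplePutWithCallback(cli bce.Client, body *bce.Body) error {
	args := &api.PutObjectArgs{Process: "callback"} // placeholder directive
	etag, cb, err := api.PutObject(cli, "my-bucket", "obj.txt", body, args, &api.BosContext{})
	if err != nil {
		return err
	}
	fmt.Println("etag:", etag)
	if cb != nil {
		fmt.Println("callback result:", cb.Callback.Result)
	}
	return nil
}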
copy one object to a new object with new bucket and/or name. It can alse set the @@ -137,10 +160,11 @@ func PutObject(cli bce.Client, bucket, object string, body *bce.Body, // - *CopyObjectResult: the result object which contains etag and lastmodified // - error: nil if ok otherwise the specific error func CopyObject(cli bce.Client, bucket, object, source string, - args *CopyObjectArgs) (*CopyObjectResult, error) { + args *CopyObjectArgs, ctx *BosContext) (*CopyObjectResult, error) { req := &bce.BceRequest{} req.SetUri(getObjectUri(bucket, object)) req.SetMethod(http.PUT) + ctx.Bucket = bucket if len(source) == 0 { return nil, bce.NewBceClientError("copy source should not be null") } @@ -194,6 +218,10 @@ func CopyObject(cli bce.Client, bucket, object, source string, req.SetHeader(http.BCE_TRAFFIC_LIMIT, fmt.Sprintf("%d", args.TrafficLimit)) } + if validCannedAcl(args.CannedAcl) { + req.SetHeader(http.BCE_ACL, args.CannedAcl) + } + if err := setUserMetadata(req, args.UserMeta); err != nil { return nil, err } @@ -201,7 +229,7 @@ func CopyObject(cli bce.Client, bucket, object, source string, // Send request and get the result resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { @@ -220,12 +248,12 @@ func CopyObject(cli bce.Client, bucket, object, source string, // - cli: the client agent which can perform sending request // - bucket: the bucket name of the object // - object: the name of the object -// - responseHeaders: the optional response headers to get the given object +// - args: the optional args in querysring // - ranges: the optional range start and end to get the given object // RETURNS: // - *GetObjectResult: the output content result of the object // - error: nil if ok otherwise the specific error -func GetObject(cli bce.Client, bucket, object string, responseHeaders map[string]string, +func GetObject(cli bce.Client, bucket, object string, ctx *BosContext, args map[string]string, // nolint:gocyclo ranges ...int64) (*GetObjectResult, error) { if object == "" { @@ -235,13 +263,16 @@ func GetObject(cli bce.Client, bucket, object string, responseHeaders map[string req := &bce.BceRequest{} req.SetUri(getObjectUri(bucket, object)) req.SetMethod(http.GET) - + ctx.Bucket = bucket // Optional arguments settings - if responseHeaders != nil { - for k, v := range responseHeaders { + if args != nil { + for k, v := range args { if _, ok := GET_OBJECT_ALLOWED_RESPONSE_HEADERS[k]; ok { req.SetParam("response"+k, v) } + if strings.HasPrefix(k, http.BCE_PREFIX) { + req.SetParam(k, v) + } } } if len(ranges) != 0 { @@ -256,7 +287,7 @@ func GetObject(cli bce.Client, bucket, object string, responseHeaders map[string // Send request and get the result resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { @@ -336,14 +367,14 @@ func GetObject(cli bce.Client, bucket, object string, responseHeaders map[string // RETURNS: // - *GetObjectMetaResult: the result of this api // - error: nil if ok otherwise the specific error -func GetObjectMeta(cli bce.Client, bucket, object string) (*GetObjectMetaResult, error) { +func GetObjectMeta(cli bce.Client, bucket, object string, ctx *BosContext) (*GetObjectMetaResult, error) { req := &bce.BceRequest{} req.SetUri(getObjectUri(bucket, object)) req.SetMethod(http.HEAD) - + ctx.Bucket = bucket // Send request and get the result 
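// [Editorial aside, not part of the upstream diff] The renamed `args` map in
// GetObject now does double duty: keys found in
// GET_OBJECT_ALLOWED_RESPONSE_HEADERS are sent as "response<Key>" query
// params, and any key starting with http.BCE_PREFIX ("x-bce-" in this sdk's
// http constants) is forwarded as-is. Hedged sketch; both map entries below
// are illustrative assumptions, not documented values.
func exampleGetObject(cli bce.Client) (*api.GetObjectResult, error) {
	args := map[string]string{
		"ContentType":         "text/plain", // emitted as responseContentType
		"x-bce-traffic-limit": "819200",     // hypothetical x-bce-* pass-through
	}
	return api.GetObject(cli, "my-bucket", "obj.txt", &api.BosContext{}, args)
}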
resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { @@ -427,13 +458,13 @@ func GetObjectMeta(cli bce.Client, bucket, object string) (*GetObjectMetaResult, // RETURNS: // - *SelectObjectResult: the output select content result of the object // - error: nil if ok otherwise the specific error -func SelectObject(cli bce.Client, bucket, object string, args *SelectObjectArgs) (*SelectObjectResult, error) { +func SelectObject(cli bce.Client, bucket, object string, args *SelectObjectArgs, ctx *BosContext) (*SelectObjectResult, error) { req := &bce.BceRequest{} req.SetUri(getObjectUri(bucket, object)) req.SetMethod(http.POST) req.SetParam("select", "") req.SetParam("type", args.SelectType) - + ctx.Bucket = bucket jsonBytes, jsonErr := json.Marshal(args) if jsonErr != nil { return nil, jsonErr @@ -446,7 +477,7 @@ func SelectObject(cli bce.Client, bucket, object string, args *SelectObjectArgs) // Send request and get the result resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { @@ -471,11 +502,12 @@ func SelectObject(cli bce.Client, bucket, object string, args *SelectObjectArgs) // - *FetchObjectArgs: the result of this api // - error: nil if ok otherwise the specific error func FetchObject(cli bce.Client, bucket, object, source string, - args *FetchObjectArgs) (*FetchObjectResult, error) { + args *FetchObjectArgs, ctx *BosContext) (*FetchObjectResult, error) { req := &bce.BceRequest{} req.SetUri(getObjectUri(bucket, object)) req.SetMethod(http.POST) req.SetParam("fetch", "") + ctx.Bucket = bucket if len(source) == 0 { return nil, bce.NewBceClientError("invalid fetch source value: " + source) } @@ -502,7 +534,7 @@ func FetchObject(cli bce.Client, bucket, object, source string, // Send request and get the result resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { @@ -527,11 +559,12 @@ func FetchObject(cli bce.Client, bucket, object, source string, // - *AppendObjectResult: the result status for this api // - error: nil if ok otherwise the specific error func AppendObject(cli bce.Client, bucket, object string, content *bce.Body, - args *AppendObjectArgs) (*AppendObjectResult, error) { + args *AppendObjectArgs, ctx *BosContext) (*AppendObjectResult, error) { req := &bce.BceRequest{} req.SetUri(getObjectUri(bucket, object)) req.SetMethod(http.POST) req.SetParam("append", "") + ctx.Bucket = bucket if content == nil { return nil, bce.NewBceClientError("AppendObject body should not be emtpy") } @@ -581,7 +614,7 @@ func AppendObject(cli bce.Client, bucket, object string, content *bce.Body, // Send request and get the result resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { @@ -619,13 +652,13 @@ func AppendObject(cli bce.Client, bucket, object string, content *bce.Body, // - object: the name of the object // RETURNS: // - error: nil if ok otherwise the specific error -func DeleteObject(cli bce.Client, bucket, object string) error { +func DeleteObject(cli bce.Client, bucket, object string, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getObjectUri(bucket, object)) req.SetMethod(http.DELETE) - + 
ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -645,12 +678,13 @@ func DeleteObject(cli bce.Client, bucket, object string) error { // - *DeleteMultipleObjectsResult: the objects failed to delete // - error: nil if ok otherwise the specific error func DeleteMultipleObjects(cli bce.Client, bucket string, - objectListStream *bce.Body) (*DeleteMultipleObjectsResult, error) { + objectListStream *bce.Body, ctx *BosContext) (*DeleteMultipleObjectsResult, error) { req := &bce.BceRequest{} req.SetUri(getBucketUri(bucket)) req.SetMethod(http.POST) req.SetParam("delete", "") req.SetHeader(http.CONTENT_TYPE, "application/json; charset=utf-8") + ctx.Bucket = bucket if objectListStream == nil { return nil, bce.NewBceClientError("DeleteMultipleObjects body should not be emtpy") } @@ -660,13 +694,18 @@ func DeleteMultipleObjects(cli bce.Client, bucket string, req.SetBody(objectListStream) resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { return nil, resp.ServiceError() } jsonBody := &DeleteMultipleObjectsResult{} + + if resp.Header(http.CONTENT_LENGTH) == "0" { + resp.Body().Close() + return jsonBody, nil + } if err := resp.ParseJsonBody(jsonBody); err != nil { return nil, err } @@ -772,12 +811,12 @@ func GeneratePresignedUrlInternal(conf *bce.BceClientConfiguration, signer auth. // RETURNS: // - error: nil if success otherwise the specific error func PutObjectAcl(cli bce.Client, bucket, object, cannedAcl string, - grantRead, grantFullControl []string, aclBody *bce.Body) error { + grantRead, grantFullControl []string, aclBody *bce.Body, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getObjectUri(bucket, object)) req.SetMethod(http.PUT) req.SetParam("acl", "") - + ctx.Bucket = bucket // Joiner for generate the user id list string for grant acl header joiner := func(ids []string) string { for i := range ids { @@ -813,7 +852,7 @@ func PutObjectAcl(cli bce.Client, bucket, object, cannedAcl string, // Do sending request resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -832,14 +871,14 @@ func PutObjectAcl(cli bce.Client, bucket, object, cannedAcl string, // RETURNS: // - result: the object acl result object // - error: nil if success otherwise the specific error -func GetObjectAcl(cli bce.Client, bucket, object string) (*GetObjectAclResult, error) { +func GetObjectAcl(cli bce.Client, bucket, object string, ctx *BosContext) (*GetObjectAclResult, error) { req := &bce.BceRequest{} req.SetUri(getObjectUri(bucket, object)) req.SetMethod(http.GET) req.SetParam("acl", "") - + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return nil, err } if resp.IsFail() { @@ -860,13 +899,14 @@ func GetObjectAcl(cli bce.Client, bucket, object string) (*GetObjectAclResult, e // - object: the object name // RETURNS: // - error: nil if success otherwise the specific error -func DeleteObjectAcl(cli bce.Client, bucket, object string) error { +func DeleteObjectAcl(cli bce.Client, bucket, object string, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getObjectUri(bucket, object)) req.SetMethod(http.DELETE) 
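// [Editorial aside, not part of the upstream diff] The Content-Length == "0"
// branch above matters to callers: when every key is deleted the service
// replies with an empty body, and DeleteMultipleObjects now short-circuits
// with an empty, non-nil result instead of failing on JSON decode. Hedged
// sketch; the payload shape follows the usual BOS batch-delete convention
// and is an assumption here.
func exampleBatchDelete(cli bce.Client) error {
	body, err := bce.NewBodyFromString(`{"objects":[{"key":"a.txt"},{"key":"b.txt"}]}`)
	if err != nil {
		return err
	}
	res, err := api.DeleteMultipleObjects(cli, "my-bucket", body, &api.BosContext{})
	if err != nil {
		return err
	}
	_ = res // a populated result lists only the keys that failed to delete
	return nil
}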
req.SetParam("acl", "") + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -885,16 +925,16 @@ func DeleteObjectAcl(cli bce.Client, bucket, object string) error { // - args: the restore args // RETURNS: // - error: nil if success otherwise the specific error -func RestoreObject(cli bce.Client, bucket string, object string, args ArchiveRestoreArgs) error { +func RestoreObject(cli bce.Client, bucket string, object string, args ArchiveRestoreArgs, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getObjectUri(bucket, object)) req.SetParam("restore", "") req.SetMethod(http.POST) req.SetHeader(http.BCE_RESTORE_DAYS, strconv.Itoa(args.RestoreDays)) req.SetHeader(http.BCE_RESTORE_TIER, args.RestoreTier) - + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -914,11 +954,12 @@ func RestoreObject(cli bce.Client, bucket string, object string, args ArchiveRes // - symlinkArgs: the optional arguments of this api // RETURNS: // - error: nil if ok otherwise the specific error -func PutObjectSymlink(cli bce.Client, bucket string, object string, symlinkKey string, symlinkArgs *PutSymlinkArgs) error { +func PutObjectSymlink(cli bce.Client, bucket string, object string, symlinkKey string, symlinkArgs *PutSymlinkArgs, ctx *BosContext) error { req := &bce.BceRequest{} req.SetUri(getObjectUri(bucket, symlinkKey)) req.SetParam("symlink", "") req.SetMethod(http.PUT) + ctx.Bucket = bucket if symlinkArgs != nil { if len(symlinkArgs.ForbidOverwrite) != 0 { if !validForbidOverwrite(symlinkArgs.ForbidOverwrite) { @@ -942,11 +983,14 @@ func PutObjectSymlink(cli bce.Client, bucket string, object string, symlinkKey s return err } } + if len(symlinkArgs.SymlinkBucket) != 0 { + req.SetHeader(http.BCE_SYMLINK_BUCKET, symlinkArgs.SymlinkBucket) + } } req.SetHeader(http.BCE_SYMLINK_TARGET, object) resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return err } if resp.IsFail() { @@ -965,18 +1009,99 @@ func PutObjectSymlink(cli bce.Client, bucket string, object string, symlinkKey s // RETURNS: // - string: the name of the target object // - error: nil if ok otherwise the specific error -func GetObjectSymlink(cli bce.Client, bucket string, symlinkKey string) (string, error) { +func GetObjectSymlink(cli bce.Client, bucket string, symlinkKey string, ctx *BosContext) (string, error) { req := &bce.BceRequest{} req.SetUri(getObjectUri(bucket, symlinkKey)) req.SetParam("symlink", "") req.SetMethod(http.GET) + ctx.Bucket = bucket resp := &bce.BceResponse{} - if err := SendRequest(cli, req, resp); err != nil { + if err := SendRequest(cli, req, resp, ctx); err != nil { return "", err } if resp.IsFail() { return "", resp.ServiceError() } defer func() { resp.Body().Close() }() + if resp.Header(http.BCE_SYMLINK_BUCKET) != "" { + result := BOS_CONFIG_PREFIX + resp.Header(http.BCE_SYMLINK_BUCKET) + "/" + resp.Header(http.BCE_SYMLINK_TARGET) + return result, nil + } return resp.Header(http.BCE_SYMLINK_TARGET), nil } + +// PutObjectTag - set tag for given object +// +// PARAMS: +// - cli: the client agent which can perform sending request +// - bucket: the bucket name of the object +// - object: the name of the object +// - putObjectTagArgs: the arguments of 
object tag +// RETURNS: +// - error: nil if ok otherwise the specific error + +func PutObjectTag(cli bce.Client, bucket, object string, putObjectTagArgs *PutObjectTagArgs, ctx *BosContext) error { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + req.SetMethod(http.PUT) + req.SetParam("tagging", "") + ctx.Bucket = bucket + reqByte, _ := json.Marshal(putObjectTagArgs) + body, err := bce.NewBodyFromString(string(reqByte)) + if err != nil { + return err + } + req.SetBody(body) + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp, ctx); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} + +func GetObjectTag(cli bce.Client, bucket, object string, ctx *BosContext) (map[string]interface{}, error) { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + req.SetMethod(http.GET) + req.SetParam("tagging", "") + ctx.Bucket = bucket + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp, ctx); err != nil { + return nil, err + } + if resp.IsFail() { + return nil, resp.ServiceError() + } + defer func() { resp.Body().Close() }() + bodyBytes, err := ioutil.ReadAll(resp.Body()) + if err != nil { + return nil, err + } + + result, err := ParseObjectTagResult(bodyBytes) + if err != nil { + return nil, err + } + return result, nil +} + +func DeleteObjectTag(cli bce.Client, bucket, object string, ctx *BosContext) error { + req := &bce.BceRequest{} + req.SetUri(getObjectUri(bucket, object)) + req.SetMethod(http.DELETE) + req.SetParam("tagging", "") + ctx.Bucket = bucket + resp := &bce.BceResponse{} + if err := SendRequest(cli, req, resp, ctx); err != nil { + return err + } + if resp.IsFail() { + return resp.ServiceError() + } + defer func() { resp.Body().Close() }() + return nil +} diff --git a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/util.go b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/util.go index d5ce8bc6ee54a..31ba002b7e089 100644 --- a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/util.go +++ b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/util.go @@ -18,7 +18,11 @@ package api import ( "bytes" + "encoding/json" + "fmt" + "net" net_http "net/http" + "net/url" "strings" "github.com/baidubce/bce-sdk-go/bce" @@ -59,6 +63,9 @@ const ( FORBID_OVERWRITE_FALSE = "false" FORBID_OVERWRITE_TRUE = "true" + + NAMESPACE_BUCKET = "namespace" + BOS_CONFIG_PREFIX = "bos://" ) var DEFAULT_CNAME_LIKE_LIST = []string{ @@ -150,6 +157,29 @@ func validCannedAcl(val string) bool { return false } +func validObjectTagging(tagging string) (bool, string) { + if len(tagging) > 4000 { + return false, "" + } + encodeTagging := []string{} + pair := strings.Split(tagging, "&") + for _, p := range pair { + kv := strings.Split(p, "=") + if len(kv) != 2 { + return false, "" + } + key := kv[0] + value := kv[1] + encodeKey := url.QueryEscape(key) + encodeValue := url.QueryEscape(value) + if len(encodeKey) > 128 || len(encodeValue) > 256 { + return false, "" + } + encodeTagging = append(encodeTagging, encodeKey+"="+encodeValue) + } + return true, strings.Join(encodeTagging, "&") +} + func toHttpHeaderKey(key string) string { var result bytes.Buffer needToUpper := true @@ -235,22 +265,18 @@ func isCnameLikeHost(host string) bool { return true } } + if isVirtualHost(host) { + return true + } return false } -func SendRequest(cli bce.Client, req *bce.BceRequest, resp *bce.BceResponse) error { +func SendRequest(cli bce.Client, req *bce.BceRequest, resp 
*bce.BceResponse, ctx *BosContext) error { var ( err error need_retry bool ) - - req.SetEndpoint(cli.GetBceClientConfig().Endpoint) - origin_uri := req.Uri() - // set uri for cname or cdn endpoint - if cli.GetBceClientConfig().CnameEnabled || isCnameLikeHost(cli.GetBceClientConfig().Endpoint) { - req.SetUri(getCnameUri(origin_uri)) - } - + setUriAndEndpoint(cli, req, ctx, cli.GetBceClientConfig().Endpoint) if err = cli.SendRequest(req, resp); err != nil { if serviceErr, isServiceErr := err.(*bce.BceServiceError); isServiceErr { if serviceErr.StatusCode == net_http.StatusInternalServerError || @@ -265,10 +291,7 @@ func SendRequest(cli bce.Client, req *bce.BceRequest, resp *bce.BceResponse) err } // retry backup endpoint if need_retry && cli.GetBceClientConfig().BackupEndpoint != "" { - req.SetEndpoint(cli.GetBceClientConfig().BackupEndpoint) - if cli.GetBceClientConfig().CnameEnabled || isCnameLikeHost(cli.GetBceClientConfig().BackupEndpoint) { - req.SetUri(getCnameUri(origin_uri)) - } + setUriAndEndpoint(cli, req, ctx, cli.GetBceClientConfig().BackupEndpoint) if err = cli.SendRequest(req, resp); err != nil { return err } @@ -277,6 +300,102 @@ func SendRequest(cli bce.Client, req *bce.BceRequest, resp *bce.BceResponse) err return err } +func isVirtualHost(host string) bool { + domain := getDomainWithoutPort(host) + arr := strings.Split(domain, ".") + if len(arr) != 4 { + return false + } + // bucket max length is 64 + if len(arr[0]) == 0 || len(arr[0]) > 64 { + return false + } + if arr[2] != "bcebos" || arr[3] != "com" { + return false + } + return true +} + +func isIpHost(host string) bool { + domain := getDomainWithoutPort(host) + validIp := net.ParseIP(domain) + return validIp != nil +} + +func isBosHost(host string) bool { + domain := getDomainWithoutPort(host) + arr := strings.Split(domain, ".") + if len(arr) != 3 { + return false + } + if arr[1] != "bcebos" || arr[2] != "com" { + return false + } + return true +} + +func getDomainWithoutPort(host string) string { + end := 0 + if end = strings.Index(host, ":"); end == -1 { + end = len(host) + } + return host[:end] +} + +func needCompatibleBucketAndEndpoint(bucket, endpoint string) bool { + if bucket == "" { + return false + } + if !isVirtualHost(endpoint) { + return false + } + if strings.Split(endpoint, ".")[0] == bucket { + return false + } + // bucket from sdk and from endpoint is different + return true +} + +// replace endpoint by bucket, only effective when two bucket are in same region, otherwise server return NoSuchBucket error +func replaceEndpointByBucket(bucket, endpoint string) string { + arr := strings.Split(endpoint, ".") + arr[0] = bucket + return strings.Join(arr, ".") +} + +func setUriAndEndpoint(cli bce.Client, req *bce.BceRequest, ctx *BosContext, endpoint string) { + origin_uri := req.Uri() + bucket := ctx.Bucket + // deal with protocal + if strings.HasPrefix(endpoint, "https://") { + req.SetProtocol(bce.HTTPS_PROTOCAL) + endpoint = strings.TrimPrefix(endpoint, "https://") + } else if strings.HasPrefix(endpoint, "http://") { + req.SetProtocol(bce.DEFAULT_PROTOCOL) + endpoint = strings.TrimPrefix(endpoint, "http://") + } + // set uri, endpoint for cname, cdn, virtual host + if cli.GetBceClientConfig().CnameEnabled || isCnameLikeHost(endpoint) { + req.SetEndpoint(endpoint) + // if virtual host endpoint and bucket is not empty, compatible bucket and endpoint + if needCompatibleBucketAndEndpoint(bucket, endpoint) { + req.SetEndpoint(replaceEndpointByBucket(bucket, endpoint)) + } + req.SetUri(getCnameUri(origin_uri)) + 
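// [Editorial aside, not part of the upstream diff] Quick reference for the
// new host classifiers, read straight off their label checks (ports are
// stripped by getDomainWithoutPort first):
//
//	isVirtualHost("bkt.bj.bcebos.com") == true  // 4 labels ending bcebos.com
//	isVirtualHost("bj.bcebos.com")     == false // only 3 labels
//	isBosHost("bj.bcebos.com")         == true  // bare region endpoint
//	isIpHost("10.0.0.1:8080")          == true  // IP once the port is gone
//
// needCompatibleBucketAndEndpoint fires only when the sdk-side bucket differs
// from the bucket baked into a virtual-host endpoint; replaceEndpointByBucket
// then swaps the first label, which only works for same-region buckets
// (otherwise the server answers NoSuchBucket, as the code comment warns).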
} else if isIpHost(endpoint) { + // set endpoint for ip host + req.SetEndpoint(endpoint) + } else if isBosHost(endpoint) { + // endpoint is xx.bcebos.com, set endpoint depends on PathStyleEnable + if bucket != "" && !ctx.PathStyleEnable { + req.SetEndpoint(bucket + "." + endpoint) + req.SetUri(getCnameUri(origin_uri)) + } else { + req.SetEndpoint(endpoint) + } + } +} + func getDefaultContentType(object string) string { dot := strings.LastIndex(object, ".") if dot == -1 { @@ -290,3 +409,27 @@ func getDefaultContentType(object string) string { return "application/octet-stream" } + +func ParseObjectTagResult(rawData []byte) (map[string]interface{}, error) { + var data map[string]interface{} + err := json.Unmarshal(rawData, &data) + if err != nil { + return nil, err + } + + tagSet, ok := data["tagSet"].([]interface{}) + if !ok || len(tagSet) == 0 { + return nil, fmt.Errorf("decode tagSet error") + } + + tagInfoMap, ok := tagSet[0].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("decode tagInfo error") + } + + tags, ok := tagInfoMap["tagInfo"].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("decode tags error") + } + return tags, nil +} diff --git a/vendor/github.com/baidubce/bce-sdk-go/services/bos/client.go b/vendor/github.com/baidubce/bce-sdk-go/services/bos/client.go index b449a869dbae8..aec79b24b2f06 100644 --- a/vendor/github.com/baidubce/bce-sdk-go/services/bos/client.go +++ b/vendor/github.com/baidubce/bce-sdk-go/services/bos/client.go @@ -23,11 +23,13 @@ import ( "errors" "fmt" "io" + "math" "net/http" "os" "github.com/baidubce/bce-sdk-go/auth" "github.com/baidubce/bce-sdk-go/bce" + sdk_http "github.com/baidubce/bce-sdk-go/http" "github.com/baidubce/bce-sdk-go/services/bos/api" "github.com/baidubce/bce-sdk-go/services/sts" "github.com/baidubce/bce-sdk-go/util/log" @@ -52,6 +54,7 @@ type Client struct { // Fileds that used in parallel operation for BOS service MaxParallel int64 MultipartSize int64 + BosContext *api.BosContext } // BosClientConfiguration defines the config components structure by user. @@ -60,6 +63,7 @@ type BosClientConfiguration struct { Sk string Endpoint string RedirectDisabled bool + PathStyleEnable bool } // NewClient make the BOS service client with default configuration. 
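// [Editorial aside, not part of the upstream diff] Wiring the new
// PathStyleEnable flag end to end: the config value lands in the client's
// default BosContext, and per setUriAndEndpoint above a bare region endpoint
// such as bj.bcebos.com is promoted to <bucket>.bj.bcebos.com only when path
// style is off. Credentials and endpoint below are placeholders.
func exampleNewClient() (*bos.Client, error) {
	return bos.NewClientWithConfig(&bos.BosClientConfiguration{
		Ak:               "<access-key>",
		Sk:               "<secret-key>",
		Endpoint:         "bj.bcebos.com",
		RedirectDisabled: false,
		PathStyleEnable:  true, // keep the bucket in the URI path, not the host
	})
}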
@@ -70,6 +74,7 @@ func NewClient(ak, sk, endpoint string) (*Client, error) { Sk: sk, Endpoint: endpoint, RedirectDisabled: false, + PathStyleEnable: false, }) } @@ -131,9 +136,11 @@ func NewClientWithConfig(config *BosClientConfiguration) (*Client, error) { ConnectionTimeoutInMillis: bce.DEFAULT_CONNECTION_TIMEOUT_IN_MILLIS, RedirectDisabled: config.RedirectDisabled} v1Signer := &auth.BceV1Signer{} - + defaultContext := &api.BosContext{ + PathStyleEnable: config.PathStyleEnable, + } client := &Client{bce.NewBceClient(defaultConf, v1Signer), - DEFAULT_MAX_PARALLEL, DEFAULT_MULTIPART_SIZE} + DEFAULT_MAX_PARALLEL, DEFAULT_MULTIPART_SIZE, defaultContext} return client, nil } @@ -143,7 +150,7 @@ func NewClientWithConfig(config *BosClientConfiguration) (*Client, error) { // - *api.ListBucketsResult: the all buckets // - error: the return error if any occurs func (c *Client) ListBuckets() (*api.ListBucketsResult, error) { - return api.ListBuckets(c) + return api.ListBuckets(c, c.BosContext) } // ListObjects - list all objects of the given bucket @@ -156,7 +163,7 @@ func (c *Client) ListBuckets() (*api.ListBucketsResult, error) { // - error: the return error if any occurs func (c *Client) ListObjects(bucket string, args *api.ListObjectsArgs) (*api.ListObjectsResult, error) { - return api.ListObjects(c, bucket, args) + return api.ListObjects(c, bucket, args, c.BosContext) } // SimpleListObjects - list all objects of the given bucket with simple arguments @@ -173,7 +180,7 @@ func (c *Client) ListObjects(bucket string, func (c *Client) SimpleListObjects(bucket, prefix string, maxKeys int, marker, delimiter string) (*api.ListObjectsResult, error) { args := &api.ListObjectsArgs{delimiter, marker, maxKeys, prefix} - return api.ListObjects(c, bucket, args) + return api.ListObjects(c, bucket, args, c.BosContext) } // HeadBucket - test the given bucket existed and access authority @@ -183,7 +190,8 @@ func (c *Client) SimpleListObjects(bucket, prefix string, maxKeys int, marker, // RETURNS: // - error: nil if exists and have authority otherwise the specific error func (c *Client) HeadBucket(bucket string) error { - return api.HeadBucket(c, bucket) + err, _ := api.HeadBucket(c, bucket, c.BosContext) + return err } // DoesBucketExist - test the given bucket existed or not @@ -194,7 +202,7 @@ func (c *Client) HeadBucket(bucket string) error { // - bool: true if exists and false if not exists or occurs error // - error: nil if exists or not exist, otherwise the specific error func (c *Client) DoesBucketExist(bucket string) (bool, error) { - err := api.HeadBucket(c, bucket) + err, _ := api.HeadBucket(c, bucket, c.BosContext) if err == nil { return true, nil } @@ -209,6 +217,21 @@ func (c *Client) DoesBucketExist(bucket string) (bool, error) { return false, err } +//IsNsBucket - test the given bucket is namespace bucket or not +func (c *Client) IsNsBucket(bucket string) bool { + err, resp := api.HeadBucket(c, bucket, c.BosContext) + if err == nil && resp.Header(sdk_http.BCE_BUCKET_TYPE) == api.NAMESPACE_BUCKET { + return true + } + if realErr, ok := err.(*bce.BceServiceError); ok { + if realErr.StatusCode == http.StatusForbidden && + resp.Header(sdk_http.BCE_BUCKET_TYPE) == api.NAMESPACE_BUCKET { + return true + } + } + return false +} + // PutBucket - create a new bucket // // PARAMS: @@ -217,7 +240,11 @@ func (c *Client) DoesBucketExist(bucket string) (bool, error) { // - string: the location of the new bucket if create success // - error: nil if create success otherwise the specific error func (c *Client) 
PutBucket(bucket string) (string, error) { - return api.PutBucket(c, bucket) + return api.PutBucket(c, bucket, nil, c.BosContext) +} + +func (c *Client) PutBucketWithArgs(bucket string, args *api.PutBucketArgs) (string, error) { + return api.PutBucket(c, bucket, args, c.BosContext) } // DeleteBucket - delete a empty bucket @@ -227,7 +254,7 @@ func (c *Client) PutBucket(bucket string) (string, error) { // RETURNS: // - error: nil if delete success otherwise the specific error func (c *Client) DeleteBucket(bucket string) error { - return api.DeleteBucket(c, bucket) + return api.DeleteBucket(c, bucket, c.BosContext) } // GetBucketLocation - get the location fo the given bucket @@ -238,7 +265,7 @@ func (c *Client) DeleteBucket(bucket string) error { // - string: the location of the bucket // - error: nil if success otherwise the specific error func (c *Client) GetBucketLocation(bucket string) (string, error) { - return api.GetBucketLocation(c, bucket) + return api.GetBucketLocation(c, bucket, c.BosContext) } // PutBucketAcl - set the acl of the given bucket with acl body stream @@ -249,7 +276,7 @@ func (c *Client) GetBucketLocation(bucket string) (string, error) { // RETURNS: // - error: nil if success otherwise the specific error func (c *Client) PutBucketAcl(bucket string, aclBody *bce.Body) error { - return api.PutBucketAcl(c, bucket, "", aclBody) + return api.PutBucketAcl(c, bucket, "", aclBody, c.BosContext) } // PutBucketAclFromCanned - set the canned acl of the given bucket @@ -260,7 +287,7 @@ func (c *Client) PutBucketAcl(bucket string, aclBody *bce.Body) error { // RETURNS: // - error: nil if success otherwise the specific error func (c *Client) PutBucketAclFromCanned(bucket, cannedAcl string) error { - return api.PutBucketAcl(c, bucket, cannedAcl, nil) + return api.PutBucketAcl(c, bucket, cannedAcl, nil, c.BosContext) } // PutBucketAclFromFile - set the acl of the given bucket with acl json file name @@ -275,7 +302,7 @@ func (c *Client) PutBucketAclFromFile(bucket, aclFile string) error { if err != nil { return err } - return api.PutBucketAcl(c, bucket, "", body) + return api.PutBucketAcl(c, bucket, "", body, c.BosContext) } // PutBucketAclFromString - set the acl of the given bucket with acl json string @@ -290,7 +317,7 @@ func (c *Client) PutBucketAclFromString(bucket, aclString string) error { if err != nil { return err } - return api.PutBucketAcl(c, bucket, "", body) + return api.PutBucketAcl(c, bucket, "", body, c.BosContext) } // PutBucketAclFromStruct - set the acl of the given bucket with acl data structure @@ -309,7 +336,7 @@ func (c *Client) PutBucketAclFromStruct(bucket string, aclObj *api.PutBucketAclA if err != nil { return err } - return api.PutBucketAcl(c, bucket, "", body) + return api.PutBucketAcl(c, bucket, "", body, c.BosContext) } // GetBucketAcl - get the acl of the given bucket @@ -320,7 +347,7 @@ func (c *Client) PutBucketAclFromStruct(bucket string, aclObj *api.PutBucketAclA // - *api.GetBucketAclResult: the result of the bucket acl // - error: nil if success otherwise the specific error func (c *Client) GetBucketAcl(bucket string) (*api.GetBucketAclResult, error) { - return api.GetBucketAcl(c, bucket) + return api.GetBucketAcl(c, bucket, c.BosContext) } // PutBucketLogging - set the loging setting of the given bucket with json stream @@ -331,7 +358,7 @@ func (c *Client) GetBucketAcl(bucket string) (*api.GetBucketAclResult, error) { // RETURNS: // - error: nil if success otherwise the specific error func (c *Client) PutBucketLogging(bucket string, body 
*bce.Body) error { - return api.PutBucketLogging(c, bucket, body) + return api.PutBucketLogging(c, bucket, body, c.BosContext) } // PutBucketLoggingFromString - set the loging setting of the given bucket with json string @@ -346,7 +373,7 @@ func (c *Client) PutBucketLoggingFromString(bucket, logging string) error { if err != nil { return err } - return api.PutBucketLogging(c, bucket, body) + return api.PutBucketLogging(c, bucket, body, c.BosContext) } // PutBucketLoggingFromStruct - set the loging setting of the given bucket with args object @@ -365,7 +392,7 @@ func (c *Client) PutBucketLoggingFromStruct(bucket string, obj *api.PutBucketLog if err != nil { return err } - return api.PutBucketLogging(c, bucket, body) + return api.PutBucketLogging(c, bucket, body, c.BosContext) } // GetBucketLogging - get the logging setting of the given bucket @@ -376,7 +403,7 @@ func (c *Client) PutBucketLoggingFromStruct(bucket string, obj *api.PutBucketLog // - *api.GetBucketLoggingResult: the logging setting of the bucket // - error: nil if success otherwise the specific error func (c *Client) GetBucketLogging(bucket string) (*api.GetBucketLoggingResult, error) { - return api.GetBucketLogging(c, bucket) + return api.GetBucketLogging(c, bucket, c.BosContext) } // DeleteBucketLogging - delete the logging setting of the given bucket @@ -386,7 +413,7 @@ func (c *Client) GetBucketLogging(bucket string) (*api.GetBucketLoggingResult, e // RETURNS: // - error: nil if success otherwise the specific error func (c *Client) DeleteBucketLogging(bucket string) error { - return api.DeleteBucketLogging(c, bucket) + return api.DeleteBucketLogging(c, bucket, c.BosContext) } // PutBucketLifecycle - set the lifecycle rule of the given bucket with raw stream @@ -397,7 +424,7 @@ func (c *Client) DeleteBucketLogging(bucket string) error { // RETURNS: // - error: nil if success otherwise the specific error func (c *Client) PutBucketLifecycle(bucket string, lifecycle *bce.Body) error { - return api.PutBucketLifecycle(c, bucket, lifecycle) + return api.PutBucketLifecycle(c, bucket, lifecycle, c.BosContext) } // PutBucketLifecycleFromString - set the lifecycle rule of the given bucket with string @@ -412,7 +439,7 @@ func (c *Client) PutBucketLifecycleFromString(bucket, lifecycle string) error { if err != nil { return err } - return api.PutBucketLifecycle(c, bucket, body) + return api.PutBucketLifecycle(c, bucket, body, c.BosContext) } // GetBucketLifecycle - get the lifecycle rule of the given bucket @@ -423,7 +450,7 @@ func (c *Client) PutBucketLifecycleFromString(bucket, lifecycle string) error { // - *api.GetBucketLifecycleResult: the lifecycle rule of the bucket // - error: nil if success otherwise the specific error func (c *Client) GetBucketLifecycle(bucket string) (*api.GetBucketLifecycleResult, error) { - return api.GetBucketLifecycle(c, bucket) + return api.GetBucketLifecycle(c, bucket, c.BosContext) } // DeleteBucketLifecycle - delete the lifecycle rule of the given bucket @@ -433,7 +460,7 @@ func (c *Client) GetBucketLifecycle(bucket string) (*api.GetBucketLifecycleResul // RETURNS: // - error: nil if success otherwise the specific error func (c *Client) DeleteBucketLifecycle(bucket string) error { - return api.DeleteBucketLifecycle(c, bucket) + return api.DeleteBucketLifecycle(c, bucket, c.BosContext) } // PutBucketStorageclass - set the storage class of the given bucket @@ -444,7 +471,7 @@ func (c *Client) DeleteBucketLifecycle(bucket string) error { // RETURNS: // - error: nil if success otherwise the specific error 
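// [Editorial aside, not part of the upstream diff] Pattern across these
// client.go hunks: every wrapper forwards the client's own c.BosContext into
// the api layer, so PathStyleEnable chosen at construction time governs all
// calls. Two subtleties worth flagging: api.HeadBucket now returns
// (error, *BceResponse) in that unusual order, and the new IsNsBucket helper
// inspects the bucket-type response header (sdk_http.BCE_BUCKET_TYPE) even on
// a 403 so namespace buckets are still recognized. Hedged usage sketch:
//
//	c, _ := bos.NewClient("<ak>", "<sk>", "bj.bcebos.com")
//	if c.IsNsBucket("my-bucket") {
//		// hierarchical-namespace bucket: adjust listing semantics accordingly
//	}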
func (c *Client) PutBucketStorageclass(bucket, storageClass string) error { - return api.PutBucketStorageclass(c, bucket, storageClass) + return api.PutBucketStorageclass(c, bucket, storageClass, c.BosContext) } // GetBucketStorageclass - get the storage class of the given bucket @@ -455,7 +482,7 @@ func (c *Client) PutBucketStorageclass(bucket, storageClass string) error { // - string: the storage class string value // - error: nil if success otherwise the specific error func (c *Client) GetBucketStorageclass(bucket string) (string, error) { - return api.GetBucketStorageclass(c, bucket) + return api.GetBucketStorageclass(c, bucket, c.BosContext) } // PutBucketReplication - set the bucket replication config of different region @@ -467,7 +494,7 @@ func (c *Client) GetBucketStorageclass(bucket string) (string, error) { // RETURNS: // - error: nil if success otherwise the specific error func (c *Client) PutBucketReplication(bucket string, replicationConf *bce.Body, replicationRuleId string) error { - return api.PutBucketReplication(c, bucket, replicationConf, replicationRuleId) + return api.PutBucketReplication(c, bucket, replicationConf, replicationRuleId, c.BosContext) } // PutBucketReplicationFromFile - set the bucket replication config with json file name @@ -483,7 +510,7 @@ func (c *Client) PutBucketReplicationFromFile(bucket, confFile string, replicati if err != nil { return err } - return api.PutBucketReplication(c, bucket, body, replicationRuleId) + return api.PutBucketReplication(c, bucket, body, replicationRuleId, c.BosContext) } // PutBucketReplicationFromString - set the bucket replication config with json string @@ -499,7 +526,7 @@ func (c *Client) PutBucketReplicationFromString(bucket, confString string, repli if err != nil { return err } - return api.PutBucketReplication(c, bucket, body, replicationRuleId) + return api.PutBucketReplication(c, bucket, body, replicationRuleId, c.BosContext) } // PutBucketReplicationFromStruct - set the bucket replication config with struct @@ -520,7 +547,7 @@ func (c *Client) PutBucketReplicationFromStruct(bucket string, if err != nil { return err } - return api.PutBucketReplication(c, bucket, body, replicationRuleId) + return api.PutBucketReplication(c, bucket, body, replicationRuleId, c.BosContext) } // GetBucketReplication - get the bucket replication config of the given bucket @@ -532,7 +559,7 @@ func (c *Client) PutBucketReplicationFromStruct(bucket string, // - *api.GetBucketReplicationResult: the result of the bucket replication config // - error: nil if success otherwise the specific error func (c *Client) GetBucketReplication(bucket string, replicationRuleId string) (*api.GetBucketReplicationResult, error) { - return api.GetBucketReplication(c, bucket, replicationRuleId) + return api.GetBucketReplication(c, bucket, replicationRuleId, c.BosContext) } // ListBucketReplication - get all replication config of the given bucket @@ -543,7 +570,7 @@ func (c *Client) GetBucketReplication(bucket string, replicationRuleId string) ( // - *api.ListBucketReplicationResult: the list of the bucket replication config // - error: nil if success otherwise the specific error func (c *Client) ListBucketReplication(bucket string) (*api.ListBucketReplicationResult, error) { - return api.ListBucketReplication(c, bucket) + return api.ListBucketReplication(c, bucket, c.BosContext) } // DeleteBucketReplication - delete the bucket replication config of the given bucket @@ -554,7 +581,7 @@ func (c *Client) ListBucketReplication(bucket string) 
(*api.ListBucketReplicatio // RETURNS: // - error: nil if success otherwise the specific error func (c *Client) DeleteBucketReplication(bucket string, replicationRuleId string) error { - return api.DeleteBucketReplication(c, bucket, replicationRuleId) + return api.DeleteBucketReplication(c, bucket, replicationRuleId, c.BosContext) } // GetBucketReplicationProgress - get the bucket replication process of the given bucket @@ -567,7 +594,7 @@ func (c *Client) DeleteBucketReplication(bucket string, replicationRuleId string // - error: nil if success otherwise the specific error func (c *Client) GetBucketReplicationProgress(bucket string, replicationRuleId string) ( *api.GetBucketReplicationProgressResult, error) { - return api.GetBucketReplicationProgress(c, bucket, replicationRuleId) + return api.GetBucketReplicationProgress(c, bucket, replicationRuleId, c.BosContext) } // PutBucketEncryption - set the bucket encryption config of the given bucket @@ -578,7 +605,7 @@ func (c *Client) GetBucketReplicationProgress(bucket string, replicationRuleId s // RETURNS: // - error: nil if success otherwise the specific error func (c *Client) PutBucketEncryption(bucket, algorithm string) error { - return api.PutBucketEncryption(c, bucket, algorithm) + return api.PutBucketEncryption(c, bucket, algorithm, c.BosContext) } // GetBucketEncryption - get the bucket encryption config @@ -589,7 +616,7 @@ func (c *Client) PutBucketEncryption(bucket, algorithm string) error { // - string: the encryption algorithm name // - error: nil if success otherwise the specific error func (c *Client) GetBucketEncryption(bucket string) (string, error) { - return api.GetBucketEncryption(c, bucket) + return api.GetBucketEncryption(c, bucket, c.BosContext) } // DeleteBucketEncryption - delete the bucket encryption config of the given bucket @@ -599,7 +626,7 @@ func (c *Client) GetBucketEncryption(bucket string) (string, error) { // RETURNS: // - error: nil if success otherwise the specific error func (c *Client) DeleteBucketEncryption(bucket string) error { - return api.DeleteBucketEncryption(c, bucket) + return api.DeleteBucketEncryption(c, bucket, c.BosContext) } // PutBucketStaticWebsite - set the bucket static website config @@ -610,7 +637,7 @@ func (c *Client) DeleteBucketEncryption(bucket string) error { // RETURNS: // - error: nil if success otherwise the specific error func (c *Client) PutBucketStaticWebsite(bucket string, config *bce.Body) error { - return api.PutBucketStaticWebsite(c, bucket, config) + return api.PutBucketStaticWebsite(c, bucket, config, c.BosContext) } // PutBucketStaticWebsiteFromString - set the bucket static website config from json string @@ -625,7 +652,7 @@ func (c *Client) PutBucketStaticWebsiteFromString(bucket, jsonConfig string) err if err != nil { return err } - return api.PutBucketStaticWebsite(c, bucket, body) + return api.PutBucketStaticWebsite(c, bucket, body, c.BosContext) } // PutBucketStaticWebsiteFromStruct - set the bucket static website config from struct @@ -645,7 +672,7 @@ func (c *Client) PutBucketStaticWebsiteFromStruct(bucket string, if err != nil { return err } - return api.PutBucketStaticWebsite(c, bucket, body) + return api.PutBucketStaticWebsite(c, bucket, body, c.BosContext) } // SimplePutBucketStaticWebsite - simple set the bucket static website config @@ -670,7 +697,7 @@ func (c *Client) SimplePutBucketStaticWebsite(bucket, index, notFound string) er // - error: nil if success otherwise the specific error func (c *Client) GetBucketStaticWebsite(bucket string) ( 
*api.GetBucketStaticWebsiteResult, error) { - return api.GetBucketStaticWebsite(c, bucket) + return api.GetBucketStaticWebsite(c, bucket, c.BosContext) } // DeleteBucketStaticWebsite - delete the bucket static website config of the given bucket @@ -680,7 +707,7 @@ func (c *Client) GetBucketStaticWebsite(bucket string) ( // RETURNS: // - error: nil if success otherwise the specific error func (c *Client) DeleteBucketStaticWebsite(bucket string) error { - return api.DeleteBucketStaticWebsite(c, bucket) + return api.DeleteBucketStaticWebsite(c, bucket, c.BosContext) } // PutBucketCors - set the bucket CORS config @@ -691,7 +718,7 @@ func (c *Client) DeleteBucketStaticWebsite(bucket string) error { // RETURNS: // - error: nil if success otherwise the specific error func (c *Client) PutBucketCors(bucket string, config *bce.Body) error { - return api.PutBucketCors(c, bucket, config) + return api.PutBucketCors(c, bucket, config, c.BosContext) } // PutBucketCorsFromFile - set the bucket CORS config from json config file @@ -706,7 +733,7 @@ func (c *Client) PutBucketCorsFromFile(bucket, filename string) error { if err != nil { return err } - return api.PutBucketCors(c, bucket, body) + return api.PutBucketCors(c, bucket, body, c.BosContext) } // PutBucketCorsFromString - set the bucket CORS config from json config string @@ -721,7 +748,7 @@ func (c *Client) PutBucketCorsFromString(bucket, jsonConfig string) error { if err != nil { return err } - return api.PutBucketCors(c, bucket, body) + return api.PutBucketCors(c, bucket, body, c.BosContext) } // PutBucketCorsFromStruct - set the bucket CORS config from json config object @@ -740,7 +767,7 @@ func (c *Client) PutBucketCorsFromStruct(bucket string, confObj *api.PutBucketCo if err != nil { return err } - return api.PutBucketCors(c, bucket, body) + return api.PutBucketCors(c, bucket, body, c.BosContext) } // GetBucketCors - get the bucket CORS config @@ -751,7 +778,7 @@ func (c *Client) PutBucketCorsFromStruct(bucket string, confObj *api.PutBucketCo // - result: the bucket CORS config result object // - error: nil if success otherwise the specific error func (c *Client) GetBucketCors(bucket string) (*api.GetBucketCorsResult, error) { - return api.GetBucketCors(c, bucket) + return api.GetBucketCors(c, bucket, c.BosContext) } // DeleteBucketCors - delete the bucket CORS config of the given bucket @@ -761,7 +788,7 @@ func (c *Client) GetBucketCors(bucket string) (*api.GetBucketCorsResult, error) // RETURNS: // - error: nil if success otherwise the specific error func (c *Client) DeleteBucketCors(bucket string) error { - return api.DeleteBucketCors(c, bucket) + return api.DeleteBucketCors(c, bucket, c.BosContext) } // PutBucketCopyrightProtection - set the copyright protection config of the given bucket @@ -773,7 +800,7 @@ func (c *Client) DeleteBucketCors(bucket string) error { // RETURNS: // - error: nil if success otherwise the specific error func (c *Client) PutBucketCopyrightProtection(bucket string, resources ...string) error { - return api.PutBucketCopyrightProtection(c, bucket, resources...) + return api.PutBucketCopyrightProtection(c, c.BosContext, bucket, resources...) 
} // GetBucketCopyrightProtection - get the bucket copyright protection config @@ -784,7 +811,7 @@ func (c *Client) PutBucketCopyrightProtection(bucket string, resources ...string // - result: the bucket copyright protection config resources // - error: nil if success otherwise the specific error func (c *Client) GetBucketCopyrightProtection(bucket string) ([]string, error) { - return api.GetBucketCopyrightProtection(c, bucket) + return api.GetBucketCopyrightProtection(c, bucket, c.BosContext) } // DeleteBucketCopyrightProtection - delete the bucket copyright protection config @@ -794,7 +821,7 @@ func (c *Client) GetBucketCopyrightProtection(bucket string) ([]string, error) { // RETURNS: // - error: nil if success otherwise the specific error func (c *Client) DeleteBucketCopyrightProtection(bucket string) error { - return api.DeleteBucketCopyrightProtection(c, bucket) + return api.DeleteBucketCopyrightProtection(c, bucket, c.BosContext) } // PutObject - upload a new object or rewrite the existed object with raw stream @@ -809,7 +836,8 @@ func (c *Client) DeleteBucketCopyrightProtection(bucket string) error { // - error: the uploaded error if any occurs func (c *Client) PutObject(bucket, object string, body *bce.Body, args *api.PutObjectArgs) (string, error) { - return api.PutObject(c, bucket, object, body, args) + etag, _, err := api.PutObject(c, bucket, object, body, args, c.BosContext) + return etag, err } // BasicPutObject - the basic interface of uploading an object @@ -822,7 +850,8 @@ func (c *Client) PutObject(bucket, object string, body *bce.Body, // - string: etag of the uploaded object // - error: the uploaded error if any occurs func (c *Client) BasicPutObject(bucket, object string, body *bce.Body) (string, error) { - return api.PutObject(c, bucket, object, body, nil) + etag , _, err := api.PutObject(c, bucket, object, body, nil, c.BosContext) + return etag, err } // PutObjectFromBytes - upload a new object or rewrite the existed object from a byte array @@ -841,7 +870,9 @@ func (c *Client) PutObjectFromBytes(bucket, object string, bytesArr []byte, if err != nil { return "", err } - return api.PutObject(c, bucket, object, body, args) + etag, _, err := api.PutObject(c, bucket, object, body, args, c.BosContext) + return etag, err + } // PutObjectFromString - upload a new object or rewrite the existed object from a string @@ -860,7 +891,9 @@ func (c *Client) PutObjectFromString(bucket, object, content string, if err != nil { return "", err } - return api.PutObject(c, bucket, object, body, args) + etag , _, err := api.PutObject(c, bucket, object, body, args, c.BosContext) + return etag, err + } // PutObjectFromFile - upload a new object or rewrite the existed object from a local file @@ -879,7 +912,8 @@ func (c *Client) PutObjectFromFile(bucket, object, fileName string, if err != nil { return "", err } - return api.PutObject(c, bucket, object, body, args) + etag , _, err := api.PutObject(c, bucket, object, body, args, c.BosContext) + return etag, err } // PutObjectFromStream - upload a new object or rewrite the existed object from stream @@ -898,7 +932,24 @@ func (c *Client) PutObjectFromStream(bucket, object string, reader io.Reader, if err != nil { return "", err } - return api.PutObject(c, bucket, object, body, args) + etag , _, err := api.PutObject(c, bucket, object, body, args, c.BosContext) + return etag, err +} + +func (c *Client) PutObjectFromFileWithCallback(bucket, object, fileName string, + args *api.PutObjectArgs) (string, *api.PutObjectResult, error) { + body, err := 
bce.NewBodyFromFile(fileName) + if err != nil { + return "", nil, err + } + etag, putObjectResult, err := api.PutObject(c, bucket, object, body, args, c.BosContext) + return etag, putObjectResult, err +} + +func (c *Client) PutObjectWithCallback(bucket, object string, body *bce.Body, + args *api.PutObjectArgs) (string, *api.PutObjectResult, error) { + etag, putObjectResult, err := api.PutObject(c, bucket, object, body, args, c.BosContext) + return etag, putObjectResult, err } // CopyObject - copy a remote object to another one @@ -916,7 +967,7 @@ func (c *Client) PutObjectFromStream(bucket, object string, reader io.Reader, func (c *Client) CopyObject(bucket, object, srcBucket, srcObject string, args *api.CopyObjectArgs) (*api.CopyObjectResult, error) { source := fmt.Sprintf("/%s/%s", srcBucket, srcObject) - return api.CopyObject(c, bucket, object, source, args) + return api.CopyObject(c, bucket, object, source, args, c.BosContext) } // BasicCopyObject - the basic interface of copying a object to another one @@ -932,7 +983,7 @@ func (c *Client) CopyObject(bucket, object, srcBucket, srcObject string, func (c *Client) BasicCopyObject(bucket, object, srcBucket, srcObject string) (*api.CopyObjectResult, error) { source := fmt.Sprintf("/%s/%s", srcBucket, srcObject) - return api.CopyObject(c, bucket, object, source, nil) + return api.CopyObject(c, bucket, object, source, nil, c.BosContext) } // GetObject - get the given object with raw stream return @@ -940,15 +991,15 @@ func (c *Client) BasicCopyObject(bucket, object, srcBucket, // PARAMS: // - bucket: the name of the bucket // - object: the name of the object -// - responseHeaders: the optional response headers to get the given object +// - args: the optional args in querysring // - ranges: the optional range start and end to get the given object // RETURNS: // - *api.GetObjectResult: result struct which contains "Body" and header fields // for details reference https://cloud.baidu.com/doc/BOS/API.html#GetObject.E6.8E.A5.E5.8F.A3 // - error: any error if it occurs -func (c *Client) GetObject(bucket, object string, responseHeaders map[string]string, +func (c *Client) GetObject(bucket, object string, args map[string]string, ranges ...int64) (*api.GetObjectResult, error) { - return api.GetObject(c, bucket, object, responseHeaders, ranges...) + return api.GetObject(c, bucket, object, c.BosContext, args, ranges...) 
} // BasicGetObject - the basic interface of geting the given object @@ -961,7 +1012,7 @@ func (c *Client) GetObject(bucket, object string, responseHeaders map[string]str // for details reference https://cloud.baidu.com/doc/BOS/API.html#GetObject.E6.8E.A5.E5.8F.A3 // - error: any error if it occurs func (c *Client) BasicGetObject(bucket, object string) (*api.GetObjectResult, error) { - return api.GetObject(c, bucket, object, nil) + return api.GetObject(c, bucket, object, c.BosContext, nil) } // BasicGetObjectToFile - use basic interface to get the given object to the given file path @@ -973,7 +1024,7 @@ func (c *Client) BasicGetObject(bucket, object string) (*api.GetObjectResult, er // RETURNS: // - error: any error if it occurs func (c *Client) BasicGetObjectToFile(bucket, object, filePath string) error { - res, err := api.GetObject(c, bucket, object, nil) + res, err := api.GetObject(c, bucket, object, c.BosContext, nil) if err != nil { return err } @@ -1005,7 +1056,7 @@ func (c *Client) BasicGetObjectToFile(bucket, object, filePath string) error { // https://cloud.baidu.com/doc/BOS/API.html#GetObjectMeta.E6.8E.A5.E5.8F.A3 // - error: any error if it occurs func (c *Client) GetObjectMeta(bucket, object string) (*api.GetObjectMetaResult, error) { - return api.GetObjectMeta(c, bucket, object) + return api.GetObjectMeta(c, bucket, object, c.BosContext) } // SelectObject - select the object content @@ -1018,7 +1069,7 @@ func (c *Client) GetObjectMeta(bucket, object string) (*api.GetObjectMetaResult, // - *api.SelectObjectResult: select object result // - error: any error if it occurs func (c *Client) SelectObject(bucket, object string, args *api.SelectObjectArgs) (*api.SelectObjectResult, error) { - return api.SelectObject(c, bucket, object, args) + return api.SelectObject(c, bucket, object, args, c.BosContext) } // FetchObject - fetch the object content from the given source and store @@ -1033,7 +1084,7 @@ func (c *Client) SelectObject(bucket, object string, args *api.SelectObjectArgs) // - error: any error if it occurs func (c *Client) FetchObject(bucket, object, source string, args *api.FetchObjectArgs) (*api.FetchObjectResult, error) { - return api.FetchObject(c, bucket, object, source, args) + return api.FetchObject(c, bucket, object, source, args, c.BosContext) } // BasicFetchObject - the basic interface of the fetch object api @@ -1046,7 +1097,7 @@ func (c *Client) FetchObject(bucket, object, source string, // - *api.FetchObjectResult: result struct with Code, Message, RequestId and JobId fields // - error: any error if it occurs func (c *Client) BasicFetchObject(bucket, object, source string) (*api.FetchObjectResult, error) { - return api.FetchObject(c, bucket, object, source, nil) + return api.FetchObject(c, bucket, object, source, nil, c.BosContext) } // SimpleFetchObject - fetch object with simple arguments interface @@ -1063,7 +1114,7 @@ func (c *Client) BasicFetchObject(bucket, object, source string) (*api.FetchObje func (c *Client) SimpleFetchObject(bucket, object, source, mode, storageClass string) (*api.FetchObjectResult, error) { args := &api.FetchObjectArgs{mode, storageClass} - return api.FetchObject(c, bucket, object, source, args) + return api.FetchObject(c, bucket, object, source, args, c.BosContext) } // AppendObject - append the given content to a new or existed object which is appendable @@ -1078,7 +1129,7 @@ func (c *Client) SimpleFetchObject(bucket, object, source, mode, // - error: any error if it occurs func (c *Client) AppendObject(bucket, object string, content 
*bce.Body, args *api.AppendObjectArgs) (*api.AppendObjectResult, error) { - return api.AppendObject(c, bucket, object, content, args) + return api.AppendObject(c, bucket, object, content, args, c.BosContext) } // SimpleAppendObject - the interface to append object with simple offset argument @@ -1093,7 +1144,7 @@ func (c *Client) AppendObject(bucket, object string, content *bce.Body, // - error: any error if it occurs func (c *Client) SimpleAppendObject(bucket, object string, content *bce.Body, offset int64) (*api.AppendObjectResult, error) { - return api.AppendObject(c, bucket, object, content, &api.AppendObjectArgs{Offset: offset}) + return api.AppendObject(c, bucket, object, content, &api.AppendObjectArgs{Offset: offset}, c.BosContext) } // SimpleAppendObjectFromString - the simple interface of appending an object from a string @@ -1112,7 +1163,7 @@ func (c *Client) SimpleAppendObjectFromString(bucket, object, content string, if err != nil { return nil, err } - return api.AppendObject(c, bucket, object, body, &api.AppendObjectArgs{Offset: offset}) + return api.AppendObject(c, bucket, object, body, &api.AppendObjectArgs{Offset: offset}, c.BosContext) } // SimpleAppendObjectFromFile - the simple interface of appending an object from a file @@ -1131,7 +1182,7 @@ func (c *Client) SimpleAppendObjectFromFile(bucket, object, filePath string, if err != nil { return nil, err } - return api.AppendObject(c, bucket, object, body, &api.AppendObjectArgs{Offset: offset}) + return api.AppendObject(c, bucket, object, body, &api.AppendObjectArgs{Offset: offset}, c.BosContext) } // DeleteObject - delete the given object @@ -1142,7 +1193,7 @@ func (c *Client) SimpleAppendObjectFromFile(bucket, object, filePath string, // RETURNS: // - error: any error if it occurs func (c *Client) DeleteObject(bucket, object string) error { - return api.DeleteObject(c, bucket, object) + return api.DeleteObject(c, bucket, object, c.BosContext) } // DeleteMultipleObjects - delete a list of objects @@ -1155,7 +1206,7 @@ func (c *Client) DeleteObject(bucket, object string) error { // - error: any error if it occurs func (c *Client) DeleteMultipleObjects(bucket string, objectListStream *bce.Body) (*api.DeleteMultipleObjectsResult, error) { - return api.DeleteMultipleObjects(c, bucket, objectListStream) + return api.DeleteMultipleObjects(c, bucket, objectListStream, c.BosContext) } // DeleteMultipleObjectsFromString - delete a list of objects with json format string @@ -1172,7 +1223,7 @@ func (c *Client) DeleteMultipleObjectsFromString(bucket, if err != nil { return nil, err } - return api.DeleteMultipleObjects(c, bucket, body) + return api.DeleteMultipleObjects(c, bucket, body, c.BosContext) } // DeleteMultipleObjectsFromStruct - delete a list of objects with object list struct @@ -1193,7 +1244,7 @@ func (c *Client) DeleteMultipleObjectsFromStruct(bucket string, if err != nil { return nil, err } - return api.DeleteMultipleObjects(c, bucket, body) + return api.DeleteMultipleObjects(c, bucket, body, c.BosContext) } // DeleteMultipleObjectsFromKeyList - delete a list of objects with given key string array @@ -1223,7 +1274,7 @@ func (c *Client) DeleteMultipleObjectsFromKeyList(bucket string, if err != nil { return nil, err } - return api.DeleteMultipleObjects(c, bucket, body) + return api.DeleteMultipleObjects(c, bucket, body, c.BosContext) } // InitiateMultipartUpload - initiate a multipart upload to get a upload ID @@ -1239,7 +1290,7 @@ func (c *Client) DeleteMultipleObjectsFromKeyList(bucket string, // - error: nil if ok 
otherwise the specific error func (c *Client) InitiateMultipartUpload(bucket, object, contentType string, args *api.InitiateMultipartUploadArgs) (*api.InitiateMultipartUploadResult, error) { - return api.InitiateMultipartUpload(c, bucket, object, contentType, args) + return api.InitiateMultipartUpload(c, bucket, object, contentType, args, c.BosContext) } // BasicInitiateMultipartUpload - basic interface to initiate a multipart upload @@ -1252,7 +1303,7 @@ func (c *Client) InitiateMultipartUpload(bucket, object, contentType string, // - error: nil if ok otherwise the specific error func (c *Client) BasicInitiateMultipartUpload(bucket, object string) (*api.InitiateMultipartUploadResult, error) { - return api.InitiateMultipartUpload(c, bucket, object, "", nil) + return api.InitiateMultipartUpload(c, bucket, object, "", nil, c.BosContext) } // UploadPart - upload the single part in the multipart upload process @@ -1269,7 +1320,7 @@ func (c *Client) BasicInitiateMultipartUpload(bucket, // - error: nil if ok otherwise the specific error func (c *Client) UploadPart(bucket, object, uploadId string, partNumber int, content *bce.Body, args *api.UploadPartArgs) (string, error) { - return api.UploadPart(c, bucket, object, uploadId, partNumber, content, args) + return api.UploadPart(c, bucket, object, uploadId, partNumber, content, args, c.BosContext) } // BasicUploadPart - basic interface to upload the single part in the multipart upload process @@ -1285,7 +1336,7 @@ func (c *Client) UploadPart(bucket, object, uploadId string, partNumber int, // - error: nil if ok otherwise the specific error func (c *Client) BasicUploadPart(bucket, object, uploadId string, partNumber int, content *bce.Body) (string, error) { - return api.UploadPart(c, bucket, object, uploadId, partNumber, content, nil) + return api.UploadPart(c, bucket, object, uploadId, partNumber, content, nil, c.BosContext) } // UploadPartFromBytes - upload the single part in the multipart upload process @@ -1302,7 +1353,7 @@ func (c *Client) BasicUploadPart(bucket, object, uploadId string, partNumber int // - error: nil if ok otherwise the specific error func (c *Client) UploadPartFromBytes(bucket, object, uploadId string, partNumber int, content []byte, args *api.UploadPartArgs) (string, error) { - return api.UploadPartFromBytes(c, bucket, object, uploadId, partNumber, content, args) + return api.UploadPartFromBytes(c, bucket, object, uploadId, partNumber, content, args, c.BosContext) } // UploadPartCopy - copy the multipart object @@ -1321,7 +1372,7 @@ func (c *Client) UploadPartFromBytes(bucket, object, uploadId string, partNumber func (c *Client) UploadPartCopy(bucket, object, srcBucket, srcObject, uploadId string, partNumber int, args *api.UploadPartCopyArgs) (*api.CopyObjectResult, error) { source := fmt.Sprintf("/%s/%s", srcBucket, srcObject) - return api.UploadPartCopy(c, bucket, object, source, uploadId, partNumber, args) + return api.UploadPartCopy(c, bucket, object, source, uploadId, partNumber, args, c.BosContext) } // BasicUploadPartCopy - basic interface to copy the multipart object @@ -1339,7 +1390,7 @@ func (c *Client) UploadPartCopy(bucket, object, srcBucket, srcObject, uploadId s func (c *Client) BasicUploadPartCopy(bucket, object, srcBucket, srcObject, uploadId string, partNumber int) (*api.CopyObjectResult, error) { source := fmt.Sprintf("/%s/%s", srcBucket, srcObject) - return api.UploadPartCopy(c, bucket, object, source, uploadId, partNumber, nil) + return api.UploadPartCopy(c, bucket, object, source, uploadId, partNumber, 
nil, c.BosContext) } // CompleteMultipartUpload - finish a multipart upload operation with parts stream @@ -1355,7 +1406,7 @@ func (c *Client) BasicUploadPartCopy(bucket, object, srcBucket, srcObject, uploa // - error: nil if ok otherwise the specific error func (c *Client) CompleteMultipartUpload(bucket, object, uploadId string, body *bce.Body, args *api.CompleteMultipartUploadArgs) (*api.CompleteMultipartUploadResult, error) { - return api.CompleteMultipartUpload(c, bucket, object, uploadId, body, args) + return api.CompleteMultipartUpload(c, bucket, object, uploadId, body, args, c.BosContext) } // CompleteMultipartUploadFromStruct - finish a multipart upload operation with parts struct @@ -1378,7 +1429,7 @@ func (c *Client) CompleteMultipartUploadFromStruct(bucket, object, uploadId stri if err != nil { return nil, err } - return api.CompleteMultipartUpload(c, bucket, object, uploadId, body, args) + return api.CompleteMultipartUpload(c, bucket, object, uploadId, body, args, c.BosContext) } // AbortMultipartUpload - abort a multipart upload operation @@ -1390,7 +1441,7 @@ func (c *Client) CompleteMultipartUploadFromStruct(bucket, object, uploadId stri // RETURNS: // - error: nil if ok otherwise the specific error func (c *Client) AbortMultipartUpload(bucket, object, uploadId string) error { - return api.AbortMultipartUpload(c, bucket, object, uploadId) + return api.AbortMultipartUpload(c, bucket, object, uploadId, c.BosContext) } // ListParts - list the successfully uploaded parts info by upload id @@ -1405,7 +1456,7 @@ func (c *Client) AbortMultipartUpload(bucket, object, uploadId string) error { // - error: nil if ok otherwise the specific error func (c *Client) ListParts(bucket, object, uploadId string, args *api.ListPartsArgs) (*api.ListPartsResult, error) { - return api.ListParts(c, bucket, object, uploadId, args) + return api.ListParts(c, bucket, object, uploadId, args, c.BosContext) } // BasicListParts - basic interface to list the successfully uploaded parts info by upload id @@ -1418,7 +1469,7 @@ func (c *Client) ListParts(bucket, object, uploadId string, // - *ListPartsResult: the uploaded parts info result // - error: nil if ok otherwise the specific error func (c *Client) BasicListParts(bucket, object, uploadId string) (*api.ListPartsResult, error) { - return api.ListParts(c, bucket, object, uploadId, nil) + return api.ListParts(c, bucket, object, uploadId, nil, c.BosContext) } // ListMultipartUploads - list the unfinished uploaded parts of the given bucket @@ -1431,7 +1482,7 @@ func (c *Client) BasicListParts(bucket, object, uploadId string) (*api.ListParts // - error: nil if ok otherwise the specific error func (c *Client) ListMultipartUploads(bucket string, args *api.ListMultipartUploadsArgs) (*api.ListMultipartUploadsResult, error) { - return api.ListMultipartUploads(c, bucket, args) + return api.ListMultipartUploads(c, bucket, args, c.BosContext) } // BasicListMultipartUploads - basic interface to list the unfinished uploaded parts @@ -1443,7 +1494,7 @@ func (c *Client) ListMultipartUploads(bucket string, // - error: nil if ok otherwise the specific error func (c *Client) BasicListMultipartUploads(bucket string) ( *api.ListMultipartUploadsResult, error) { - return api.ListMultipartUploads(c, bucket, nil) + return api.ListMultipartUploads(c, bucket, nil, c.BosContext) } // UploadSuperFile - parallel upload the super file by using the multipart upload interface @@ -1691,7 +1742,7 @@ func (c *Client) BasicGeneratePresignedUrl(bucket, object string, expireInSecond // RETURNS: 
// - error: nil if success otherwise the specific error func (c *Client) PutObjectAcl(bucket, object string, aclBody *bce.Body) error { - return api.PutObjectAcl(c, bucket, object, "", nil, nil, aclBody) + return api.PutObjectAcl(c, bucket, object, "", nil, nil, aclBody, c.BosContext) } // PutObjectAclFromCanned - set the canned acl of the given object @@ -1703,7 +1754,7 @@ func (c *Client) PutObjectAcl(bucket, object string, aclBody *bce.Body) error { // RETURNS: // - error: nil if success otherwise the specific error func (c *Client) PutObjectAclFromCanned(bucket, object, cannedAcl string) error { - return api.PutObjectAcl(c, bucket, object, cannedAcl, nil, nil, nil) + return api.PutObjectAcl(c, bucket, object, cannedAcl, nil, nil, nil, c.BosContext) } // PutObjectAclGrantRead - set the canned grant read acl of the given object @@ -1715,7 +1766,7 @@ func (c *Client) PutObjectAclFromCanned(bucket, object, cannedAcl string) error // RETURNS: // - error: nil if success otherwise the specific error func (c *Client) PutObjectAclGrantRead(bucket, object string, ids ...string) error { - return api.PutObjectAcl(c, bucket, object, "", ids, nil, nil) + return api.PutObjectAcl(c, bucket, object, "", ids, nil, nil, c.BosContext) } // PutObjectAclGrantFullControl - set the canned grant full-control acl of the given object @@ -1727,7 +1778,7 @@ func (c *Client) PutObjectAclGrantRead(bucket, object string, ids ...string) err // RETURNS: // - error: nil if success otherwise the specific error func (c *Client) PutObjectAclGrantFullControl(bucket, object string, ids ...string) error { - return api.PutObjectAcl(c, bucket, object, "", nil, ids, nil) + return api.PutObjectAcl(c, bucket, object, "", nil, ids, nil, c.BosContext) } // PutObjectAclFromFile - set the acl of the given object with acl json file name @@ -1743,7 +1794,7 @@ func (c *Client) PutObjectAclFromFile(bucket, object, aclFile string) error { if err != nil { return err } - return api.PutObjectAcl(c, bucket, object, "", nil, nil, body) + return api.PutObjectAcl(c, bucket, object, "", nil, nil, body, c.BosContext) } // PutObjectAclFromString - set the acl of the given object with acl json string @@ -1759,7 +1810,7 @@ func (c *Client) PutObjectAclFromString(bucket, object, aclString string) error if err != nil { return err } - return api.PutObjectAcl(c, bucket, object, "", nil, nil, body) + return api.PutObjectAcl(c, bucket, object, "", nil, nil, body, c.BosContext) } // PutObjectAclFromStruct - set the acl of the given object with acl data structure @@ -1778,7 +1829,7 @@ func (c *Client) PutObjectAclFromStruct(bucket, object string, aclObj *api.PutOb if err != nil { return err } - return api.PutObjectAcl(c, bucket, object, "", nil, nil, body) + return api.PutObjectAcl(c, bucket, object, "", nil, nil, body, c.BosContext) } // GetObjectAcl - get the acl of the given object @@ -1790,7 +1841,7 @@ func (c *Client) PutObjectAclFromStruct(bucket, object string, aclObj *api.PutOb // - *api.GetObjectAclResult: the result of the object acl // - error: nil if success otherwise the specific error func (c *Client) GetObjectAcl(bucket, object string) (*api.GetObjectAclResult, error) { - return api.GetObjectAcl(c, bucket, object) + return api.GetObjectAcl(c, bucket, object, c.BosContext) } // DeleteObjectAcl - delete the acl of the given object @@ -1801,7 +1852,7 @@ func (c *Client) GetObjectAcl(bucket, object string) (*api.GetObjectAclResult, e // RETURNS: // - error: nil if success otherwise the specific error func (c *Client) DeleteObjectAcl(bucket, object 
string) error { - return api.DeleteObjectAcl(c, bucket, object) + return api.DeleteObjectAcl(c, bucket, object, c.BosContext) } // RestoreObject - restore the archive object @@ -1826,7 +1877,7 @@ func (c *Client) RestoreObject(bucket string, object string, restoreDays int, re RestoreTier: restoreTier, RestoreDays: restoreDays, } - return api.RestoreObject(c, bucket, object, args) + return api.RestoreObject(c, bucket, object, args, c.BosContext) } // PutBucketTrash - put the bucket trash @@ -1837,7 +1888,7 @@ func (c *Client) RestoreObject(bucket string, object string, restoreDays int, re // RETURNS: // - error: nil if success otherwise the specific error func (c *Client) PutBucketTrash(bucket string, trashReq api.PutBucketTrashReq) error { - return api.PutBucketTrash(c, bucket, trashReq) + return api.PutBucketTrash(c, bucket, trashReq, c.BosContext) } // GetBucketTrash - get the bucket trash @@ -1848,7 +1899,7 @@ func (c *Client) PutBucketTrash(bucket string, trashReq api.PutBucketTrashReq) e // - *api.GetBucketTrashResult,: the result of the bucket trash // - error: nil if success otherwise the specific error func (c *Client) GetBucketTrash(bucket string) (*api.GetBucketTrashResult, error) { - return api.GetBucketTrash(c, bucket) + return api.GetBucketTrash(c, bucket, c.BosContext) } // DeleteBucketTrash - delete the trash of the given bucket @@ -1858,7 +1909,7 @@ func (c *Client) GetBucketTrash(bucket string) (*api.GetBucketTrashResult, error // RETURNS: // - error: nil if success otherwise the specific error func (c *Client) DeleteBucketTrash(bucket string) error { - return api.DeleteBucketTrash(c, bucket) + return api.DeleteBucketTrash(c, bucket, c.BosContext) } // PutBucketNotification - put the bucket notification @@ -1869,7 +1920,7 @@ func (c *Client) DeleteBucketTrash(bucket string) error { // RETURNS: // - error: nil if success otherwise the specific error func (c *Client) PutBucketNotification(bucket string, putBucketNotificationReq api.PutBucketNotificationReq) error { - return api.PutBucketNotification(c, bucket, putBucketNotificationReq) + return api.PutBucketNotification(c, bucket, putBucketNotificationReq, c.BosContext) } // GetBucketNotification - get the bucket notification @@ -1880,7 +1931,7 @@ func (c *Client) PutBucketNotification(bucket string, putBucketNotificationReq a // - *api.PutBucketNotificationReq,: the result of the bucket notification // - error: nil if success otherwise the specific error func (c *Client) GetBucketNotification(bucket string) (*api.PutBucketNotificationReq, error) { - return api.GetBucketNotification(c, bucket) + return api.GetBucketNotification(c, bucket, c.BosContext) } // DeleteBucketNotification - delete the notification of the given bucket @@ -1890,7 +1941,7 @@ func (c *Client) GetBucketNotification(bucket string) (*api.PutBucketNotificatio // RETURNS: // - error: nil if success otherwise the specific error func (c *Client) DeleteBucketNotification(bucket string) error { - return api.DeleteBucketNotification(c, bucket) + return api.DeleteBucketNotification(c, bucket, c.BosContext) } // ParallelUpload - auto multipart upload object @@ -1906,7 +1957,7 @@ func (c *Client) DeleteBucketNotification(bucket string) error { // - error: nil if success otherwise the specific error func (c *Client) ParallelUpload(bucket string, object string, filename string, contentType string, args *api.InitiateMultipartUploadArgs) (*api.CompleteMultipartUploadResult, error) { - initiateMultipartUploadResult, err := api.InitiateMultipartUpload(c, bucket, object, 
contentType, args) + initiateMultipartUploadResult, err := api.InitiateMultipartUpload(c, bucket, object, contentType, args, c.BosContext) if err != nil { return nil, err } @@ -2028,7 +2079,7 @@ func (c *Client) singlePartUpload( var args api.UploadPartArgs args.ContentMD5 = content.ContentMD5() - etag, err := api.UploadPart(c, bucket, object, uploadId, partNumber, content, &args) + etag, err := api.UploadPart(c, bucket, object, uploadId, partNumber, content, &args, c.BosContext) if err != nil { errChan <- err log.Error("upload part fail,err:%v", err) @@ -2073,7 +2124,7 @@ func (c *Client) ParallelCopy(srcBucketName string, srcObjectName string, initArgs.StorageClass = args.StorageClass } } - initiateMultipartUploadResult, err := api.InitiateMultipartUpload(c, destBucketName, destObjectName, objectMeta.ContentType, &initArgs) + initiateMultipartUploadResult, err := api.InitiateMultipartUpload(c, destBucketName, destObjectName, objectMeta.ContentType, &initArgs, c.BosContext) if err != nil { return nil, err @@ -2110,7 +2161,10 @@ func (c *Client) parallelPartCopy(srcMeta api.GetObjectMetaResult, source string var err error size := srcMeta.ContentLength partSize := int64(DEFAULT_MULTIPART_SIZE) - + if partSize*MAX_PART_NUMBER < size { + lowerLimit := int64(math.Ceil(float64(size) / MAX_PART_NUMBER)) + partSize = int64(math.Ceil(float64(lowerLimit)/float64(partSize))) * partSize + } partNum := (size + partSize - 1) / partSize parallelChan := make(chan int, c.MaxParallel) @@ -2184,7 +2238,7 @@ func (c *Client) singlePartCopy(source string, bucket string, object string, upl <-parallelChan }() - copyObjectResult, err := api.UploadPartCopy(c, bucket, object, source, uploadId, partNumber, args) + copyObjectResult, err := api.UploadPartCopy(c, bucket, object, source, uploadId, partNumber, args, c.BosContext) if err != nil { errChan <- err log.Error("upload part fail,err:%v", err) @@ -2204,7 +2258,7 @@ func (c *Client) singlePartCopy(source string, bucket string, object string, upl // RETURNS: // - error: the put error if any occurs func (c *Client) PutSymlink(bucket string, object string, symlinkKey string, symlinkArgs *api.PutSymlinkArgs) error { - return api.PutObjectSymlink(c, bucket, object, symlinkKey, symlinkArgs) + return api.PutObjectSymlink(c, bucket, object, symlinkKey, symlinkArgs, c.BosContext) } // PutSymlink - create symlink for exist target object @@ -2216,5 +2270,41 @@ func (c *Client) PutSymlink(bucket string, object string, symlinkKey string, sym // - string: the target of the symlink // - error: the put error if any occurs func (c *Client) GetSymlink(bucket string, object string) (string, error) { - return api.GetObjectSymlink(c, bucket, object) + return api.GetObjectSymlink(c, bucket, object, c.BosContext) +} + +func (c *Client) PutBucketMirror(bucket string, putBucketMirrorArgs *api.PutBucketMirrorArgs) error { + return api.PutBucketMirror(c, bucket, putBucketMirrorArgs, c.BosContext) +} + +func (c *Client) GetBucketMirror(bucket string) (*api.PutBucketMirrorArgs, error) { + return api.GetBucketMirror(c, bucket, c.BosContext) +} + +func (c *Client) DeleteBucketMirror(bucket string) error { + return api.DeleteBucketMirror(c, bucket, c.BosContext) +} + +func (c *Client) PutBucketTag(bucket string, putBucketTagArgs *api.PutBucketTagArgs) error { + return api.PutBucketTag(c, bucket, putBucketTagArgs, c.BosContext) +} + +func (c *Client) GetBucketTag(bucket string) (*api.GetBucketTagResult, error) { + return api.GetBucketTag(c, bucket, c.BosContext) +} + +func (c *Client) 
DeleteBucketTag(bucket string) error { + return api.DeleteBucketTag(c, bucket, c.BosContext) +} + +func (c *Client) PutObjectTag(bucket string, object string, putObjectTagArgs *api.PutObjectTagArgs) error { + return api.PutObjectTag(c, bucket, object, putObjectTagArgs, c.BosContext) +} + +func (c *Client) GetObjectTag(bucket string, object string) (map[string]interface{}, error) { + return api.GetObjectTag(c, bucket, object, c.BosContext) +} + +func (c *Client) DeleteObjectTag(bucket string, object string) error { + return api.DeleteObjectTag(c, bucket, object, c.BosContext) } diff --git a/vendor/modules.txt b/vendor/modules.txt index bcbe049abf823..79399e154d408 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -467,7 +467,7 @@ github.com/aws/smithy-go/transport/http/internal/io # github.com/axiomhq/hyperloglog v0.0.0-20240507144631-af9851f82b27 ## explicit; go 1.12 github.com/axiomhq/hyperloglog -# github.com/baidubce/bce-sdk-go v0.9.141 +# github.com/baidubce/bce-sdk-go v0.9.186 ## explicit; go 1.11 github.com/baidubce/bce-sdk-go/auth github.com/baidubce/bce-sdk-go/bce
fix
update module github.com/baidubce/bce-sdk-go to v0.9.186 (#13864)
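Most of the diff above is mechanical plumbing (every `api.*` call now receives the client's `c.BosContext`, and `api.PutObject` gains a `*api.PutObjectResult` return that the new `*WithCallback` helpers expose), but the `parallelPartCopy` hunk adds real logic: when the object is too large for the part-number cap at the default part size, the part size is raised to the smallest multiple of the default that fits. A minimal sketch of that computation, assuming illustrative values for `DEFAULT_MULTIPART_SIZE` and `MAX_PART_NUMBER` (the real constants live in the SDK and may differ):

```go
package main

import (
	"fmt"
	"math"
)

// Assumed stand-ins for the SDK's constants; hypothetical values.
const (
	defaultMultipartSize = 10 * 1024 * 1024 // assumed DEFAULT_MULTIPART_SIZE (10 MiB)
	maxPartNumber        = 10000            // assumed MAX_PART_NUMBER
)

// choosePartSize reproduces the new clamp from parallelPartCopy: if
// maxPartNumber parts of the default size cannot cover the object, round
// ceil(size/maxPartNumber) up to the next multiple of the default part size.
func choosePartSize(size int64) int64 {
	partSize := int64(defaultMultipartSize)
	if partSize*maxPartNumber < size {
		lowerLimit := int64(math.Ceil(float64(size) / maxPartNumber))
		partSize = int64(math.Ceil(float64(lowerLimit)/float64(partSize))) * partSize
	}
	return partSize
}

func main() {
	size := int64(150) << 30 // a 150 GiB object
	partSize := choosePartSize(size)
	fmt.Println(partSize, (size+partSize-1)/partSize) // 20971520 7680
}
```

Under these assumed constants, a 150 GiB copy would need 15,360 default-sized parts and overflow the cap; the clamp doubles the part size to 20 MiB, bringing the copy down to 7,680 parts.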
210c937c414c3623c5770d6c3c237186db12fa75
2023-08-25 15:20:42
Christian Haudum
chore(schema-v13): Fix series store schema creation for v13 (#10348)
false
diff --git a/pkg/storage/stores/series/index/schema_config.go b/pkg/storage/stores/series/index/schema_config.go index ca5265e7b3c7f..c4b3f2dfe17cf 100644 --- a/pkg/storage/stores/series/index/schema_config.go +++ b/pkg/storage/stores/series/index/schema_config.go @@ -14,7 +14,6 @@ import ( const ( secondsInDay = int64(24 * time.Hour / time.Second) millisecondsInDay = int64(24 * time.Hour / time.Millisecond) - v12 = "v12" ) var ( @@ -35,14 +34,18 @@ func CreateSchema(cfg config.PeriodConfig) (SeriesStoreSchema, error) { return nil, errInvalidTablePeriod } - switch cfg.Schema { - case "v9": + v, err := cfg.VersionAsInt() + if err != nil { + return nil, err + } + + if v == 9 { return newSeriesStoreSchema(buckets, v9Entries{}), nil - case "v10", "v11", v12: + } + if v >= 10 { if cfg.RowShards == 0 { return nil, fmt.Errorf("must have row_shards > 0 (current: %d) for schema (%s)", cfg.RowShards, cfg.Schema) } - v10 := v10Entries{rowShards: cfg.RowShards} switch cfg.Schema { case "v10": diff --git a/pkg/storage/stores/series/series_store_test.go b/pkg/storage/stores/series/series_store_test.go index d278dd43154c2..70ce736ecf1a4 100644 --- a/pkg/storage/stores/series/series_store_test.go +++ b/pkg/storage/stores/series/series_store_test.go @@ -34,7 +34,7 @@ const userID = "1" var ( ctx = user.InjectOrgID(context.Background(), userID) - schemas = []string{"v9", "v10", "v11", "v12"} + schemas = []string{"v9", "v10", "v11", "v12", "v13"} stores = []struct { name string configFn configFactory
chore
Fix series store schema creation for v13 (#10348)
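The fix works because the hard-coded `"v10", "v11", v12` case list is replaced by a numeric comparison, so any schema version from v10 up (including the new v13) takes the row-sharded path. A rough sketch of the resulting control flow; `versionAsInt` and the return values here are illustrative stand-ins for `config.PeriodConfig.VersionAsInt` and Loki's actual schema types:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// versionAsInt mimics parsing a schema label such as "v13" into 13.
func versionAsInt(schema string) (int, error) {
	return strconv.Atoi(strings.TrimPrefix(schema, "v"))
}

// createSchema sketches the new dispatch: v9 keeps its own entries, and every
// later version shares one row-shard validation instead of an enumerated list,
// which is why v13 no longer falls through to an "invalid schema" error.
func createSchema(schema string, rowShards uint32) (string, error) {
	v, err := versionAsInt(schema)
	if err != nil {
		return "", err
	}
	if v == 9 {
		return "v9 series store schema", nil
	}
	if v >= 10 {
		if rowShards == 0 {
			return "", fmt.Errorf("must have row_shards > 0 (current: %d) for schema (%s)", rowShards, schema)
		}
		return fmt.Sprintf("sharded series store schema for %s", schema), nil
	}
	return "", fmt.Errorf("invalid schema version: %s", schema)
}

func main() {
	fmt.Println(createSchema("v13", 16)) // succeeds
	fmt.Println(createSchema("v13", 0))  // still rejected: row_shards must be > 0
}
```

The accompanying test change (`schemas = []string{"v9", ..., "v13"}`) exercises exactly this: v13 now builds a schema, while a zero `row_shards` fails with the same error as before.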
66d3e00981cd616ecd76bbe54b39c09cef3cd691
2022-06-23 20:06:51
Michel Hollands
changelog: Fix changelog link (#6479)
false
diff --git a/CHANGELOG.md b/CHANGELOG.md index b3b8a44d24243..c4685b9edf3ed 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,6 @@ ## Main * [6444](https://github.com/grafana/loki/pull/6444) **aminesnow** Add TLS config to query frontend. -* [6435](https://github.com/grafana/loki/pull/6430) **MichelHollands**: Remove the `whole-stream-deletion` mode. -* [6415](https://github.com/grafana/loki/pull/6415) **salvacorts** Evenly spread queriers across kubernetes nodes. -* [6410](https://github.com/grafana/loki/pull/6410) **MichelHollands**: Add support for per tenant delete API access enabling. * [6372](https://github.com/grafana/loki/pull/6372) **splitice**: Add support for numbers in JSON fields. * [6105](https://github.com/grafana/loki/pull/6105) **rutgerke** Export metrics for the Promtail journal target. * [6099](https://github.com/grafana/loki/pull/6099) **cstyan**: Drop lines with malformed JSON in Promtail JSON pipeline stage. @@ -99,6 +96,9 @@ * [5685](https://github.com/grafana/loki/pull/5685) **chaudum**: Assert that push values tuples consist of string values * [6375](https://github.com/grafana/loki/pull/6375) **dannykopping**: Fix bug that prevented users from using the `json` parser after a `line_format` pipeline stage. ##### Changes +* [6435](https://github.com/grafana/loki/pull/6435) **MichelHollands**: Remove the `whole-stream-deletion` mode. +* [6415](https://github.com/grafana/loki/pull/6415) **salvacorts** Evenly spread queriers across kubernetes nodes. +* [6410](https://github.com/grafana/loki/pull/6410) **MichelHollands**: Add support for per tenant delete API access enabling. * [6361](https://github.com/grafana/loki/pull/6361) **chaudum**: Sum values in unwrapped rate aggregation instead of treating them as counter. * [6042](https://github.com/grafana/loki/pull/6042) **slim-bean**: Add a new configuration to allow fudging of ingested timestamps to guarantee sort order of duplicate timestamps at query time. * [6120](https://github.com/grafana/loki/pull/6120) **KMiller-Grafana**: Rename configuration parameter fudge_duplicate_timestamp to be increment_duplicate_timestamp.
changelog
Fix changelog link (#6479)
685c898b9532e5e3d85dc7fe418764983d7cdfa9
2022-07-19 23:39:34
Gerard Vanloo
operator: Addons work in restricted policy (#6564)
false
diff --git a/operator/hack/addons_dev.yaml b/operator/hack/addons_dev.yaml index 6519eb60fc249..f9781b46f6c89 100644 --- a/operator/hack/addons_dev.yaml +++ b/operator/hack/addons_dev.yaml @@ -42,7 +42,7 @@ spec: value: /var/run/secrets/kubernetes.io/serviceaccount/token args: - -c - - while true; do logcli query '{job="systemd-journal"}'; sleep 30; done + - while true; do logcli query '{namespace="default"}'; sleep 30; done securityContext: allowPrivilegeEscalation: false capabilities: @@ -50,6 +50,7 @@ spec: - ALL serviceAccountName: lokistack-dev-addons-logcli securityContext: + runAsUser: 10002 runAsNonRoot: true seccompProfile: type: RuntimeDefault @@ -76,12 +77,17 @@ spec: args: - -config.file=/etc/promtail/promtail.yaml - -log.level=info + env: + - name: 'HOSTNAME' + valueFrom: + fieldRef: + fieldPath: 'spec.nodeName' terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /etc/promtail name: config - - mountPath: /run/promtail + - mountPath: /tmp/promtail name: run - mountPath: /var/lib/docker/containers name: docker @@ -93,34 +99,39 @@ spec: name: journal readOnly: true securityContext: - allowPrivilegeEscalation: false + privileged: true + runAsNonRoot: false + readOnlyRootFilesystem: true capabilities: drop: - ALL serviceAccountName: lokistack-dev-addons-promtail securityContext: + runAsUser: 10002 runAsNonRoot: true + seccompProfile: + type: RuntimeDefault volumes: - - configMap: + - name: config + configMap: defaultMode: 420 name: lokistack-dev-addons-promtail - name: config - - emptyDir: - medium: "" - sizeLimit: 10G - name: run - - emptyDir: + - name: run + emptyDir: medium: "" - sizeLimit: 10G - name: docker - - emptyDir: - medium: "" - sizeLimit: 10G - name: pods - - emptyDir: - medium: "" - sizeLimit: 10G - name: journal + sizeLimit: 5Gi + - name: docker + hostPath: + path: /var/lib/docker/containers + type: "" + - name: pods + hostPath: + path: /var/log/pods + type: "" + - name: journal + hostPath: + path: /var/log/journal + type: "" --- apiVersion: v1 kind: ConfigMap @@ -140,7 +151,7 @@ data: batchwait: 10s timeout: 10s positions: - filename: /run/promtail/positions.yaml + filename: /tmp/promtail/positions.yaml server: http_listen_port: 3100 grpc_listen_port: 9095 @@ -409,21 +420,6 @@ data: target_label: __path__ --- apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: lokistack-dev-addons-writer - labels: - app.kubernetes.io/name: promtail - app.kubernetes.io/instance: developer-addons -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: lokistack-dev-addons-writer -subjects: -- kind: ServiceAccount - name: lokistack-dev-addons-promtail ---- -apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: lokistack-dev-addons-reader diff --git a/operator/hack/addons_ocp.yaml b/operator/hack/addons_ocp.yaml index 208a747e69e3f..fde24765b2eab 100644 --- a/operator/hack/addons_ocp.yaml +++ b/operator/hack/addons_ocp.yaml @@ -42,7 +42,7 @@ spec: value: /var/run/secrets/kubernetes.io/serviceaccount/token args: - -c - - while true; do logcli --ca-cert="/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt" query '{job="systemd-journal"}'; sleep 30; done + - while true; do logcli --ca-cert="/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt" query '{namespace="openshift-logging"}'; sleep 30; done securityContext: allowPrivilegeEscalation: false capabilities: @@ -74,12 +74,17 @@ spec: args: - -config.file=/etc/promtail/promtail.yaml - -log.level=info + env: + - 
name: 'HOSTNAME' + valueFrom: + fieldRef: + fieldPath: 'spec.nodeName' terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /etc/promtail name: config - - mountPath: /run/promtail + - mountPath: /tmp/promtail name: run - mountPath: /var/lib/docker/containers name: docker @@ -91,7 +96,9 @@ spec: name: journal readOnly: true securityContext: - allowPrivilegeEscalation: false + privileged: true + runAsNonRoot: false + readOnlyRootFilesystem: true capabilities: drop: - ALL @@ -99,26 +106,26 @@ spec: securityContext: runAsNonRoot: true volumes: - - configMap: + - name: config + configMap: defaultMode: 420 name: lokistack-dev-addons-promtail - name: config - - emptyDir: - medium: "" - sizeLimit: 10G - name: run - - emptyDir: - medium: "" - sizeLimit: 10G - name: docker - - emptyDir: + - name: run + emptyDir: medium: "" - sizeLimit: 10G - name: pods - - emptyDir: - medium: "" - sizeLimit: 10G - name: journal + sizeLimit: 5Gi + - name: docker + hostPath: + path: /var/lib/docker/containers + type: "" + - name: pods + hostPath: + path: /var/log/pods + type: "" + - name: journal + hostPath: + path: /var/log/journal + type: "" --- apiVersion: v1 kind: ConfigMap @@ -140,7 +147,7 @@ data: batchwait: 10s timeout: 10s positions: - filename: /run/promtail/positions.yaml + filename: /tmp/promtail/positions.yaml server: http_listen_port: 3100 grpc_listen_port: 9095 @@ -409,6 +416,20 @@ data: target_label: __path__ --- apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: lokistack-dev-addons-writer +rules: +- apiGroups: + - security.openshift.io + resourceNames: + - privileged + resources: + - securitycontextconstraints + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: lokistack-dev-addons-writer
operator
Addons work in restricted policy (#6564)
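Rendered as the Go structs that this YAML deserializes into (`k8s.io/api/core/v1`), the substantive changes are: promtail runs privileged with a read-only root filesystem, reads node logs through `hostPath` mounts instead of `emptyDir` scratch volumes, and learns its node name via the downward API. This is a hedged illustration, not code from the operator; the helper names are invented:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func ptr[T any](v T) *T { return &v }

// promtailSecurityAndVolumes mirrors the key hunks of the addons manifests.
func promtailSecurityAndVolumes() (*corev1.SecurityContext, []corev1.Volume, []corev1.EnvVar) {
	// Privileged so the pod may read host log paths; root FS stays read-only.
	sc := &corev1.SecurityContext{
		Privileged:             ptr(true),
		RunAsNonRoot:           ptr(false),
		ReadOnlyRootFilesystem: ptr(true),
		Capabilities:           &corev1.Capabilities{Drop: []corev1.Capability{"ALL"}},
	}
	// hostPath mounts replace the former emptyDir volumes for node logs.
	vols := []corev1.Volume{
		{Name: "docker", VolumeSource: corev1.VolumeSource{
			HostPath: &corev1.HostPathVolumeSource{Path: "/var/lib/docker/containers"},
		}},
		{Name: "pods", VolumeSource: corev1.VolumeSource{
			HostPath: &corev1.HostPathVolumeSource{Path: "/var/log/pods"},
		}},
	}
	// HOSTNAME comes from the downward API so promtail can relabel by node.
	env := []corev1.EnvVar{{
		Name: "HOSTNAME",
		ValueFrom: &corev1.EnvVarSource{
			FieldRef: &corev1.ObjectFieldSelector{FieldPath: "spec.nodeName"},
		},
	}}
	return sc, vols, env
}

func main() {
	sc, vols, env := promtailSecurityAndVolumes()
	fmt.Println(*sc.Privileged, len(vols), env[0].Name)
}
```

The matching RBAC hunk grants `use` on the `privileged` SecurityContextConstraint, which is what allows the now-privileged promtail pod to schedule on OpenShift.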
3abb3b116d157ce26ecbb02e6c74b562e31f732d
2024-11-18 22:14:25
renovate[bot]
fix(deps): update module github.com/hashicorp/golang-lru to v2 (#14979)
false
diff --git a/clients/pkg/logentry/stages/timestamp.go b/clients/pkg/logentry/stages/timestamp.go index fb1fb8a27c3b5..3e2ce9d730a0f 100644 --- a/clients/pkg/logentry/stages/timestamp.go +++ b/clients/pkg/logentry/stages/timestamp.go @@ -8,7 +8,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - lru "github.com/hashicorp/golang-lru" + lru "github.com/hashicorp/golang-lru/v2" "github.com/mitchellh/mapstructure" "github.com/prometheus/common/model" @@ -114,9 +114,9 @@ func newTimestampStage(logger log.Logger, config interface{}) (Stage, error) { return nil, err } - var lastKnownTimestamps *lru.Cache + var lastKnownTimestamps *lru.Cache[string, time.Time] if *cfg.ActionOnFailure == TimestampActionOnFailureFudge { - lastKnownTimestamps, err = lru.New(maxLastKnownTimestampsCacheSize) + lastKnownTimestamps, err = lru.New[string, time.Time](maxLastKnownTimestampsCacheSize) if err != nil { return nil, err } @@ -138,7 +138,7 @@ type timestampStage struct { // Stores the last known timestamp for a given "stream id" (guessed, since at this stage // there's no reliable way to know it). - lastKnownTimestamps *lru.Cache + lastKnownTimestamps *lru.Cache[string, time.Time] } // Name implements Stage @@ -222,7 +222,7 @@ func (ts *timestampStage) processActionOnFailureFudge(labels model.LabelSet, t * } // Fudge the timestamp - *t = lastTimestamp.(time.Time).Add(1 * time.Nanosecond) + *t = lastTimestamp.Add(1 * time.Nanosecond) // Store the fudged timestamp, so that a subsequent fudged timestamp will be 1ns after it ts.lastKnownTimestamps.Add(labelsStr, *t) diff --git a/go.mod b/go.mod index bab9fd32ca618..526cd6f53d07d 100644 --- a/go.mod +++ b/go.mod @@ -58,7 +58,7 @@ require ( github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 github.com/hashicorp/consul/api v1.30.0 - github.com/hashicorp/golang-lru v0.6.0 + github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/imdario/mergo v0.3.16 github.com/influxdata/telegraf v1.16.3 github.com/jmespath/go-jmespath v0.4.0 @@ -126,7 +126,6 @@ require ( github.com/gogo/googleapis v1.4.1 github.com/grafana/jsonparser v0.0.0-20241004153430-023329977675 github.com/grafana/loki/pkg/push v0.0.0-20240924133635-758364c7775f - github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/heroku/x v0.4.0 github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db @@ -173,6 +172,7 @@ require ( github.com/go-ole/go-ole v1.2.6 // indirect github.com/goccy/go-json v0.10.3 // indirect github.com/gorilla/handlers v1.5.2 // indirect + github.com/hashicorp/golang-lru v0.6.0 // indirect github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/sys/userns v0.1.0 // indirect diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index e4b92c8f585c4..f776e6778b43d 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -28,7 +28,7 @@ import ( "github.com/grafana/dskit/services" "github.com/grafana/dskit/tenant" "github.com/grafana/dskit/user" - lru "github.com/hashicorp/golang-lru" + lru "github.com/hashicorp/golang-lru/v2" "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" @@ -148,7 +148,7 @@ type Distributor struct { subservicesWatcher *services.FailureWatcher // Per-user rate limiter. 
ingestionRateLimiter *limiter.RateLimiter - labelCache *lru.Cache + labelCache *lru.Cache[string, labelData] // Push failures rate limiter. writeFailuresManager *writefailures.Manager @@ -217,7 +217,7 @@ func New( var servs []services.Service rateLimitStrat := validation.LocalIngestionRateStrategy - labelCache, err := lru.New(maxLabelCacheSize) + labelCache, err := lru.New[string, labelData](maxLabelCacheSize) if err != nil { return nil, err } @@ -1086,8 +1086,7 @@ type labelData struct { func (d *Distributor) parseStreamLabels(vContext validationContext, key string, stream logproto.Stream) (labels.Labels, string, uint64, error) { if val, ok := d.labelCache.Get(key); ok { - labelVal := val.(labelData) - return labelVal.ls, labelVal.ls.String(), labelVal.hash, nil + return val.ls, val.ls.String(), val.hash, nil } ls, err := syntax.ParseLabels(key) diff --git a/pkg/kafka/encoding.go b/pkg/kafka/encoding.go index 65daf59c25e77..15479336de670 100644 --- a/pkg/kafka/encoding.go +++ b/pkg/kafka/encoding.go @@ -9,7 +9,7 @@ import ( "github.com/twmb/franz-go/pkg/kgo" - lru "github.com/hashicorp/golang-lru" + lru "github.com/hashicorp/golang-lru/v2" "github.com/prometheus/prometheus/model/labels" "github.com/grafana/loki/v3/pkg/logproto" @@ -126,11 +126,11 @@ func marshalWriteRequestToRecord(partitionID int32, tenantID string, stream logp // It caches parsed labels for efficiency. type Decoder struct { stream *logproto.Stream - cache *lru.Cache + cache *lru.Cache[string, labels.Labels] } func NewDecoder() (*Decoder, error) { - cache, err := lru.New(5000) // Set LRU size to 5000, adjust as needed + cache, err := lru.New[string, labels.Labels](5000) if err != nil { return nil, fmt.Errorf("failed to create LRU cache: %w", err) } @@ -154,7 +154,7 @@ func (d *Decoder) Decode(data []byte) (logproto.Stream, labels.Labels, error) { var ls labels.Labels if cachedLabels, ok := d.cache.Get(d.stream.Labels); ok { - ls = cachedLabels.(labels.Labels) + ls = cachedLabels } else { var err error ls, err = syntax.ParseLabels(d.stream.Labels) diff --git a/vendor/github.com/hashicorp/golang-lru/.golangci.yml b/vendor/github.com/hashicorp/golang-lru/.golangci.yml deleted file mode 100644 index 49202fc41e645..0000000000000 --- a/vendor/github.com/hashicorp/golang-lru/.golangci.yml +++ /dev/null @@ -1,30 +0,0 @@ -linters: - enable: - - megacheck - - revive - - govet - - unconvert - - megacheck - - gas - - gocyclo - - dupl - - misspell - - unparam - - unused - - typecheck - - ineffassign - - stylecheck - - exportloopref - - gocritic - - nakedret - - gosimple - - prealloc - fast: false - disable-all: true - -issues: - exclude-rules: - - path: _test\.go - linters: - - dupl - exclude-use-default: false diff --git a/vendor/github.com/hashicorp/golang-lru/README.md b/vendor/github.com/hashicorp/golang-lru/README.md deleted file mode 100644 index 063bb16056ef4..0000000000000 --- a/vendor/github.com/hashicorp/golang-lru/README.md +++ /dev/null @@ -1,25 +0,0 @@ -golang-lru -========== - -This provides the `lru` package which implements a fixed-size -thread safe LRU cache. It is based on the cache in Groupcache. 
- -Documentation -============= - -Full docs are available on [Godoc](https://pkg.go.dev/github.com/hashicorp/golang-lru) - -Example -======= - -Using the LRU is very simple: - -```go -l, _ := New(128) -for i := 0; i < 256; i++ { - l.Add(i, nil) -} -if l.Len() != 128 { - panic(fmt.Sprintf("bad len: %v", l.Len())) -} -``` diff --git a/vendor/github.com/hashicorp/golang-lru/arc.go b/vendor/github.com/hashicorp/golang-lru/arc.go deleted file mode 100644 index e396f8428aa3b..0000000000000 --- a/vendor/github.com/hashicorp/golang-lru/arc.go +++ /dev/null @@ -1,256 +0,0 @@ -package lru - -import ( - "sync" - - "github.com/hashicorp/golang-lru/simplelru" -) - -// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC). -// ARC is an enhancement over the standard LRU cache in that tracks both -// frequency and recency of use. This avoids a burst in access to new -// entries from evicting the frequently used older entries. It adds some -// additional tracking overhead to a standard LRU cache, computationally -// it is roughly 2x the cost, and the extra memory overhead is linear -// with the size of the cache. ARC has been patented by IBM, but is -// similar to the TwoQueueCache (2Q) which requires setting parameters. -type ARCCache struct { - size int // Size is the total capacity of the cache - p int // P is the dynamic preference towards T1 or T2 - - t1 simplelru.LRUCache // T1 is the LRU for recently accessed items - b1 simplelru.LRUCache // B1 is the LRU for evictions from t1 - - t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items - b2 simplelru.LRUCache // B2 is the LRU for evictions from t2 - - lock sync.RWMutex -} - -// NewARC creates an ARC of the given size -func NewARC(size int) (*ARCCache, error) { - // Create the sub LRUs - b1, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - b2, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - t1, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - t2, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - - // Initialize the ARC - c := &ARCCache{ - size: size, - p: 0, - t1: t1, - b1: b1, - t2: t2, - b2: b2, - } - return c, nil -} - -// Get looks up a key's value from the cache. -func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) { - c.lock.Lock() - defer c.lock.Unlock() - - // If the value is contained in T1 (recent), then - // promote it to T2 (frequent) - if val, ok := c.t1.Peek(key); ok { - c.t1.Remove(key) - c.t2.Add(key, val) - return val, ok - } - - // Check if the value is contained in T2 (frequent) - if val, ok := c.t2.Get(key); ok { - return val, ok - } - - // No hit - return nil, false -} - -// Add adds a value to the cache. 
-func (c *ARCCache) Add(key, value interface{}) { - c.lock.Lock() - defer c.lock.Unlock() - - // Check if the value is contained in T1 (recent), and potentially - // promote it to frequent T2 - if c.t1.Contains(key) { - c.t1.Remove(key) - c.t2.Add(key, value) - return - } - - // Check if the value is already in T2 (frequent) and update it - if c.t2.Contains(key) { - c.t2.Add(key, value) - return - } - - // Check if this value was recently evicted as part of the - // recently used list - if c.b1.Contains(key) { - // T1 set is too small, increase P appropriately - delta := 1 - b1Len := c.b1.Len() - b2Len := c.b2.Len() - if b2Len > b1Len { - delta = b2Len / b1Len - } - if c.p+delta >= c.size { - c.p = c.size - } else { - c.p += delta - } - - // Potentially need to make room in the cache - if c.t1.Len()+c.t2.Len() >= c.size { - c.replace(false) - } - - // Remove from B1 - c.b1.Remove(key) - - // Add the key to the frequently used list - c.t2.Add(key, value) - return - } - - // Check if this value was recently evicted as part of the - // frequently used list - if c.b2.Contains(key) { - // T2 set is too small, decrease P appropriately - delta := 1 - b1Len := c.b1.Len() - b2Len := c.b2.Len() - if b1Len > b2Len { - delta = b1Len / b2Len - } - if delta >= c.p { - c.p = 0 - } else { - c.p -= delta - } - - // Potentially need to make room in the cache - if c.t1.Len()+c.t2.Len() >= c.size { - c.replace(true) - } - - // Remove from B2 - c.b2.Remove(key) - - // Add the key to the frequently used list - c.t2.Add(key, value) - return - } - - // Potentially need to make room in the cache - if c.t1.Len()+c.t2.Len() >= c.size { - c.replace(false) - } - - // Keep the size of the ghost buffers trim - if c.b1.Len() > c.size-c.p { - c.b1.RemoveOldest() - } - if c.b2.Len() > c.p { - c.b2.RemoveOldest() - } - - // Add to the recently seen list - c.t1.Add(key, value) -} - -// replace is used to adaptively evict from either T1 or T2 -// based on the current learned value of P -func (c *ARCCache) replace(b2ContainsKey bool) { - t1Len := c.t1.Len() - if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) { - k, _, ok := c.t1.RemoveOldest() - if ok { - c.b1.Add(k, nil) - } - } else { - k, _, ok := c.t2.RemoveOldest() - if ok { - c.b2.Add(k, nil) - } - } -} - -// Len returns the number of cached entries -func (c *ARCCache) Len() int { - c.lock.RLock() - defer c.lock.RUnlock() - return c.t1.Len() + c.t2.Len() -} - -// Keys returns all the cached keys -func (c *ARCCache) Keys() []interface{} { - c.lock.RLock() - defer c.lock.RUnlock() - k1 := c.t1.Keys() - k2 := c.t2.Keys() - return append(k1, k2...) -} - -// Remove is used to purge a key from the cache -func (c *ARCCache) Remove(key interface{}) { - c.lock.Lock() - defer c.lock.Unlock() - if c.t1.Remove(key) { - return - } - if c.t2.Remove(key) { - return - } - if c.b1.Remove(key) { - return - } - if c.b2.Remove(key) { - return - } -} - -// Purge is used to clear the cache -func (c *ARCCache) Purge() { - c.lock.Lock() - defer c.lock.Unlock() - c.t1.Purge() - c.t2.Purge() - c.b1.Purge() - c.b2.Purge() -} - -// Contains is used to check if the cache contains a key -// without updating recency or frequency. -func (c *ARCCache) Contains(key interface{}) bool { - c.lock.RLock() - defer c.lock.RUnlock() - return c.t1.Contains(key) || c.t2.Contains(key) -} - -// Peek is used to inspect the cache value of a key -// without updating recency or frequency. 
-func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) { - c.lock.RLock() - defer c.lock.RUnlock() - if val, ok := c.t1.Peek(key); ok { - return val, ok - } - return c.t2.Peek(key) -} diff --git a/vendor/github.com/hashicorp/golang-lru/doc.go b/vendor/github.com/hashicorp/golang-lru/doc.go deleted file mode 100644 index 2547df979d0ba..0000000000000 --- a/vendor/github.com/hashicorp/golang-lru/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -// Package lru provides three different LRU caches of varying sophistication. -// -// Cache is a simple LRU cache. It is based on the -// LRU implementation in groupcache: -// https://github.com/golang/groupcache/tree/master/lru -// -// TwoQueueCache tracks frequently used and recently used entries separately. -// This avoids a burst of accesses from taking out frequently used entries, -// at the cost of about 2x computational overhead and some extra bookkeeping. -// -// ARCCache is an adaptive replacement cache. It tracks recent evictions as -// well as recent usage in both the frequent and recent caches. Its -// computational overhead is comparable to TwoQueueCache, but the memory -// overhead is linear with the size of the cache. -// -// ARC has been patented by IBM, so do not use it if that is problematic for -// your program. -// -// All caches in this package take locks while operating, and are therefore -// thread-safe for consumers. -package lru diff --git a/vendor/github.com/hashicorp/golang-lru/testing.go b/vendor/github.com/hashicorp/golang-lru/testing.go deleted file mode 100644 index 492760782c5ea..0000000000000 --- a/vendor/github.com/hashicorp/golang-lru/testing.go +++ /dev/null @@ -1,16 +0,0 @@ -package lru - -import ( - "crypto/rand" - "math" - "math/big" - "testing" -) - -func getRand(tb testing.TB) int64 { - out, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) - if err != nil { - tb.Fatal(err) - } - return out.Int64() -} diff --git a/vendor/github.com/hashicorp/golang-lru/.gitignore b/vendor/github.com/hashicorp/golang-lru/v2/.gitignore similarity index 100% rename from vendor/github.com/hashicorp/golang-lru/.gitignore rename to vendor/github.com/hashicorp/golang-lru/v2/.gitignore diff --git a/vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml b/vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml new file mode 100644 index 0000000000000..7e7b8a96275a6 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml @@ -0,0 +1,46 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +linters: + fast: false + disable-all: true + enable: + - revive + - megacheck + - govet + - unconvert + - gas + - gocyclo + - dupl + - misspell + - unparam + - unused + - typecheck + - ineffassign + # - stylecheck + - exportloopref + - gocritic + - nakedret + - gosimple + - prealloc + +# golangci-lint configuration file +linters-settings: + revive: + ignore-generated-header: true + severity: warning + rules: + - name: package-comments + severity: warning + disabled: true + - name: exported + severity: warning + disabled: false + arguments: ["checkPrivateReceivers", "disableStutteringCheck"] + +issues: + exclude-use-default: false + exclude-rules: + - path: _test\.go + linters: + - dupl diff --git a/vendor/github.com/hashicorp/golang-lru/2q.go b/vendor/github.com/hashicorp/golang-lru/v2/2q.go similarity index 64% rename from vendor/github.com/hashicorp/golang-lru/2q.go rename to vendor/github.com/hashicorp/golang-lru/v2/2q.go index 15fcad0306e36..8c95252b6f274 100644 --- a/vendor/github.com/hashicorp/golang-lru/2q.go +++ b/vendor/github.com/hashicorp/golang-lru/v2/2q.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package lru import ( - "fmt" + "errors" "sync" - "github.com/hashicorp/golang-lru/simplelru" + "github.com/hashicorp/golang-lru/v2/simplelru" ) const ( @@ -26,33 +29,35 @@ const ( // computationally about 2x the cost, and adds some metadata over // head. The ARCCache is similar, but does not require setting any // parameters. -type TwoQueueCache struct { - size int - recentSize int +type TwoQueueCache[K comparable, V any] struct { + size int + recentSize int + recentRatio float64 + ghostRatio float64 - recent simplelru.LRUCache - frequent simplelru.LRUCache - recentEvict simplelru.LRUCache + recent simplelru.LRUCache[K, V] + frequent simplelru.LRUCache[K, V] + recentEvict simplelru.LRUCache[K, struct{}] lock sync.RWMutex } // New2Q creates a new TwoQueueCache using the default // values for the parameters. -func New2Q(size int) (*TwoQueueCache, error) { - return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries) +func New2Q[K comparable, V any](size int) (*TwoQueueCache[K, V], error) { + return New2QParams[K, V](size, Default2QRecentRatio, Default2QGhostEntries) } // New2QParams creates a new TwoQueueCache using the provided // parameter values. 
-func New2QParams(size int, recentRatio, ghostRatio float64) (*TwoQueueCache, error) { +func New2QParams[K comparable, V any](size int, recentRatio, ghostRatio float64) (*TwoQueueCache[K, V], error) { if size <= 0 { - return nil, fmt.Errorf("invalid size") + return nil, errors.New("invalid size") } if recentRatio < 0.0 || recentRatio > 1.0 { - return nil, fmt.Errorf("invalid recent ratio") + return nil, errors.New("invalid recent ratio") } if ghostRatio < 0.0 || ghostRatio > 1.0 { - return nil, fmt.Errorf("invalid ghost ratio") + return nil, errors.New("invalid ghost ratio") } // Determine the sub-sizes @@ -60,23 +65,25 @@ func New2QParams(size int, recentRatio, ghostRatio float64) (*TwoQueueCache, err evictSize := int(float64(size) * ghostRatio) // Allocate the LRUs - recent, err := simplelru.NewLRU(size, nil) + recent, err := simplelru.NewLRU[K, V](size, nil) if err != nil { return nil, err } - frequent, err := simplelru.NewLRU(size, nil) + frequent, err := simplelru.NewLRU[K, V](size, nil) if err != nil { return nil, err } - recentEvict, err := simplelru.NewLRU(evictSize, nil) + recentEvict, err := simplelru.NewLRU[K, struct{}](evictSize, nil) if err != nil { return nil, err } // Initialize the cache - c := &TwoQueueCache{ + c := &TwoQueueCache[K, V]{ size: size, recentSize: recentSize, + recentRatio: recentRatio, + ghostRatio: ghostRatio, recent: recent, frequent: frequent, recentEvict: recentEvict, @@ -85,7 +92,7 @@ func New2QParams(size int, recentRatio, ghostRatio float64) (*TwoQueueCache, err } // Get looks up a key's value from the cache. -func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) { +func (c *TwoQueueCache[K, V]) Get(key K) (value V, ok bool) { c.lock.Lock() defer c.lock.Unlock() @@ -103,11 +110,11 @@ func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) { } // No hit - return nil, false + return } // Add adds a value to the cache. -func (c *TwoQueueCache) Add(key, value interface{}) { +func (c *TwoQueueCache[K, V]) Add(key K, value V) { c.lock.Lock() defer c.lock.Unlock() @@ -141,7 +148,7 @@ func (c *TwoQueueCache) Add(key, value interface{}) { } // ensureSpace is used to ensure we have space in the cache -func (c *TwoQueueCache) ensureSpace(recentEvict bool) { +func (c *TwoQueueCache[K, V]) ensureSpace(recentEvict bool) { // If we have space, nothing to do recentLen := c.recent.Len() freqLen := c.frequent.Len() @@ -153,7 +160,7 @@ func (c *TwoQueueCache) ensureSpace(recentEvict bool) { // the target, evict from there if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { k, _, _ := c.recent.RemoveOldest() - c.recentEvict.Add(k, nil) + c.recentEvict.Add(k, struct{}{}) return } @@ -162,15 +169,43 @@ func (c *TwoQueueCache) ensureSpace(recentEvict bool) { } // Len returns the number of items in the cache. -func (c *TwoQueueCache) Len() int { +func (c *TwoQueueCache[K, V]) Len() int { c.lock.RLock() defer c.lock.RUnlock() return c.recent.Len() + c.frequent.Len() } +// Resize changes the cache size. 
+func (c *TwoQueueCache[K, V]) Resize(size int) (evicted int) { + c.lock.Lock() + defer c.lock.Unlock() + + // Recalculate the sub-sizes + recentSize := int(float64(size) * c.recentRatio) + evictSize := int(float64(size) * c.ghostRatio) + c.size = size + c.recentSize = recentSize + + // ensureSpace + diff := c.recent.Len() + c.frequent.Len() - size + if diff < 0 { + diff = 0 + } + for i := 0; i < diff; i++ { + c.ensureSpace(true) + } + + // Reallocate the LRUs + c.recent.Resize(size) + c.frequent.Resize(size) + c.recentEvict.Resize(evictSize) + + return diff +} + // Keys returns a slice of the keys in the cache. // The frequently used keys are first in the returned slice. -func (c *TwoQueueCache) Keys() []interface{} { +func (c *TwoQueueCache[K, V]) Keys() []K { c.lock.RLock() defer c.lock.RUnlock() k1 := c.frequent.Keys() @@ -178,8 +213,18 @@ func (c *TwoQueueCache) Keys() []interface{} { return append(k1, k2...) } +// Values returns a slice of the values in the cache. +// The frequently used values are first in the returned slice. +func (c *TwoQueueCache[K, V]) Values() []V { + c.lock.RLock() + defer c.lock.RUnlock() + v1 := c.frequent.Values() + v2 := c.recent.Values() + return append(v1, v2...) +} + // Remove removes the provided key from the cache. -func (c *TwoQueueCache) Remove(key interface{}) { +func (c *TwoQueueCache[K, V]) Remove(key K) { c.lock.Lock() defer c.lock.Unlock() if c.frequent.Remove(key) { @@ -194,7 +239,7 @@ func (c *TwoQueueCache) Remove(key interface{}) { } // Purge is used to completely clear the cache. -func (c *TwoQueueCache) Purge() { +func (c *TwoQueueCache[K, V]) Purge() { c.lock.Lock() defer c.lock.Unlock() c.recent.Purge() @@ -204,7 +249,7 @@ func (c *TwoQueueCache) Purge() { // Contains is used to check if the cache contains a key // without updating recency or frequency. -func (c *TwoQueueCache) Contains(key interface{}) bool { +func (c *TwoQueueCache[K, V]) Contains(key K) bool { c.lock.RLock() defer c.lock.RUnlock() return c.frequent.Contains(key) || c.recent.Contains(key) @@ -212,7 +257,7 @@ func (c *TwoQueueCache) Contains(key interface{}) bool { // Peek is used to inspect the cache value of a key // without updating recency or frequency. -func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) { +func (c *TwoQueueCache[K, V]) Peek(key K) (value V, ok bool) { c.lock.RLock() defer c.lock.RUnlock() if val, ok := c.frequent.Peek(key); ok { diff --git a/vendor/github.com/hashicorp/golang-lru/v2/README.md b/vendor/github.com/hashicorp/golang-lru/v2/README.md new file mode 100644 index 0000000000000..a942eb5397006 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/README.md @@ -0,0 +1,79 @@ +golang-lru +========== + +This provides the `lru` package which implements a fixed-size +thread safe LRU cache. It is based on the cache in Groupcache. 
+ +Documentation +============= + +Full docs are available on [Go Packages](https://pkg.go.dev/github.com/hashicorp/golang-lru/v2) + +LRU cache example +================= + +```go +package main + +import ( + "fmt" + "github.com/hashicorp/golang-lru/v2" +) + +func main() { + l, _ := lru.New[int, any](128) + for i := 0; i < 256; i++ { + l.Add(i, nil) + } + if l.Len() != 128 { + panic(fmt.Sprintf("bad len: %v", l.Len())) + } +} +``` + +Expirable LRU cache example +=========================== + +```go +package main + +import ( + "fmt" + "time" + + "github.com/hashicorp/golang-lru/v2/expirable" +) + +func main() { + // make cache with 10ms TTL and 5 max keys + cache := expirable.NewLRU[string, string](5, nil, time.Millisecond*10) + + + // set value under key1. + cache.Add("key1", "val1") + + // get value under key1 + r, ok := cache.Get("key1") + + // check for OK value + if ok { + fmt.Printf("value before expiration is found: %v, value: %q\n", ok, r) + } + + // wait for cache to expire + time.Sleep(time.Millisecond * 12) + + // get value under key1 after key expiration + r, ok = cache.Get("key1") + fmt.Printf("value after expiration is found: %v, value: %q\n", ok, r) + + // set value under key2, would evict old entry because it is already expired. + cache.Add("key2", "val2") + + fmt.Printf("Cache len: %d\n", cache.Len()) + // Output: + // value before expiration is found: true, value: "val1" + // value after expiration is found: false, value: "" + // Cache len: 1 +} +``` diff --git a/vendor/github.com/hashicorp/golang-lru/v2/doc.go b/vendor/github.com/hashicorp/golang-lru/v2/doc.go new file mode 100644 index 0000000000000..24107ee0edeef --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/doc.go @@ -0,0 +1,24 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package lru provides three different LRU caches of varying sophistication. +// +// Cache is a simple LRU cache. It is based on the LRU implementation in +// groupcache: https://github.com/golang/groupcache/tree/master/lru +// +// TwoQueueCache tracks frequently used and recently used entries separately. +// This avoids a burst of accesses from taking out frequently used entries, at +// the cost of about 2x computational overhead and some extra bookkeeping. +// +// ARCCache is an adaptive replacement cache. It tracks recent evictions as well +// as recent usage in both the frequent and recent caches. Its computational +// overhead is comparable to TwoQueueCache, but the memory overhead is linear +// with the size of the cache. +// +// ARC has been patented by IBM, so do not use it if that is problematic for +// your program. For this reason, it is in a separate go module contained within +// this repository. +// +// All caches in this package take locks while operating, and are therefore +// thread-safe for consumers. +package lru diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/v2/lru.go similarity index 72% rename from vendor/github.com/hashicorp/golang-lru/lru.go rename to vendor/github.com/hashicorp/golang-lru/v2/lru.go index 895d8e3ea0c14..a2655f1f31093 100644 --- a/vendor/github.com/hashicorp/golang-lru/lru.go +++ b/vendor/github.com/hashicorp/golang-lru/v2/lru.go @@ -1,9 +1,12 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package lru import ( "sync" - "github.com/hashicorp/golang-lru/simplelru" + "github.com/hashicorp/golang-lru/v2/simplelru" ) const ( @@ -12,23 +15,24 @@ const ( ) // Cache is a thread-safe fixed size LRU cache. -type Cache struct { - lru *simplelru.LRU - evictedKeys, evictedVals []interface{} - onEvictedCB func(k, v interface{}) - lock sync.RWMutex +type Cache[K comparable, V any] struct { + lru *simplelru.LRU[K, V] + evictedKeys []K + evictedVals []V + onEvictedCB func(k K, v V) + lock sync.RWMutex } // New creates an LRU of the given size. -func New(size int) (*Cache, error) { - return NewWithEvict(size, nil) +func New[K comparable, V any](size int) (*Cache[K, V], error) { + return NewWithEvict[K, V](size, nil) } // NewWithEvict constructs a fixed size cache with the given eviction // callback. -func NewWithEvict(size int, onEvicted func(key, value interface{})) (c *Cache, err error) { +func NewWithEvict[K comparable, V any](size int, onEvicted func(key K, value V)) (c *Cache[K, V], err error) { // create a cache with default settings - c = &Cache{ + c = &Cache[K, V]{ onEvictedCB: onEvicted, } if onEvicted != nil { @@ -39,21 +43,22 @@ func NewWithEvict(size int, onEvicted func(key, value interface{})) (c *Cache, e return } -func (c *Cache) initEvictBuffers() { - c.evictedKeys = make([]interface{}, 0, DefaultEvictedBufferSize) - c.evictedVals = make([]interface{}, 0, DefaultEvictedBufferSize) +func (c *Cache[K, V]) initEvictBuffers() { + c.evictedKeys = make([]K, 0, DefaultEvictedBufferSize) + c.evictedVals = make([]V, 0, DefaultEvictedBufferSize) } // onEvicted save evicted key/val and sent in externally registered callback // outside of critical section -func (c *Cache) onEvicted(k, v interface{}) { +func (c *Cache[K, V]) onEvicted(k K, v V) { c.evictedKeys = append(c.evictedKeys, k) c.evictedVals = append(c.evictedVals, v) } // Purge is used to completely clear the cache. -func (c *Cache) Purge() { - var ks, vs []interface{} +func (c *Cache[K, V]) Purge() { + var ks []K + var vs []V c.lock.Lock() c.lru.Purge() if c.onEvictedCB != nil && len(c.evictedKeys) > 0 { @@ -70,8 +75,9 @@ func (c *Cache) Purge() { } // Add adds a value to the cache. Returns true if an eviction occurred. -func (c *Cache) Add(key, value interface{}) (evicted bool) { - var k, v interface{} +func (c *Cache[K, V]) Add(key K, value V) (evicted bool) { + var k K + var v V c.lock.Lock() evicted = c.lru.Add(key, value) if c.onEvictedCB != nil && evicted { @@ -86,7 +92,7 @@ func (c *Cache) Add(key, value interface{}) (evicted bool) { } // Get looks up a key's value from the cache. -func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { +func (c *Cache[K, V]) Get(key K) (value V, ok bool) { c.lock.Lock() value, ok = c.lru.Get(key) c.lock.Unlock() @@ -95,7 +101,7 @@ func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { // Contains checks if a key is in the cache, without updating the // recent-ness or deleting it for being stale. -func (c *Cache) Contains(key interface{}) bool { +func (c *Cache[K, V]) Contains(key K) bool { c.lock.RLock() containKey := c.lru.Contains(key) c.lock.RUnlock() @@ -104,7 +110,7 @@ func (c *Cache) Contains(key interface{}) bool { // Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. 
-func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { +func (c *Cache[K, V]) Peek(key K) (value V, ok bool) { c.lock.RLock() value, ok = c.lru.Peek(key) c.lock.RUnlock() @@ -114,8 +120,9 @@ func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { // ContainsOrAdd checks if a key is in the cache without updating the // recent-ness or deleting it for being stale, and if not, adds the value. // Returns whether found and whether an eviction occurred. -func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { - var k, v interface{} +func (c *Cache[K, V]) ContainsOrAdd(key K, value V) (ok, evicted bool) { + var k K + var v V c.lock.Lock() if c.lru.Contains(key) { c.lock.Unlock() @@ -136,8 +143,9 @@ func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { // PeekOrAdd checks if a key is in the cache without updating the // recent-ness or deleting it for being stale, and if not, adds the value. // Returns whether found and whether an eviction occurred. -func (c *Cache) PeekOrAdd(key, value interface{}) (previous interface{}, ok, evicted bool) { - var k, v interface{} +func (c *Cache[K, V]) PeekOrAdd(key K, value V) (previous V, ok, evicted bool) { + var k K + var v V c.lock.Lock() previous, ok = c.lru.Peek(key) if ok { @@ -153,12 +161,13 @@ func (c *Cache) PeekOrAdd(key, value interface{}) (previous interface{}, ok, evi if c.onEvictedCB != nil && evicted { c.onEvictedCB(k, v) } - return nil, false, evicted + return } // Remove removes the provided key from the cache. -func (c *Cache) Remove(key interface{}) (present bool) { - var k, v interface{} +func (c *Cache[K, V]) Remove(key K) (present bool) { + var k K + var v V c.lock.Lock() present = c.lru.Remove(key) if c.onEvictedCB != nil && present { @@ -173,8 +182,9 @@ func (c *Cache) Remove(key interface{}) (present bool) { } // Resize changes the cache size. -func (c *Cache) Resize(size int) (evicted int) { - var ks, vs []interface{} +func (c *Cache[K, V]) Resize(size int) (evicted int) { + var ks []K + var vs []V c.lock.Lock() evicted = c.lru.Resize(size) if c.onEvictedCB != nil && evicted > 0 { @@ -191,8 +201,9 @@ func (c *Cache) Resize(size int) (evicted int) { } // RemoveOldest removes the oldest item from the cache. -func (c *Cache) RemoveOldest() (key, value interface{}, ok bool) { - var k, v interface{} +func (c *Cache[K, V]) RemoveOldest() (key K, value V, ok bool) { + var k K + var v V c.lock.Lock() key, value, ok = c.lru.RemoveOldest() if c.onEvictedCB != nil && ok { @@ -207,7 +218,7 @@ func (c *Cache) RemoveOldest() (key, value interface{}, ok bool) { } // GetOldest returns the oldest entry -func (c *Cache) GetOldest() (key, value interface{}, ok bool) { +func (c *Cache[K, V]) GetOldest() (key K, value V, ok bool) { c.lock.RLock() key, value, ok = c.lru.GetOldest() c.lock.RUnlock() @@ -215,15 +226,23 @@ func (c *Cache) GetOldest() (key, value interface{}, ok bool) { } // Keys returns a slice of the keys in the cache, from oldest to newest. -func (c *Cache) Keys() []interface{} { +func (c *Cache[K, V]) Keys() []K { c.lock.RLock() keys := c.lru.Keys() c.lock.RUnlock() return keys } +// Values returns a slice of the values in the cache, from oldest to newest. +func (c *Cache[K, V]) Values() []V { + c.lock.RLock() + values := c.lru.Values() + c.lock.RUnlock() + return values +} + // Len returns the number of items in the cache. 
-func (c *Cache) Len() int { +func (c *Cache[K, V]) Len() int { c.lock.RLock() length := c.lru.Len() c.lock.RUnlock() diff --git a/vendor/modules.txt b/vendor/modules.txt index 01b94fbc4c175..cb3dc0fa401b3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1119,10 +1119,10 @@ github.com/hashicorp/go-sockaddr github.com/hashicorp/go-uuid # github.com/hashicorp/golang-lru v0.6.0 ## explicit; go 1.12 -github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru # github.com/hashicorp/golang-lru/v2 v2.0.7 ## explicit; go 1.18 +github.com/hashicorp/golang-lru/v2 github.com/hashicorp/golang-lru/v2/internal github.com/hashicorp/golang-lru/v2/simplelru # github.com/hashicorp/memberlist v0.5.0 => github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe
fix
update module github.com/hashicorp/golang-lru to v2 (#14979)
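The commit above is a mechanical migration from `github.com/hashicorp/golang-lru` to the generic `/v2` API: every `lru.New(size)` becomes `lru.New[K, V](size)`, and the `interface{}` type assertions on `Get` results disappear. A minimal sketch of the pattern, reusing the same key/value types as the timestamp stage (label string to `time.Time`):

```go
package main

import (
	"fmt"
	"time"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	// v2 caches are parameterized by key and value type.
	cache, err := lru.New[string, time.Time](1024)
	if err != nil {
		panic(err)
	}

	cache.Add(`{job="loki"}`, time.Unix(0, 0))

	// Get returns a typed value, so the v1-style
	// lastTimestamp.(time.Time) assertion is no longer needed.
	if last, ok := cache.Get(`{job="loki"}`); ok {
		fmt.Println(last.Add(1 * time.Nanosecond))
	}
}
```

The same shape applies to the distributor's `labelCache` and the Kafka `Decoder` cache in the diff: the callers shrink by one line each because the type assertion moves into the cache's type parameters.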
2327789b5506d0ccc00d931195da17a2d47bf236
2023-10-27 12:17:35
renovate[bot]
fix(deps): update github.com/grafana/gomemcache digest to 6947259 (main) (#10836)
false
diff --git a/go.mod b/go.mod index fd6104c1c40bb..d7dfb90347211 100644 --- a/go.mod +++ b/go.mod @@ -51,7 +51,7 @@ require ( github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 github.com/grafana/dskit v0.0.0-20231017083947-7b512eb54d47 github.com/grafana/go-gelf/v2 v2.0.1 - github.com/grafana/gomemcache v0.0.0-20230914135007-70d78eaabfe1 + github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd github.com/grafana/tail v0.0.0-20230510142333-77b18831edf0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 diff --git a/go.sum b/go.sum index 792b91d419e70..7bf146121d376 100644 --- a/go.sum +++ b/go.sum @@ -987,8 +987,8 @@ github.com/grafana/go-gelf/v2 v2.0.1 h1:BOChP0h/jLeD+7F9mL7tq10xVkDG15he3T1zHuQa github.com/grafana/go-gelf/v2 v2.0.1/go.mod h1:lexHie0xzYGwCgiRGcvZ723bSNyNI8ZRD4s0CLobh90= github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 h1:xLuzPoOzdfNb/RF/IENCw+oLVdZB4G21VPhkHBgwSHY= github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85/go.mod h1:crI9WX6p0IhrqB+DqIUHulRW853PaNFf7o4UprV//3I= -github.com/grafana/gomemcache v0.0.0-20230914135007-70d78eaabfe1 h1:MLYY2R60/74hfYl5vRRmC2VDo0Yuql1QQ1ig8hnvgSI= -github.com/grafana/gomemcache v0.0.0-20230914135007-70d78eaabfe1/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= +github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 h1:/of8Z8taCPftShATouOrBVy6GaTTjgQd/VfNiZp/VXQ= +github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= diff --git a/vendor/github.com/grafana/gomemcache/memcache/memcache.go b/vendor/github.com/grafana/gomemcache/memcache/memcache.go index f6b9cfdbf4b12..67288a12fb78a 100644 --- a/vendor/github.com/grafana/gomemcache/memcache/memcache.go +++ b/vendor/github.com/grafana/gomemcache/memcache/memcache.go @@ -48,7 +48,7 @@ var ( // CompareAndSwap) failed because the condition was not satisfied. ErrNotStored = errors.New("memcache: item not stored") - // ErrServer means that a server error occurred. + // ErrServerError means that a server error occurred. ErrServerError = errors.New("memcache: server error") // ErrNoStats means that no statistics were available. @@ -175,6 +175,14 @@ type Client struct { // be set to a number higher than your peak parallel requests. MaxIdleConns int + // WriteBufferSizeBytes specifies the size of the write buffer (in bytes). The buffer + // is allocated for each connection. If <= 0, the default value of 4KB will be used. + WriteBufferSizeBytes int + + // ReadBufferSizeBytes specifies the size of the read buffer (in bytes). The buffer + // is allocated for each connection. If <= 0, the default value of 4KB will be used. + ReadBufferSizeBytes int + // recentlyUsedConnsThreshold is the default grace period given to an // idle connection to consider it "recently used". Recently used connections // are never closed even if idle. 
@@ -402,6 +410,11 @@ func (c *Client) dial(addr net.Addr) (net.Conn, error) { } func (c *Client) getConn(addr net.Addr) (*conn, error) { + var ( + writer *bufio.Writer + reader *bufio.Reader + ) + cn, ok := c.getFreeConn(addr) if ok { cn.extendDeadline() @@ -411,17 +424,32 @@ func (c *Client) getConn(addr net.Addr) (*conn, error) { if err != nil { return nil, err } + + // Init buffered writer. + if c.WriteBufferSizeBytes > 0 { + writer = bufio.NewWriterSize(nc, c.WriteBufferSizeBytes) + } else { + writer = bufio.NewWriter(nc) + } + + // Init buffered reader. + if c.ReadBufferSizeBytes > 0 { + reader = bufio.NewReaderSize(nc, c.ReadBufferSizeBytes) + } else { + reader = bufio.NewReader(nc) + } + cn = &conn{ nc: nc, addr: addr, - rw: bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)), + rw: bufio.NewReadWriter(reader, writer), c: c, } cn.extendDeadline() return cn, nil } -func (c *Client) onItem(item *Item, operation string, fn func(*Client, *bufio.ReadWriter, *Item) error) error { +func (c *Client) onItem(item *Item, fn func(*Client, *bufio.ReadWriter, *Item) error) error { addr, err := c.selector.PickServer(item.Key) if err != nil { return err @@ -706,7 +734,7 @@ func cut(s string, sep byte) (before, after string, found bool) { // Set writes the given item, unconditionally. func (c *Client) Set(item *Item) error { - return c.onItem(item, "set", (*Client).set) + return c.onItem(item, (*Client).set) } func (c *Client) set(rw *bufio.ReadWriter, item *Item) error { @@ -716,7 +744,7 @@ func (c *Client) set(rw *bufio.ReadWriter, item *Item) error { // Add writes the given item, if no value already exists for its // key. ErrNotStored is returned if that condition is not met. func (c *Client) Add(item *Item) error { - return c.onItem(item, "add", (*Client).add) + return c.onItem(item, (*Client).add) } func (c *Client) add(rw *bufio.ReadWriter, item *Item) error { @@ -726,7 +754,7 @@ func (c *Client) add(rw *bufio.ReadWriter, item *Item) error { // Replace writes the given item, but only if the server *does* // already hold data for this key func (c *Client) Replace(item *Item) error { - return c.onItem(item, "replace", (*Client).replace) + return c.onItem(item, (*Client).replace) } func (c *Client) replace(rw *bufio.ReadWriter, item *Item) error { @@ -741,7 +769,7 @@ func (c *Client) replace(rw *bufio.ReadWriter, item *Item) error { // calls. ErrNotStored is returned if the value was evicted in between // the calls. func (c *Client) CompareAndSwap(item *Item) error { - return c.onItem(item, "cas", (*Client).cas) + return c.onItem(item, (*Client).cas) } func (c *Client) cas(rw *bufio.ReadWriter, item *Item) error { diff --git a/vendor/modules.txt b/vendor/modules.txt index cafc9755c6684..8b26e541da528 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -900,7 +900,7 @@ github.com/grafana/dskit/user # github.com/grafana/go-gelf/v2 v2.0.1 ## explicit; go 1.17 github.com/grafana/go-gelf/v2/gelf -# github.com/grafana/gomemcache v0.0.0-20230914135007-70d78eaabfe1 +# github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 ## explicit; go 1.18 github.com/grafana/gomemcache/memcache # github.com/grafana/loki/pkg/push v0.0.0-20231023154132-0a7737e7c7eb => ./pkg/push
fix
update github.com/grafana/gomemcache digest to 6947259 (main) (#10836)
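Beyond the digest bump, the vendored change adds two knobs to `memcache.Client` — `WriteBufferSizeBytes` and `ReadBufferSizeBytes` — which size the per-connection `bufio` buffers, falling back to bufio's 4KB default when a value is <= 0. A minimal sketch of wiring them up, assuming a memcached instance at 127.0.0.1:11211:

```go
package main

import "github.com/grafana/gomemcache/memcache"

func main() {
	client := memcache.New("127.0.0.1:11211")

	// Per-connection buffer sizes added in this digest; values <= 0
	// keep bufio's 4KB default.
	client.WriteBufferSizeBytes = 64 << 10 // 64 KiB write buffer
	client.ReadBufferSizeBytes = 64 << 10  // 64 KiB read buffer

	// Larger buffers reduce syscalls when items routinely exceed 4KB.
	_ = client.Set(&memcache.Item{Key: "chunk", Value: []byte("payload")})
}
```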
2cb3b3a14f65b561510d26d5ecf6b0d246c9d0bc
2025-03-05 02:52:09
renovate[bot]
fix(deps): update module github.com/aws/aws-sdk-go-v2/config to v1.29.9 (main) (#16542)
false
diff --git a/tools/lambda-promtail/go.mod b/tools/lambda-promtail/go.mod index 328bdaf01db84..608f17a6c6953 100644 --- a/tools/lambda-promtail/go.mod +++ b/tools/lambda-promtail/go.mod @@ -7,7 +7,7 @@ toolchain go1.24.0 require ( github.com/aws/aws-lambda-go v1.47.0 github.com/aws/aws-sdk-go-v2 v1.36.3 - github.com/aws/aws-sdk-go-v2/config v1.29.8 + github.com/aws/aws-sdk-go-v2/config v1.29.9 github.com/aws/aws-sdk-go-v2/service/s3 v1.78.0 github.com/go-kit/log v0.2.1 github.com/gogo/protobuf v1.3.2 @@ -29,7 +29,7 @@ require ( github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.61 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.62 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect @@ -39,9 +39,9 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.2 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.25.0 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.33.16 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.25.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 // indirect github.com/aws/smithy-go v1.22.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500 // indirect diff --git a/tools/lambda-promtail/go.sum b/tools/lambda-promtail/go.sum index 0c7e03ad21a02..d91f42a10c2cc 100644 --- a/tools/lambda-promtail/go.sum +++ b/tools/lambda-promtail/go.sum @@ -59,10 +59,10 @@ github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38y github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14= -github.com/aws/aws-sdk-go-v2/config v1.29.8 h1:RpwAfYcV2lr/yRc4lWhUM9JRPQqKgKWmou3LV7UfWP4= -github.com/aws/aws-sdk-go-v2/config v1.29.8/go.mod h1:t+G7Fq1OcO8cXTPPXzxQSnj/5Xzdc9jAAD3Xrn9/Mgo= -github.com/aws/aws-sdk-go-v2/credentials v1.17.61 h1:Hd/uX6Wo2iUW1JWII+rmyCD7MMhOe7ALwQXN6sKDd1o= -github.com/aws/aws-sdk-go-v2/credentials v1.17.61/go.mod h1:L7vaLkwHY1qgW0gG1zG0z/X0sQ5tpIY5iI13+j3qI80= +github.com/aws/aws-sdk-go-v2/config v1.29.9 h1:Kg+fAYNaJeGXp1vmjtidss8O2uXIsXwaRqsQJKXVr+0= +github.com/aws/aws-sdk-go-v2/config v1.29.9/go.mod h1:oU3jj2O53kgOU4TXq/yipt6ryiooYjlkqqVaZk7gY/U= +github.com/aws/aws-sdk-go-v2/credentials v1.17.62 h1:fvtQY3zFzYJ9CfixuAQ96IxDrBajbBWGqjNTCa79ocU= +github.com/aws/aws-sdk-go-v2/credentials v1.17.62/go.mod h1:ElETBxIQqcxej++Cs8GyPBbgMys5DgQPTwo7cUPDKt8= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 
h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q= @@ -83,12 +83,12 @@ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 h1:moLQUoVq91Liq github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA= github.com/aws/aws-sdk-go-v2/service/s3 v1.78.0 h1:EBm8lXevBWe+kK9VOU/IBeOI189WPRwPUc3LvJK9GOs= github.com/aws/aws-sdk-go-v2/service/s3 v1.78.0/go.mod h1:4qzsZSzB/KiX2EzDjs9D7A8rI/WGJxZceVJIHqtJjIU= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.0 h1:2U9sF8nKy7UgyEeLiZTRg6ShBS22z8UnYpV6aRFL0is= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.0/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.0 h1:wjAdc85cXdQR5uLx5FwWvGIHm4OPJhTyzUHU8craXtE= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.0/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.16 h1:BHEK2Q/7CMRMCb3nySi/w8UbIcPhKvYP5s1xf8/izn0= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.16/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.1 h1:8JdC7Gr9NROg1Rusk25IcZeTO59zLxsKgE0gkh5O6h0= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.1/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1 h1:KwuLovgQPcdjNMfFt9OhUd9a2OwcOKhxfvF4glTzLuA= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 h1:PZV5W8yk4OtH1JAuhV2PXwwO9v5G5Aoj+eMCn4T+1Kc= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.17/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4= github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
fix
update module github.com/aws/aws-sdk-go-v2/config to v1.29.9 (main) (#16542)
65645ea8e65cb1ff5095f9c7791e968b2ec410f6
2022-07-12 16:59:51
Mohamed-Amine Bouqsimi
operator: Add support for tail TLS encryption (#6663)
false
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md index 30ed43d79ed4e..a95cf2d0e754f 100644 --- a/operator/CHANGELOG.md +++ b/operator/CHANGELOG.md @@ -1,6 +1,6 @@ ## Main -- [6646](https://github.com/grafana/loki/pull/6646) **periklis**: Update Loki operand to v2.6.0 +- [6663](https://github.com/grafana/loki/pull/6663) **aminesnow**: Generalize live tail fix to all clusters using TLS - [6443](https://github.com/grafana/loki/pull/6443) **aminesnow**: Fix live tail of logs not working on OpenShift-based clusters - [6646](https://github.com/grafana/loki/pull/6646) **periklis**: Update Loki operand to v2.6.0 - [6594](https://github.com/grafana/loki/pull/6594) **xperimental**: Disable client certificate authentication on gateway diff --git a/operator/internal/manifests/config.go b/operator/internal/manifests/config.go index 6405564a59881..25f6f9e62141f 100644 --- a/operator/internal/manifests/config.go +++ b/operator/internal/manifests/config.go @@ -69,6 +69,11 @@ func ConfigOptions(opt Options) config.Options { } } + protocol := "http" + if opt.Gates.HTTPEncryption { + protocol = "https" + } + return config.Options{ Stack: opt.Stack, Namespace: opt.Namespace, @@ -82,8 +87,9 @@ func ConfigOptions(opt Options) config.Options { Port: gossipPort, }, Querier: config.Address{ - FQDN: fqdn(NewQuerierHTTPService(opt).GetName(), opt.Namespace), - Port: httpPort, + Protocol: protocol, + FQDN: fqdn(NewQuerierHTTPService(opt).GetName(), opt.Namespace), + Port: httpPort, }, IndexGateway: config.Address{ FQDN: fqdn(NewIndexGatewayGRPCService(opt).GetName(), opt.Namespace), diff --git a/operator/internal/manifests/internal/config/build_test.go b/operator/internal/manifests/internal/config/build_test.go index 1a98943f98951..94353f5394297 100644 --- a/operator/internal/manifests/internal/config/build_test.go +++ b/operator/internal/manifests/internal/config/build_test.go @@ -198,8 +198,9 @@ overrides: Port: 7946, }, Querier: Address{ - FQDN: "loki-querier-http-lokistack-dev.default.svc.cluster.local", - Port: 3100, + Protocol: "http", + FQDN: "loki-querier-http-lokistack-dev.default.svc.cluster.local", + Port: 3100, }, IndexGateway: Address{ FQDN: "loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local", @@ -444,8 +445,9 @@ overrides: Port: 7946, }, Querier: Address{ - FQDN: "loki-querier-http-lokistack-dev.default.svc.cluster.local", - Port: 3100, + Protocol: "http", + FQDN: "loki-querier-http-lokistack-dev.default.svc.cluster.local", + Port: 3100, }, IndexGateway: Address{ FQDN: "loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local", @@ -513,8 +515,9 @@ func TestBuild_ConfigAndRuntimeConfig_CreateLokiConfigFailed(t *testing.T) { Port: 7946, }, Querier: Address{ - FQDN: "loki-querier-http-lokistack-dev.default.svc.cluster.local", - Port: 3100, + Protocol: "http", + FQDN: "loki-querier-http-lokistack-dev.default.svc.cluster.local", + Port: 3100, }, IndexGateway: Address{ FQDN: "loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local", @@ -795,8 +798,9 @@ overrides: Port: 7946, }, Querier: Address{ - FQDN: "loki-querier-http-lokistack-dev.default.svc.cluster.local", - Port: 3100, + Protocol: "http", + FQDN: "loki-querier-http-lokistack-dev.default.svc.cluster.local", + Port: 3100, }, IndexGateway: Address{ FQDN: "loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local", @@ -1125,8 +1129,9 @@ overrides: Port: 7946, }, Querier: Address{ - FQDN: "loki-querier-http-lokistack-dev.default.svc.cluster.local", - Port: 3100, + Protocol: "http", + FQDN: 
"loki-querier-http-lokistack-dev.default.svc.cluster.local", + Port: 3100, }, IndexGateway: Address{ FQDN: "loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local", @@ -1469,8 +1474,9 @@ overrides: Port: 7946, }, Querier: Address{ - FQDN: "loki-querier-http-lokistack-dev.default.svc.cluster.local", - Port: 3100, + Protocol: "http", + FQDN: "loki-querier-http-lokistack-dev.default.svc.cluster.local", + Port: 3100, }, IndexGateway: Address{ FQDN: "loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local", diff --git a/operator/internal/manifests/internal/config/loki-config.yaml b/operator/internal/manifests/internal/config/loki-config.yaml index 1b15added84ee..51a7ce3c914d0 100644 --- a/operator/internal/manifests/internal/config/loki-config.yaml +++ b/operator/internal/manifests/internal/config/loki-config.yaml @@ -48,7 +48,7 @@ compactor: compaction_interval: 2h working_directory: {{ .StorageDirectory }}/compactor frontend: - tail_proxy_url: http://{{ .Querier.FQDN }}:{{ .Querier.Port }} + tail_proxy_url: {{ .Querier.Protocol }}://{{ .Querier.FQDN }}:{{ .Querier.Port }} compress_responses: true max_outstanding_per_tenant: 256 log_queries_longer_than: 5s diff --git a/operator/internal/manifests/internal/config/options.go b/operator/internal/manifests/internal/config/options.go index ef07ae5103d7a..3102e4628951d 100644 --- a/operator/internal/manifests/internal/config/options.go +++ b/operator/internal/manifests/internal/config/options.go @@ -30,6 +30,8 @@ type Options struct { // Address FQDN and port for a k8s service. type Address struct { + // Protocol is optional + Protocol string // FQDN is required FQDN string // Port is required diff --git a/operator/internal/manifests/query-frontend.go b/operator/internal/manifests/query-frontend.go index d887fa0d58e47..8b2b0e668c154 100644 --- a/operator/internal/manifests/query-frontend.go +++ b/operator/internal/manifests/query-frontend.go @@ -4,9 +4,9 @@ import ( "fmt" "path" - lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" + "github.com/ViaQ/logerr/v2/kverrors" "github.com/grafana/loki/operator/internal/manifests/internal/config" - "github.com/grafana/loki/operator/internal/manifests/openshift" + "github.com/imdario/mergo" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -31,13 +31,6 @@ func BuildQueryFrontend(opts Options) ([]client.Object, error) { } } - if opts.Stack.Tenants != nil { - mode := opts.Stack.Tenants.Mode - if err := configureQueryFrontendDeploymentForMode(deployment, mode, &opts); err != nil { - return nil, err - } - } - return []client.Object{ deployment, NewQueryFrontendGRPCService(opts), @@ -219,6 +212,11 @@ func NewQueryFrontendHTTPService(opts Options) *corev1.Service { func configureQueryFrontendHTTPServicePKI(deployment *appsv1.Deployment, stackName string) error { serviceName := serviceNameQueryFrontendHTTP(stackName) + caBundleName := signingCABundleName(stackName) + + if err := configureTailCA(deployment, lokiFrontendContainerName, caBundleName, caBundleDir, caFile); err != nil { + return err + } return configureHTTPServicePKI(&deployment.Spec.Template.Spec, serviceName) } @@ -227,18 +225,54 @@ func configureQueryFrontendGRPCServicePKI(deployment *appsv1.Deployment, stackNa return configureGRPCServicePKI(&deployment.Spec.Template.Spec, serviceName) } -func configureQueryFrontendDeploymentForMode(deployment *appsv1.Deployment, mode lokiv1.ModeType, opts *Options) error { - switch mode { - case lokiv1.Static, lokiv1.Dynamic: - return nil // 
nothing to configure - case lokiv1.OpenshiftLogging: - url := fmt.Sprintf("https://%s:%d", fqdn(serviceNameQuerierHTTP(opts.Name), opts.Namespace), httpPort) - caBundleName := signingCABundleName(opts.Name) - - if opts.Gates.ServiceMonitorTLSEndpoints { - return openshift.ConfigureQueryFrontendDeployment(deployment, url, lokiFrontendContainerName, caBundleName, caBundleDir, caFile) +// ConfigureQueryFrontendDeployment configures CA certificate when TLS is enabled. +func configureTailCA(d *appsv1.Deployment, + qfContainerName, caBundleVolumeName, caDir, caFile string, +) error { + var qfIdx int + for i, c := range d.Spec.Template.Spec.Containers { + if c.Name == qfContainerName { + qfIdx = i + break } } + containerSpec := corev1.Container{ + Args: []string{ + fmt.Sprintf("-frontend.tail-tls-config.tls-ca-path=%s/%s", caDir, caFile), + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: caBundleVolumeName, + ReadOnly: true, + MountPath: caDir, + }, + }, + } + + p := corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: caBundleVolumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + DefaultMode: &defaultConfigMapMode, + LocalObjectReference: corev1.LocalObjectReference{ + Name: caBundleVolumeName, + }, + }, + }, + }, + }, + } + + if err := mergo.Merge(&d.Spec.Template.Spec.Containers[qfIdx], containerSpec, mergo.WithAppendSlice); err != nil { + return kverrors.Wrap(err, "failed to add tls config args") + } + + if err := mergo.Merge(&d.Spec.Template.Spec, p, mergo.WithAppendSlice); err != nil { + return kverrors.Wrap(err, "failed to add tls volumes") + } + return nil } diff --git a/operator/internal/manifests/query-frontend_test.go b/operator/internal/manifests/query-frontend_test.go index 66cd650fb442c..92ded54dd0037 100644 --- a/operator/internal/manifests/query-frontend_test.go +++ b/operator/internal/manifests/query-frontend_test.go @@ -1,13 +1,16 @@ package manifests import ( + "fmt" + "path" "testing" - v1 "github.com/grafana/loki/operator/apis/config/v1" lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" + "github.com/grafana/loki/operator/internal/manifests/internal/config" "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestNewQueryFrontendDeployment_SelectorMatchesLabels(t *testing.T) { @@ -48,61 +51,49 @@ func TestNewQueryFrontendDeployment_HasTemplateConfigHashAnnotation(t *testing.T require.Equal(t, annotations[expected], "deadbeef") } -func TestConfigureQueryFrontendDeploymentForMode(t *testing.T) { - type tt struct { - desc string - opts *Options - dpl *appsv1.Deployment - want *appsv1.Deployment - } - - tc := []tt{ - { - desc: "static mode", - opts: &Options{ - Stack: lokiv1.LokiStackSpec{ - Tenants: &lokiv1.TenantsSpec{ - Mode: lokiv1.Static, - }, +func TestConfigureQueryFrontendHTTPServicePKI(t *testing.T) { + opts := Options{ + Name: "abcd", + Namespace: "efgh", + Stack: lokiv1.LokiStackSpec{ + Template: &lokiv1.LokiTemplateSpec{ + QueryFrontend: &lokiv1.LokiComponentSpec{ + Replicas: 1, }, }, - dpl: &appsv1.Deployment{}, - want: &appsv1.Deployment{}, }, - { - desc: "dynamic mode", - opts: &Options{ - Stack: lokiv1.LokiStackSpec{ - Tenants: &lokiv1.TenantsSpec{ - Mode: lokiv1.Dynamic, - }, - }, - }, - dpl: &appsv1.Deployment{}, - want: &appsv1.Deployment{}, + } + d := appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: appsv1.SchemeGroupVersion.String(), }, - { - desc: "openshift-logging mode", - 
opts: &Options{ - Name: "test", - Namespace: "test-ns", - Stack: lokiv1.LokiStackSpec{ - Tenants: &lokiv1.TenantsSpec{ - Mode: lokiv1.OpenshiftLogging, - }, - }, - Gates: v1.FeatureGates{ - ServiceMonitorTLSEndpoints: true, - }, - }, - dpl: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: lokiFrontendContainerName, + Args: []string{ + "-target=query-frontend", + }, + VolumeMounts: []corev1.VolumeMount{ { - Args: []string{ - "-target=query-frontend", + Name: configVolumeName, + ReadOnly: false, + MountPath: config.LokiConfigMountDir, + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: configVolumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + DefaultMode: &defaultConfigMapMode, + LocalObjectReference: corev1.LocalObjectReference{ + Name: lokiConfigMapName(opts.Name), }, }, }, @@ -110,52 +101,99 @@ func TestConfigureQueryFrontendDeploymentForMode(t *testing.T) { }, }, }, - want: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ + }, + } + + caBundleVolumeName := signingCABundleName(opts.Name) + serviceName := serviceNameQueryFrontendHTTP(opts.Name) + expected := appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: appsv1.SchemeGroupVersion.String(), + }, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: lokiFrontendContainerName, + Args: []string{ + "-target=query-frontend", + fmt.Sprintf("-frontend.tail-tls-config.tls-ca-path=%s/%s", caBundleDir, caFile), + fmt.Sprintf("-server.http-tls-cert-path=%s", path.Join(httpTLSDir, tlsCertFile)), + fmt.Sprintf("-server.http-tls-key-path=%s", path.Join(httpTLSDir, tlsKeyFile)), + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: configVolumeName, + ReadOnly: false, + MountPath: config.LokiConfigMountDir, + }, + { + Name: caBundleVolumeName, + ReadOnly: true, + MountPath: caBundleDir, + }, { - Args: []string{ - "-target=query-frontend", - "-frontend.tail-proxy-url=https://test-querier-http.test-ns.svc.cluster.local:3100", - "-frontend.tail-tls-config.tls-ca-path=/var/run/ca/service-ca.crt", + Name: serviceName, + ReadOnly: false, + MountPath: httpTLSDir, + }, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Scheme: corev1.URISchemeHTTPS, }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "test-ca-bundle", - ReadOnly: true, - MountPath: "/var/run/ca", - }, + }, + }, + LivenessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Scheme: corev1.URISchemeHTTPS, }, }, }, - Volumes: []corev1.Volume{ - { - Name: "test-ca-bundle", - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - DefaultMode: &defaultConfigMapMode, - LocalObjectReference: corev1.LocalObjectReference{ - Name: "test-ca-bundle", - }, - }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: configVolumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + DefaultMode: &defaultConfigMapMode, + LocalObjectReference: corev1.LocalObjectReference{ + Name: lokiConfigMapName(opts.Name), + }, + }, + }, + }, + { + Name: caBundleVolumeName, + 
VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + DefaultMode: &defaultConfigMapMode, + LocalObjectReference: corev1.LocalObjectReference{ + Name: caBundleVolumeName, }, }, }, }, + { + Name: serviceName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: serviceName, + }, + }, + }, }, }, }, }, } - for _, tc := range tc { - tc := tc - t.Run(tc.desc, func(t *testing.T) { - t.Parallel() - err := configureQueryFrontendDeploymentForMode(tc.dpl, tc.opts.Stack.Tenants.Mode, tc.opts) - require.NoError(t, err) - require.Equal(t, tc.want, tc.dpl) - }) - } + + err := configureQueryFrontendHTTPServicePKI(&d, opts.Name) + require.Nil(t, err) + require.Equal(t, expected, d) }
operator
Add support for tail TLS encryption (#6663)
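Two mechanisms carry this change: the config template now renders `tail_proxy_url` from a `Querier.Protocol` field (set to `https` whenever the `HTTPEncryption` gate is on), and the new `configureTailCA` helper grafts the CA flag and volume onto the already-built query-frontend container via `mergo.Merge(..., mergo.WithAppendSlice)`. A minimal sketch of that merge pattern, with a hypothetical container and volume name standing in for the generated deployment spec:

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Existing query-frontend container as built earlier in the manifest code.
	dst := corev1.Container{
		Name: "loki-query-frontend",
		Args: []string{"-target=query-frontend"},
	}

	// Overlay carrying only the additions, mirroring configureTailCA.
	overlay := corev1.Container{
		Args: []string{"-frontend.tail-tls-config.tls-ca-path=/var/run/ca/service-ca.crt"},
		VolumeMounts: []corev1.VolumeMount{
			{Name: "lokistack-ca-bundle", ReadOnly: true, MountPath: "/var/run/ca"},
		},
	}

	// WithAppendSlice appends slice fields instead of skipping or
	// replacing them, so both Args entries survive the merge.
	if err := mergo.Merge(&dst, overlay, mergo.WithAppendSlice); err != nil {
		panic(err)
	}

	fmt.Println(dst.Args)
}
```

`WithAppendSlice` is what keeps the original `-target=query-frontend` argument in place; a plain `mergo.Merge` would leave `dst.Args` untouched because the destination slice is already non-empty.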
1b1471dca0387031809f9e23e8a54f0b61879b47
2025-02-14 02:06:09
renovate[bot]
chore(deps): update dependency @types/node to v22.13.2 (main) (#16257)
false
diff --git a/pkg/dataobj/explorer/ui/package-lock.json b/pkg/dataobj/explorer/ui/package-lock.json index 475e7cbbb981c..8760ed3e06428 100644 --- a/pkg/dataobj/explorer/ui/package-lock.json +++ b/pkg/dataobj/explorer/ui/package-lock.json @@ -1194,9 +1194,9 @@ "license": "MIT" }, "node_modules/@types/node": { - "version": "22.13.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-22.13.1.tgz", - "integrity": "sha512-jK8uzQlrvXqEU91UxiK5J7pKHyzgnI1Qnl0QDHIgVGuolJhRb9EEl28Cj9b3rGR8B2lhFCtvIm5os8lFnO/1Ew==", + "version": "22.13.2", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.13.2.tgz", + "integrity": "sha512-Z+r8y3XL9ZpI2EY52YYygAFmo2/oWfNSj4BCpAXE2McAexDk8VcnBMGC9Djn9gTKt4d2T/hhXqmPzo4hfIXtTg==", "dev": true, "license": "MIT", "dependencies": {
chore
update dependency @types/node to v22.13.2 (main) (#16257)
be145d81635abcc1f0aa8a1bdc9335f4353a4294
2025-01-24 22:04:08
renovate[bot]
fix(deps): update module github.com/grafana/loki/v3 to v3.3.2 (main) (#15952)
false
diff --git a/cmd/segment-inspect/go.mod b/cmd/segment-inspect/go.mod index 02c589b86095c..864edd901a6cd 100644 --- a/cmd/segment-inspect/go.mod +++ b/cmd/segment-inspect/go.mod @@ -1,12 +1,12 @@ module github.com/grafana/loki/cmd/segment-inspect -go 1.22 +go 1.23 -toolchain go1.22.6 +toolchain go1.23.5 require ( github.com/dustin/go-humanize v1.0.1 - github.com/grafana/loki/v3 v3.2.1 + github.com/grafana/loki/v3 v3.3.2 ) require ( @@ -21,7 +21,7 @@ require ( github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/coder/quartz v0.1.0 // indirect + github.com/coder/quartz v0.1.2 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -42,13 +42,13 @@ require ( github.com/gogo/status v1.1.1 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.4 // indirect - github.com/google/btree v1.1.2 // indirect + github.com/google/btree v1.1.3 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/mux v1.8.1 // indirect - github.com/grafana/dskit v0.0.0-20240905221822-931a021fb06b // indirect + github.com/grafana/dskit v0.0.0-20241007172036-53283a0f6b41 // indirect github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 // indirect - github.com/grafana/jsonparser v0.0.0-20240425183733-ea80629e1a32 // indirect - github.com/grafana/loki/pkg/push v0.0.0-20231124142027-e52380921608 // indirect + github.com/grafana/jsonparser v0.0.0-20241004153430-023329977675 // indirect + github.com/grafana/loki/pkg/push v0.0.0-20240924133635-758364c7775f // indirect github.com/grafana/pyroscope-go/godeltaprof v0.1.8 // indirect github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect github.com/hashicorp/consul/api v1.29.4 // indirect @@ -67,14 +67,14 @@ require ( github.com/imdario/mergo v0.3.16 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.9 // indirect + github.com/klauspost/compress v1.17.11 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/miekg/dns v1.1.61 // indirect - github.com/mitchellh/copystructure v1.0.0 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/mitchellh/reflectwalk v1.0.1 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect @@ -82,12 +82,12 @@ require ( github.com/onsi/gomega v1.29.0 // indirect github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e // indirect github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect - github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pires/go-proxyproto v0.7.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect + 
github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/exporter-toolkit v0.11.0 // indirect @@ -96,9 +96,9 @@ require ( github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/sercand/kuberesolver/v5 v5.1.1 // indirect - github.com/shopspring/decimal v1.2.0 // indirect + github.com/shopspring/decimal v1.3.1 // indirect github.com/sony/gobreaker v0.5.0 // indirect - github.com/spf13/cast v1.3.1 // indirect + github.com/spf13/cast v1.5.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/stretchr/testify v1.9.0 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect @@ -107,9 +107,9 @@ require ( go.etcd.io/etcd/client/pkg/v3 v3.5.4 // indirect go.etcd.io/etcd/client/v3 v3.5.4 // indirect go.opentelemetry.io/collector/pdata v1.12.0 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.opentelemetry.io/otel v1.29.0 // indirect + go.opentelemetry.io/otel/metric v1.29.0 // indirect + go.opentelemetry.io/otel/trace v1.29.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.21.0 // indirect @@ -118,16 +118,16 @@ require ( golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 // indirect golang.org/x/mod v0.19.0 // indirect golang.org/x/net v0.33.0 // indirect - golang.org/x/oauth2 v0.22.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sync v0.10.0 // indirect golang.org/x/sys v0.28.0 // indirect golang.org/x/text v0.21.0 // indirect - golang.org/x/time v0.6.0 // indirect + golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.23.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240820151423-278611b39280 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240820151423-278611b39280 // indirect - google.golang.org/grpc v1.65.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/grpc v1.67.1 // indirect + google.golang.org/protobuf v1.35.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/cmd/segment-inspect/go.sum b/cmd/segment-inspect/go.sum index f8661a5427287..b735c9b05f204 100644 --- a/cmd/segment-inspect/go.sum +++ b/cmd/segment-inspect/go.sum @@ -35,8 +35,8 @@ github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4 github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI= -github.com/alicebob/miniredis/v2 v2.30.4 h1:8S4/o1/KoUArAGbGwPxcwf0krlzceva2XVOSchFS7Eo= -github.com/alicebob/miniredis/v2 v2.30.4/go.mod h1:b25qWj4fCEsBeAAR2mlb0ufImGC6uH3VlUfb/HS5zKg= +github.com/alicebob/miniredis/v2 v2.33.0 h1:uvTF0EDeu9RLnUEG27Db5I68ESoIxTiXbNUiji6lZrA= +github.com/alicebob/miniredis/v2 v2.33.0/go.mod h1:MhP4a3EU7aENRi9aO+tHfTBZicLqQevyi/DJpoj6mi0= github.com/antihax/optional v1.0.0/go.mod 
h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= @@ -44,8 +44,8 @@ github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJ github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI= -github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= +github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= @@ -68,8 +68,8 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/coder/quartz v0.1.0 h1:cLL+0g5l7xTf6ordRnUMMiZtRE8Sq5LxpghS63vEXrQ= -github.com/coder/quartz v0.1.0/go.mod h1:vsiCc+AHViMKH2CQpGIpFgdHIEQsxwm8yCscqKmzbRA= +github.com/coder/quartz v0.1.2 h1:PVhc9sJimTdKd3VbygXtS4826EOCpB1fXoRlLnCrE+s= +github.com/coder/quartz v0.1.2/go.mod h1:vsiCc+AHViMKH2CQpGIpFgdHIEQsxwm8yCscqKmzbRA= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= @@ -104,6 +104,8 @@ github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4Nij github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -159,8 +161,8 @@ github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6 github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod 
h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -177,16 +179,16 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/grafana/dskit v0.0.0-20240905221822-931a021fb06b h1:x2HCzk29I0o5pRPfqWP/qwhXaPGlcz8pohq5kO1NZoE= -github.com/grafana/dskit v0.0.0-20240905221822-931a021fb06b/go.mod h1:SPLNCARd4xdjCkue0O6hvuoveuS1dGJjDnfxYe405YQ= +github.com/grafana/dskit v0.0.0-20241007172036-53283a0f6b41 h1:a4O59OU3FJZ+EJUVnlvvNTvdAc4uRN1P6EaGwqL9CnA= +github.com/grafana/dskit v0.0.0-20241007172036-53283a0f6b41/go.mod h1:SPLNCARd4xdjCkue0O6hvuoveuS1dGJjDnfxYe405YQ= github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 h1:X8IKQ0wu40wpvYcKfBcc5T4QnhdQjUhtUtB/1CY89lE= github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= -github.com/grafana/jsonparser v0.0.0-20240425183733-ea80629e1a32 h1:NznuPwItog+rwdVg8hAuGKP29ndRSzJAwhxKldkP8oQ= -github.com/grafana/jsonparser v0.0.0-20240425183733-ea80629e1a32/go.mod h1:796sq+UcONnSlzA3RtlBZ+b/hrerkZXiEmO8oMjyRwY= -github.com/grafana/loki/pkg/push v0.0.0-20231124142027-e52380921608 h1:ZYk42718kSXOiIKdjZKljWLgBpzL5z1yutKABksQCMg= -github.com/grafana/loki/pkg/push v0.0.0-20231124142027-e52380921608/go.mod h1:f3JSoxBTPXX5ec4FxxeC19nTBSxoTz+cBgS3cYLMcr0= -github.com/grafana/loki/v3 v3.2.1 h1:VB7u+KHfvL5aHAxgoVBvz5wVhsdGuqKC7uuOFOOe7jw= -github.com/grafana/loki/v3 v3.2.1/go.mod h1:WvdLl6wOS+yahaeQY+xhD2m2XzkHDfKr5FZaX7D/X2Y= +github.com/grafana/jsonparser v0.0.0-20241004153430-023329977675 h1:U94jQ2TQr1m3HNyE8efSdyaBbDrdPaWImXyenuKZ/nw= +github.com/grafana/jsonparser v0.0.0-20241004153430-023329977675/go.mod h1:796sq+UcONnSlzA3RtlBZ+b/hrerkZXiEmO8oMjyRwY= +github.com/grafana/loki/pkg/push v0.0.0-20240924133635-758364c7775f h1:NcS5dQXH/BtoMpQtA1d0+ftwyzdwQk5/8SCj9eHl8w8= +github.com/grafana/loki/pkg/push v0.0.0-20240924133635-758364c7775f/go.mod h1:lJEF/Wh5MYlmBem6tOYAFObkLsuikfrEf8Iy9AdMPiQ= +github.com/grafana/loki/v3 v3.3.2 h1:S/VODX0Ik/tP6I4sNd54kMHbvQDtWLHipDCZrGvpBUQ= +github.com/grafana/loki/v3 v3.3.2/go.mod h1:XpYMDs7/l82aaI7pEaViX+nZ4ZxdWVsBtt6WkuPOosI= github.com/grafana/pyroscope-go/godeltaprof v0.1.8 h1:iwOtYXeeVSAeYefJNaxDytgjKtUuKQbJqgAIjlnicKg= github.com/grafana/pyroscope-go/godeltaprof v0.1.8/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= @@ -263,8 +265,8 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8 github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/kisielk/errcheck v1.5.0/go.mod 
h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -298,16 +300,17 @@ github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJys github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= -github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -334,8 +337,8 @@ github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= -github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b 
h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A= +github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b/go.mod h1:AC62GU6hc0BrNm+9RK9VSiwa/EUe1bkIeFORAMcHvJU= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -360,8 +363,8 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -397,8 +400,9 @@ github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtr github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY= github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYMWZJ294T3BtmVCpQ= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -407,8 +411,9 @@ github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx 
v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -437,8 +442,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yuin/gopher-lua v1.1.0 h1:BojcDhfyDWgU2f2TOzYK/g5p2gxMrku8oupLDqlnSqE= -github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= +github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= +github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= go.etcd.io/etcd/api/v3 v3.5.4 h1:OHVyt3TopwtUQ2GKdd5wu3PmmipR4FTwCqoEjSyRdIc= go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/client/pkg/v3 v3.5.4 h1:lrneYvz923dvC14R54XcA7FXoZ3mlGZAgmwhfm7HqOg= @@ -447,12 +452,12 @@ go.etcd.io/etcd/client/v3 v3.5.4 h1:p83BUL3tAYS0OT/r0qglgc3M1JjhM0diV8DSWAhVXv4= go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= go.opentelemetry.io/collector/pdata v1.12.0 h1:Xx5VK1p4VO0md8MWm2icwC1MnJ7f8EimKItMWw46BmA= go.opentelemetry.io/collector/pdata v1.12.0/go.mod h1:MYeB0MmMAxeM0hstCFrCqWLzdyeYySim2dG6pDT6nYI= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -527,8 +532,8 @@ golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= -golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -591,8 +596,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -629,10 +634,10 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto/googleapis/api v0.0.0-20240820151423-278611b39280 h1:YDFM9oOjiFhaMAVgbDxfxW+66nRrsvzQzJ51wp3OxC0= -google.golang.org/genproto/googleapis/api v0.0.0-20240820151423-278611b39280/go.mod h1:fO8wJzT2zbQbAjbIoos1285VfEIYKDDY+Dt+WpTkh6g= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240820151423-278611b39280 h1:XQMA2e105XNlEZ8NRF0HqnUOZzP14sUSsgL09kpdNnU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240820151423-278611b39280/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 h1:T6rh4haD3GVYsgEfWExoCZA2o2FmbNyKpTuAxbEFPTg= +google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:wp2WsuBYj6j8wUdo3ToZsdxxixbvQNAHqVJrTgi5E5M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -641,8 +646,8 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.67.1 
h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -654,8 +659,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -687,7 +692,7 @@ k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 h1:b2FmK8YH+QEwq/Sy2uAEhmqL5nPfGYbJOcaqjeYYZoA= +k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/tools/lambda-promtail/go.mod b/tools/lambda-promtail/go.mod index b269facf70db6..d85108812b446 100644 --- a/tools/lambda-promtail/go.mod +++ b/tools/lambda-promtail/go.mod @@ -1,6 +1,8 @@ module main -go 1.22 +go 1.23 + +toolchain go1.23.5 require ( github.com/aws/aws-lambda-go v1.47.0 @@ -10,8 +12,8 @@ require ( github.com/go-kit/log v0.2.1 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 - github.com/grafana/dskit v0.0.0-20241004175247-687ec485facf - github.com/grafana/loki/v3 v3.2.1 + github.com/grafana/dskit v0.0.0-20241007172036-53283a0f6b41 + github.com/grafana/loki/v3 v3.3.2 github.com/prometheus/common v0.61.0 github.com/prometheus/prometheus v1.8.2-0.20200727090838-6f296594a852 github.com/stretchr/testify v1.10.0 @@ -61,12 +63,12 @@ require ( github.com/gogo/googleapis v1.4.1 // indirect github.com/gogo/status v1.1.1 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/btree v1.1.2 // indirect + github.com/google/btree v1.1.3 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/grafana/gomemcache 
v0.0.0-20240229205252-cd6a66d6fb56 // indirect - github.com/grafana/jsonparser v0.0.0-20240425183733-ea80629e1a32 // indirect - github.com/grafana/loki/pkg/push v0.0.0-20231124142027-e52380921608 // indirect + github.com/grafana/jsonparser v0.0.0-20241004153430-023329977675 // indirect + github.com/grafana/loki/pkg/push v0.0.0-20240924133635-758364c7775f // indirect github.com/grafana/pyroscope-go/godeltaprof v0.1.8 // indirect github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect github.com/hashicorp/consul/api v1.29.4 // indirect @@ -85,33 +87,33 @@ require ( github.com/imdario/mergo v0.3.16 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.9 // indirect + github.com/klauspost/compress v1.17.11 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/miekg/dns v1.1.61 // indirect - github.com/mitchellh/copystructure v1.0.0 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/mitchellh/reflectwalk v1.0.1 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e // indirect github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect - github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // indirect github.com/pires/go-proxyproto v0.7.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.20.4 // indirect + github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/exporter-toolkit v0.11.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/sercand/kuberesolver/v5 v5.1.1 // indirect - github.com/shopspring/decimal v1.2.0 // indirect + github.com/shopspring/decimal v1.3.1 // indirect github.com/sony/gobreaker v0.5.0 // indirect - github.com/spf13/cast v1.3.1 // indirect + github.com/spf13/cast v1.5.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect @@ -119,9 +121,9 @@ require ( go.etcd.io/etcd/client/pkg/v3 v3.5.4 // indirect go.etcd.io/etcd/client/v3 v3.5.4 // indirect go.opentelemetry.io/collector/pdata v1.12.0 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.opentelemetry.io/otel v1.29.0 // indirect + go.opentelemetry.io/otel/metric v1.29.0 // indirect + go.opentelemetry.io/otel/trace v1.29.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.21.0 // indirect @@ -134,11 +136,11 @@ require ( golang.org/x/sync v0.10.0 // indirect golang.org/x/sys v0.28.0 // indirect 
golang.org/x/text v0.21.0 // indirect - golang.org/x/time v0.6.0 // indirect + golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.23.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240820151423-278611b39280 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240820151423-278611b39280 // indirect - google.golang.org/grpc v1.65.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/grpc v1.67.1 // indirect google.golang.org/protobuf v1.35.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/tools/lambda-promtail/go.sum b/tools/lambda-promtail/go.sum index f9e9fac4fa81c..f047bb4b1e3a3 100644 --- a/tools/lambda-promtail/go.sum +++ b/tools/lambda-promtail/go.sum @@ -35,8 +35,8 @@ github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4 github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI= -github.com/alicebob/miniredis/v2 v2.30.4 h1:8S4/o1/KoUArAGbGwPxcwf0krlzceva2XVOSchFS7Eo= -github.com/alicebob/miniredis/v2 v2.30.4/go.mod h1:b25qWj4fCEsBeAAR2mlb0ufImGC6uH3VlUfb/HS5zKg= +github.com/alicebob/miniredis/v2 v2.33.0 h1:uvTF0EDeu9RLnUEG27Db5I68ESoIxTiXbNUiji6lZrA= +github.com/alicebob/miniredis/v2 v2.33.0/go.mod h1:MhP4a3EU7aENRi9aO+tHfTBZicLqQevyi/DJpoj6mi0= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= @@ -46,8 +46,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-lambda-go v1.47.0 h1:0H8s0vumYx/YKs4sE7YM0ktwL2eWse+kfopsRI1sXVI= github.com/aws/aws-lambda-go v1.47.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7RfgJv23DymV8A= -github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI= -github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= +github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aws/aws-sdk-go-v2 v1.33.0 h1:Evgm4DI9imD81V0WwD+TN4DCwjUMdc94TrduMLbgZJs= github.com/aws/aws-sdk-go-v2 v1.33.0/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 h1:lL7IfaFzngfx0ZwUGOZdsFFnQ5uLvR0hWqqhyE7Q9M8= @@ -140,6 +140,8 @@ github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4Nij github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest 
v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -195,8 +197,8 @@ github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6 github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -213,16 +215,16 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/grafana/dskit v0.0.0-20241004175247-687ec485facf h1:ZafqZwIpdCCMifH9Ok6C98rYaCh5OZeyyHLbU0FPedg= -github.com/grafana/dskit v0.0.0-20241004175247-687ec485facf/go.mod h1:SPLNCARd4xdjCkue0O6hvuoveuS1dGJjDnfxYe405YQ= +github.com/grafana/dskit v0.0.0-20241007172036-53283a0f6b41 h1:a4O59OU3FJZ+EJUVnlvvNTvdAc4uRN1P6EaGwqL9CnA= +github.com/grafana/dskit v0.0.0-20241007172036-53283a0f6b41/go.mod h1:SPLNCARd4xdjCkue0O6hvuoveuS1dGJjDnfxYe405YQ= github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 h1:X8IKQ0wu40wpvYcKfBcc5T4QnhdQjUhtUtB/1CY89lE= github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= -github.com/grafana/jsonparser v0.0.0-20240425183733-ea80629e1a32 h1:NznuPwItog+rwdVg8hAuGKP29ndRSzJAwhxKldkP8oQ= -github.com/grafana/jsonparser v0.0.0-20240425183733-ea80629e1a32/go.mod h1:796sq+UcONnSlzA3RtlBZ+b/hrerkZXiEmO8oMjyRwY= -github.com/grafana/loki/pkg/push v0.0.0-20231124142027-e52380921608 h1:ZYk42718kSXOiIKdjZKljWLgBpzL5z1yutKABksQCMg= -github.com/grafana/loki/pkg/push v0.0.0-20231124142027-e52380921608/go.mod h1:f3JSoxBTPXX5ec4FxxeC19nTBSxoTz+cBgS3cYLMcr0= -github.com/grafana/loki/v3 v3.2.1 h1:VB7u+KHfvL5aHAxgoVBvz5wVhsdGuqKC7uuOFOOe7jw= -github.com/grafana/loki/v3 v3.2.1/go.mod h1:WvdLl6wOS+yahaeQY+xhD2m2XzkHDfKr5FZaX7D/X2Y= +github.com/grafana/jsonparser v0.0.0-20241004153430-023329977675 h1:U94jQ2TQr1m3HNyE8efSdyaBbDrdPaWImXyenuKZ/nw= +github.com/grafana/jsonparser v0.0.0-20241004153430-023329977675/go.mod h1:796sq+UcONnSlzA3RtlBZ+b/hrerkZXiEmO8oMjyRwY= +github.com/grafana/loki/pkg/push v0.0.0-20240924133635-758364c7775f h1:NcS5dQXH/BtoMpQtA1d0+ftwyzdwQk5/8SCj9eHl8w8= +github.com/grafana/loki/pkg/push v0.0.0-20240924133635-758364c7775f/go.mod h1:lJEF/Wh5MYlmBem6tOYAFObkLsuikfrEf8Iy9AdMPiQ= +github.com/grafana/loki/v3 v3.3.2 h1:S/VODX0Ik/tP6I4sNd54kMHbvQDtWLHipDCZrGvpBUQ= 
+github.com/grafana/loki/v3 v3.3.2/go.mod h1:XpYMDs7/l82aaI7pEaViX+nZ4ZxdWVsBtt6WkuPOosI= github.com/grafana/pyroscope-go/godeltaprof v0.1.8 h1:iwOtYXeeVSAeYefJNaxDytgjKtUuKQbJqgAIjlnicKg= github.com/grafana/pyroscope-go/godeltaprof v0.1.8/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= @@ -296,8 +298,8 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8 github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -331,16 +333,17 @@ github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJys github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= -github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ 
-367,8 +370,8 @@ github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= -github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A= +github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b/go.mod h1:AC62GU6hc0BrNm+9RK9VSiwa/EUe1bkIeFORAMcHvJU= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -392,8 +395,8 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= -github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -427,8 +430,9 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUt github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY= github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYMWZJ294T3BtmVCpQ= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -437,8 +441,9 @@ github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= 
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -466,8 +471,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yuin/gopher-lua v1.1.0 h1:BojcDhfyDWgU2f2TOzYK/g5p2gxMrku8oupLDqlnSqE= -github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= +github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= +github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= go.etcd.io/etcd/api/v3 v3.5.4 h1:OHVyt3TopwtUQ2GKdd5wu3PmmipR4FTwCqoEjSyRdIc= go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/client/pkg/v3 v3.5.4 h1:lrneYvz923dvC14R54XcA7FXoZ3mlGZAgmwhfm7HqOg= @@ -476,12 +481,12 @@ go.etcd.io/etcd/client/v3 v3.5.4 h1:p83BUL3tAYS0OT/r0qglgc3M1JjhM0diV8DSWAhVXv4= go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= go.opentelemetry.io/collector/pdata v1.12.0 h1:Xx5VK1p4VO0md8MWm2icwC1MnJ7f8EimKItMWw46BmA= go.opentelemetry.io/collector/pdata v1.12.0/go.mod h1:MYeB0MmMAxeM0hstCFrCqWLzdyeYySim2dG6pDT6nYI= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -620,8 +625,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod 
h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -657,10 +662,10 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto/googleapis/api v0.0.0-20240820151423-278611b39280 h1:YDFM9oOjiFhaMAVgbDxfxW+66nRrsvzQzJ51wp3OxC0= -google.golang.org/genproto/googleapis/api v0.0.0-20240820151423-278611b39280/go.mod h1:fO8wJzT2zbQbAjbIoos1285VfEIYKDDY+Dt+WpTkh6g= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240820151423-278611b39280 h1:XQMA2e105XNlEZ8NRF0HqnUOZzP14sUSsgL09kpdNnU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240820151423-278611b39280/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 h1:T6rh4haD3GVYsgEfWExoCZA2o2FmbNyKpTuAxbEFPTg= +google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:wp2WsuBYj6j8wUdo3ToZsdxxixbvQNAHqVJrTgi5E5M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -669,8 +674,8 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf 
v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -715,7 +720,7 @@ k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 h1:b2FmK8YH+QEwq/Sy2uAEhmqL5nPfGYbJOcaqjeYYZoA= +k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
fix
update module github.com/grafana/loki/v3 to v3.3.2 (main) (#15952)
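The go-version bump in the diff above (go 1.22 → 1.23, toolchain go1.22.6 → go1.23.5) touches two related go.mod directives that are easy to conflate. As a reminder of how they interact, here is a minimal go.mod sketch; the module path and require line are illustrative placeholders, not taken from the repository:

module example.com/hypothetical-tool

// Minimum Go language version this module requires; the toolchain applies
// this version's language semantics to the module's packages.
go 1.23

// Concrete toolchain to use (and auto-download, if needed) when the locally
// installed `go` command is older; supported since Go 1.21.
toolchain go1.23.5

require github.com/dustin/go-humanize v1.0.1

A local toolchain that is the same version or newer simply ignores the toolchain line, which is why such bumps can land alongside routine dependency updates.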
ae180d6e070946eb5359ecd63a9e01e02f160ce3
2024-04-24 00:11:03
Trevor Whitney
fix(nix): lambda-promtail vendor hash (#12763)
false
diff --git a/nix/packages/loki.nix b/nix/packages/loki.nix
index e88fac6b6bb05..977161460eb8d 100644
--- a/nix/packages/loki.nix
+++ b/nix/packages/loki.nix
@@ -5,7 +5,7 @@ let
   pname = "lambda-promtail";
   src = ./../../tools/lambda-promtail;
 
-  vendorHash = "sha256-PBdPIrN0aWO38bgoAg6jZlY7scpUM2tAjJ6bMN4SQt8=";
+  vendorHash = "sha256-CKob173T0VHD5c8F26aU7p1l+QzqddNM4qQedMbLJa0=";
 
   doCheck = false;
 
diff --git a/pkg/bloomgateway/processor.go b/pkg/bloomgateway/processor.go
index 8f79e417ab9f4..d94b305a9b26b 100644
--- a/pkg/bloomgateway/processor.go
+++ b/pkg/bloomgateway/processor.go
@@ -63,7 +63,7 @@ func (p *processor) runWithBounds(ctx context.Context, tasks []Task, bounds v1.M
 	return nil
 }
 
-func (p *processor) processTasks(ctx context.Context, tenant string, day config.DayTime, keyspaces v1.MultiFingerprintBounds, tasks []Task) error {
+func (p *processor) processTasks(ctx context.Context, tenant string, day config.DayTime, _ v1.MultiFingerprintBounds, tasks []Task) error {
 	level.Info(p.logger).Log("msg", "process tasks for day", "tenant", tenant, "tasks", len(tasks), "day", day.String())
 
 	var duration time.Duration
fix
lambda-promtail vendor hash (#12763)
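Besides the Nix vendor-hash update, this commit's processor.go hunk swaps the unused keyspaces parameter for the blank identifier. A tiny self-contained sketch of that Go idiom (all names here are made up for illustration):

package main

import "fmt"

// The second parameter is required by the call sites (or by an interface),
// but the body never reads it; naming it `_` documents the intent and keeps
// unused-parameter linters quiet without changing the function signature.
func process(tenant string, _ int) {
	fmt.Printf("processing tasks for tenant %q\n", tenant)
}

func main() {
	process("fake", 7)
}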
c9950e394d2bca8bd290f60672a3bc904cd72d7b
2024-07-18 18:28:33
Christian Haudum
fix(blooms): Fix eviction of multiple blockcache items (#13573)
false
diff --git a/pkg/storage/stores/shipper/bloomshipper/blockscache.go b/pkg/storage/stores/shipper/bloomshipper/blockscache.go
index b26a4ed5cbda5..a7992af267c54 100644
--- a/pkg/storage/stores/shipper/bloomshipper/blockscache.go
+++ b/pkg/storage/stores/shipper/bloomshipper/blockscache.go
@@ -272,6 +272,10 @@ func (c *BlocksCache) put(key string, value BlockDirectory) (*Entry, error) {
 
 func (c *BlocksCache) evict(key string, element *list.Element, reason string) {
 	entry := element.Value.(*Entry)
+	if key != entry.Key {
+		level.Error(c.logger).Log("msg", "failed to remove entry: entry key and map key do not match", "map_key", key, "entry_key", entry.Key)
+		return
+	}
 	err := c.remove(entry)
 	if err != nil {
 		level.Error(c.logger).Log("msg", "failed to remove entry from disk", "err", err)
@@ -400,6 +404,7 @@ func (c *BlocksCache) evictLeastRecentlyUsedItems() {
 	)
 	elem := c.lru.Back()
 	for c.currSizeBytes >= int64(c.cfg.SoftLimit) && elem != nil {
+		nextElem := elem.Prev()
 		entry := elem.Value.(*Entry)
 		if entry.refCount.Load() == 0 {
 			level.Debug(c.logger).Log(
@@ -408,7 +413,7 @@ func (c *BlocksCache) evictLeastRecentlyUsedItems() {
 			)
 			c.evict(entry.Key, elem, reasonFull)
 		}
-		elem = elem.Prev()
+		elem = nextElem
 	}
 }
 
diff --git a/pkg/storage/stores/shipper/bloomshipper/blockscache_test.go b/pkg/storage/stores/shipper/bloomshipper/blockscache_test.go
index 1ddc465577fcf..d19833805ef47 100644
--- a/pkg/storage/stores/shipper/bloomshipper/blockscache_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/blockscache_test.go
@@ -197,8 +197,8 @@ func TestBlocksCache_TTLEviction(t *testing.T) {
 func TestBlocksCache_LRUEviction(t *testing.T) {
 	cfg := config.BlocksCacheConfig{
 		TTL:       time.Hour,
-		SoftLimit: flagext.Bytes(15),
-		HardLimit: flagext.Bytes(20),
+		SoftLimit: flagext.Bytes(25),
+		HardLimit: flagext.Bytes(50),
 		// no need for TTL evictions
 		PurgeInterval: time.Minute,
 	}
@@ -216,27 +216,32 @@ func TestBlocksCache_LRUEviction(t *testing.T) {
 	// oldest without refcount - will be evicted
 	err = cache.Put(ctx, "c", CacheValue("c", 4))
 	require.NoError(t, err)
+	err = cache.Put(ctx, "d", CacheValue("d", 4))
+	require.NoError(t, err)
+	err = cache.Put(ctx, "e", CacheValue("e", 4))
+	require.NoError(t, err)
+	err = cache.Put(ctx, "f", CacheValue("f", 4))
+	require.NoError(t, err)
 
 	// increase ref counter on "b"
 	_, found := cache.Get(ctx, "b")
 	require.True(t, found)
 
 	// exceed soft limit
-	err = cache.Put(ctx, "d", CacheValue("d", 4))
+	err = cache.Put(ctx, "g", CacheValue("g", 16))
 	require.NoError(t, err)
 
 	time.Sleep(time.Second)
 
 	l, ok := cache.len()
 	require.True(t, ok)
-	require.Equal(t, 3, l)
-	// key "b" was evicted because it was the oldest
-	// and it had no ref counts
-	_, found = cache.Get(ctx, "c")
-	require.False(t, found)
-
-	require.Equal(t, int64(12), cache.currSizeBytes)
+	// expect 3 items in cache:
+	// * item a with size 4
+	// * item b with size 4
+	// * item g with size 16
+	require.Equal(t, 3, l)
+	require.Equal(t, int64(24), cache.currSizeBytes)
 }
 
 func TestBlocksCache_RefCounter(t *testing.T) {
fix
Fix eviction of multiple blockcache items (#13573)
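The eviction fix above deserves spelling out. Go's `container/list.Remove` nils out the removed element's links, so reading `elem.Prev()` *after* evicting `elem` returns nil and the loop exits; that is why at most one blockcache item was evicted per pass. The corrected loop captures `Prev()` before evicting. A self-contained sketch of the pattern with toy values (not Loki's actual cache types), assuming the eviction path ultimately calls `list.Remove` as the diff suggests:

```go
package main

import (
	"container/list"
	"fmt"
)

func main() {
	l := list.New()
	for i := 1; i <= 6; i++ {
		l.PushBack(i)
	}

	// Walk back to front, removing even values. Capture Prev() BEFORE
	// removing: list.Remove zeroes the element's links, so calling
	// elem.Prev() afterwards returns nil and would end the loop after
	// the first removal -- the bug the blockscache.go change fixes.
	for elem := l.Back(); elem != nil; {
		next := elem.Prev()
		if elem.Value.(int)%2 == 0 {
			l.Remove(elem)
		}
		elem = next
	}

	for e := l.Front(); e != nil; e = e.Next() {
		fmt.Print(e.Value, " ") // prints: 1 3 5
	}
	fmt.Println()
}
```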
8699ee40f7772fd96bf3f0025f1bc762cc912bc1
2019-03-25 20:00:02
Steven Sheehy
helm: Run with reduced privileges
false
diff --git a/production/helm/Chart.yaml b/production/helm/Chart.yaml index 40b88cca03d19..6eebbd45494fb 100644 --- a/production/helm/Chart.yaml +++ b/production/helm/Chart.yaml @@ -1,5 +1,5 @@ name: loki -version: 0.3.0 +version: 0.4.0 appVersion: 0.0.1 kubeVersion: "^1.10.0-0" description: "Loki: like Prometheus, but for logs." diff --git a/production/helm/templates/loki/deployment.yaml b/production/helm/templates/loki/deployment.yaml index 93eff4d02c40d..c221489b17e8a 100644 --- a/production/helm/templates/loki/deployment.yaml +++ b/production/helm/templates/loki/deployment.yaml @@ -60,6 +60,8 @@ spec: {{- toYaml .Values.loki.readinessProbe | nindent 12 }} resources: {{- toYaml .Values.loki.resources | nindent 12 }} + securityContext: + {{- toYaml .Values.loki.securityContext | nindent 12 }} nodeSelector: {{- toYaml .Values.loki.nodeSelector | nindent 8 }} affinity: diff --git a/production/helm/templates/loki/podsecuritypolicy.yaml b/production/helm/templates/loki/podsecuritypolicy.yaml index c81894d6132fb..a5ef5c3f27454 100644 --- a/production/helm/templates/loki/podsecuritypolicy.yaml +++ b/production/helm/templates/loki/podsecuritypolicy.yaml @@ -6,7 +6,7 @@ metadata: name: {{ template "loki.fullname" . }} labels: app: {{ template "loki.name" . }} - chart: {{ .Chart.Name }}-{{ .Chart.Version }} + chart: {{ template "loki.chart" . }} heritage: {{ .Release.Service }} release: {{ .Release.Name }} spec: @@ -21,13 +21,24 @@ spec: hostIPC: false hostPID: false runAsUser: - rule: 'RunAsAny' + rule: 'MustRunAsNonRoot' seLinux: - rule: 'RunAsAny' + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 supplementalGroups: - rule: 'RunAsAny' + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 fsGroup: - rule: 'RunAsAny' - readOnlyRootFilesystem: false + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: true + requiredDropCapabilities: + - ALL {{- end }} {{- end }} diff --git a/production/helm/templates/loki/pvc.yaml b/production/helm/templates/loki/pvc.yaml index 9bba88becb3a5..f9041d551d224 100644 --- a/production/helm/templates/loki/pvc.yaml +++ b/production/helm/templates/loki/pvc.yaml @@ -6,13 +6,11 @@ metadata: name: {{ template "loki.fullname" . }} labels: app: {{ template "loki.name" . }} - chart: {{ .Chart.Name }}-{{ .Chart.Version }} + chart: {{ template "loki.chart" . }} heritage: {{ .Release.Service }} release: {{ .Release.Name }} - {{- with .Values.loki.persistence.annotations }} annotations: -{{ toYaml . | indent 4 }} - {{- end }} + {{- toYaml .Values.loki.persistence.annotations | nindent 4 }} spec: accessModes: {{- range .Values.loki.persistence.accessModes }} diff --git a/production/helm/templates/loki/role.yaml b/production/helm/templates/loki/role.yaml index eb9c508f25a58..794bf3aac0351 100644 --- a/production/helm/templates/loki/role.yaml +++ b/production/helm/templates/loki/role.yaml @@ -6,7 +6,7 @@ metadata: name: {{ template "loki.fullname" . }} labels: app: {{ template "loki.name" . }} - chart: {{ .Chart.Name }}-{{ .Chart.Version }} + chart: {{ template "loki.chart" . }} heritage: {{ .Release.Service }} release: {{ .Release.Name }} {{- if .Values.rbac.pspEnabled }} diff --git a/production/helm/templates/loki/rolebinding.yaml b/production/helm/templates/loki/rolebinding.yaml index 4aa765bb4a239..734fc9aeb7952 100644 --- a/production/helm/templates/loki/rolebinding.yaml +++ b/production/helm/templates/loki/rolebinding.yaml @@ -6,7 +6,7 @@ metadata: name: {{ template "loki.fullname" . }} labels: app: {{ template "loki.name" . 
}} - chart: {{ .Chart.Name }}-{{ .Chart.Version }} + chart: {{ template "loki.chart" . }} heritage: {{ .Release.Service }} release: {{ .Release.Name }} roleRef: diff --git a/production/helm/templates/loki/serviceaccount.yaml b/production/helm/templates/loki/serviceaccount.yaml index 302cafdb452ce..fae926023df40 100644 --- a/production/helm/templates/loki/serviceaccount.yaml +++ b/production/helm/templates/loki/serviceaccount.yaml @@ -5,7 +5,7 @@ kind: ServiceAccount metadata: labels: app: {{ template "loki.name" . }} - chart: {{ .Chart.Name }}-{{ .Chart.Version }} + chart: {{ template "loki.chart" . }} heritage: {{ .Release.Service }} release: {{ .Release.Name }} name: {{ template "loki.serviceAccountName" . }} diff --git a/production/helm/templates/promtail/clusterrole.yaml b/production/helm/templates/promtail/clusterrole.yaml index 75be62d03d24a..b9d3709013816 100644 --- a/production/helm/templates/promtail/clusterrole.yaml +++ b/production/helm/templates/promtail/clusterrole.yaml @@ -8,10 +8,8 @@ metadata: chart: {{ template "promtail.chart" . }} release: {{ .Release.Name }} heritage: {{ .Release.Service }} -{{- with .Values.promtail.annotations }} annotations: -{{ toYaml . | indent 4 }} -{{- end }} + {{- toYaml .Values.promtail.annotations | nindent 4 }} name: {{ template "promtail.fullname" . }}-clusterrole rules: - apiGroups: [""] # "" indicates the core API group diff --git a/production/helm/templates/promtail/clusterrolebinding.yaml b/production/helm/templates/promtail/clusterrolebinding.yaml index 7a842922c8283..a6d998432504c 100644 --- a/production/helm/templates/promtail/clusterrolebinding.yaml +++ b/production/helm/templates/promtail/clusterrolebinding.yaml @@ -9,10 +9,8 @@ metadata: chart: {{ template "promtail.chart" . }} release: {{ .Release.Name }} heritage: {{ .Release.Service }} -{{- with .Values.promtail.annotations }} annotations: -{{ toYaml . | indent 4 }} -{{- end }} + {{- toYaml .Values.promtail.annotations | nindent 4 }} subjects: - kind: ServiceAccount name: {{ template "promtail.serviceAccountName" . }} diff --git a/production/helm/templates/promtail/configmap.yaml b/production/helm/templates/promtail/configmap.yaml index 5a85f0c9324c0..71d6c0617a331 100644 --- a/production/helm/templates/promtail/configmap.yaml +++ b/production/helm/templates/promtail/configmap.yaml @@ -14,7 +14,10 @@ data: minbackoff: {{ .Values.promtail.config.backoff_config.minbackoff }} maxbackoff: {{ .Values.promtail.config.backoff_config.maxbackoff }} maxretries: {{ .Values.promtail.config.backoff_config.maxretries }} - + server: + http_listen_port: {{ .Values.promtail.port }} + positions: + filename: /run/promtail/positions.yaml scrape_configs: - entry_parser: '{{ .Values.promtail.entryParser }}' job_name: kubernetes-pods-name diff --git a/production/helm/templates/promtail/daemonset.yaml b/production/helm/templates/promtail/daemonset.yaml index 1f02fd5e2cf19..a3bdd627e9ca2 100644 --- a/production/helm/templates/promtail/daemonset.yaml +++ b/production/helm/templates/promtail/daemonset.yaml @@ -42,6 +42,8 @@ spec: volumeMounts: - name: config mountPath: /etc/promtail + - name: run + mountPath: /run/promtail {{- with .Values.promtail.volumeMounts }} {{- toYaml . 
| nindent 12 }} {{- end }} @@ -51,11 +53,10 @@ spec: fieldRef: fieldPath: spec.nodeName ports: - - containerPort: 80 + - containerPort: {{ .Values.promtail.port }} name: http-metrics securityContext: - privileged: true - runAsUser: 0 + {{- toYaml .Values.promtail.securityContext | nindent 12 }} livenessProbe: {{- toYaml .Values.promtail.livenessProbe | nindent 12 }} readinessProbe: @@ -72,6 +73,9 @@ spec: - name: config configMap: name: {{ template "promtail.fullname" . }} + - name: run + hostPath: + path: /run/promtail {{- with .Values.promtail.volumes }} {{- toYaml . | nindent 8 }} {{- end }} diff --git a/production/helm/templates/promtail/podsecuritypolicy.yaml b/production/helm/templates/promtail/podsecuritypolicy.yaml index 2666f6bc226a1..96d761caa8fa6 100644 --- a/production/helm/templates/promtail/podsecuritypolicy.yaml +++ b/production/helm/templates/promtail/podsecuritypolicy.yaml @@ -6,12 +6,12 @@ metadata: name: {{ template "promtail.fullname" . }} labels: app: {{ template "promtail.name" . }} - chart: {{ .Chart.Name }}-{{ .Chart.Version }} + chart: {{ template "promtail.chart" . }} heritage: {{ .Release.Service }} release: {{ .Release.Name }} spec: - privileged: true - allowPrivilegeEscalation: true + privileged: false + allowPrivilegeEscalation: false volumes: - 'secret' - 'configMap' @@ -27,6 +27,8 @@ spec: rule: 'RunAsAny' fsGroup: rule: 'RunAsAny' - readOnlyRootFilesystem: false + readOnlyRootFilesystem: true + requiredDropCapabilities: + - ALL {{- end }} {{- end }} diff --git a/production/helm/templates/promtail/role.yaml b/production/helm/templates/promtail/role.yaml index e37a92f737cec..96ead979c7209 100644 --- a/production/helm/templates/promtail/role.yaml +++ b/production/helm/templates/promtail/role.yaml @@ -6,7 +6,7 @@ metadata: name: {{ template "promtail.fullname" . }} labels: app: {{ template "promtail.name" . }} - chart: {{ .Chart.Name }}-{{ .Chart.Version }} + chart: {{ template "promtail.chart" . }} heritage: {{ .Release.Service }} release: {{ .Release.Name }} {{- if .Values.rbac.pspEnabled }} diff --git a/production/helm/templates/promtail/rolebinding.yaml b/production/helm/templates/promtail/rolebinding.yaml index e2787b2c61bc8..9f77e754c102e 100644 --- a/production/helm/templates/promtail/rolebinding.yaml +++ b/production/helm/templates/promtail/rolebinding.yaml @@ -6,7 +6,7 @@ metadata: name: {{ template "promtail.fullname" . }} labels: app: {{ template "promtail.name" . }} - chart: {{ .Chart.Name }}-{{ .Chart.Version }} + chart: {{ template "promtail.chart" . 
}} heritage: {{ .Release.Service }} release: {{ .Release.Name }} roleRef: diff --git a/production/helm/values.yaml b/production/helm/values.yaml index 853470ef90d72..ee7df7a8075d7 100644 --- a/production/helm/values.yaml +++ b/production/helm/values.yaml @@ -50,6 +50,13 @@ loki: # cpu: 100m # memory: 128Mi + securityContext: + fsGroup: 10001 + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + ## Pod Annotations podAnnotations: {} @@ -94,7 +101,7 @@ loki: - ReadWriteOnce size: 10Gi storageClassName: default - # annotations: {} + annotations: {} # subPath: "" # existingClaim: @@ -141,18 +148,22 @@ promtail: tag: latest pullPolicy: Always # Always pull while in BETA + port: 3101 + + # Extra volumes to scrape logs from volumes: - - name: varlog + - name: pods hostPath: - path: /var/log - - name: varlibdockercontainers + path: /var/log/pods + - name: docker hostPath: path: /var/lib/docker/containers volumeMounts: - - name: varlog - mountPath: /var/log - - name: varlibdockercontainers + - name: pods + mountPath: /var/log/pods + readOnly: true + - name: docker mountPath: /var/lib/docker/containers readOnly: true @@ -168,6 +179,12 @@ promtail: # cpu: 100m # memory: 128Mi + securityContext: + fsGroup: 0 + readOnlyRootFilesystem: true + runAsGroup: 0 + runAsUser: 0 + ## Pod Annotations podAnnotations: {} # prometheus.io/scrape: "true"
helm
Run with reduced privileges
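For readers consuming these chart values programmatically: the `securityContext` block added to `values.yaml` above maps onto the `k8s.io/api/core/v1` types sketched below. Note that Kubernetes splits these fields across two places: `fsGroup` and the run-as identity belong to the pod-level `PodSecurityContext`, while `readOnlyRootFilesystem` belongs to the container-level `SecurityContext`. The helper name is ours, and the split shown is an assumption about how the chart's values are ultimately applied:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// lokiSecurityContexts mirrors the non-root values from values.yaml,
// placed on the objects where Kubernetes actually accepts each field.
func lokiSecurityContexts() (*corev1.PodSecurityContext, *corev1.SecurityContext) {
	uid, gid := int64(10001), int64(10001)
	nonRoot, roFS := true, true
	return &corev1.PodSecurityContext{
			RunAsUser:    &uid,
			RunAsGroup:   &gid,
			FSGroup:      &gid,
			RunAsNonRoot: &nonRoot,
		}, &corev1.SecurityContext{
			ReadOnlyRootFilesystem: &roFS,
		}
}

func main() {
	pod, ctr := lokiSecurityContexts()
	fmt.Printf("pod: %+v\ncontainer: %+v\n", pod, ctr)
}
```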
061bdfe67208eef0af11b0a31214f2470088b585
2024-10-31 20:52:00
Jay Clifford
docs: AWS Cloud Guide Update (#14687)
false
diff --git a/docs/sources/setup/install/helm/deployment-guides/aws.md b/docs/sources/setup/install/helm/deployment-guides/aws.md index bbe80da5e7761..380a37dd4ff6c 100644 --- a/docs/sources/setup/install/helm/deployment-guides/aws.md +++ b/docs/sources/setup/install/helm/deployment-guides/aws.md @@ -18,12 +18,12 @@ There are two methods for authenticating and connecting Loki to AWS S3. We will ## Considerations {{< admonition type="caution" >}} -This guide was accurate at the time it was last updated on **21st October, 2024**. As cloud providers frequently update their services and offerings, as a best practice, you should refer to the [AWS S3 documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/Welcome.html) before creating your buckets and assigning roles. +This guide was accurate at the time it was last updated on **31st October, 2024**. As cloud providers frequently update their services and offerings, as a best practice, you should refer to the [AWS S3 documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/Welcome.html) before creating your buckets and assigning roles. {{< /admonition >}} - **IAM Role:** The IAM role created in this guide is a basic role that allows Loki to read and write to the S3 bucket. You may wish to add more granular permissions based on your requirements. -- **Authentication:** Grafana Loki comes with a basic authentication layer. The Loki gateway (NGINX) is exposed to the internet using basic authentication in this example. NGINX can also be replaced with other open-source reverse proxies. Refer to [Authentication](https://grafana.com/docs/loki/<LOKI_VERSION/operations/authentication/) for more information. +- **Authentication:** Grafana Loki comes with a basic authentication layer. The Loki gateway (NGINX) is exposed to the internet using basic authentication in this example. NGINX can also be replaced with other open-source reverse proxies. Refer to [Authentication](https://grafana.com/docs/loki/<LOKI_VERSION>/operations/authentication/) for more information. - **Retention:** The retention period is set to 28 days in the `values.yaml` file. You may wish to adjust this based on your requirements. @@ -48,7 +48,7 @@ The minimum requirements for deploying Loki on EKS are: - Kubernetes version `1.30` or above. - `3` nodes for the EKS cluster. -- Instance type depends on your workload. A good starting point is `m5.xlarge`. +- Instance type depends on your workload. A good starting point for a production cluster is `m7i.2xlarge`. Here is the EKSctl cluster configuration file used in this guide: @@ -59,8 +59,8 @@ apiVersion: eksctl.io/v1alpha5 kind: ClusterConfig metadata: - name: <INSERT-NAME> - region: <INSERT-REGION> + name: <INSERT-CLUSTER-NAME> + region: <INSERT-REGION-FOR-CLUSTER> version: "1.31" iam: @@ -68,28 +68,25 @@ iam: addons: - name: aws-ebs-csi-driver - - name: eks-pod-identity-agent managedNodeGroups: - name: loki-workers - instanceType: m5.xlarge + instanceType: m7i.2xlarge desiredCapacity: 3 minSize: 2 maxSize: 3 - amiFamily: AmazonLinux2 + amiFamily: AmazonLinux2023 iam: withAddonPolicies: ebs: true volumeSize: 80 - volumeType: gp2 + volumeType: gp3 ebsOptimized: true - ``` The following plugins must also be installed within the EKS cluster: - **Amazon EBS CSI Driver**: Enables Kubernetes to dynamically provision and manage EBS volumes as persistent storage for applications. We use this to provision the node volumes for Loki. 
-- **Amazon EKS Pod Identity Agent**: Manages AWS IAM roles for pods, allowing fine-grained access control to AWS resources without needing to store credentials in containers. This is how Loki will access the S3 bucket. - **CoreDNS**: Provides internal DNS service for Kubernetes clusters, ensuring that services and pods can communicate with each other using DNS names. - **kube-proxy**: Maintains network rules on nodes, enabling communication between pods and services within the cluster. @@ -198,77 +195,6 @@ The recommended method for connecting Loki to AWS S3 is to use an IAM role. This ``` **Make sure to replace the placeholder with your AWS account ID.** -### Adding the policy to the S3 buckets - -To allow the IAM role to access the S3 buckets, you need to add the policy to the bucket. You can do this using the AWS Management Console or the AWS CLI. The below steps show how to add the policy using the AWS CLI. - -1. Create a bucket policy file named `bucket-policy-chunk.json` with the following content: - - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "Statement1", - "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::<ACCOUNT ID>:role/LokiServiceAccountRole" - }, - "Action": [ - "s3:PutObject", - "s3:GetObject", - "s3:DeleteObject", - "s3:ListBucket" - ], - "Resource": [ - "arn:aws:s3:::< CHUNK BUCKET NAME >", - "arn:aws:s3:::< CHUNK BUCKET NAME >/*" - ] - } - ] - } - ``` - **Make sure to replace the placeholders with your AWS account ID and the bucket names.** - -1. Add the policy to the bucket: - - ```bash - aws s3api put-bucket-policy --bucket <CHUNK BUCKET NAME eg. `loki-aws-dev-chunks`> --policy file://bucket-policy-chunk.json - ``` -1. Create a bucket policy file named `bucket-policy-ruler.json` with the following content: - - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "Statement1", - "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::<ACCOUNT ID>:role/LokiServiceAccountRole" - }, - "Action": [ - "s3:PutObject", - "s3:GetObject", - "s3:DeleteObject", - "s3:ListBucket" - ], - "Resource": [ - "arn:aws:s3:::< RULER BUCKET NAME >", - "arn:aws:s3:::< RULER BUCKET NAME >/*" - ] - } - ] - } - ``` - **Make sure to replace the placeholders with your AWS account ID and the bucket names.** - -1. Add the policy to the bucket: - - ```bash - aws s3api put-bucket-policy --bucket <RULER BUCKET NAME eg. `loki-aws-dev-ruler`> --policy file://bucket-policy-ruler.json - ``` - ## Deploying the Helm chart Before we can deploy the Loki Helm chart, we need to add the Grafana chart repository to Helm. This repository contains the Loki Helm chart. @@ -322,8 +248,6 @@ Loki by default does not come with any authentication. Since we will be deployin We create a literal secret with the username and password for Loki canary to authenticate with the Loki gateway. **Make sure to replace the placeholders with your desired username and password.** - - ### Loki Helm chart configuration Create a `values.yaml` file choosing the configuration options that best suit your requirements. Below there is an example of `values.yaml` files for the Loki Helm chart in [microservices](https://grafana.com/docs/loki/<LOKI_VERSION>/get-started/deployment-modes/#microservices-mode) mode.
docs
AWS Cloud Guide Update (#14687)
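The bucket-policy steps removed by the diff above used the AWS CLI (`aws s3api put-bucket-policy`). For anyone scripting the same step, a rough equivalent with `aws-sdk-go-v2` looks like the sketch below; the bucket name and policy file are placeholders and error handling is trimmed to the essentials:

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	// Credentials and region come from the usual chain
	// (env vars, shared config, IAM role), same as the CLI.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	policy, err := os.ReadFile("bucket-policy-chunk.json")
	if err != nil {
		log.Fatal(err)
	}

	client := s3.NewFromConfig(cfg)
	_, err = client.PutBucketPolicy(ctx, &s3.PutBucketPolicyInput{
		Bucket: aws.String("loki-aws-dev-chunks"), // placeholder bucket
		Policy: aws.String(string(policy)),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```

Per the updated guide, granting access through the IAM role attached to Loki's service account is now the recommended method, so attaching an explicit bucket policy like this is only needed if you keep the older setup.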