Mirror of https://github.com/pomerium/pomerium.git (synced 2025-08-04 01:09:36 +02:00)
New tracing system (#5388)
* update tracing config definitions
* new tracing system
* performance improvements
* only configure tracing in envoy if it is enabled in pomerium
* [tracing] refactor to use custom extension for trace id editing (#5420)
* set default tracing sample rate to 1.0
* fix proxy service http middleware
* improve some existing auth related traces
* test fixes
* bump envoyproxy/go-control-plane
* code cleanup
* test fixes
* Fix missing spans for well-known endpoints
* import extension apis from pomerium/envoy-custom
Parent: 832742648d
Commit: 396c35b6b4
121 changed files with 6096 additions and 1946 deletions
internal/testutil/tracetest/buffer.go (new file, 193 lines)
@@ -0,0 +1,193 @@
package tracetest

import (
	"cmp"
	"encoding/base64"
	"maps"
	"slices"
	"sync"

	"github.com/pomerium/pomerium/internal/hashutil"
	commonv1 "go.opentelemetry.io/proto/otlp/common/v1"
	resourcev1 "go.opentelemetry.io/proto/otlp/resource/v1"
	tracev1 "go.opentelemetry.io/proto/otlp/trace/v1"
	"google.golang.org/protobuf/proto"
)

type ScopeBuffer struct {
	scope *ScopeInfo
	spans []*tracev1.Span
}

func (sb *ScopeBuffer) Insert(spans ...*tracev1.Span) {
	sb.spans = append(sb.spans, spans...)
}

func NewScopeBuffer(scope *ScopeInfo) *ScopeBuffer {
	return &ScopeBuffer{
		scope: scope,
	}
}

type ResourceBuffer struct {
	resource     *ResourceInfo
	spansByScope map[string]*ScopeBuffer
}

func NewResourceBuffer(resource *ResourceInfo) *ResourceBuffer {
	return &ResourceBuffer{
		resource:     resource,
		spansByScope: make(map[string]*ScopeBuffer),
	}
}

func (rb *ResourceBuffer) Insert(scope *ScopeInfo, span *tracev1.Span) {
	var spans *ScopeBuffer
	if sp, ok := rb.spansByScope[scope.ID()]; ok {
		spans = sp
	} else {
		spans = NewScopeBuffer(scope)
		rb.spansByScope[scope.ID()] = spans
	}
	spans.Insert(span)
}

func (rb *ResourceBuffer) Flush() []*tracev1.ScopeSpans {
	out := make([]*tracev1.ScopeSpans, 0, len(rb.spansByScope))
	for _, key := range slices.Sorted(maps.Keys(rb.spansByScope)) {
		spans := rb.spansByScope[key]
		slices.SortStableFunc(spans.spans, func(a, b *tracev1.Span) int {
			return cmp.Compare(a.StartTimeUnixNano, b.StartTimeUnixNano)
		})
		scopeSpans := &tracev1.ScopeSpans{
			Scope:     spans.scope.Scope,
			SchemaUrl: spans.scope.Schema,
			Spans:     spans.spans,
		}
		out = append(out, scopeSpans)
	}
	clear(rb.spansByScope)
	return out
}

func (rb *ResourceBuffer) Merge(other *ResourceBuffer) {
	for scope, otherSpans := range other.spansByScope {
		if ourSpans, ok := rb.spansByScope[scope]; !ok {
			rb.spansByScope[scope] = otherSpans
		} else {
			ourSpans.Insert(otherSpans.spans...)
		}
	}
	clear(other.spansByScope)
}

type Buffer struct {
	scopesByResourceID map[string]*ResourceBuffer
}

func NewBuffer() *Buffer {
	return &Buffer{
		scopesByResourceID: make(map[string]*ResourceBuffer),
	}
}

func (b *Buffer) Insert(resource *ResourceInfo, scope *ScopeInfo, span *tracev1.Span) {
	resourceEq := resource.ID()
	var scopes *ResourceBuffer
	if sc, ok := b.scopesByResourceID[resourceEq]; ok {
		scopes = sc
	} else {
		scopes = NewResourceBuffer(resource)
		b.scopesByResourceID[resourceEq] = scopes
	}
	scopes.Insert(scope, span)
}

func (b *Buffer) Flush() []*tracev1.ResourceSpans {
	out := make([]*tracev1.ResourceSpans, 0, len(b.scopesByResourceID))
	for _, key := range slices.Sorted(maps.Keys(b.scopesByResourceID)) {
		scopes := b.scopesByResourceID[key]
		resourceSpans := &tracev1.ResourceSpans{
			Resource:   scopes.resource.Resource,
			ScopeSpans: scopes.Flush(),
			SchemaUrl:  scopes.resource.Schema,
		}
		out = append(out, resourceSpans)
	}
	clear(b.scopesByResourceID)
	return out
}

func (b *Buffer) Merge(other *Buffer) {
	if b != nil {
		for k, otherV := range other.scopesByResourceID {
			if v, ok := b.scopesByResourceID[k]; !ok {
				b.scopesByResourceID[k] = otherV
			} else {
				v.Merge(otherV)
			}
		}
	}
	clear(other.scopesByResourceID)
}

func (b *Buffer) IsEmpty() bool {
	return len(b.scopesByResourceID) == 0
}

type ResourceInfo struct {
	Resource *resourcev1.Resource
	Schema   string
	ID       func() string
}

func NewResourceInfo(resource *resourcev1.Resource, resourceSchema string) *ResourceInfo {
	ri := &ResourceInfo{
		Resource: resource,
		Schema:   resourceSchema,
	}
	ri.ID = sync.OnceValue(ri.computeID)
	return ri
}

func (ri *ResourceInfo) computeID() string {
	hash := hashutil.NewDigest()
	tmp := resourcev1.Resource{
		Attributes: ri.Resource.Attributes,
	}
	bytes, _ := proto.Marshal(&tmp)
	hash.WriteStringWithLen(ri.Schema)
	hash.WriteWithLen(bytes)
	return base64.StdEncoding.EncodeToString(hash.Sum(nil))
}

type ScopeInfo struct {
	Scope  *commonv1.InstrumentationScope
	Schema string
	ID     func() string
}

func NewScopeInfo(scope *commonv1.InstrumentationScope, scopeSchema string) *ScopeInfo {
	si := &ScopeInfo{
		Scope:  scope,
		Schema: scopeSchema,
	}
	si.ID = sync.OnceValue(si.computeID)
	return si
}

func (si *ScopeInfo) computeID() string {
	if si.Scope == nil {
		return "(unknown)"
	}
	hash := hashutil.NewDigest()
	tmp := commonv1.InstrumentationScope{
		Name:       si.Scope.Name,
		Version:    si.Scope.Version,
		Attributes: si.Scope.Attributes,
	}
	bytes, _ := proto.Marshal(&tmp)
	hash.WriteStringWithLen(si.Schema)
	hash.WriteWithLen(bytes)
	return base64.StdEncoding.EncodeToString(hash.Sum(nil))
}
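The buffer above groups incoming spans by resource identity (a hash of the resource attributes plus schema URL), then by instrumentation scope, and Flush emits them in deterministic, start-time-sorted order. Here is a minimal usage sketch, not part of the commit: the package name, attribute values, and schema string are made up, and since tracetest lives under internal/ it is only importable from within the pomerium module. Only NewBuffer, NewResourceInfo, NewScopeInfo, Insert, Flush, and IsEmpty come from the file above.

// Hypothetical usage sketch (not part of the commit).
package main

import (
	"fmt"

	"github.com/pomerium/pomerium/internal/testutil/tracetest"
	commonv1 "go.opentelemetry.io/proto/otlp/common/v1"
	resourcev1 "go.opentelemetry.io/proto/otlp/resource/v1"
	tracev1 "go.opentelemetry.io/proto/otlp/trace/v1"
)

func main() {
	// a resource identified by its attributes + schema URL (values are illustrative)
	res := tracetest.NewResourceInfo(&resourcev1.Resource{
		Attributes: []*commonv1.KeyValue{{
			Key:   "service.name",
			Value: &commonv1.AnyValue{Value: &commonv1.AnyValue_StringValue{StringValue: "example-service"}},
		}},
	}, "example-schema-url")
	scope := tracetest.NewScopeInfo(&commonv1.InstrumentationScope{Name: "example-scope"}, "example-schema-url")

	buf := tracetest.NewBuffer()
	// spans may be inserted out of order; Flush sorts them by StartTimeUnixNano
	buf.Insert(res, scope, &tracev1.Span{Name: "second", StartTimeUnixNano: 2})
	buf.Insert(res, scope, &tracev1.Span{Name: "first", StartTimeUnixNano: 1})

	// one ResourceSpans containing one ScopeSpans, spans ordered "first", "second";
	// the buffer is empty afterwards
	out := buf.Flush()
	fmt.Println(len(out), buf.IsEmpty())
}

The test file that follows exercises exactly this behavior with multiple resources and scopes.
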
internal/testutil/tracetest/buffer_test.go (new file, 118 lines)
@@ -0,0 +1,118 @@
package tracetest

import (
	"testing"

	"github.com/pomerium/pomerium/internal/testutil"
	"github.com/stretchr/testify/assert"
	tracev1 "go.opentelemetry.io/proto/otlp/trace/v1"
)

func TestBuffer(t *testing.T) {
	t.Parallel()

	// start time determines sort order of spans within a resource+scope group
	s := []*tracev1.Span{
		{TraceId: Trace(1).B(), SpanId: Span(1).B(), StartTimeUnixNano: 1},
		{TraceId: Trace(1).B(), SpanId: Span(2).B(), StartTimeUnixNano: 2},
		{TraceId: Trace(2).B(), SpanId: Span(3).B(), StartTimeUnixNano: 3},
		{TraceId: Trace(2).B(), SpanId: Span(4).B(), StartTimeUnixNano: 4},
		{TraceId: Trace(1).B(), SpanId: Span(5).B(), StartTimeUnixNano: 5},
		{TraceId: Trace(1).B(), SpanId: Span(6).B(), StartTimeUnixNano: 6},
		{TraceId: Trace(2).B(), SpanId: Span(7).B(), StartTimeUnixNano: 7},
		{TraceId: Trace(2).B(), SpanId: Span(8).B(), StartTimeUnixNano: 8},
		{TraceId: Trace(1).B(), SpanId: Span(9).B(), StartTimeUnixNano: 9},
		{TraceId: Trace(1).B(), SpanId: Span(10).B(), StartTimeUnixNano: 10},
		{TraceId: Trace(2).B(), SpanId: Span(11).B(), StartTimeUnixNano: 11},
		{TraceId: Trace(2).B(), SpanId: Span(12).B(), StartTimeUnixNano: 12},
		{TraceId: Trace(1).B(), SpanId: Span(13).B(), StartTimeUnixNano: 13},
		{TraceId: Trace(1).B(), SpanId: Span(14).B(), StartTimeUnixNano: 14},
		{TraceId: Trace(2).B(), SpanId: Span(15).B(), StartTimeUnixNano: 15},
		{TraceId: Trace(2).B(), SpanId: Span(16).B(), StartTimeUnixNano: 16},
	}

	newTestBuffer := func() *Buffer {
		b := NewBuffer()
		b.Insert(Resource(1).Make(), Scope(1).Make(), s[0])
		b.Insert(Resource(1).Make(), Scope(1).Make(), s[1])
		b.Insert(Resource(1).Make(), Scope(1).Make(), s[2])
		b.Insert(Resource(1).Make(), Scope(1).Make(), s[3])
		b.Insert(Resource(1).Make(), Scope(2).Make(), s[4])
		b.Insert(Resource(1).Make(), Scope(2).Make(), s[5])
		b.Insert(Resource(1).Make(), Scope(2).Make(), s[6])
		b.Insert(Resource(1).Make(), Scope(2).Make(), s[7])
		b.Insert(Resource(2).Make(), Scope(1).Make(), s[8])
		b.Insert(Resource(2).Make(), Scope(1).Make(), s[9])
		b.Insert(Resource(2).Make(), Scope(1).Make(), s[10])
		b.Insert(Resource(2).Make(), Scope(1).Make(), s[11])
		b.Insert(Resource(2).Make(), Scope(2).Make(), s[12])
		b.Insert(Resource(2).Make(), Scope(2).Make(), s[13])
		b.Insert(Resource(2).Make(), Scope(2).Make(), s[14])
		b.Insert(Resource(2).Make(), Scope(2).Make(), s[15])
		return b
	}

	newExpectedSpans := func() []*tracev1.ResourceSpans {
		return []*tracev1.ResourceSpans{
			{
				Resource: Resource(1).Make().Resource,
				ScopeSpans: []*tracev1.ScopeSpans{
					{
						Scope:     Scope(1).Make().Scope,
						Spans:     []*tracev1.Span{s[0], s[1], s[2], s[3]},
						SchemaUrl: Schema(0).String(),
					},
					{
						Scope:     Scope(2).Make().Scope,
						Spans:     []*tracev1.Span{s[4], s[5], s[6], s[7]},
						SchemaUrl: Schema(0).String(),
					},
				},
				SchemaUrl: Schema(0).String(),
			},
			{
				Resource: Resource(2).Make().Resource,
				ScopeSpans: []*tracev1.ScopeSpans{
					{
						Scope:     Scope(1).Make().Scope,
						Spans:     []*tracev1.Span{s[8], s[9], s[10], s[11]},
						SchemaUrl: Schema(0).String(),
					},
					{
						Scope:     Scope(2).Make().Scope,
						Spans:     []*tracev1.Span{s[12], s[13], s[14], s[15]},
						SchemaUrl: Schema(0).String(),
					},
				},
				SchemaUrl: Schema(0).String(),
			},
		}
	}
	t.Run("Flush", func(t *testing.T) {
		b := newTestBuffer()
		actual := b.Flush()
		assert.True(t, b.IsEmpty())
		testutil.AssertProtoEqual(t, newExpectedSpans(), actual)
	})

	t.Run("Default scope", func(t *testing.T) {
		b := NewBuffer()
		b.Insert(Resource(1).Make(Schema(2)), NewScopeInfo(nil, ""), s[0])
		b.Insert(Resource(1).Make(Schema(2)), NewScopeInfo(nil, ""), s[1])
		b.Insert(Resource(1).Make(Schema(2)), NewScopeInfo(nil, ""), s[2])
		actual := b.Flush()
		testutil.AssertProtoEqual(t, []*tracev1.ResourceSpans{
			{
				Resource: Resource(1).Make(Schema(2)).Resource,
				ScopeSpans: []*tracev1.ScopeSpans{
					{
						Scope:     nil,
						Spans:     []*tracev1.Span{s[0], s[1], s[2]},
						SchemaUrl: "",
					},
				},
				SchemaUrl: Schema(2).String(),
			},
		}, actual)
	})
}
internal/testutil/tracetest/mock_otlptrace/mock_client.go (new file, 84 lines)
@@ -0,0 +1,84 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: go.opentelemetry.io/otel/exporters/otlp/otlptrace (interfaces: Client)
//
// Generated by this command:
//
//	mockgen go.opentelemetry.io/otel/exporters/otlp/otlptrace Client
//

// Package mock_otlptrace is a generated GoMock package.
package mock_otlptrace

import (
	context "context"
	reflect "reflect"

	v1 "go.opentelemetry.io/proto/otlp/trace/v1"
	gomock "go.uber.org/mock/gomock"
)

// MockClient is a mock of Client interface.
type MockClient struct {
	ctrl     *gomock.Controller
	recorder *MockClientMockRecorder
	isgomock struct{}
}

// MockClientMockRecorder is the mock recorder for MockClient.
type MockClientMockRecorder struct {
	mock *MockClient
}

// NewMockClient creates a new mock instance.
func NewMockClient(ctrl *gomock.Controller) *MockClient {
	mock := &MockClient{ctrl: ctrl}
	mock.recorder = &MockClientMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockClient) EXPECT() *MockClientMockRecorder {
	return m.recorder
}

// Start mocks base method.
func (m *MockClient) Start(ctx context.Context) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Start", ctx)
	ret0, _ := ret[0].(error)
	return ret0
}

// Start indicates an expected call of Start.
func (mr *MockClientMockRecorder) Start(ctx any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockClient)(nil).Start), ctx)
}

// Stop mocks base method.
func (m *MockClient) Stop(ctx context.Context) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Stop", ctx)
	ret0, _ := ret[0].(error)
	return ret0
}

// Stop indicates an expected call of Stop.
func (mr *MockClientMockRecorder) Stop(ctx any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockClient)(nil).Stop), ctx)
}

// UploadTraces mocks base method.
func (m *MockClient) UploadTraces(ctx context.Context, protoSpans []*v1.ResourceSpans) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "UploadTraces", ctx, protoSpans)
	ret0, _ := ret[0].(error)
	return ret0
}

// UploadTraces indicates an expected call of UploadTraces.
func (mr *MockClientMockRecorder) UploadTraces(ctx, protoSpans any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadTraces", reflect.TypeOf((*MockClient)(nil).UploadTraces), ctx, protoSpans)
}
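This MockGen output mocks the otlptrace exporter's Client interface (Start, Stop, UploadTraces). As a hypothetical sketch of how a test might use it, not part of this commit: the test name, the expectation choices, and the empty sample ResourceSpans below are all made up; only NewMockClient and the mocked methods come from the generated file, and gomock.NewController, gomock.Any, and DoAndReturn are standard go.uber.org/mock APIs.

// Hypothetical test sketch (not part of the commit).
package mock_otlptrace_test

import (
	"context"
	"testing"

	"github.com/pomerium/pomerium/internal/testutil/tracetest/mock_otlptrace"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
	tracev1 "go.opentelemetry.io/proto/otlp/trace/v1"
	"go.uber.org/mock/gomock"
)

func TestMockClient(t *testing.T) {
	ctrl := gomock.NewController(t)
	client := mock_otlptrace.NewMockClient(ctrl)

	// the mock satisfies the real exporter client interface
	var _ otlptrace.Client = client

	client.EXPECT().Start(gomock.Any()).Return(nil)
	client.EXPECT().UploadTraces(gomock.Any(), gomock.Any()).
		DoAndReturn(func(_ context.Context, spans []*tracev1.ResourceSpans) error {
			t.Logf("received %d resource spans", len(spans))
			return nil
		})
	client.EXPECT().Stop(gomock.Any()).Return(nil)

	ctx := context.Background()
	if err := client.Start(ctx); err != nil {
		t.Fatal(err)
	}
	if err := client.UploadTraces(ctx, []*tracev1.ResourceSpans{{}}); err != nil {
		t.Fatal(err)
	}
	if err := client.Stop(ctx); err != nil {
		t.Fatal(err)
	}
}

Because the mock satisfies otlptrace.Client, it can stand in for a real OTLP endpoint wherever a test constructs an exporter from a client.
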
internal/testutil/tracetest/tracing.go (new file, 629 lines)
@@ -0,0 +1,629 @@
package tracetest

import (
	"cmp"
	"context"
	"encoding/binary"
	"encoding/json"
	"fmt"
	"maps"
	"runtime"
	"slices"
	"strings"
	"sync"
	"testing"
	"time"
	"unique"

	gocmp "github.com/google/go-cmp/cmp"
	"github.com/pomerium/pomerium/internal/telemetry/trace"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	oteltrace "go.opentelemetry.io/otel/trace"
	coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
	commonv1 "go.opentelemetry.io/proto/otlp/common/v1"
	resourcev1 "go.opentelemetry.io/proto/otlp/resource/v1"
	tracev1 "go.opentelemetry.io/proto/otlp/trace/v1"
	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/testing/protocmp"
)

type (
	Trace    uint32
	Span     uint32
	Scope    uint32
	Schema   uint32
	Resource uint32
)

func (n Trace) String() string    { return fmt.Sprintf("Trace %d", n) }
func (n Span) String() string     { return fmt.Sprintf("Span %d", n) }
func (n Scope) String() string    { return fmt.Sprintf("Scope %d", n) }
func (n Schema) String() string   { return fmt.Sprintf("Schema %d", n) }
func (n Resource) String() string { return fmt.Sprintf("Resource %d", n) }

func (n Trace) ID() unique.Handle[oteltrace.TraceID] {
	id, _ := trace.ToTraceID(n.B())
	return id
}

func (n Trace) B() []byte {
	var id oteltrace.TraceID
	binary.BigEndian.PutUint32(id[12:], uint32(n))
	return id[:]
}

func (n Span) ID() oteltrace.SpanID {
	id, _ := trace.ToSpanID(n.B())
	return id
}

func (n Span) B() []byte {
	var id oteltrace.SpanID
	binary.BigEndian.PutUint32(id[4:], uint32(n))
	return id[:]
}

func (n Scope) Make(s ...Schema) *ScopeInfo {
	if len(s) == 0 {
		s = append(s, Schema(0))
	}
	return NewScopeInfo(&commonv1.InstrumentationScope{
		Name:    n.String(),
		Version: "v1",
		Attributes: []*commonv1.KeyValue{
			{
				Key: "id",
				Value: &commonv1.AnyValue{
					Value: &commonv1.AnyValue_IntValue{
						IntValue: int64(n),
					},
				},
			},
		},
	}, s[0].String())
}

func (n Resource) Make(s ...Schema) *ResourceInfo {
	if len(s) == 0 {
		s = append(s, Schema(0))
	}
	return NewResourceInfo(&resourcev1.Resource{
		Attributes: []*commonv1.KeyValue{
			{
				Key: "name",
				Value: &commonv1.AnyValue{
					Value: &commonv1.AnyValue_StringValue{
						StringValue: n.String(),
					},
				},
			},
			{
				Key: "id",
				Value: &commonv1.AnyValue{
					Value: &commonv1.AnyValue_IntValue{
						IntValue: int64(n),
					},
				},
			},
		},
	}, s[0].String())
}

func Traceparent(trace Trace, span Span, sampled bool) string {
	sampledStr := "00"
	if sampled {
		sampledStr = "01"
	}
	return fmt.Sprintf("00-%s-%s-%s", trace.ID().Value(), span.ID(), sampledStr)
}

type TraceResults struct {
	resourceSpans []*tracev1.ResourceSpans

	GetResources func() []*resourcev1.Resource
	GetTraces    func() *Traces
}

type Traces struct {
	ByID          map[unique.Handle[oteltrace.TraceID]]*TraceDetails
	ByName        map[string]TraceDetailsList
	ByParticipant map[string]TraceDetailsList
}

func (t *Traces) WithoutErrors() *Traces {
	byID := make(map[unique.Handle[oteltrace.TraceID]]*TraceDetails, len(t.ByID))
	for k, v := range t.ByID {
		if len(v.Errors) > 0 {
			continue
		}
		byID[k] = v
	}
	byName := make(map[string]TraceDetailsList)
	for k, v := range t.ByName {
		filtered := v.WithoutErrors()
		if len(filtered) == 0 {
			continue
		}
		byName[k] = filtered
	}
	byParticipant := make(map[string]TraceDetailsList)
	for k, v := range t.ByParticipant {
		filtered := v.WithoutErrors()
		if len(filtered) == 0 {
			continue
		}
		byParticipant[k] = filtered
	}
	return &Traces{
		ByID:          byID,
		ByName:        byName,
		ByParticipant: byParticipant,
	}
}

type TraceDetails struct {
	ID        unique.Handle[oteltrace.TraceID]
	Name      string
	Spans     []*SpanDetails
	Services  []string
	StartTime time.Time
	EndTime   time.Time
	Duration  time.Duration
	Errors    []int // indexes into Spans
}

func (td *TraceDetails) Equal(other *TraceDetails) (bool, string) {
	diffSpans := func(a, b []*SpanDetails) (bool, string) {
		for i := range len(a) {
			diff := gocmp.Diff(a[i], b[i], protocmp.Transform())
			if diff != "" {
				return false, diff
			}
		}
		return true, ""
	}
	if td.ID != other.ID {
		return false, fmt.Sprintf("traces are trivially not equal: ID %s (actual) != %s (expected)", td.ID.Value(), other.ID.Value())
	}
	if len(td.Spans) != len(other.Spans) {
		return false, fmt.Sprintf("traces are trivially not equal: len(spans) %d (actual) != %d (expected)", len(td.Spans), len(other.Spans))
	}
	if !td.StartTime.Equal(other.StartTime) {
		return false, fmt.Sprintf("traces are trivially not equal: start time %s (actual) != %s (expected)", td.StartTime, other.StartTime)
	}
	if !td.EndTime.Equal(other.EndTime) {
		return false, fmt.Sprintf("traces are trivially not equal: end time %s (actual) != %s (expected)", td.EndTime, other.EndTime)
	}
	return diffSpans(td.Spans, other.Spans)
}

type TraceDetailsList []*TraceDetails

func (list TraceDetailsList) WithoutExportRPCs() TraceDetailsList {
	out := make(TraceDetailsList, 0, len(list))
	for _, td := range list {
		if strings.Contains(td.Name, "opentelemetry.proto.collector.trace.v1.TraceService/Export") {
			continue
		}
		out = append(out, td)
	}
	return out
}

func (list TraceDetailsList) WithoutErrors() TraceDetailsList {
	out := make(TraceDetailsList, 0, len(list))
	for _, td := range list {
		if len(td.Errors) > 0 {
			continue
		}
		out = append(out, td)
	}
	return out
}

func (td *TraceDetails) SpanTree() *SpanTree {
	nodesByID := map[oteltrace.SpanID]*SpanTreeNode{}
	nodesByID[oteltrace.SpanID([8]byte{})] = &SpanTreeNode{} // root node
	for _, span := range td.Spans {
		spanID, _ := trace.ToSpanID(span.Raw.SpanId)
		nodesByID[spanID] = &SpanTreeNode{
			Span: span,
		}
	}
	detachedNodesByID := map[oteltrace.SpanID]*SpanTreeNode{}
	for _, span := range td.Spans {
		spanID, _ := trace.ToSpanID(span.Raw.SpanId)
		parentSpanID, _ := trace.ToSpanID(span.Raw.ParentSpanId)
		if _, ok := nodesByID[parentSpanID]; !ok {
			detachedNodesByID[parentSpanID] = &SpanTreeNode{}
			nodesByID[parentSpanID] = detachedNodesByID[parentSpanID]
		}
		nodesByID[spanID].Parent = nodesByID[parentSpanID]
		nodesByID[parentSpanID].Children = append(nodesByID[parentSpanID].Children, nodesByID[spanID])
	}
	for _, node := range nodesByID {
		slices.SortFunc(node.Children, func(a, b *SpanTreeNode) int {
			return cmp.Compare(a.Span.Raw.StartTimeUnixNano, b.Span.Raw.StartTimeUnixNano)
		})
	}
	return &SpanTree{
		Root:            nodesByID[oteltrace.SpanID([8]byte{})],
		DetachedParents: detachedNodesByID,
	}
}

type SpanDetails struct {
	Raw       *tracev1.Span
	Resource  *resourcev1.Resource
	Scope     *commonv1.InstrumentationScope
	StartTime time.Time
	EndTime   time.Time
	Duration  time.Duration
	Service   string
}

func NewTraceResults(resourceSpans []*tracev1.ResourceSpans) *TraceResults {
	tr := &TraceResults{
		resourceSpans: resourceSpans,
	}
	tr.GetResources = sync.OnceValue(tr.computeResources)
	tr.GetTraces = sync.OnceValue(tr.computeTraces)
	return tr
}

func (tr *TraceResults) computeResources() []*resourcev1.Resource {
	resources := []*resourcev1.Resource{}
	for _, res := range tr.resourceSpans {
		resources = append(resources, res.Resource)
	}
	return resources
}

func (tr *TraceResults) computeTraces() *Traces {
	tracesByID := map[unique.Handle[oteltrace.TraceID]]*TraceDetails{}
	for _, resSpan := range tr.resourceSpans {
		resource := resSpan.Resource
		for _, scopeSpans := range resSpan.ScopeSpans {
			scope := scopeSpans.Scope
			for _, span := range scopeSpans.Spans {
				traceID, _ := trace.ToTraceID(span.TraceId)
				var details *TraceDetails
				if d, ok := tracesByID[traceID]; ok {
					details = d
				} else {
					details = &TraceDetails{
						ID: traceID,
					}
					tracesByID[traceID] = details
				}
				svc := ""
				for _, attr := range resource.Attributes {
					if attr.Key == "service.name" {
						svc = attr.Value.GetStringValue()
						break
					}
				}
				details.Spans = append(details.Spans, &SpanDetails{
					Raw:       span,
					Resource:  resource,
					Scope:     scope,
					StartTime: time.Unix(0, int64(span.StartTimeUnixNano)),
					EndTime:   time.Unix(0, int64(span.EndTimeUnixNano)),
					Duration:  time.Duration(span.EndTimeUnixNano - span.StartTimeUnixNano),
					Service:   svc,
				})
				if span.Status != nil {
					if span.Status.Code == tracev1.Status_STATUS_CODE_ERROR {
						details.Errors = append(details.Errors, len(details.Spans)-1)
					}
				}
			}
		}
	}

	tracesByName := map[string]TraceDetailsList{}
	tracesByParticipant := map[string]TraceDetailsList{}
	// sort spans by start time and compute durations
	for _, td := range tracesByID {
		slices.SortFunc(td.Spans, func(a, b *SpanDetails) int {
			return cmp.Compare(a.Raw.StartTimeUnixNano, b.Raw.StartTimeUnixNano)
		})
		startTime := td.Spans[0].Raw.StartTimeUnixNano
		endTime := td.Spans[0].Raw.EndTimeUnixNano
		serviceNames := map[string]struct{}{}
		for _, span := range td.Spans {
			startTime = min(startTime, span.Raw.StartTimeUnixNano)
			endTime = max(endTime, span.Raw.EndTimeUnixNano)
			if span.Service != "" {
				serviceNames[span.Service] = struct{}{}
			}
		}
		td.StartTime = time.Unix(0, int64(startTime))
		td.EndTime = time.Unix(0, int64(endTime))
		td.Duration = td.EndTime.Sub(td.StartTime)
		td.Services = slices.Sorted(maps.Keys(serviceNames))
		td.Name = fmt.Sprintf("%s: %s", td.Spans[0].Service, td.Spans[0].Raw.Name)
		tracesByName[td.Name] = append(tracesByName[td.Name], td)
		for svc := range serviceNames {
			tracesByParticipant[svc] = append(tracesByParticipant[svc], td)
		}
	}

	return &Traces{
		ByID:          tracesByID,
		ByName:        tracesByName,
		ByParticipant: tracesByParticipant,
	}
}

type SpanTree struct {
	Root            *SpanTreeNode
	DetachedParents map[oteltrace.SpanID]*SpanTreeNode
}

type SpanTreeNode struct {
	Span     *SpanDetails
	Parent   *SpanTreeNode
	Children []*SpanTreeNode
}

type Match struct {
	Name       string
	TraceCount any
	Services   []string
}

type (
	GreaterOrEqual int
	Greater        int

	// Any makes no assertions on the trace count. If the trace is not found, it
	// doesn't count against the Exact match option.
	Any struct{}

	// EqualToMatch asserts that the value is the same as the value of another
	// match (by name)
	EqualToMatch string
	// GreaterThanMatch asserts that the value is greater than the value of
	// another match (by name)
	GreaterThanMatch string
)

type MatchOptions struct {
	// If true, asserts that there is exactly one [Match] entry per result
	Exact bool
	// If true, asserts that no traces contain detached spans
	CheckDetachedSpans bool
}

func (tr *TraceResults) MatchTraces(t testing.TB, opts MatchOptions, matches ...Match) {
	t.Helper()
	traces := tr.GetTraces()
	matchArgsByName := map[string]Match{}
	for i, m := range matches {
		if m.Name != "" {
			require.NotContains(t, matchArgsByName, m.Name, "duplicate name")
			matchArgsByName[m.Name] = m
			if traceDetails, ok := traces.ByName[m.Name]; ok {
				switch tc := m.TraceCount.(type) {
				case GreaterOrEqual:
					assert.GreaterOrEqualf(t, len(traceDetails), int(tc),
						"[match %d]: expected %q to have >=%d traces, but found %d",
						i+1, m.Name, int(tc), len(traceDetails))
				case Greater:
					assert.Greaterf(t, len(traceDetails), int(tc),
						"[match %d]: expected %q to have >%d traces, but found %d",
						i+1, m.Name, int(tc), len(traceDetails))
				case GreaterThanMatch:
					assert.Greaterf(t, len(traceDetails), len(traces.ByName[string(tc)]),
						"[match %d]: expected %q to have >%d traces (value of %s), but found %d",
						i+1, m.Name, len(traces.ByName[string(tc)]), string(tc), len(traceDetails))
				case EqualToMatch:
					assert.Equalf(t, len(traceDetails), len(traces.ByName[string(tc)]),
						"[match %d]: expected %q to have %d traces (value of %s), but found %d",
						i+1, m.Name, len(traces.ByName[string(tc)]), string(tc), len(traceDetails))
				case Any:
				case int:
					s := "s"
					if tc == 1 {
						s = ""
					}
					assert.Lenf(t, traceDetails, tc,
						"[match %d]: expected %q to have %d trace%s, but found %d",
						i+1, m.Name, tc, s, len(traceDetails))
				}

				if m.Services != nil {
					for _, trace := range traceDetails {
						assert.ElementsMatch(t, m.Services, trace.Services)
					}
				}
			} else if _, ok := m.TraceCount.(Any); !ok {
				t.Errorf("no traces with name %q found", m.Name)
			}
		}
	}
	if opts.CheckDetachedSpans {
		for _, trace := range traces.ByID {
			tree := trace.SpanTree()
			if !assert.Empty(t, tree.DetachedParents) {
				for spanID, node := range tree.DetachedParents {
					t.Log("------------------------------------")
					t.Logf("span id: %s", spanID)
					if len(node.Children) != 0 {
						t.Log("children:")
					}
					for _, c := range node.Children {
						t.Log(protojson.Format(c.Span.Raw))
					}
					t.Log("------------------------------------")
				}
			}
		}
	}
	if opts.Exact {
		expected := slices.Sorted(maps.Keys(matchArgsByName))
		actual := slices.Sorted(maps.Keys(traces.ByName))
		for name, match := range matchArgsByName {
			if _, ok := traces.ByName[name]; !ok {
				if _, ok := match.TraceCount.(Any); ok {
					expected = slices.DeleteFunc(expected, func(s string) bool { return s == name })
				}
			}
		}
		assert.Equal(t, expected, actual)
	}
}

func (tr *TraceResults) AssertEqual(t testing.TB, expectedResults *TraceResults, msgFmtAndArgs ...any) {
	t.Helper()
	actualTraces := tr.GetTraces()
	expectedTraces := expectedResults.GetTraces()
	for traceID, expected := range expectedTraces.ByID {
		if actual, ok := actualTraces.ByID[traceID]; !ok {
			if len(msgFmtAndArgs) > 0 {
				t.Errorf("expected trace id %s not found (%s)", traceID.Value().String(),
					fmt.Sprintf(msgFmtAndArgs[0].(string), msgFmtAndArgs[1:]...))
			} else {
				t.Errorf("expected trace id %s not found", traceID.Value().String())
			}
		} else {
			if equal, diff := actual.Equal(expected); !equal {
				if len(msgFmtAndArgs) > 0 {
					t.Errorf("trace %s is not equal (%s):\n%s", traceID.Value().String(),
						fmt.Sprintf(msgFmtAndArgs[0].(string), msgFmtAndArgs[1:]...), diff)
				} else {
					t.Errorf("trace %s is not equal:\n%s", traceID.Value().String(), diff)
				}
			}
		}
	}
	for traceID := range actualTraces.ByID {
		if _, ok := expectedTraces.ByID[traceID]; !ok {
			if len(msgFmtAndArgs) > 0 {
				t.Errorf("unexpected trace id %s found (%s)", traceID.Value().String(),
					fmt.Sprintf(msgFmtAndArgs[0].(string), msgFmtAndArgs[1:]...))
			} else {
				t.Errorf("unexpected trace id %s found", traceID.Value().String())
			}
		}
	}
}

func FlattenResourceSpans(lists [][]*tracev1.ResourceSpans) []*tracev1.ResourceSpans {
	res := NewBuffer()
	for _, list := range lists {
		for _, resource := range list {
			resInfo := NewResourceInfo(resource.Resource, resource.SchemaUrl)
			for _, scope := range resource.ScopeSpans {
				scopeInfo := NewScopeInfo(scope.Scope, scope.SchemaUrl)
				for _, span := range scope.Spans {
					res.Insert(resInfo, scopeInfo, span)
				}
			}
		}
	}
	return res.Flush()
}

func FlattenExportRequests(reqs []*coltracepb.ExportTraceServiceRequest) []*tracev1.ResourceSpans {
	lists := make([][]*tracev1.ResourceSpans, len(reqs))
	for i, req := range reqs {
		lists[i] = req.ResourceSpans
	}
	return FlattenResourceSpans(lists)
}

type EventRecording struct {
	events       []trace.DebugEvent
	normalizedTo time.Time
}

func LoadEventRecording(raw []byte) (*EventRecording, error) {
	events := []trace.DebugEvent{}
	if err := json.Unmarshal(raw, &events); err != nil {
		return nil, err
	}
	for i := 1; i < len(events); i++ {
		if events[i].Timestamp.Before(events[i-1].Timestamp) {
			return nil, fmt.Errorf("invalid timestamps: event %d occurred before event %d", i, i-1)
		}
	}
	return &EventRecording{
		events: events,
	}, nil
}

func (er *EventRecording) Normalize(startTime time.Time) {
	if len(er.events) == 0 {
		return
	}
	er.normalizedTo = startTime
	offset := startTime.Sub(er.events[0].Timestamp)
	for i, ev := range er.events {
		er.events[i].Timestamp = ev.Timestamp.Add(offset)
		for _, resSpan := range ev.Request.ResourceSpans {
			for _, scopeSpans := range resSpan.ScopeSpans {
				for _, span := range scopeSpans.Spans {
					span.StartTimeUnixNano += uint64(offset)
					span.EndTimeUnixNano += uint64(offset)
					for _, event := range span.Events {
						event.TimeUnixNano += uint64(offset)
					}
				}
			}
		}
	}
}

func (er *EventRecording) NormalizedTo() time.Time {
	return er.normalizedTo
}

type EventCallbackFunc = func(ctx context.Context, req *coltracepb.ExportTraceServiceRequest) (*coltracepb.ExportTraceServiceResponse, error)

func (er *EventRecording) Events() []trace.DebugEvent {
	return er.events
}

func (er *EventRecording) Clone() *EventRecording {
	clonedEvents := make([]trace.DebugEvent, 0, len(er.events))
	for _, ev := range er.events {
		clonedEvents = append(clonedEvents, trace.DebugEvent{
			Timestamp: ev.Timestamp,
			Request:   proto.Clone(ev.Request).(*coltracepb.ExportTraceServiceRequest),
		})
	}
	c := &EventRecording{
		events:       clonedEvents,
		normalizedTo: er.normalizedTo,
	}
	return c
}

func (er *EventRecording) Replay(callback EventCallbackFunc) error {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	durations := make([]time.Duration, 0, len(er.events)-1)
	for i := 1; i < len(er.events); i++ {
		durations = append(durations, er.events[i].Timestamp.Sub(er.events[i-1].Timestamp))
	}

	var wg sync.WaitGroup
	wg.Add(len(er.events))
	er.Normalize(time.Now())
	for i, ev := range er.events {
		go func() {
			callback(context.Background(), ev.Request)
			wg.Done()
		}()
		if i < len(er.events)-1 {
			time.Sleep(durations[i])
		}
	}
	wg.Wait()
	return nil
}
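These helpers compose: FlattenExportRequests merges every recorded OTLP export request into one deterministically ordered span list, NewTraceResults indexes the result by trace ID, name, and participating service, and MatchTraces asserts on what was exported. A hypothetical sketch of how a test might tie them together, not part of this commit: the helper name, how the requests were captured, and the trace names below are all placeholders (real trace names follow the "service: span name" format computed above), while the tracetest types and functions are the ones from this file.

// Hypothetical test helper sketch (not part of the commit).
package tracetest_example

import (
	"testing"

	"github.com/pomerium/pomerium/internal/testutil/tracetest"
	coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
)

// assertTraces assumes the test has already captured the ExportTraceServiceRequests
// sent by the system under test (e.g. via an in-process OTLP receiver).
func assertTraces(t *testing.T, received []*coltracepb.ExportTraceServiceRequest) {
	// merge every export request into a single, deterministically ordered span list
	resourceSpans := tracetest.FlattenExportRequests(received)
	results := tracetest.NewTraceResults(resourceSpans)

	// assert on trace names, counts, and participating services; the names and
	// services here are illustrative, not actual Pomerium span names
	results.MatchTraces(t, tracetest.MatchOptions{
		Exact:              true,
		CheckDetachedSpans: true,
	},
		tracetest.Match{Name: "example-service: GET /", TraceCount: 1, Services: []string{"example-service"}},
		tracetest.Match{Name: "example-service: health check", TraceCount: tracetest.Any{}},
	)
}

With Exact set, every exported trace name must be covered by a Match entry (Any entries are excused if absent), and CheckDetachedSpans fails the test when a span references a parent that was never exported.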