commit b9065b6a55
parent 8f36870650

    new tracing system

130 changed files with 7928 additions and 1836 deletions
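At a high level, the commit moves span creation from package-level trace.StartSpan calls to per-service OpenTelemetry tracers: each service struct now carries an oteltrace.TracerProvider (passed down to sub-components) and an oteltrace.Tracer (used to start its own spans). The following is only a rough sketch of that pattern using the public go.opentelemetry.io/otel API; the pomerium-internal trace package (NewTracerProvider, PomeriumCoreTracer, trace.Continue) is simplified away and the tracer name below is a placeholder:

    package main

    import (
        "context"

        "go.opentelemetry.io/otel/attribute"
        sdktrace "go.opentelemetry.io/otel/sdk/trace"
        oteltrace "go.opentelemetry.io/otel/trace"
    )

    // service mirrors the new Authenticate/Authorize struct fields: the provider is
    // handed to sub-components, the tracer starts this service's own spans.
    type service struct {
        tracerProvider oteltrace.TracerProvider
        tracer         oteltrace.Tracer
    }

    func newService(tp oteltrace.TracerProvider) *service {
        return &service{
            tracerProvider: tp,
            tracer:         tp.Tracer("example.com/placeholder"), // stand-in for trace.PomeriumCoreTracer
        }
    }

    func (s *service) check(ctx context.Context) {
        // Replaces the old trace.StartSpan(ctx, "authorize.grpc.Check") style of call.
        ctx, span := s.tracer.Start(ctx, "authorize.grpc.Check")
        defer span.End()
        span.SetAttributes(attribute.Bool("result.allow", true))
        _ = ctx
    }

    func main() {
        tp := sdktrace.NewTracerProvider() // no exporter configured: spans are discarded
        defer func() { _ = tp.Shutdown(context.Background()) }()
        newService(tp).check(context.Background())
    }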
@@ -10,7 +10,9 @@ import (
 	"github.com/pomerium/pomerium/config"
 	"github.com/pomerium/pomerium/internal/atomicutil"
 	"github.com/pomerium/pomerium/internal/log"
+	"github.com/pomerium/pomerium/internal/telemetry/trace"
 	"github.com/pomerium/pomerium/pkg/cryptutil"
+	oteltrace "go.opentelemetry.io/otel/trace"
 )
 
 // ValidateOptions checks that configuration are complete and valid.
@@ -41,20 +43,28 @@ type Authenticate struct {
 	cfg *authenticateConfig
 	options *atomicutil.Value[*config.Options]
 	state *atomicutil.Value[*authenticateState]
+	tracerProvider oteltrace.TracerProvider
+	tracer oteltrace.Tracer
 }
 
 // New validates and creates a new authenticate service from a set of Options.
 func New(ctx context.Context, cfg *config.Config, options ...Option) (*Authenticate, error) {
 	authenticateConfig := getAuthenticateConfig(options...)
 
+	tracerProvider := trace.NewTracerProvider(ctx, "Authenticate")
+	tracer := tracerProvider.Tracer(trace.PomeriumCoreTracer)
+
 	a := &Authenticate{
 		cfg: authenticateConfig,
 		options: config.NewAtomicOptions(),
 		state: atomicutil.NewValue(newAuthenticateState()),
+		tracerProvider: tracerProvider,
+		tracer: tracer,
 	}
 
 	a.options.Store(cfg.Options)
 
-	state, err := newAuthenticateStateFromConfig(ctx, cfg, authenticateConfig)
+	state, err := newAuthenticateStateFromConfig(ctx, tracerProvider, cfg, authenticateConfig)
 	if err != nil {
 		return nil, err
 	}
@@ -70,7 +80,7 @@ func (a *Authenticate) OnConfigChange(ctx context.Context, cfg *config.Config) {
 	}
 
 	a.options.Store(cfg.Options)
-	if state, err := newAuthenticateStateFromConfig(ctx, cfg, a.cfg); err != nil {
+	if state, err := newAuthenticateStateFromConfig(ctx, a.tracerProvider, cfg, a.cfg); err != nil {
 		log.Ctx(ctx).Error().Err(err).Msg("authenticate: failed to update state")
 	} else {
 		a.state.Store(state)
@@ -1,14 +1,17 @@
 package authenticate
 
 import (
+	"context"
+
 	"github.com/pomerium/pomerium/authenticate/events"
 	"github.com/pomerium/pomerium/config"
 	identitypb "github.com/pomerium/pomerium/pkg/grpc/identity"
 	"github.com/pomerium/pomerium/pkg/identity"
+	oteltrace "go.opentelemetry.io/otel/trace"
 )
 
 type authenticateConfig struct {
-	getIdentityProvider func(options *config.Options, idpID string) (identity.Authenticator, error)
+	getIdentityProvider func(ctx context.Context, tracerProvider oteltrace.TracerProvider, options *config.Options, idpID string) (identity.Authenticator, error)
 	profileTrimFn func(*identitypb.Profile)
 	authEventFn events.AuthEventFn
 }
@@ -26,7 +29,7 @@ func getAuthenticateConfig(options ...Option) *authenticateConfig {
 }
 
 // WithGetIdentityProvider sets the getIdentityProvider function in the config.
-func WithGetIdentityProvider(getIdentityProvider func(options *config.Options, idpID string) (identity.Authenticator, error)) Option {
+func WithGetIdentityProvider(getIdentityProvider func(ctx context.Context, tracerProvider oteltrace.TracerProvider, options *config.Options, idpID string) (identity.Authenticator, error)) Option {
 	return func(cfg *authenticateConfig) {
 		cfg.getIdentityProvider = getIdentityProvider
 	}
@@ -21,7 +21,6 @@ import (
 	"github.com/pomerium/pomerium/internal/log"
 	"github.com/pomerium/pomerium/internal/middleware"
 	"github.com/pomerium/pomerium/internal/sessions"
-	"github.com/pomerium/pomerium/internal/telemetry/trace"
 	"github.com/pomerium/pomerium/internal/urlutil"
 	"github.com/pomerium/pomerium/pkg/cryptutil"
 	"github.com/pomerium/pomerium/pkg/identity"
@@ -114,7 +113,7 @@ func (a *Authenticate) RetrieveSession(next http.Handler) http.Handler {
 // session state is attached to the users's request context.
 func (a *Authenticate) VerifySession(next http.Handler) http.Handler {
 	return httputil.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
-		ctx, span := trace.StartSpan(r.Context(), "authenticate.VerifySession")
+		ctx, span := a.tracer.Start(r.Context(), "authenticate.VerifySession")
 		defer span.End()
 
 		state := a.state.Load()
@@ -160,7 +159,7 @@ func (a *Authenticate) RobotsTxt(w http.ResponseWriter, _ *http.Request) {
 
 // SignIn handles authenticating a user.
 func (a *Authenticate) SignIn(w http.ResponseWriter, r *http.Request) error {
-	ctx, span := trace.StartSpan(r.Context(), "authenticate.SignIn")
+	ctx, span := a.tracer.Start(r.Context(), "authenticate.SignIn")
 	defer span.End()
 
 	state := a.state.Load()
@@ -197,13 +196,13 @@ func (a *Authenticate) SignOut(w http.ResponseWriter, r *http.Request) error {
 }
 
 func (a *Authenticate) signOutRedirect(w http.ResponseWriter, r *http.Request) error {
-	ctx, span := trace.StartSpan(r.Context(), "authenticate.SignOut")
+	ctx, span := a.tracer.Start(r.Context(), "authenticate.SignOut")
 	defer span.End()
 
 	options := a.options.Load()
 	idpID := a.getIdentityProviderIDForRequest(r)
 
-	authenticator, err := a.cfg.getIdentityProvider(options, idpID)
+	authenticator, err := a.cfg.getIdentityProvider(ctx, a.tracerProvider, options, idpID)
 	if err != nil {
 		return err
 	}
@@ -274,7 +273,7 @@ func (a *Authenticate) reauthenticateOrFail(w http.ResponseWriter, r *http.Reque
 	options := a.options.Load()
 	idpID := a.getIdentityProviderIDForRequest(r)
 
-	authenticator, err := a.cfg.getIdentityProvider(options, idpID)
+	authenticator, err := a.cfg.getIdentityProvider(r.Context(), a.tracerProvider, options, idpID)
 	if err != nil {
 		return err
 	}
@@ -307,6 +306,10 @@ func (a *Authenticate) OAuthCallback(w http.ResponseWriter, r *http.Request) err
 	if err != nil {
 		return fmt.Errorf("authenticate.OAuthCallback: %w", err)
 	}
+	q := redirect.Query()
+	if traceparent := q.Get(urlutil.QueryTraceparent); traceparent != "" {
+		w.Header().Set("X-Pomerium-Traceparent", traceparent)
+	}
 	httputil.Redirect(w, r, redirect.String(), http.StatusFound)
 	return nil
 }
@@ -321,7 +324,7 @@ func (a *Authenticate) statusForErrorCode(errorCode string) int {
 }
 
 func (a *Authenticate) getOAuthCallback(w http.ResponseWriter, r *http.Request) (*url.URL, error) {
-	ctx, span := trace.StartSpan(r.Context(), "authenticate.getOAuthCallback")
+	ctx, span := a.tracer.Start(r.Context(), "authenticate.getOAuthCallback")
 	defer span.End()
 
 	state := a.state.Load()
@@ -380,7 +383,7 @@ Or contact your administrator.
 
 	idpID := state.flow.GetIdentityProviderIDForURLValues(redirectURL.Query())
 
-	authenticator, err := a.cfg.getIdentityProvider(options, idpID)
+	authenticator, err := a.cfg.getIdentityProvider(ctx, a.tracerProvider, options, idpID)
 	if err != nil {
 		return nil, err
 	}
@@ -432,7 +435,7 @@ func (a *Authenticate) getSessionFromCtx(ctx context.Context) (*sessions.State,
 }
 
 func (a *Authenticate) userInfo(w http.ResponseWriter, r *http.Request) error {
-	ctx, span := trace.StartSpan(r.Context(), "authenticate.userInfo")
+	ctx, span := a.tracer.Start(r.Context(), "authenticate.userInfo")
 	defer span.End()
 
 	options := a.options.Load()
@@ -484,7 +487,7 @@ func (a *Authenticate) revokeSession(ctx context.Context, w http.ResponseWriter,
 
 	idpID := r.FormValue(urlutil.QueryIdentityProviderID)
 
-	authenticator, err := a.cfg.getIdentityProvider(options, idpID)
+	authenticator, err := a.cfg.getIdentityProvider(ctx, a.tracerProvider, options, idpID)
 	if err != nil {
 		return ""
 	}
@@ -16,6 +16,7 @@ import (
 	"github.com/google/go-cmp/cmp"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	oteltrace "go.opentelemetry.io/otel/trace"
 	"go.uber.org/mock/gomock"
 	"golang.org/x/crypto/chacha20poly1305"
 	"golang.org/x/oauth2"
@@ -39,23 +40,21 @@ import (
 	"github.com/pomerium/pomerium/pkg/identity/oidc"
 )
 
-func testAuthenticate() *Authenticate {
-	redirectURL, _ := url.Parse("https://auth.example.com/oauth/callback")
-	var auth Authenticate
-	auth.state = atomicutil.NewValue(&authenticateState{
-		redirectURL: redirectURL,
-		cookieSecret: cryptutil.NewKey(),
-		flow: new(stubFlow),
+func testAuthenticate(t *testing.T) *Authenticate {
+	opts := newTestOptions(t)
+	opts.AuthenticateURLString = "https://auth.example.com/oauth/callback"
+	auth, err := New(context.Background(), &config.Config{
+		Options: opts,
 	})
-	auth.options = config.NewAtomicOptions()
-	auth.options.Store(&config.Options{
-		SharedKey: cryptutil.NewBase64Key(),
-	})
-	return &auth
+	if err != nil {
+		panic(err)
+	}
+	auth.state.Load().flow = new(stubFlow)
+	return auth
 }
 
 func TestAuthenticate_RobotsTxt(t *testing.T) {
-	auth := testAuthenticate()
+	auth := testAuthenticate(t)
 	req, err := http.NewRequest(http.MethodGet, "/robots.txt", nil)
 	if err != nil {
 		t.Fatal(err)
@@ -73,7 +72,7 @@ func TestAuthenticate_RobotsTxt(t *testing.T) {
 }
 
 func TestAuthenticate_Handler(t *testing.T) {
-	auth := testAuthenticate()
+	auth := testAuthenticate(t)
 
 	h := auth.Handler()
 	if h == nil {
@@ -224,17 +223,16 @@ func TestAuthenticate_SignOut(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			ctrl := gomock.NewController(t)
 			defer ctrl.Finish()
-			a := &Authenticate{
-				cfg: getAuthenticateConfig(WithGetIdentityProvider(func(_ *config.Options, _ string) (identity.Authenticator, error) {
+			a := testAuthenticate(t)
+			a.cfg = getAuthenticateConfig(WithGetIdentityProvider(func(_ context.Context, _ oteltrace.TracerProvider, _ *config.Options, _ string) (identity.Authenticator, error) {
 				return tt.provider, nil
-				})),
-				state: atomicutil.NewValue(&authenticateState{
+			}))
+			a.state = atomicutil.NewValue(&authenticateState{
 				sessionStore: tt.sessionStore,
 				sharedEncoder: mock.Encoder{},
 				flow: new(stubFlow),
-				}),
-				options: config.NewAtomicOptions(),
-			}
+			})
+			a.options = config.NewAtomicOptions()
 			if tt.signoutRedirectURL != "" {
 				opts := a.options.Load()
 				opts.SignOutRedirectURLString = tt.signoutRedirectURL
@@ -280,7 +278,7 @@ func TestAuthenticate_SignOutDoesNotRequireSession(t *testing.T) {
 
 	sessionStore := &mstore.Store{LoadError: errors.New("no session")}
 	a := &Authenticate{
-		cfg: getAuthenticateConfig(WithGetIdentityProvider(func(_ *config.Options, _ string) (identity.Authenticator, error) {
+		cfg: getAuthenticateConfig(WithGetIdentityProvider(func(_ context.Context, _ oteltrace.TracerProvider, _ *config.Options, _ string) (identity.Authenticator, error) {
 			return identity.MockProvider{}, nil
 		})),
 		state: atomicutil.NewValue(&authenticateState{
@@ -354,18 +352,17 @@ func TestAuthenticate_OAuthCallback(t *testing.T) {
 			t.Fatal(err)
 		}
 		authURL, _ := url.Parse(tt.authenticateURL)
-		a := &Authenticate{
-			cfg: getAuthenticateConfig(WithGetIdentityProvider(func(_ *config.Options, _ string) (identity.Authenticator, error) {
+		a := testAuthenticate(t)
+		a.cfg = getAuthenticateConfig(WithGetIdentityProvider(func(_ context.Context, _ oteltrace.TracerProvider, _ *config.Options, _ string) (identity.Authenticator, error) {
 			return tt.provider, nil
-			})),
-			state: atomicutil.NewValue(&authenticateState{
+		}))
+		a.state = atomicutil.NewValue(&authenticateState{
 			redirectURL: authURL,
 			sessionStore: tt.session,
 			cookieCipher: aead,
 			flow: new(stubFlow),
-			}),
-			options: config.NewAtomicOptions(),
-		}
+		})
+		a.options = config.NewAtomicOptions()
 		u, _ := url.Parse("/oauthGet")
 		params, _ := url.ParseQuery(u.RawQuery)
 		params.Add("error", tt.paramErr)
@@ -466,20 +463,19 @@ func TestAuthenticate_SessionValidatorMiddleware(t *testing.T) {
 		if err != nil {
 			t.Fatal(err)
 		}
-		a := &Authenticate{
-			cfg: getAuthenticateConfig(WithGetIdentityProvider(func(_ *config.Options, _ string) (identity.Authenticator, error) {
+		a := testAuthenticate(t)
+		a.cfg = getAuthenticateConfig(WithGetIdentityProvider(func(_ context.Context, _ oteltrace.TracerProvider, _ *config.Options, _ string) (identity.Authenticator, error) {
 			return tt.provider, nil
-			})),
-			state: atomicutil.NewValue(&authenticateState{
+		}))
+		a.state = atomicutil.NewValue(&authenticateState{
 			cookieSecret: cryptutil.NewKey(),
 			redirectURL: uriParseHelper("https://authenticate.corp.beyondperimeter.com"),
 			sessionStore: tt.session,
 			cookieCipher: aead,
 			sharedEncoder: signer,
 			flow: new(stubFlow),
-			}),
-			options: config.NewAtomicOptions(),
-		}
+		})
+		a.options = config.NewAtomicOptions()
 		r := httptest.NewRequest(http.MethodGet, "/", nil)
 		state, err := tt.session.LoadSession(r)
 		if err != nil {
@@ -512,7 +508,7 @@ func TestAuthenticate_userInfo(t *testing.T) {
 	t.Run("cookie-redirect-uri", func(t *testing.T) {
 		w := httptest.NewRecorder()
 		r := httptest.NewRequest(http.MethodGet, "https://authenticate.service.cluster.local/.pomerium/?pomerium_redirect_uri=https://www.example.com", nil)
-		var a Authenticate
+		a := testAuthenticate(t)
 		a.state = atomicutil.NewValue(&authenticateState{
 			cookieSecret: cryptutil.NewKey(),
 			flow: new(stubFlow),
@@ -577,14 +573,13 @@ func TestAuthenticate_userInfo(t *testing.T) {
 		if !tt.validSignature {
 			f.verifySignatureErr = errors.New("bad signature")
 		}
-		a := &Authenticate{
-			options: o,
-			state: atomicutil.NewValue(&authenticateState{
+		a := testAuthenticate(t)
+		a.options = o
+		a.state = atomicutil.NewValue(&authenticateState{
 			sessionStore: tt.sessionStore,
 			sharedEncoder: signer,
 			flow: f,
-			}),
-		}
+		})
 		r := httptest.NewRequest(http.MethodGet, tt.url, nil)
 		state, err := tt.sessionStore.LoadSession(r)
 		if err != nil {
@@ -606,7 +601,7 @@ func TestAuthenticate_userInfo(t *testing.T) {
 
 func TestAuthenticate_CORS(t *testing.T) {
 	f := new(stubFlow)
-	auth := testAuthenticate()
+	auth := testAuthenticate(t)
 	state := auth.state.Load()
 	state.sessionLoader = &mstore.Store{Session: &sessions.State{}}
 	state.sharedEncoder = mock.Encoder{}
@@ -645,7 +640,7 @@ func TestAuthenticate_CORS(t *testing.T) {
 func TestSignOutBranding(t *testing.T) {
 	t.Parallel()
 
-	auth := testAuthenticate()
+	auth := testAuthenticate(t)
 	auth.state.Load().flow.(*stubFlow).verifySignatureErr = errors.New("unsigned URL")
 	auth.options.Store(&config.Options{
 		BrandingOptions: &configproto.Settings{
@@ -1,13 +1,16 @@
 package authenticate
 
 import (
+	"context"
+
 	"github.com/pomerium/pomerium/config"
 	"github.com/pomerium/pomerium/internal/urlutil"
 	"github.com/pomerium/pomerium/pkg/identity"
 	"github.com/pomerium/pomerium/pkg/identity/oauth"
+	oteltrace "go.opentelemetry.io/otel/trace"
 )
 
-func defaultGetIdentityProvider(options *config.Options, idpID string) (identity.Authenticator, error) {
+func defaultGetIdentityProvider(ctx context.Context, tracerProvider oteltrace.TracerProvider, options *config.Options, idpID string) (identity.Authenticator, error) {
 	authenticateURL, err := options.GetAuthenticateURL()
 	if err != nil {
 		return nil, err
@@ -23,7 +26,7 @@ func defaultGetIdentityProvider(options *config.Options, idpID string) (identity
 	if err != nil {
 		return nil, err
 	}
-	return identity.NewAuthenticator(oauth.Options{
+	return identity.NewAuthenticator(ctx, tracerProvider, oauth.Options{
 		RedirectURL: redirectURL,
 		ProviderName: idp.GetType(),
 		ProviderURL: idp.GetUrl(),
@@ -8,6 +8,7 @@ import (
 	"net/url"
 
 	"github.com/go-jose/go-jose/v3"
+	oteltrace "go.opentelemetry.io/otel/trace"
 	"golang.org/x/oauth2"
 
 	"github.com/pomerium/pomerium/config"
@@ -65,7 +66,9 @@ func newAuthenticateState() *authenticateState {
 
 func newAuthenticateStateFromConfig(
 	ctx context.Context,
-	cfg *config.Config, authenticateConfig *authenticateConfig,
+	tracerProvider oteltrace.TracerProvider,
+	cfg *config.Config,
+	authenticateConfig *authenticateConfig,
 ) (*authenticateState, error) {
 	err := ValidateOptions(cfg.Options)
 	if err != nil {
@@ -147,6 +150,7 @@ func newAuthenticateStateFromConfig(
 
 	if cfg.Options.UseStatelessAuthenticateFlow() {
 		state.flow, err = authenticateflow.NewStateless(ctx,
+			tracerProvider,
 			cfg,
 			cookieStore,
 			authenticateConfig.getIdentityProvider,
@@ -154,7 +158,7 @@ func newAuthenticateStateFromConfig(
 			authenticateConfig.authEventFn,
 		)
 	} else {
-		state.flow, err = authenticateflow.NewStateful(ctx, cfg, cookieStore)
+		state.flow, err = authenticateflow.NewStateful(ctx, tracerProvider, cfg, cookieStore)
 	}
 	if err != nil {
 		return nil, err
@@ -21,6 +21,7 @@ import (
 	"github.com/pomerium/pomerium/pkg/cryptutil"
 	"github.com/pomerium/pomerium/pkg/grpc/databroker"
 	"github.com/pomerium/pomerium/pkg/storage"
+	oteltrace "go.opentelemetry.io/otel/trace"
 )
 
 // Authorize struct holds
@@ -35,18 +36,25 @@ type Authorize struct {
 	// This should provide a consistent view of the data at a given server/record version and
 	// avoid partial updates.
 	stateLock sync.RWMutex
+
+	tracerProvider oteltrace.TracerProvider
+	tracer oteltrace.Tracer
 }
 
 // New validates and creates a new Authorize service from a set of config options.
 func New(ctx context.Context, cfg *config.Config) (*Authorize, error) {
+	tracerProvider := trace.NewTracerProvider(ctx, "Authorize")
+	tracer := tracerProvider.Tracer(trace.PomeriumCoreTracer)
 	a := &Authorize{
 		currentOptions: config.NewAtomicOptions(),
 		store: store.New(),
 		globalCache: storage.NewGlobalCache(time.Minute),
+		tracerProvider: tracerProvider,
+		tracer: tracer,
 	}
 	a.accessTracker = NewAccessTracker(a, accessTrackerMaxSize, accessTrackerDebouncePeriod)
 
-	state, err := newAuthorizeStateFromConfig(ctx, cfg, a.store, nil)
+	state, err := newAuthorizeStateFromConfig(ctx, tracerProvider, cfg, a.store, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -88,7 +96,7 @@ func newPolicyEvaluator(
 	ctx = log.WithContext(ctx, func(c zerolog.Context) zerolog.Context {
 		return c.Str("service", "authorize")
 	})
-	ctx, span := trace.StartSpan(ctx, "authorize.newPolicyEvaluator")
+	ctx, span := trace.Continue(ctx, "authorize.newPolicyEvaluator")
 	defer span.End()
 
 	clientCA, err := opts.DownstreamMTLS.GetCA()
@@ -141,7 +149,7 @@ func newPolicyEvaluator(
 func (a *Authorize) OnConfigChange(ctx context.Context, cfg *config.Config) {
 	currentState := a.state.Load()
 	a.currentOptions.Store(cfg.Options)
-	if state, err := newAuthorizeStateFromConfig(ctx, cfg, a.store, currentState.evaluator); err != nil {
+	if state, err := newAuthorizeStateFromConfig(ctx, a.tracerProvider, cfg, a.store, currentState.evaluator); err != nil {
 		log.Ctx(ctx).Error().Err(err).Msg("authorize: error updating state")
 	} else {
 		a.state.Store(state)
@@ -228,16 +228,24 @@ func (a *Authorize) requireLoginResponse(
 	// always assume https scheme
 	checkRequestURL := getCheckRequestURL(in)
 	checkRequestURL.Scheme = "https"
+	var signInURLQuery url.Values
 
+	headers := http.Header{}
+	if id := in.GetAttributes().GetRequest().GetHttp().GetHeaders()["traceparent"]; id != "" {
+		headers["X-Pomerium-Traceparent"] = []string{id}
+		headers["X-Pomerium-Tracestate"] = []string{"pomerium.traceparent=" + id} // TODO: this might not be necessary anymore
+		signInURLQuery = url.Values{}
+		signInURLQuery.Add("pomerium_traceparent", id)
+		signInURLQuery.Add("pomerium_tracestate", "pomerium.traceparent="+id)
+	}
 	redirectTo, err := state.authenticateFlow.AuthenticateSignInURL(
-		ctx, nil, &checkRequestURL, idp.GetId())
+		ctx, signInURLQuery, &checkRequestURL, idp.GetId())
 	if err != nil {
 		return nil, err
 	}
+	headers["Location"] = []string{redirectTo}
 
-	return a.deniedResponse(ctx, in, http.StatusFound, "Login", http.Header{
-		"Location": {redirectTo},
-	})
+	return a.deniedResponse(ctx, in, http.StatusFound, "Login", headers)
 }
 
 func (a *Authorize) requireWebAuthnResponse(
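The traceparent value forwarded in the hunk above follows the W3C Trace Context format (version-traceid-spanid-flags). A self-contained sketch of the same data flow — copying an incoming traceparent into the sign-in redirect query — with a hypothetical helper name; the pomerium_traceparent/pomerium_tracestate parameter names come from the diff itself:

    package main

    import (
        "fmt"
        "net/url"
    )

    // signInQuery builds the extra query parameters attached to the sign-in
    // redirect when the incoming check request already carries a traceparent.
    func signInQuery(headers map[string]string) url.Values {
        q := url.Values{}
        if id := headers["traceparent"]; id != "" {
            q.Add("pomerium_traceparent", id)
            q.Add("pomerium_tracestate", "pomerium.traceparent="+id)
        }
        return q
    }

    func main() {
        // Example W3C traceparent: version "00", 16-byte trace id, 8-byte span id, flags.
        h := map[string]string{"traceparent": "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"}
        fmt.Println(signInQuery(h).Encode())
    }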
@@ -3,7 +3,6 @@ package authorize
 import (
 	"context"
 
-	"github.com/pomerium/pomerium/internal/telemetry/trace"
 	"github.com/pomerium/pomerium/pkg/grpc/databroker"
 	"github.com/pomerium/pomerium/pkg/grpc/session"
 	"github.com/pomerium/pomerium/pkg/grpc/user"
@@ -63,7 +62,7 @@ func (a *Authorize) getDataBrokerSessionOrServiceAccount(
 	sessionID string,
 	dataBrokerRecordVersion uint64,
 ) (s sessionOrServiceAccount, err error) {
-	ctx, span := trace.StartSpan(ctx, "authorize.getDataBrokerSessionOrServiceAccount")
+	ctx, span := a.tracer.Start(ctx, "authorize.getDataBrokerSessionOrServiceAccount")
 	defer span.End()
 
 	record, err := getDataBrokerRecord(ctx, grpcutil.GetTypeURL(new(session.Session)), sessionID, dataBrokerRecordVersion)
@@ -96,7 +95,7 @@ func (a *Authorize) getDataBrokerUser(
 	ctx context.Context,
 	userID string,
 ) (*user.User, error) {
-	ctx, span := trace.StartSpan(ctx, "authorize.getDataBrokerUser")
+	ctx, span := a.tracer.Start(ctx, "authorize.getDataBrokerUser")
 	defer span.End()
 
 	record, err := getDataBrokerRecord(ctx, grpcutil.GetTypeURL(new(user.User)), userID, 0)
@@ -199,7 +199,7 @@ func getOrCreatePolicyEvaluators(
 
 // Evaluate evaluates the rego for the given policy and generates the identity headers.
 func (e *Evaluator) Evaluate(ctx context.Context, req *Request) (*Result, error) {
-	ctx, span := trace.StartSpan(ctx, "authorize.Evaluator.Evaluate")
+	ctx, span := trace.Continue(ctx, "authorize.Evaluator.Evaluate")
 	defer span.End()
 
 	eg, ctx := errgroup.WithContext(ctx)
@@ -80,7 +80,7 @@ func NewHeadersEvaluator(store *store.Store) *HeadersEvaluator {
 
 // Evaluate evaluates the headers.rego script.
 func (e *HeadersEvaluator) Evaluate(ctx context.Context, req *HeadersRequest, options ...rego.EvalOption) (*HeadersResponse, error) {
-	ctx, span := trace.StartSpan(ctx, "authorize.HeadersEvaluator.Evaluate")
+	ctx, span := trace.Continue(ctx, "authorize.HeadersEvaluator.Evaluate")
 	defer span.End()
 
 	ectx := new(rego.EvalContext)
@@ -6,7 +6,7 @@ import (
 	"strings"
 
 	"github.com/open-policy-agent/opa/rego"
-	octrace "go.opencensus.io/trace"
+	"go.opentelemetry.io/otel/attribute"
 
 	"github.com/pomerium/pomerium/authorize/internal/store"
 	"github.com/pomerium/pomerium/config"
@@ -209,9 +209,9 @@ func (e *PolicyEvaluator) Evaluate(ctx context.Context, req *PolicyRequest) (*Po
 }
 
 func (e *PolicyEvaluator) evaluateQuery(ctx context.Context, req *PolicyRequest, query policyQuery) (*PolicyResponse, error) {
-	ctx, span := trace.StartSpan(ctx, "authorize.PolicyEvaluator.evaluateQuery")
+	ctx, span := trace.Continue(ctx, "authorize.PolicyEvaluator.evaluateQuery")
 	defer span.End()
-	span.AddAttributes(octrace.StringAttribute("script_checksum", query.checksum()))
+	span.SetAttributes(attribute.String("script_checksum", query.checksum()))
 
 	rs, err := safeEval(ctx, query.PreparedEvalQuery,
 		rego.EvalInput(req),
@@ -19,7 +19,6 @@ import (
 	"github.com/pomerium/pomerium/internal/httputil"
 	"github.com/pomerium/pomerium/internal/log"
 	"github.com/pomerium/pomerium/internal/sessions"
-	"github.com/pomerium/pomerium/internal/telemetry/trace"
 	"github.com/pomerium/pomerium/internal/urlutil"
 	"github.com/pomerium/pomerium/pkg/contextutil"
 	"github.com/pomerium/pomerium/pkg/grpc/user"
@@ -29,7 +28,7 @@ import (
 
 // Check implements the envoy auth server gRPC endpoint.
 func (a *Authorize) Check(ctx context.Context, in *envoy_service_auth_v3.CheckRequest) (*envoy_service_auth_v3.CheckResponse, error) {
-	ctx, span := trace.StartSpan(ctx, "authorize.grpc.Check")
+	ctx, span := a.tracer.Start(ctx, "authorize.grpc.Check")
 	defer span.End()
 
 	querier := storage.NewTracingQuerier(
@@ -14,7 +14,7 @@ import (
 	opastorage "github.com/open-policy-agent/opa/storage"
 	"github.com/open-policy-agent/opa/storage/inmem"
 	"github.com/open-policy-agent/opa/types"
-	octrace "go.opencensus.io/trace"
+	"go.opentelemetry.io/otel/attribute"
 	"google.golang.org/grpc"
 	"google.golang.org/protobuf/proto"
 	"google.golang.org/protobuf/types/known/timestamppb"
@@ -131,20 +131,20 @@ func (s *Store) GetDataBrokerRecordOption() func(*rego.Rego) {
 			types.NewObject(nil, types.NewDynamicProperty(types.S, types.S)),
 		),
 	}, func(bctx rego.BuiltinContext, op1 *ast.Term, op2 *ast.Term) (*ast.Term, error) {
-		ctx, span := trace.StartSpan(bctx.Context, "rego.get_databroker_record")
+		ctx, span := trace.Continue(bctx.Context, "rego.get_databroker_record")
 		defer span.End()
 
 		recordType, ok := op1.Value.(ast.String)
 		if !ok {
 			return nil, fmt.Errorf("invalid record type: %T", op1)
 		}
-		span.AddAttributes(octrace.StringAttribute("record_type", recordType.String()))
+		span.SetAttributes(attribute.String("record_type", recordType.String()))
 
 		recordIDOrIndex, ok := op2.Value.(ast.String)
 		if !ok {
 			return nil, fmt.Errorf("invalid record id: %T", op2)
 		}
-		span.AddAttributes(octrace.StringAttribute("record_id", recordIDOrIndex.String()))
+		span.SetAttributes(attribute.String("record_id", recordIDOrIndex.String()))
 
 		msg := s.GetDataBrokerRecord(ctx, string(recordType), string(recordIDOrIndex))
 		if msg == nil {
@@ -7,10 +7,10 @@ import (
 	envoy_service_auth_v3 "github.com/envoyproxy/go-control-plane/envoy/service/auth/v3"
 	"github.com/go-jose/go-jose/v3/jwt"
 	"github.com/rs/zerolog"
+	"go.opentelemetry.io/otel/attribute"
 
 	"github.com/pomerium/pomerium/authorize/evaluator"
 	"github.com/pomerium/pomerium/internal/log"
-	"github.com/pomerium/pomerium/internal/telemetry/trace"
 	"github.com/pomerium/pomerium/pkg/grpc/databroker"
 	"github.com/pomerium/pomerium/pkg/grpc/session"
 	"github.com/pomerium/pomerium/pkg/grpc/user"
@@ -24,7 +24,7 @@ func (a *Authorize) logAuthorizeCheck(
 	in *envoy_service_auth_v3.CheckRequest,
 	res *evaluator.Result, s sessionOrServiceAccount, u *user.User,
 ) {
-	ctx, span := trace.StartSpan(ctx, "authorize.grpc.LogAuthorizeCheck")
+	ctx, span := a.tracer.Start(ctx, "authorize.grpc.LogAuthorizeCheck")
 	defer span.End()
 
 	hdrs := getCheckRequestHeaders(in)
@@ -39,17 +39,24 @@ func (a *Authorize) logAuthorizeCheck(
 
 	// result
 	if res != nil {
+		span.SetAttributes(attribute.Bool("result.allow", res.Allow.Value))
 		evt = evt.Bool("allow", res.Allow.Value)
+		allowReasons := res.Allow.Reasons.Strings()
 		if res.Allow.Value {
-			evt = evt.Strs("allow-why-true", res.Allow.Reasons.Strings())
+			span.SetAttributes(attribute.StringSlice("result.allow-why-true", allowReasons))
+			evt = evt.Strs("allow-why-true", allowReasons)
 		} else {
-			evt = evt.Strs("allow-why-false", res.Allow.Reasons.Strings())
+			span.SetAttributes(attribute.StringSlice("result.allow-why-false", allowReasons))
+			evt = evt.Strs("allow-why-false", allowReasons)
 		}
 		evt = evt.Bool("deny", res.Deny.Value)
+		denyReasons := res.Deny.Reasons.Strings()
 		if res.Deny.Value {
-			evt = evt.Strs("deny-why-true", res.Deny.Reasons.Strings())
+			span.SetAttributes(attribute.StringSlice("result.deny-why-true", denyReasons))
+			evt = evt.Strs("deny-why-true", denyReasons)
 		} else {
-			evt = evt.Strs("deny-why-false", res.Deny.Reasons.Strings())
+			span.SetAttributes(attribute.StringSlice("result.deny-why-false", denyReasons))
+			evt = evt.Strs("deny-why-false", denyReasons)
 		}
 	}
 
@@ -5,6 +5,8 @@ import (
 	"fmt"
 	"net/url"
 
+	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+	oteltrace "go.opentelemetry.io/otel/trace"
 	googlegrpc "google.golang.org/grpc"
 
 	"github.com/pomerium/pomerium/authorize/evaluator"
@@ -32,7 +34,10 @@ type authorizeState struct {
 
 func newAuthorizeStateFromConfig(
 	ctx context.Context,
-	cfg *config.Config, store *store.Store, previousPolicyEvaluator *evaluator.Evaluator,
+	tracerProvider oteltrace.TracerProvider,
+	cfg *config.Config,
+	store *store.Store,
+	previousPolicyEvaluator *evaluator.Evaluator,
 ) (*authorizeState, error) {
 	if err := validateOptions(cfg.Options); err != nil {
 		return nil, fmt.Errorf("authorize: bad options: %w", err)
@@ -62,7 +67,7 @@ func newAuthorizeStateFromConfig(
 		InstallationID: cfg.Options.InstallationID,
 		ServiceName: cfg.Options.Services,
 		SignedJWTKey: sharedKey,
-	})
+	}, googlegrpc.WithStatsHandler(otelgrpc.NewClientHandler(otelgrpc.WithTracerProvider(tracerProvider))))
 	if err != nil {
 		return nil, fmt.Errorf("authorize: error creating databroker connection: %w", err)
 	}
@@ -75,9 +80,9 @@ func newAuthorizeStateFromConfig(
 	}
 
 	if cfg.Options.UseStatelessAuthenticateFlow() {
-		state.authenticateFlow, err = authenticateflow.NewStateless(ctx, cfg, nil, nil, nil, nil)
+		state.authenticateFlow, err = authenticateflow.NewStateless(ctx, tracerProvider, cfg, nil, nil, nil, nil)
 	} else {
-		state.authenticateFlow, err = authenticateflow.NewStateful(ctx, cfg, nil)
+		state.authenticateFlow, err = authenticateflow.NewStateful(ctx, tracerProvider, cfg, nil)
 	}
 	if err != nil {
 		return nil, err
@@ -12,6 +12,7 @@ import (
 
 	"github.com/pomerium/pomerium/config"
 	"github.com/pomerium/pomerium/internal/log"
+	"github.com/pomerium/pomerium/internal/telemetry/trace"
 	"github.com/pomerium/pomerium/internal/version"
 	_ "github.com/pomerium/pomerium/internal/zero/bootstrap/writers/filesystem"
 	_ "github.com/pomerium/pomerium/internal/zero/bootstrap/writers/k8s"
@@ -30,9 +31,15 @@ func main() {
 	}
 	root.AddCommand(zero_cmd.BuildRootCmd())
 	root.PersistentFlags().StringVar(&configFile, "config", "", "Specify configuration file location")
 
-	ctx := context.Background()
 	log.SetLevel(zerolog.InfoLevel)
+	ctx := trace.Options{
+		RemoteClient: trace.NewSyncClient(trace.NewRemoteClientFromEnv()),
+	}.NewContext(context.Background())
+	defer func() {
+		if err := trace.ShutdownContext(ctx); err != nil {
+			log.Error().Err(err).Send()
+		}
+	}()
 	runFn := run
 	if zero_cmd.IsManagedMode(configFile) {
 		runFn = zero_cmd.Run
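For comparison, the same start-up/shutdown shape expressed with the stock OpenTelemetry SDK and OTLP gRPC exporter — a sketch only, since the real code goes through pomerium's internal trace package and its env-configured remote client:

    package main

    import (
        "context"
        "log"

        "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
        sdktrace "go.opentelemetry.io/otel/sdk/trace"
    )

    func main() {
        ctx := context.Background()

        // The OTLP exporter honors the standard OTEL_EXPORTER_OTLP_* environment
        // variables, similar in spirit to trace.NewRemoteClientFromEnv above.
        exporter, err := otlptracegrpc.New(ctx)
        if err != nil {
            log.Fatal(err)
        }

        tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exporter))
        defer func() {
            // Counterpart of trace.ShutdownContext(ctx): flush buffered spans on exit.
            if err := tp.Shutdown(ctx); err != nil {
                log.Println(err)
            }
        }()

        // ... run the application, passing tp to the services that need it ...
    }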
@@ -3,6 +3,7 @@ package envoyconfig
 import (
 	"context"
 	"fmt"
+	"math"
 	"os"
 	"path/filepath"
 
@@ -37,7 +38,7 @@ func (b *Builder) BuildBootstrap(
 	cfg *config.Config,
 	fullyStatic bool,
 ) (bootstrap *envoy_config_bootstrap_v3.Bootstrap, err error) {
-	ctx, span := trace.StartSpan(ctx, "envoyconfig.Builder.BuildBootstrap")
+	ctx, span := trace.Continue(ctx, "envoyconfig.Builder.BuildBootstrap")
 	defer span.End()
 
 	bootstrap = new(envoy_config_bootstrap_v3.Bootstrap)
@@ -52,7 +53,7 @@ func (b *Builder) BuildBootstrap(
 		return nil, fmt.Errorf("error building bootstrap dynamic resources: %w", err)
 	}
 
-	bootstrap.LayeredRuntime, err = b.BuildBootstrapLayeredRuntime()
+	bootstrap.LayeredRuntime, err = b.BuildBootstrapLayeredRuntime(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("error building bootstrap layered runtime: %w", err)
 	}
@@ -148,7 +149,13 @@ func (b *Builder) BuildBootstrapDynamicResources(
 }
 
 // BuildBootstrapLayeredRuntime builds the layered runtime for the envoy bootstrap.
-func (b *Builder) BuildBootstrapLayeredRuntime() (*envoy_config_bootstrap_v3.LayeredRuntime, error) {
+func (b *Builder) BuildBootstrapLayeredRuntime(ctx context.Context) (*envoy_config_bootstrap_v3.LayeredRuntime, error) {
+	flushIntervalMs := 5000
+	minFlushSpans := 3
+	if trace.DebugFlagsFromContext(ctx).Check(trace.EnvoyFlushEverySpan) {
+		minFlushSpans = 1
+		flushIntervalMs = math.MaxInt32
+	}
 	layer, err := structpb.NewStruct(map[string]any{
 		"re2": map[string]any{
 			"max_program_size": map[string]any{
@@ -156,6 +163,21 @@ func (b *Builder) BuildBootstrapLayeredRuntime() (*envoy_config_bootstrap_v3.Lay
 				"warn_level": 1024,
 			},
 		},
+		"tracing": map[string]any{
+			"opentelemetry": map[string]any{
+				"flush_interval_ms": flushIntervalMs,
+				// For most requests, envoy generates 3 spans:
+				// - ingress (downstream->envoy)
+				// - ext_authz check request (envoy->pomerium)
+				// - egress (envoy->upstream)
+				// The default value is 5, which usually leads to delayed exports.
+				// This can be set lower, e.g. 1 to have envoy export every span
+				// individually (useful for testing), but 3 is a reasonable default.
+				// If set to 1, also set flush_interval_ms to a very large number to
+				// effectively disable it.
+				"min_flush_spans": minFlushSpans,
+			},
+		},
 	})
 	if err != nil {
 		return nil, fmt.Errorf("envoyconfig: failed to create layered runtime layer: %w", err)
@@ -180,7 +202,7 @@ func (b *Builder) BuildBootstrapStaticResources(
 	cfg *config.Config,
 	fullyStatic bool,
 ) (staticResources *envoy_config_bootstrap_v3.Bootstrap_StaticResources, err error) {
-	ctx, span := trace.StartSpan(ctx, "envoyconfig.Builder.BuildBootstrapStaticResources")
+	ctx, span := trace.Continue(ctx, "envoyconfig.Builder.BuildBootstrapStaticResources")
 	defer span.End()
 
 	staticResources = new(envoy_config_bootstrap_v3.Bootstrap_StaticResources)
@@ -36,7 +36,7 @@ func TestBuilder_BuildBootstrapAdmin(t *testing.T) {
 
 func TestBuilder_BuildBootstrapLayeredRuntime(t *testing.T) {
 	b := New("localhost:1111", "localhost:2222", "localhost:3333", filemgr.NewManager(), nil)
-	staticCfg, err := b.BuildBootstrapLayeredRuntime()
+	staticCfg, err := b.BuildBootstrapLayeredRuntime(context.Background())
 	assert.NoError(t, err)
 	testutil.AssertProtoJSONEqual(t, `
 		{ "layers": [{
@@ -47,6 +47,12 @@ func TestBuilder_BuildBootstrapLayeredRuntime(t *testing.T) {
 				"error_level": 1048576,
 				"warn_level": 1024
 			}
+			},
+			"tracing": {
+				"opentelemetry": {
+					"flush_interval_ms": 5000,
+					"min_flush_spans": 3
+				}
 			}
 		}
 		}] }
@@ -26,7 +26,7 @@ import (
 
 // BuildClusters builds envoy clusters from the given config.
 func (b *Builder) BuildClusters(ctx context.Context, cfg *config.Config) ([]*envoy_config_cluster_v3.Cluster, error) {
-	ctx, span := trace.StartSpan(ctx, "envoyconfig.Builder.BuildClusters")
+	ctx, span := trace.Continue(ctx, "envoyconfig.Builder.BuildClusters")
 	defer span.End()
 
 	grpcURLs := []*url.URL{{
@@ -104,13 +104,6 @@ func (b *Builder) BuildClusters(ctx context.Context, cfg *config.Config) ([]*env
 		envoyAdminCluster,
 	}
 
-	tracingCluster, err := buildTracingCluster(cfg.Options)
-	if err != nil {
-		return nil, err
-	} else if tracingCluster != nil {
-		clusters = append(clusters, tracingCluster)
-	}
-
 	if config.IsProxy(cfg.Options.Services) {
 		for policy := range cfg.Options.GetAllPolicies() {
 			if len(policy.To) > 0 {
@@ -444,6 +437,16 @@ func grpcHealthChecks(name string) []*envoy_config_core_v3.HealthCheck {
 			ServiceName: name,
 		},
 	},
+	// EventLogger: []*envoy_config_core_v3.TypedExtensionConfig{
+	// 	{
+	// 		Name: "envoy.health_check.event_sink.file",
+	// 		TypedConfig: marshalAny(&envoy_extensions_eventsinks_file_v3.HealthCheckEventFileSink{
+	// 			EventLogPath: "/tmp/healthchecks",
+	// 		}),
+	// 	},
+	// },
+	// AlwaysLogHealthCheckFailures: true,
+	// AlwaysLogHealthCheckSuccess: true,
 	}}
 }
 
config/envoyconfig/extensions/doc.go (new file, +5)
@@ -0,0 +1,5 @@
+// Package extensions contains the protobuf config definitions for Pomerium's
+// custom envoy extensions. Because the required configuration is minimal,
+// identical copies of the proto files are stored separately in envoy-custom.
+// Be sure to keep these in sync if the definitions change.
+package extensions
config/envoyconfig/extensions/trace_context.pb.go (new file, +125)
@@ -0,0 +1,125 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc (unknown)
+// source: github.com/pomerium/pomerium/config/envoyconfig/extensions/trace_context.proto
+
+package extensions
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type TraceContext struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+}
+
+func (x *TraceContext) Reset() {
+	*x = TraceContext{}
+	mi := &file_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *TraceContext) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TraceContext) ProtoMessage() {}
+
+func (x *TraceContext) ProtoReflect() protoreflect.Message {
+	mi := &file_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto_msgTypes[0]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use TraceContext.ProtoReflect.Descriptor instead.
+func (*TraceContext) Descriptor() ([]byte, []int) {
+	return file_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto_rawDescGZIP(), []int{0}
+}
+
+var File_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto protoreflect.FileDescriptor
+
+var file_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto_rawDesc = []byte{
+	0x0a, 0x4e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x6f, 0x6d,
+	0x65, 0x72, 0x69, 0x75, 0x6d, 0x2f, 0x70, 0x6f, 0x6d, 0x65, 0x72, 0x69, 0x75, 0x6d, 0x2f, 0x63,
+	0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+	0x67, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61,
+	0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x12, 0x13, 0x70, 0x6f, 0x6d, 0x65, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e,
+	0x73, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x0e, 0x0a, 0x0c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f,
+	0x6e, 0x74, 0x65, 0x78, 0x74, 0x42, 0x3c, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
+	0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x6f, 0x6d, 0x65, 0x72, 0x69, 0x75, 0x6d, 0x2f, 0x70, 0x6f, 0x6d,
+	0x65, 0x72, 0x69, 0x75, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x65, 0x6e, 0x76,
+	0x6f, 0x79, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+	0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto_rawDescOnce sync.Once
+	file_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto_rawDescData = file_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto_rawDesc
+)
|
|
||||||
|
func file_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto_rawDescGZIP() []byte {
|
||||||
|
file_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto_rawDescOnce.Do(func() {
|
||||||
|
file_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto_rawDescData)
|
||||||
|
})
|
||||||
|
return file_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto_rawDescData
|
||||||
|
}
|
||||||
|
|
||||||
|
var file_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
|
||||||
|
var file_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto_goTypes = []any{
|
||||||
|
(*TraceContext)(nil), // 0: pomerium.extensions.TraceContext
|
||||||
|
}
|
||||||
|
var file_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto_depIdxs = []int32{
|
||||||
|
0, // [0:0] is the sub-list for method output_type
|
||||||
|
0, // [0:0] is the sub-list for method input_type
|
||||||
|
0, // [0:0] is the sub-list for extension type_name
|
||||||
|
0, // [0:0] is the sub-list for extension extendee
|
||||||
|
0, // [0:0] is the sub-list for field type_name
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
file_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto_init()
|
||||||
|
}
|
||||||
|
func file_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto_init() {
|
||||||
|
if File_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
type x struct{}
|
||||||
|
out := protoimpl.TypeBuilder{
|
||||||
|
File: protoimpl.DescBuilder{
|
||||||
|
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||||
|
RawDescriptor: file_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto_rawDesc,
|
||||||
|
NumEnums: 0,
|
||||||
|
NumMessages: 1,
|
||||||
|
NumExtensions: 0,
|
||||||
|
NumServices: 0,
|
||||||
|
},
|
||||||
|
GoTypes: file_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto_goTypes,
|
||||||
|
DependencyIndexes: file_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto_depIdxs,
|
||||||
|
MessageInfos: file_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto_msgTypes,
|
||||||
|
}.Build()
|
||||||
|
File_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto = out.File
|
||||||
|
file_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto_rawDesc = nil
|
||||||
|
file_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto_goTypes = nil
|
||||||
|
file_github_com_pomerium_pomerium_config_envoyconfig_extensions_trace_context_proto_depIdxs = nil
|
||||||
|
}
|
7  config/envoyconfig/extensions/trace_context.proto  Normal file
@@ -0,0 +1,7 @@
+syntax = "proto3";
+
+package pomerium.extensions;
+
+option go_package = "github.com/pomerium/pomerium/config/envoyconfig/extensions";
+
+message TraceContext {}
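For orientation, here is a minimal sketch (not part of the commit) of how an empty-config extension message such as TraceContext ends up referenced from Envoy configuration: it is packed into an Any and attached as a named TypedExtensionConfig, matching the "envoy.http.early_header_mutation.trace_context" entry that appears later in this diff. The helper name below is hypothetical.

package main

import (
	envoy_config_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	"google.golang.org/protobuf/types/known/anypb"

	"github.com/pomerium/pomerium/config/envoyconfig/extensions"
)

// traceContextExtension is a hypothetical helper showing how the empty
// TraceContext message is wrapped so it can be listed among an
// HttpConnectionManager's early header mutation extensions.
func traceContextExtension() (*envoy_config_core_v3.TypedExtensionConfig, error) {
	cfg, err := anypb.New(&extensions.TraceContext{})
	if err != nil {
		return nil, err
	}
	return &envoy_config_core_v3.TypedExtensionConfig{
		Name:        "envoy.http.early_header_mutation.trace_context",
		TypedConfig: cfg,
	}, nil
}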
157  config/envoyconfig/extensions/uuidx.pb.go  Normal file
@@ -0,0 +1,157 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.35.2
+// 	protoc        (unknown)
+// source: github.com/pomerium/pomerium/config/envoyconfig/extensions/uuidx.proto
+
+package extensions
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type UuidxRequestIdConfig struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	PackTraceReason              *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=pack_trace_reason,json=packTraceReason,proto3" json:"pack_trace_reason,omitempty"`
+	UseRequestIdForTraceSampling *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=use_request_id_for_trace_sampling,json=useRequestIdForTraceSampling,proto3" json:"use_request_id_for_trace_sampling,omitempty"`
+}
+
+func (x *UuidxRequestIdConfig) Reset() { ... }
+func (x *UuidxRequestIdConfig) String() string { ... }
+func (*UuidxRequestIdConfig) ProtoMessage() {}
+func (x *UuidxRequestIdConfig) ProtoReflect() protoreflect.Message { ... }
+
+// Deprecated: Use UuidxRequestIdConfig.ProtoReflect.Descriptor instead.
+func (*UuidxRequestIdConfig) Descriptor() ([]byte, []int) { ... }
+
+func (x *UuidxRequestIdConfig) GetPackTraceReason() *wrapperspb.BoolValue {
+	if x != nil {
+		return x.PackTraceReason
+	}
+	return nil
+}
+
+func (x *UuidxRequestIdConfig) GetUseRequestIdForTraceSampling() *wrapperspb.BoolValue {
+	if x != nil {
+		return x.UseRequestIdForTraceSampling
+	}
+	return nil
+}
+
+// ... remainder of the generated file: the raw file descriptor bytes, the
+// goTypes/depIdxs tables, and the file_..._uuidx_proto_init() registration
+// boilerplate emitted by protoc-gen-go.
12  config/envoyconfig/extensions/uuidx.proto  Normal file
@@ -0,0 +1,12 @@
+syntax = "proto3";
+
+package pomerium.extensions;
+
+import "google/protobuf/wrappers.proto";
+
+option go_package = "github.com/pomerium/pomerium/config/envoyconfig/extensions";
+
+message UuidxRequestIdConfig {
+  google.protobuf.BoolValue pack_trace_reason = 1;
+  google.protobuf.BoolValue use_request_id_for_trace_sampling = 2;
+}
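As a companion sketch (again not part of the commit, helper name hypothetical), the custom request ID config is attached to an HttpConnectionManager through its RequestIDExtension slot, with both wrapper booleans enabled so the trace sampling decision is packed into the request ID, mirroring the values used later in this diff.

package main

import (
	envoy_extensions_filters_network_http_connection_manager "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/wrapperspb"

	"github.com/pomerium/pomerium/config/envoyconfig/extensions"
)

// uuidxRequestIDExtension is a hypothetical helper sketching how the uuidx
// request ID extension config is packed for use on an HttpConnectionManager.
func uuidxRequestIDExtension() (*envoy_extensions_filters_network_http_connection_manager.RequestIDExtension, error) {
	cfg, err := anypb.New(&extensions.UuidxRequestIdConfig{
		PackTraceReason:              wrapperspb.Bool(true),
		UseRequestIdForTraceSampling: wrapperspb.Bool(true),
	})
	if err != nil {
		return nil, err
	}
	return &envoy_extensions_filters_network_http_connection_manager.RequestIDExtension{TypedConfig: cfg}, nil
}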
@@ -34,6 +34,16 @@ func ExtAuthzFilter(grpcClientTimeout *durationpb.Duration) *envoy_extensions_fi
 					ClusterName: "pomerium-authorize",
 				},
 			},
+			InitialMetadata: []*envoy_config_core_v3.HeaderValue{
+				{
+					Key:   "x-pomerium-traceparent",
+					Value: `%DYNAMIC_METADATA(pomerium.internal:traceparent)%`,
+				},
+				{
+					Key:   "x-pomerium-tracestate",
+					Value: `%DYNAMIC_METADATA(pomerium.internal:tracestate)%`,
+				},
+			},
 		},
 	},
 	MetadataContextNamespaces: []string{"com.pomerium.client-certificate-info"},

@@ -39,6 +39,22 @@ func (b *Builder) buildVirtualHost(
 		return nil, err
 	}
 	vh.Routes = append(vh.Routes, rs...)
+	vh.RequestHeadersToAdd = []*envoy_config_core_v3.HeaderValueOption{
+		{
+			Header: &envoy_config_core_v3.HeaderValue{
+				Key:   "x-pomerium-traceparent",
+				Value: `%DYNAMIC_METADATA(pomerium.internal:traceparent)%`,
+			},
+			AppendAction: envoy_config_core_v3.HeaderValueOption_OVERWRITE_IF_EXISTS_OR_ADD,
+		},
+		{
+			Header: &envoy_config_core_v3.HeaderValue{
+				Key:   "x-pomerium-tracestate",
+				Value: `%DYNAMIC_METADATA(pomerium.internal:tracestate)%`,
+			},
+			AppendAction: envoy_config_core_v3.HeaderValueOption_APPEND_IF_EXISTS_OR_ADD,
+		},
+	}

 	return vh, nil
 }

@@ -20,7 +20,7 @@ func (b *Builder) BuildListeners(
 	cfg *config.Config,
 	fullyStatic bool,
 ) ([]*envoy_config_listener_v3.Listener, error) {
-	ctx, span := trace.StartSpan(ctx, "envoyconfig.Builder.BuildListeners")
+	ctx, span := trace.Continue(ctx, "envoyconfig.Builder.BuildListeners")
 	defer span.End()

 	var listeners []*envoy_config_listener_v3.Listener

@@ -73,6 +73,9 @@ func (b *Builder) buildGRPCHTTPConnectionManagerFilter() *envoy_config_listener_
 				PathSpecifier: &envoy_config_route_v3.RouteMatch_Prefix{Prefix: fmt.Sprintf("/%s/", svc)},
 				Grpc:          &envoy_config_route_v3.RouteMatch_GrpcRouteMatchOptions{},
 			},
+			Decorator: &envoy_config_route_v3.Decorator{
+				Operation: fmt.Sprintf("pomerium-control-plane-grpc %s", svc),
+			},
 			Action: &envoy_config_route_v3.Route_Route{
 				Route: &envoy_config_route_v3.RouteAction{
 					ClusterSpecifier: &envoy_config_route_v3.RouteAction_Cluster{

@@ -10,7 +10,6 @@ import (
 	envoy_config_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
 	envoy_extensions_access_loggers_grpc_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/access_loggers/grpc/v3"
 	envoy_extensions_filters_network_http_connection_manager "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
-	envoy_type_v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
 	"google.golang.org/protobuf/types/known/durationpb"
 	"google.golang.org/protobuf/types/known/wrapperspb"

@@ -177,11 +176,6 @@ func (b *Builder) buildMainHTTPConnectionManagerFilter(
 		maxStreamDuration = durationpb.New(cfg.Options.WriteTimeout)
 	}

-	tracingProvider, err := buildTracingHTTP(cfg.Options)
-	if err != nil {
-		return nil, err
-	}
-
 	localReply, err := b.buildLocalReplyConfig(cfg.Options)
 	if err != nil {
 		return nil, err
@@ -198,10 +192,6 @@ func (b *Builder) buildMainHTTPConnectionManagerFilter(
 		},
 		HttpProtocolOptions: http1ProtocolOptions,
 		RequestTimeout:      durationpb.New(cfg.Options.ReadTimeout),
-		Tracing: &envoy_extensions_filters_network_http_connection_manager.HttpConnectionManager_Tracing{
-			RandomSampling: &envoy_type_v3.Percent{Value: cfg.Options.TracingSampleRate * 100},
-			Provider:       tracingProvider,
-		},
 		// See https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_conn_man/headers#x-forwarded-for
 		UseRemoteAddress: &wrapperspb.BoolValue{Value: true},
 		SkipXffAppend:    cfg.Options.SkipXffAppend,
@@ -224,6 +214,8 @@ func (b *Builder) buildMainHTTPConnectionManagerFilter(
 		mgr.CodecType = cfg.Options.GetCodecType().ToEnvoy()
 	}

+	applyTracingConfig(mgr, cfg.Options)
+
 	if fullyStatic {
 		routeConfiguration, err := b.buildMainRouteConfiguration(ctx, cfg)
 		if err != nil {

@@ -103,7 +103,7 @@ func TestBuildListeners(t *testing.T) {
 					}]
 				}
 			}
-		}`, httpConfig.Get("httpFilters.6").String(),
+		}`, httpConfig.Get("httpFilters.7").String(),
 			"should add alt-svc header")
 	case "quic-ingress":
 		hasQUIC = true
@@ -151,5 +151,6 @@ func Test_buildMainHTTPConnectionManagerFilter(t *testing.T) {
 	options.AuthenticateURLString = "https://authenticate.example.com"
 	filter, err := b.buildMainHTTPConnectionManagerFilter(context.Background(), &config.Config{Options: options}, false, false)
 	require.NoError(t, err)
+
 	testutil.AssertProtoJSONEqual(t, testData(t, "main_http_connection_manager_filter.json", nil), filter)
 }

@@ -108,6 +108,9 @@ func (b *Builder) buildOutboundRoutes() []*envoy_config_route_v3.Route {
 			PathSpecifier: &envoy_config_route_v3.RouteMatch_Prefix{Prefix: prefix},
 			Grpc:          &envoy_config_route_v3.RouteMatch_GrpcRouteMatchOptions{},
 		},
+		Decorator: &envoy_config_route_v3.Decorator{
+			Operation: fmt.Sprintf("Outbound (grpc): %s %s", def.Cluster, prefix),
+		},
 		Action: &envoy_config_route_v3.Route_Route{
 			Route: &envoy_config_route_v3.RouteAction{
 				ClusterSpecifier: &envoy_config_route_v3.RouteAction_Cluster{
@@ -130,6 +133,9 @@ func (b *Builder) buildOutboundRoutes() []*envoy_config_route_v3.Route {
 		Match: &envoy_config_route_v3.RouteMatch{
 			PathSpecifier: &envoy_config_route_v3.RouteMatch_Prefix{Prefix: "/envoy/stats/prometheus"},
 		},
+		Decorator: &envoy_config_route_v3.Decorator{
+			Operation: "Outbound: envoy-metrics /envoy/stats/prometheus/*",
+		},
 		Action: &envoy_config_route_v3.Route_Route{
 			Route: &envoy_config_route_v3.RouteAction{
 				ClusterSpecifier: &envoy_config_route_v3.RouteAction_Cluster{

@@ -15,6 +15,9 @@ func Test_buildOutboundRoutes(t *testing.T) {
 			"grpc": {},
 			"prefix": "/envoy.service.auth.v3.Authorization/"
 		},
+		"decorator": {
+			"operation": "Outbound (grpc): pomerium-authorize /envoy.service.auth.v3.Authorization/"
+		},
 		"name": "pomerium-authorize",
 		"route": {
 			"autoHostRewrite": true,
@@ -28,6 +31,9 @@ func Test_buildOutboundRoutes(t *testing.T) {
 			"grpc": {},
 			"prefix": "/databroker.DataBrokerService/"
 		},
+		"decorator": {
+			"operation": "Outbound (grpc): pomerium-databroker /databroker.DataBrokerService/"
+		},
 		"name": "pomerium-databroker",
 		"route": {
 			"autoHostRewrite": true,
@@ -41,6 +47,9 @@ func Test_buildOutboundRoutes(t *testing.T) {
 			"grpc": {},
 			"prefix": "/registry.Registry/"
 		},
+		"decorator": {
+			"operation": "Outbound (grpc): pomerium-databroker /registry.Registry/"
+		},
 		"name": "pomerium-databroker",
 		"route": {
 			"autoHostRewrite": true,
@@ -54,6 +63,9 @@ func Test_buildOutboundRoutes(t *testing.T) {
 			"grpc": {},
 			"prefix": "/"
 		},
+		"decorator": {
+			"operation": "Outbound (grpc): pomerium-control-plane-grpc /"
+		},
 		"name": "pomerium-control-plane-grpc",
 		"route": {
 			"autoHostRewrite": true,
@@ -66,6 +78,9 @@ func Test_buildOutboundRoutes(t *testing.T) {
 		"match": {
 			"prefix": "/envoy/stats/prometheus"
 		},
+		"decorator": {
+			"operation": "Outbound: envoy-metrics /envoy/stats/prometheus/*"
+		},
 		"name": "envoy-metrics",
 		"route": {
 			"cluster": "pomerium-envoy-admin",

@@ -20,7 +20,7 @@ func (b *Builder) BuildRouteConfigurations(
 	ctx context.Context,
 	cfg *config.Config,
 ) ([]*envoy_config_route_v3.RouteConfiguration, error) {
-	ctx, span := trace.StartSpan(ctx, "envoyconfig.Builder.BuildRouteConfigurations")
+	ctx, span := trace.Continue(ctx, "envoyconfig.Builder.BuildRouteConfigurations")
 	defer span.End()

 	var routeConfigurations []*envoy_config_route_v3.RouteConfiguration

@@ -42,6 +42,21 @@ func TestBuilder_buildMainRouteConfiguration(t *testing.T) {
 			{
 				"name": "catch-all",
 				"domains": ["*"],
+				"requestHeadersToAdd": [
+					{
+						"appendAction": "OVERWRITE_IF_EXISTS_OR_ADD",
+						"header": {
+							"key": "x-pomerium-traceparent",
+							"value": "%DYNAMIC_METADATA(pomerium.internal:traceparent)%"
+						}
+					},
+					{
+						"header": {
+							"key": "x-pomerium-tracestate",
+							"value": "%DYNAMIC_METADATA(pomerium.internal:tracestate)%"
+						}
+					}
+				],
 				"routes": [
 					`+protojson.Format(b.buildControlPlanePathRoute(cfg.Options, "/ping"))+`,
 					`+protojson.Format(b.buildControlPlanePathRoute(cfg.Options, "/healthz"))+`,
@@ -51,6 +66,10 @@ func TestBuilder_buildMainRouteConfiguration(t *testing.T) {
 					`+protojson.Format(b.buildControlPlanePrefixRoute(cfg.Options, "/.well-known/pomerium/"))+`,
 					{
 						"name": "policy-0",
+						"decorator": {
+							"operation": "ingress: ${method} ${host}${path}",
+							"propagate": false
+						},
 						"match": {
 							"headers": [
 								{ "name": ":authority", "stringMatch": { "safeRegex": { "regex": "^(.*)\\.example\\.com$" } }}
@@ -104,6 +123,10 @@ func TestBuilder_buildMainRouteConfiguration(t *testing.T) {
 					},
 					{
 						"name": "policy-0",
+						"decorator": {
+							"operation": "ingress: ${method} ${host}${path}",
+							"propagate": false
+						},
 						"match": {
 							"headers": [
 								{ "name": ":authority", "stringMatch": { "safeRegex": { "regex": "^(.*)\\.example\\.com:443$" } }}

@@ -114,6 +114,9 @@ func (b *Builder) buildControlPlanePathRoute(
 		Match: &envoy_config_route_v3.RouteMatch{
 			PathSpecifier: &envoy_config_route_v3.RouteMatch_Path{Path: path},
 		},
+		Decorator: &envoy_config_route_v3.Decorator{
+			Operation: "internal: ${method} ${host}${path}",
+		},
 		Action: &envoy_config_route_v3.Route_Route{
 			Route: &envoy_config_route_v3.RouteAction{
 				ClusterSpecifier: &envoy_config_route_v3.RouteAction_Cluster{
@@ -138,6 +141,9 @@ func (b *Builder) buildControlPlanePrefixRoute(
 		Match: &envoy_config_route_v3.RouteMatch{
 			PathSpecifier: &envoy_config_route_v3.RouteMatch_Prefix{Prefix: prefix},
 		},
+		Decorator: &envoy_config_route_v3.Decorator{
+			Operation: "internal: ${method} ${host}${path}",
+		},
 		Action: &envoy_config_route_v3.Route_Route{
 			Route: &envoy_config_route_v3.RouteAction{
 				ClusterSpecifier: &envoy_config_route_v3.RouteAction_Cluster{
@@ -271,6 +277,10 @@ func (b *Builder) buildRouteForPolicyAndMatch(
 	route := &envoy_config_route_v3.Route{
 		Name:  name,
 		Match: match,
+		Decorator: &envoy_config_route_v3.Decorator{
+			Operation: "ingress: ${method} ${host}${path}",
+			Propagate: wrapperspb.Bool(false),
+		},
 		Metadata:               &envoy_config_core_v3.Metadata{},
 		RequestHeadersToRemove: getRequestHeadersToRemove(cfg.Options, policy),
 		ResponseHeadersToAdd:   toEnvoyHeaders(cfg.Options.GetSetResponseHeadersForPolicy(policy)),

@@ -59,6 +59,9 @@ func Test_buildPomeriumHTTPRoutes(t *testing.T) {
 	routeString := func(typ, name string) string {
 		str := `{
 			"name": "pomerium-` + typ + `-` + name + `",
+			"decorator": {
+				"operation": "internal: ${method} ${host}${path}"
+			},
 			"match": {
 				"` + typ + `": "` + name + `"
 			},
@@ -135,6 +138,9 @@ func Test_buildControlPlanePathRoute(t *testing.T) {
 	testutil.AssertProtoJSONEqual(t, `
 		{
 			"name": "pomerium-path-/hello/world",
+			"decorator": {
+				"operation": "internal: ${method} ${host}${path}"
+			},
 			"match": {
 				"path": "/hello/world"
 			},
@@ -179,6 +185,9 @@ func Test_buildControlPlanePrefixRoute(t *testing.T) {
 	testutil.AssertProtoJSONEqual(t, `
 		{
 			"name": "pomerium-prefix-/hello/world/",
+			"decorator": {
+				"operation": "internal: ${method} ${host}${path}"
+			},
 			"match": {
 				"prefix": "/hello/world/"
 			},

@@ -406,6 +415,10 @@ func Test_buildPolicyRoutes(t *testing.T) {
 			[
 				{
 					"name": "policy-1",
+					"decorator": {
+						"operation": "ingress: ${method} ${host}${path}",
+						"propagate": false
+					},
 					"match": {
 						"prefix": "/"
 					},

The identical four-line "decorator" block is inserted into every other expected route in this test file: policy-2 through policy-8 (hunks @ -476, -547, -617, -689, -760, -830 and -901, matching on "path", "prefix", "safeRegex" or the "/websocket-timeout" path), the later policy-0 and policy-1 cases (hunks @ -994, -1083, -1155, -1248, -1345 and -1440, matching on "prefix" or "connectMatcher"), and policy-0 through policy-5 in Test_buildPolicyRoutesRewrite (hunks @ -1588, -1659, -1730, -1806, -1877 and -1948, all matching on "prefix": "/").

@@ -23,7 +23,69 @@
     "commonHttpProtocolOptions": {
       "idleTimeout": "300s"
     },
+    "earlyHeaderMutationExtensions": [
+      {
+        "name": "envoy.http.early_header_mutation.trace_context",
+        "typedConfig": {
+          "@type": "type.googleapis.com/pomerium.extensions.TraceContext"
+        }
+      }
+    ],
     "httpFilters": [
+      {
+        "name": "envoy.filters.http.header_to_metadata",
+        "typedConfig": {
+          "@type": "type.googleapis.com/envoy.extensions.filters.http.header_to_metadata.v3.Config",
+          "requestRules": [
+            {
+              "header": "x-pomerium-traceparent",
+              "onHeaderPresent": {
+                "metadataNamespace": "pomerium.internal",
+                "key": "traceparent"
+              }
+            },
+            {
+              "header": "x-pomerium-tracestate",
+              "onHeaderPresent": {
+                "metadataNamespace": "pomerium.internal",
+                "key": "tracestate"
+              }
+            },
+            {
+              "header": "x-pomerium-external-parent-span",
+              "onHeaderPresent": {
+                "key": "external-parent-span",
+                "metadataNamespace": "pomerium.internal"
+              },
+              "remove": true
+            },
+            {
+              "header": "x-pomerium-sampling-decision",
+              "onHeaderPresent": {
+                "metadataNamespace": "pomerium.internal",
+                "key": "sampling-decision"
+              },
+              "remove": true
+            }
+          ],
+          "responseRules": [
+            {
+              "header": "x-pomerium-traceparent",
+              "onHeaderPresent": {
+                "metadataNamespace": "pomerium.internal",
+                "key": "traceparent"
+              }
+            },
+            {
+              "header": "x-pomerium-tracestate",
+              "onHeaderPresent": {
+                "metadataNamespace": "pomerium.internal",
+                "key": "tracestate"
+              }
+            }
+          ]
+        }
+      },
       {
         "name": "envoy.filters.http.lua",
         "typedConfig": {
@@ -50,13 +112,25 @@
             "envoyGrpc": {
               "clusterName": "pomerium-authorize"
             },
-            "timeout": "10s"
-          },
-          "metadataContextNamespaces": ["com.pomerium.client-certificate-info"],
+            "timeout": "10s",
+            "initialMetadata": [
+              {
+                "key": "x-pomerium-traceparent",
+                "value": "%DYNAMIC_METADATA(pomerium.internal:traceparent)%"
+              },
+              {
+                "key": "x-pomerium-tracestate",
+                "value": "%DYNAMIC_METADATA(pomerium.internal:tracestate)%"
+              }
+            ]
+          },
+          "transportApiVersion": "V3",
           "statusOnError": {
            "code": "InternalServerError"
           },
-          "transportApiVersion": "V3"
+          "metadataContextNamespaces": [
+            "com.pomerium.client-certificate-info"
+          ]
         }
       },
       {
@@ -73,7 +147,7 @@
         "typedConfig": {
           "@type": "type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua",
           "defaultSourceCode": {
-            "inlineString": "function has_prefix(str, prefix)\n ... if string.len(result) \u003e 0 then ... \nfunction envoy_on_response(response_handle) end\n"
+            "inlineString": "function has_prefix(str, prefix)\n ... if string.len(result) > 0 then ... \nfunction envoy_on_response(response_handle) end\n"
          }
        }
      },
(the inlineString is otherwise unchanged; the only difference is that the escaped "\u003e" in the old fixture is now a literal ">")
@@ -167,6 +241,13 @@
       }
      ]
     },
+    "requestIdExtension": {
+      "typedConfig": {
+        "@type": "type.googleapis.com/pomerium.extensions.UuidxRequestIdConfig",
+        "packTraceReason": true,
+        "useRequestIdForTraceSampling": true
+      }
+    },
     "requestTimeout": "30s",
     "normalizePath": true,
     "rds": {
@@ -181,7 +262,83 @@
     "tracing": {
       "randomSampling": {
         "value": 0.01
+      },
+      "verbose": true,
+      "maxPathTagLength": 1024,
+      "customTags": [
+        {
+          "tag": "pomerium.traceparent",
+          "metadata": {
+            "kind": {
+              "request": {}
+            },
+            "metadataKey": {
+              "key": "pomerium.internal",
+              "path": [
+                {
+                  "key": "traceparent"
                 }
+              ]
+            }
+          }
+        },
+        {
+          "tag": "pomerium.tracestate",
+          "metadata": {
+            "kind": {
+              "request": {}
+            },
+            "metadataKey": {
+              "key": "pomerium.internal",
+              "path": [
+                {
+                  "key": "tracestate"
+                }
+              ]
+            }
+          }
+        },
+        {
+          "metadata": {
+            "kind": {
+              "request": {}
+            },
+            "metadataKey": {
+              "key": "pomerium.internal",
+              "path": [
+                {
+                  "key": "external-parent-span"
+                }
+              ]
+            }
+          },
+          "tag": "pomerium.external-parent-span"
+        }
+      ],
+      "provider": {
+        "name": "envoy.tracers.opentelemetry",
+        "typedConfig": {
+          "@type": "type.googleapis.com/envoy.config.trace.v3.OpenTelemetryConfig",
+          "grpcService": {
+            "envoyGrpc": {
+              "clusterName": "pomerium-control-plane-grpc"
+            }
+          },
+          "serviceName": "Envoy",
+          "resourceDetectors": [
+            {
+              "name": "envoy.tracers.opentelemetry.resource_detectors.static_config",
+              "typedConfig": {
+                "@type": "type.googleapis.com/envoy.extensions.tracers.opentelemetry.resource_detectors.v3.StaticConfigResourceDetectorConfig",
+                "attributes": {
+                  "pomerium.envoy": "true"
+                }
+              }
+            }
+          ]
+        }
+      },
+      "spawnUpstreamSpan": true
     },
     "useRemoteAddress": true,
     "xffNumTrustedHops": 1

@ -2,133 +2,160 @@ package envoyconfig
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"os"
|
||||||
|
"strconv"
|
||||||
envoy_config_cluster_v3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
|
|
||||||
envoy_config_endpoint_v3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
|
|
||||||
envoy_config_trace_v3 "github.com/envoyproxy/go-control-plane/envoy/config/trace/v3"
|
|
||||||
"google.golang.org/protobuf/types/known/durationpb"
|
|
||||||
|
|
||||||
|
envoy_config_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
|
||||||
|
tracev3 "github.com/envoyproxy/go-control-plane/envoy/config/trace/v3"
|
||||||
|
envoy_extensions_filters_http_header_to_metadata "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/header_to_metadata/v3"
|
||||||
|
envoy_extensions_filters_network_http_connection_manager "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
|
||||||
|
envoy_extensions_tracers_otel "github.com/envoyproxy/go-control-plane/envoy/extensions/tracers/opentelemetry/resource_detectors/v3"
|
||||||
|
metadatav3 "github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3"
|
||||||
|
envoy_tracing_v3 "github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3"
|
||||||
|
envoy_type_v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
|
||||||
"github.com/pomerium/pomerium/config"
|
"github.com/pomerium/pomerium/config"
|
||||||
"github.com/pomerium/pomerium/internal/telemetry/trace"
|
"github.com/pomerium/pomerium/config/envoyconfig/extensions"
|
||||||
"github.com/pomerium/pomerium/pkg/protoutil"
|
"google.golang.org/protobuf/types/known/wrapperspb"
|
||||||
)
|
)
|
||||||
|
|
||||||
Removed:

func buildTracingCluster(options *config.Options) (*envoy_config_cluster_v3.Cluster, error) {
  tracingOptions, err := config.NewTracingOptions(options)
  if err != nil {
    return nil, fmt.Errorf("envoyconfig: invalid tracing config: %w", err)
  }

  switch tracingOptions.Provider {
  case trace.DatadogTracingProviderName:
    addr, _ := parseAddress("127.0.0.1:8126")
    if options.TracingDatadogAddress != "" {
      addr, err = parseAddress(options.TracingDatadogAddress)
      if err != nil {
        return nil, fmt.Errorf("envoyconfig: invalid tracing datadog address: %w", err)
      }
    }

    endpoints := []*envoy_config_endpoint_v3.LbEndpoint{{
      HostIdentifier: &envoy_config_endpoint_v3.LbEndpoint_Endpoint{
        Endpoint: &envoy_config_endpoint_v3.Endpoint{
          Address: addr,
        },
      },
    }}

    return &envoy_config_cluster_v3.Cluster{
      Name: "datadog-apm",
      ConnectTimeout: &durationpb.Duration{
        Seconds: 5,
      },
      ClusterDiscoveryType: getClusterDiscoveryType(endpoints),
      LbPolicy:             envoy_config_cluster_v3.Cluster_ROUND_ROBIN,
      LoadAssignment: &envoy_config_endpoint_v3.ClusterLoadAssignment{
        ClusterName: "datadog-apm",
        Endpoints: []*envoy_config_endpoint_v3.LocalityLbEndpoints{{
          LbEndpoints: endpoints,
        }},
      },
    }, nil
  case trace.ZipkinTracingProviderName:
    host := tracingOptions.ZipkinEndpoint.Host
    if _, port, _ := net.SplitHostPort(host); port == "" {
      if tracingOptions.ZipkinEndpoint.Scheme == "https" {
        host = net.JoinHostPort(host, "443")
      } else {
        host = net.JoinHostPort(host, "80")
      }
    }

    addr, err := parseAddress(host)
    if err != nil {
      return nil, fmt.Errorf("envoyconfig: invalid tracing zipkin address: %w", err)
    }

    endpoints := []*envoy_config_endpoint_v3.LbEndpoint{{
      HostIdentifier: &envoy_config_endpoint_v3.LbEndpoint_Endpoint{
        Endpoint: &envoy_config_endpoint_v3.Endpoint{
          Address: addr,
        },
      },
    }}

    return &envoy_config_cluster_v3.Cluster{
      Name: "zipkin",
      ConnectTimeout: &durationpb.Duration{
        Seconds: 5,
      },
      ClusterDiscoveryType: getClusterDiscoveryType(endpoints),
      LbPolicy:             envoy_config_cluster_v3.Cluster_ROUND_ROBIN,
      LoadAssignment: &envoy_config_endpoint_v3.ClusterLoadAssignment{
        ClusterName: "zipkin",
        Endpoints: []*envoy_config_endpoint_v3.LocalityLbEndpoints{{
          LbEndpoints: endpoints,
        }},
      },
    }, nil
  default:
    return nil, nil
  }
}

func buildTracingHTTP(options *config.Options) (*envoy_config_trace_v3.Tracing_Http, error) {
  tracingOptions, err := config.NewTracingOptions(options)
  if err != nil {
    return nil, fmt.Errorf("invalid tracing config: %w", err)
  }

  switch tracingOptions.Provider {
  case trace.DatadogTracingProviderName:
    tracingTC := protoutil.NewAny(&envoy_config_trace_v3.DatadogConfig{
      CollectorCluster: "datadog-apm",
      ServiceName:      tracingOptions.Service,
    })
    return &envoy_config_trace_v3.Tracing_Http{
      Name: "envoy.tracers.datadog",
      ConfigType: &envoy_config_trace_v3.Tracing_Http_TypedConfig{
        TypedConfig: tracingTC,
      },
    }, nil
  case trace.ZipkinTracingProviderName:
    path := tracingOptions.ZipkinEndpoint.Path
    if path == "" {
      path = "/"
    }
    tracingTC := protoutil.NewAny(&envoy_config_trace_v3.ZipkinConfig{
      CollectorCluster:         "zipkin",
      CollectorEndpoint:        path,
      CollectorEndpointVersion: envoy_config_trace_v3.ZipkinConfig_HTTP_JSON,
    })
    return &envoy_config_trace_v3.Tracing_Http{
      Name: "envoy.tracers.zipkin",
      ConfigType: &envoy_config_trace_v3.Tracing_Http_TypedConfig{
        TypedConfig: tracingTC,
      },
    }, nil
  default:
    return nil, nil
  }
}

Added:

func applyTracingConfig(
  mgr *envoy_extensions_filters_network_http_connection_manager.HttpConnectionManager,
  opts *config.Options,
) {
  mgr.HttpFilters = append([]*envoy_extensions_filters_network_http_connection_manager.HttpFilter{
    tracingMetadataFilter(),
  }, mgr.HttpFilters...)

  mgr.EarlyHeaderMutationExtensions = []*envoy_config_core_v3.TypedExtensionConfig{
    {
      Name:        "envoy.http.early_header_mutation.trace_context",
      TypedConfig: marshalAny(&extensions.TraceContext{}),
    },
  }
  mgr.RequestIdExtension = &envoy_extensions_filters_network_http_connection_manager.RequestIDExtension{
    TypedConfig: marshalAny(&extensions.UuidxRequestIdConfig{
      PackTraceReason:              wrapperspb.Bool(true),
      UseRequestIdForTraceSampling: wrapperspb.Bool(true),
    }),
  }

  maxPathTagLength := uint32(1024)
  if value, ok := os.LookupEnv("OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT"); ok {
    if num, err := strconv.ParseUint(value, 10, 32); err == nil {
      maxPathTagLength = max(64, uint32(num))
    }
  }
  requestTag := func(key string) *envoy_tracing_v3.CustomTag {
    return &envoy_tracing_v3.CustomTag{
      Tag: fmt.Sprintf("pomerium.%s", key),
      Type: &envoy_tracing_v3.CustomTag_Metadata_{
        Metadata: &envoy_tracing_v3.CustomTag_Metadata{
          Kind: &metadatav3.MetadataKind{
            Kind: &metadatav3.MetadataKind_Request_{
              Request: &metadatav3.MetadataKind_Request{},
            },
          },
          MetadataKey: &metadatav3.MetadataKey{
            Key: "pomerium.internal",
            Path: []*metadatav3.MetadataKey_PathSegment{
              {
                Segment: &metadatav3.MetadataKey_PathSegment_Key{
                  Key: key,
                },
              },
            },
          },
        },
      },
    }
  }
  mgr.Tracing = &envoy_extensions_filters_network_http_connection_manager.HttpConnectionManager_Tracing{
    RandomSampling:    &envoy_type_v3.Percent{Value: opts.TracingSampleRate * 100},
    Verbose:           true,
    SpawnUpstreamSpan: wrapperspb.Bool(true),
    Provider: &tracev3.Tracing_Http{
      Name: "envoy.tracers.opentelemetry",
      ConfigType: &tracev3.Tracing_Http_TypedConfig{
        TypedConfig: marshalAny(&tracev3.OpenTelemetryConfig{
          GrpcService: &envoy_config_core_v3.GrpcService{
            TargetSpecifier: &envoy_config_core_v3.GrpcService_EnvoyGrpc_{
              EnvoyGrpc: &envoy_config_core_v3.GrpcService_EnvoyGrpc{
                ClusterName: "pomerium-control-plane-grpc",
              },
            },
          },
          ServiceName: "Envoy",
          ResourceDetectors: []*envoy_config_core_v3.TypedExtensionConfig{
            {
              Name: "envoy.tracers.opentelemetry.resource_detectors.static_config",
              TypedConfig: marshalAny(&envoy_extensions_tracers_otel.StaticConfigResourceDetectorConfig{
                Attributes: map[string]string{
                  "pomerium.envoy": "true",
                },
              }),
            },
          },
        }),
      },
    },
    // this allows full URLs to be displayed in traces, they are otherwise truncated
    MaxPathTagLength: wrapperspb.UInt32(maxPathTagLength),
    CustomTags: []*envoy_tracing_v3.CustomTag{
      requestTag("traceparent"),
      requestTag("tracestate"),
      requestTag("external-parent-span"),
    },
  }
}

func tracingMetadataFilter() *envoy_extensions_filters_network_http_connection_manager.HttpFilter {
  traceparentRule := &envoy_extensions_filters_http_header_to_metadata.Config_Rule{
    Header: "x-pomerium-traceparent",
    OnHeaderPresent: &envoy_extensions_filters_http_header_to_metadata.Config_KeyValuePair{
      MetadataNamespace: "pomerium.internal",
      Key:               "traceparent",
    },
    Remove: false,
  }
  tracestateRule := &envoy_extensions_filters_http_header_to_metadata.Config_Rule{
    Header: "x-pomerium-tracestate",
    OnHeaderPresent: &envoy_extensions_filters_http_header_to_metadata.Config_KeyValuePair{
      MetadataNamespace: "pomerium.internal",
      Key:               "tracestate",
    },
    Remove: false,
  }
  externalParentSpanRule := &envoy_extensions_filters_http_header_to_metadata.Config_Rule{
    Header: "x-pomerium-external-parent-span",
    OnHeaderPresent: &envoy_extensions_filters_http_header_to_metadata.Config_KeyValuePair{
      MetadataNamespace: "pomerium.internal",
      Key:               "external-parent-span",
    },
    Remove: true,
  }
  samplingDecisionRule := &envoy_extensions_filters_http_header_to_metadata.Config_Rule{
    Header: "x-pomerium-sampling-decision",
    OnHeaderPresent: &envoy_extensions_filters_http_header_to_metadata.Config_KeyValuePair{
      MetadataNamespace: "pomerium.internal",
      Key:               "sampling-decision",
    },
    Remove: true,
  }
  return &envoy_extensions_filters_network_http_connection_manager.HttpFilter{
    Name: "envoy.filters.http.header_to_metadata",
    ConfigType: &envoy_extensions_filters_network_http_connection_manager.HttpFilter_TypedConfig{
      TypedConfig: marshalAny(&envoy_extensions_filters_http_header_to_metadata.Config{
        RequestRules: []*envoy_extensions_filters_http_header_to_metadata.Config_Rule{
          traceparentRule,
          tracestateRule,
          externalParentSpanRule,
          samplingDecisionRule,
        },
        ResponseRules: []*envoy_extensions_filters_http_header_to_metadata.Config_Rule{
          traceparentRule,
          tracestateRule,
        },
      }),
    },
  }
}
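Editor's note: the header_to_metadata filter above copies W3C Trace Context values from x-pomerium-* headers into the "pomerium.internal" metadata namespace, and requestTag() surfaces them as span tags such as "pomerium.traceparent". The following is a minimal standalone sketch (not part of this commit; the header value is illustrative) of what a traceparent value carries.

package main

import (
  "fmt"
  "strings"
)

// parseTraceparent splits a W3C traceparent value into its four fields:
// version, trace-id, parent span id, and trace-flags.
func parseTraceparent(v string) (version, traceID, spanID, flags string, err error) {
  parts := strings.Split(v, "-")
  if len(parts) != 4 {
    return "", "", "", "", fmt.Errorf("malformed traceparent: %q", v)
  }
  return parts[0], parts[1], parts[2], parts[3], nil
}

func main() {
  // Example value as it might appear in an x-pomerium-traceparent header.
  const header = "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"
  version, traceID, spanID, flags, err := parseTraceparent(header)
  if err != nil {
    panic(err)
  }
  // These are the pieces Envoy would expose on the span via the custom tags.
  fmt.Println(version, traceID, spanID, flags)
}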
@@ -1,135 +0,0 @@ (test file deleted)

Removed:

package envoyconfig

import (
  "testing"

  "github.com/stretchr/testify/require"

  "github.com/pomerium/pomerium/config"
  "github.com/pomerium/pomerium/internal/testutil"
)

func TestBuildTracingCluster(t *testing.T) {
  t.Run("datadog", func(t *testing.T) {
    c, err := buildTracingCluster(&config.Options{
      TracingProvider: "datadog",
    })
    require.NoError(t, err)
    testutil.AssertProtoJSONEqual(t, `
      {
        "name": "datadog-apm",
        "type": "STATIC",
        "connectTimeout": "5s",
        "loadAssignment": {
          "clusterName": "datadog-apm",
          "endpoints": [{
            "lbEndpoints": [{
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "127.0.0.1",
                    "portValue": 8126
                  }
                }
              }
            }]
          }]
        }
      }
    `, c)

    c, err = buildTracingCluster(&config.Options{
      TracingProvider:       "datadog",
      TracingDatadogAddress: "example.com:8126",
    })
    require.NoError(t, err)
    testutil.AssertProtoJSONEqual(t, `
      {
        "name": "datadog-apm",
        "type": "STRICT_DNS",
        "connectTimeout": "5s",
        "loadAssignment": {
          "clusterName": "datadog-apm",
          "endpoints": [{
            "lbEndpoints": [{
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "example.com",
                    "portValue": 8126
                  }
                }
              }
            }]
          }]
        }
      }
    `, c)
  })
  t.Run("zipkin", func(t *testing.T) {
    c, err := buildTracingCluster(&config.Options{
      TracingProvider: "zipkin",
      ZipkinEndpoint:  "https://example.com/api/v2/spans",
    })
    require.NoError(t, err)
    testutil.AssertProtoJSONEqual(t, `
      {
        "name": "zipkin",
        "type": "STRICT_DNS",
        "connectTimeout": "5s",
        "loadAssignment": {
          "clusterName": "zipkin",
          "endpoints": [{
            "lbEndpoints": [{
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "example.com",
                    "portValue": 443
                  }
                }
              }
            }]
          }]
        }
      }
    `, c)
  })
}

func TestBuildTracingHTTP(t *testing.T) {
  t.Run("datadog", func(t *testing.T) {
    h, err := buildTracingHTTP(&config.Options{
      TracingProvider: "datadog",
    })
    require.NoError(t, err)
    testutil.AssertProtoJSONEqual(t, `
      {
        "name": "envoy.tracers.datadog",
        "typedConfig": {
          "@type": "type.googleapis.com/envoy.config.trace.v3.DatadogConfig",
          "collectorCluster": "datadog-apm",
          "serviceName": "pomerium"
        }
      }
    `, h)
  })
  t.Run("zipkin", func(t *testing.T) {
    h, err := buildTracingHTTP(&config.Options{
      TracingProvider: "zipkin",
      ZipkinEndpoint:  "https://example.com/api/v2/spans",
    })
    require.NoError(t, err)
    testutil.AssertProtoJSONEqual(t, `
      {
        "name": "envoy.tracers.zipkin",
        "typedConfig": {
          "@type": "type.googleapis.com/envoy.config.trace.v3.ZipkinConfig",
          "collectorCluster": "zipkin",
          "collectorEndpoint": "/api/v2/spans",
          "collectorEndpointVersion": "HTTP_JSON"
        }
      }
    `, h)
  })
}
@@ -20,11 +20,6 @@ func NewLogManager(ctx context.Context, src Source) *LogManager {
   return mgr
 }
 
-// Close closes the log manager.
-func (mgr *LogManager) Close() error {
-  return nil
-}
-
 // OnConfigChange is called whenever configuration changes.
 func (mgr *LogManager) OnConfigChange(_ context.Context, cfg *Config) {
   if cfg == nil || cfg.Options == nil {
@@ -45,11 +45,6 @@ func NewMetricsManager(ctx context.Context, src Source) *MetricsManager {
   return mgr
 }
 
-// Close closes any underlying http server.
-func (mgr *MetricsManager) Close() error {
-  return nil
-}
-
 // OnConfigChange updates the metrics manager when configuration is changed.
 func (mgr *MetricsManager) OnConfigChange(ctx context.Context, cfg *Config) {
   mgr.mu.Lock()
@@ -207,26 +207,22 @@ type Options struct {
   MetricsClientCA     string `mapstructure:"metrics_client_ca" yaml:"metrics_client_ca,omitempty"`
   MetricsClientCAFile string `mapstructure:"metrics_client_ca_file" yaml:"metrics_client_ca_file,omitempty"`
 
-  // Tracing shared settings
-  TracingProvider   string  `mapstructure:"tracing_provider" yaml:"tracing_provider,omitempty"`
   TracingSampleRate float64 `mapstructure:"tracing_sample_rate" yaml:"tracing_sample_rate,omitempty"`
+  TracingProvider     string `mapstructure:"tracing_provider" yaml:"tracing_provider,omitempty"`
+  TracingOTLPEndpoint string `mapstructure:"tracing_otlp_endpoint" yaml:"tracing_otlp_endpoint,omitempty"`
+  TracingOTLPProtocol string `mapstructure:"tracing_otlp_protocol" yaml:"tracing_otlp_protocol,omitempty"`
 
-  // Datadog tracing address
+  // Deprecated: this field is ignored.
+  // Configure tracing using the OTLP options or environment variables.
   TracingDatadogAddress string `mapstructure:"tracing_datadog_address" yaml:"tracing_datadog_address,omitempty"`
-  // Jaeger
-  //
-  // CollectorEndpoint is the full url to the Jaeger HTTP Thrift collector.
-  // For example, http://localhost:14268/api/traces
+  // Deprecated: this field is ignored.
+  // Configure tracing using the OTLP options or environment variables.
   TracingJaegerCollectorEndpoint string `mapstructure:"tracing_jaeger_collector_endpoint" yaml:"tracing_jaeger_collector_endpoint,omitempty"`
-  // AgentEndpoint instructs exporter to send spans to jaeger-agent at this address.
-  // For example, localhost:6831.
+  // Deprecated: this field is ignored.
+  // Configure tracing using the OTLP options or environment variables.
   TracingJaegerAgentEndpoint string `mapstructure:"tracing_jaeger_agent_endpoint" yaml:"tracing_jaeger_agent_endpoint,omitempty"`
-  // Zipkin
-  //
-  // ZipkinEndpoint configures the zipkin collector URI
-  // Example: http://zipkin:9411/api/v2/spans
+  // Deprecated: this field is ignored.
+  // Configure tracing using the OTLP options or environment variables.
   ZipkinEndpoint string `mapstructure:"tracing_zipkin_endpoint" yaml:"tracing_zipkin_endpoint"`
 
   // GRPC Service Settings
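Editor's note: the new tracing_otlp_protocol option may be left empty; elsewhere in this commit the protocol is then guessed from the endpoint via trace.BestEffortProtocolFromOTLPEndpoint. The sketch below is purely hypothetical and only illustrates what such a best-effort guess could look like, assuming the conventional OTLP ports (4317 for gRPC, 4318 for HTTP); the real helper in internal/telemetry/trace may behave differently.

package main

import (
  "fmt"
  "net/url"
)

// guessOTLPProtocol (hypothetical) returns "grpc" for endpoints that look like
// the conventional OTLP/gRPC port and "http/protobuf" for everything else.
func guessOTLPProtocol(endpoint string) string {
  u, err := url.Parse(endpoint)
  if err != nil {
    return "http/protobuf"
  }
  if u.Port() == "4317" {
    return "grpc"
  }
  return "http/protobuf"
}

func main() {
  fmt.Println(guessOTLPProtocol("http://localhost:4317"))           // grpc
  fmt.Println(guessOTLPProtocol("http://localhost:4318/v1/traces")) // http/protobuf
}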
@@ -1516,6 +1512,8 @@ func (o *Options) ApplySettings(
   setCertificate(&o.MetricsCertificate, &o.MetricsCertificateKey, settings.MetricsCertificate)
   set(&o.MetricsClientCA, settings.MetricsClientCa)
   set(&o.TracingProvider, settings.TracingProvider)
+  set(&o.TracingOTLPEndpoint, settings.TracingOtlpEndpoint)
+  set(&o.TracingOTLPProtocol, settings.TracingOtlpProtocol)
   set(&o.TracingSampleRate, settings.TracingSampleRate)
   set(&o.TracingDatadogAddress, settings.TracingDatadogAddress)
   set(&o.TracingJaegerCollectorEndpoint, settings.TracingJaegerCollectorEndpoint)
@@ -1606,6 +1604,8 @@ func (o *Options) ToProto() *config.Config {
   copySrcToOptionalDest(&settings.MetricsClientCa, valueOrFromFileBase64(o.MetricsClientCA, o.MetricsClientCAFile))
   copySrcToOptionalDest(&settings.TracingProvider, &o.TracingProvider)
   copySrcToOptionalDest(&settings.TracingSampleRate, &o.TracingSampleRate)
+  copySrcToOptionalDest(&settings.TracingOtlpEndpoint, &o.TracingOTLPEndpoint)
+  copySrcToOptionalDest(&settings.TracingOtlpProtocol, &o.TracingOTLPProtocol)
   copySrcToOptionalDest(&settings.TracingDatadogAddress, &o.TracingDatadogAddress)
   copySrcToOptionalDest(&settings.TracingJaegerCollectorEndpoint, &o.TracingJaegerCollectorEndpoint)
   copySrcToOptionalDest(&settings.TracingJaegerAgentEndpoint, &o.TracingJaegerAgentEndpoint)
config/trace.go (125 lines removed)
@@ -1,125 +0,0 @@ (file deleted)

Removed:

package config

import (
  "context"
  "fmt"
  "reflect"
  "sync"

  "github.com/rs/zerolog"

  "github.com/pomerium/pomerium/internal/log"
  "github.com/pomerium/pomerium/internal/telemetry"
  "github.com/pomerium/pomerium/internal/telemetry/trace"
  "github.com/pomerium/pomerium/internal/urlutil"
)

// TracingOptions are the options for tracing.
type TracingOptions = trace.TracingOptions

// NewTracingOptions builds a new TracingOptions from core Options
func NewTracingOptions(o *Options) (*TracingOptions, error) {
  tracingOpts := TracingOptions{
    Provider:            o.TracingProvider,
    Service:             telemetry.ServiceName(o.Services),
    JaegerAgentEndpoint: o.TracingJaegerAgentEndpoint,
    SampleRate:          o.TracingSampleRate,
  }

  switch o.TracingProvider {
  case trace.DatadogTracingProviderName:
    tracingOpts.DatadogAddress = o.TracingDatadogAddress
  case trace.JaegerTracingProviderName:
    if o.TracingJaegerCollectorEndpoint != "" {
      jaegerCollectorEndpoint, err := urlutil.ParseAndValidateURL(o.TracingJaegerCollectorEndpoint)
      if err != nil {
        return nil, fmt.Errorf("config: invalid jaeger endpoint url: %w", err)
      }
      tracingOpts.JaegerCollectorEndpoint = jaegerCollectorEndpoint
      tracingOpts.JaegerAgentEndpoint = o.TracingJaegerAgentEndpoint
    }
  case trace.ZipkinTracingProviderName:
    zipkinEndpoint, err := urlutil.ParseAndValidateURL(o.ZipkinEndpoint)
    if err != nil {
      return nil, fmt.Errorf("config: invalid zipkin endpoint url: %w", err)
    }
    tracingOpts.ZipkinEndpoint = zipkinEndpoint
  case "":
    return &TracingOptions{}, nil
  default:
    return nil, fmt.Errorf("config: provider %s unknown", o.TracingProvider)
  }

  return &tracingOpts, nil
}

// A TraceManager manages setting up a trace exporter based on configuration options.
type TraceManager struct {
  mu        sync.Mutex
  traceOpts *TracingOptions
  provider  trace.Provider
}

// NewTraceManager creates a new TraceManager.
func NewTraceManager(ctx context.Context, src Source) *TraceManager {
  ctx = log.WithContext(ctx, func(c zerolog.Context) zerolog.Context {
    return c.Str("service", "trace_manager")
  })
  mgr := &TraceManager{}
  src.OnConfigChange(ctx, mgr.OnConfigChange)
  mgr.OnConfigChange(ctx, src.GetConfig())
  return mgr
}

// Close closes any underlying trace exporter.
func (mgr *TraceManager) Close() error {
  mgr.mu.Lock()
  defer mgr.mu.Unlock()

  var err error
  if mgr.provider != nil {
    err = mgr.provider.Unregister()
  }
  return err
}

// OnConfigChange updates the manager whenever the configuration is changed.
func (mgr *TraceManager) OnConfigChange(ctx context.Context, cfg *Config) {
  mgr.mu.Lock()
  defer mgr.mu.Unlock()

  traceOpts, err := NewTracingOptions(cfg.Options)
  if err != nil {
    log.Ctx(ctx).Error().Err(err).Msg("trace: failed to build tracing options")
    return
  }

  if reflect.DeepEqual(traceOpts, mgr.traceOpts) {
    log.Ctx(ctx).Debug().Msg("no change detected in trace options")
    return
  }
  mgr.traceOpts = traceOpts

  if mgr.provider != nil {
    _ = mgr.provider.Unregister()
    mgr.provider = nil
  }

  if !traceOpts.Enabled() {
    return
  }

  log.Ctx(ctx).Info().Interface("options", traceOpts).Msg("trace: starting exporter")

  mgr.provider, err = trace.GetProvider(traceOpts)
  if err != nil {
    log.Ctx(ctx).Error().Err(err).Msg("trace: failed to register exporter")
    return
  }

  err = mgr.provider.Register(traceOpts)
  if err != nil {
    log.Ctx(ctx).Error().Err(err).Msg("trace: failed to register exporter")
    return
  }
}
config/trace_client.go (new file, 44 lines)
@@ -0,0 +1,44 @@

Added:

package config

import (
  "errors"
  "fmt"
  "strings"

  "github.com/pomerium/pomerium/internal/telemetry/trace"
  "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
  "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
  "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
)

var ErrNoTracingConfig = errors.New("no tracing config")

func NewTraceClientFromOptions(opts *Options) (otlptrace.Client, error) {
  switch opts.TracingProvider {
  case "otlp":
    endpoint := opts.TracingOTLPEndpoint
    protocol := opts.TracingOTLPProtocol
    if protocol == "" && endpoint != "" {
      // treat this field as equivalent to OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
      protocol = trace.BestEffortProtocolFromOTLPEndpoint(opts.TracingOTLPEndpoint, true)
    }
    switch strings.ToLower(strings.TrimSpace(protocol)) {
    case "grpc":
      return otlptracegrpc.NewClient(
        otlptracegrpc.WithEndpointURL(endpoint),
      ), nil
    case "http/protobuf", "":
      return otlptracehttp.NewClient(
        otlptracehttp.WithEndpointURL(endpoint),
      ), nil
    default:
      return nil, fmt.Errorf(`unknown otlp trace exporter protocol %q, expected "grpc" or "http/protobuf"\n`, protocol)
    }
  case "none", "noop":
    return trace.NoopClient{}, nil
  case "":
    return nil, ErrNoTracingConfig
  default:
    return nil, fmt.Errorf(`unknown tracing provider %q, expected one of ["otlp"]`, opts.TracingProvider)
  }
}
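Editor's note: a sketch of how a caller might wire the client returned by NewTraceClientFromOptions into an OpenTelemetry SDK tracer provider. The otlptrace/sdktrace/otel calls are standard OpenTelemetry APIs; the setupTracing wrapper, its package name, and the sampler choice are assumptions for illustration only, since the commit's own wiring lives in internal/telemetry/trace.

package example

import (
  "context"

  "go.opentelemetry.io/otel"
  "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
  sdktrace "go.opentelemetry.io/otel/sdk/trace"

  "github.com/pomerium/pomerium/config"
)

// setupTracing builds an OTLP exporter from the configured client and installs
// it as the global tracer provider.
func setupTracing(ctx context.Context, opts *config.Options) (*sdktrace.TracerProvider, error) {
  client, err := config.NewTraceClientFromOptions(opts)
  if err != nil {
    return nil, err // includes config.ErrNoTracingConfig when tracing is unset
  }
  exporter, err := otlptrace.New(ctx, client) // wraps the client in a span exporter
  if err != nil {
    return nil, err
  }
  tp := sdktrace.NewTracerProvider(
    sdktrace.WithBatcher(exporter),
    // sample at the configured rate; 1.0 records every span
    sdktrace.WithSampler(sdktrace.TraceIDRatioBased(opts.TracingSampleRate)),
  )
  otel.SetTracerProvider(tp)
  return tp, nil
}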
@@ -1,161 +0,0 @@ (test file deleted)

Removed:

package config

import (
  "context"
  "encoding/json"
  "net/http"
  "net/http/httptest"
  "net/url"
  "testing"
  "time"

  "github.com/google/go-cmp/cmp"
  "github.com/stretchr/testify/assert"

  "github.com/pomerium/pomerium/internal/telemetry/trace"
)

func Test_NewTracingOptions(t *testing.T) {
  tests := []struct {
    name    string
    opts    *Options
    want    *TracingOptions
    wantErr bool
  }{
    {
      "datadog_good",
      &Options{TracingProvider: "datadog"},
      &TracingOptions{Provider: "datadog", Service: "pomerium"},
      false,
    },
    {
      "jaeger_good",
      &Options{TracingProvider: "jaeger", TracingJaegerAgentEndpoint: "foo", TracingJaegerCollectorEndpoint: "http://foo", Services: ServiceAll},
      &TracingOptions{Provider: "jaeger", JaegerAgentEndpoint: "foo", JaegerCollectorEndpoint: &url.URL{Scheme: "http", Host: "foo"}, Service: "pomerium"},
      false,
    },
    {
      "jaeger_bad",
      &Options{TracingProvider: "jaeger", TracingJaegerAgentEndpoint: "foo", TracingJaegerCollectorEndpoint: "badurl"},
      nil,
      true,
    },
    {
      "zipkin_good",
      &Options{TracingProvider: "zipkin", ZipkinEndpoint: "https://foo/api/v1/spans", Services: ServiceAuthorize},
      &TracingOptions{Provider: "zipkin", ZipkinEndpoint: &url.URL{Scheme: "https", Host: "foo", Path: "/api/v1/spans"}, Service: "pomerium-authorize"},
      false,
    },
    {
      "zipkin_bad",
      &Options{TracingProvider: "zipkin", ZipkinEndpoint: "notaurl"},
      nil,
      true,
    },
    {
      "noprovider",
      &Options{},
      &TracingOptions{},
      false,
    },
    {
      "fakeprovider",
      &Options{TracingProvider: "fake"},
      nil,
      true,
    },
  }

  for _, tt := range tests {
    t.Run(tt.name, func(t *testing.T) {
      got, err := NewTracingOptions(tt.opts)
      assert.NotEqual(t, err == nil, tt.wantErr, "unexpected error value")
      assert.Empty(t, cmp.Diff(tt.want, got))
    })
  }
}

func Test_TracingEnabled(t *testing.T) {
  tests := []struct {
    name string
    opts *TracingOptions
    want bool
  }{
    {"enabled", &TracingOptions{Provider: "zipkin"}, true},
    {"not enabled", &TracingOptions{}, false},
  }
  for _, tt := range tests {
    t.Run(tt.name, func(t *testing.T) {
      assert.Equal(t, tt.want, tt.opts.Enabled(), "unexpected tracing state")
    })
  }
}

func TestTraceManager(t *testing.T) {
  ctx, clearTimeout := context.WithTimeout(context.Background(), time.Second*30)
  defer clearTimeout()

  type Request struct {
    URL  string
    Name string
  }

  incoming := make(chan Request, 100)

  h := http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {
    var objs []struct {
      Name string
    }
    json.NewDecoder(r.Body).Decode(&objs)
    for _, obj := range objs {
      incoming <- Request{Name: obj.Name, URL: r.Host}
    }
  })

  srv1 := httptest.NewServer(h)
  defer srv1.Close()
  srv2 := httptest.NewServer(h)
  defer srv2.Close()

  src := NewStaticSource(&Config{Options: &Options{
    TracingProvider:   "zipkin",
    ZipkinEndpoint:    srv1.URL,
    TracingSampleRate: 1,
  }})

  _ = NewTraceManager(ctx, src)

  _, span := trace.StartSpan(ctx, "Example")
  span.End()

  src.SetConfig(ctx, &Config{Options: &Options{
    TracingProvider:   "zipkin",
    ZipkinEndpoint:    srv2.URL,
    TracingSampleRate: 1,
  }})

  _, span = trace.StartSpan(ctx, "Example")
  span.End()

  expect := map[Request]struct{}{
    {Name: "example", URL: srv1.Listener.Addr().String()}: {},
    {Name: "example", URL: srv2.Listener.Addr().String()}: {},
  }

  for len(expect) > 0 {
    var req Request
    select {
    case <-ctx.Done():
      t.Error("timeout waiting for requests")
      return
    case req = <-incoming:
    }

    if _, ok := expect[req]; ok {
      delete(expect, req)
    } else {
      t.Error("unexpected request", req)
      return
    }
  }
}
@@ -10,6 +10,7 @@ import (
   "time"
 
   "github.com/rs/zerolog"
+  "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
   "golang.org/x/sync/errgroup"
   "google.golang.org/grpc"
   "google.golang.org/grpc/metadata"
@@ -18,7 +19,7 @@ import (
   "github.com/pomerium/pomerium/internal/atomicutil"
   "github.com/pomerium/pomerium/internal/events"
   "github.com/pomerium/pomerium/internal/log"
-  "github.com/pomerium/pomerium/internal/telemetry"
+  "github.com/pomerium/pomerium/internal/telemetry/trace"
   "github.com/pomerium/pomerium/internal/version"
   "github.com/pomerium/pomerium/pkg/cryptutil"
   "github.com/pomerium/pomerium/pkg/envoy/files"
@@ -28,6 +29,7 @@ import (
   "github.com/pomerium/pomerium/pkg/identity"
   "github.com/pomerium/pomerium/pkg/identity/legacymanager"
   "github.com/pomerium/pomerium/pkg/identity/manager"
+  oteltrace "go.opentelemetry.io/otel/trace"
 )
 
 // DataBroker represents the databroker service. The databroker service is a simple interface
@@ -43,6 +45,8 @@ type DataBroker struct {
   localGRPCServer     *grpc.Server
   localGRPCConnection *grpc.ClientConn
   sharedKey           *atomicutil.Value[[]byte]
+  tracerProvider      oteltrace.TracerProvider
+  tracer              oteltrace.Tracer
 }
 
 type Options struct {
@@ -87,9 +91,12 @@ func New(
     ),
   )
 
+  tracerProvider := trace.NewTracerProvider(ctx, "Data Broker")
+  tracer := tracerProvider.Tracer(trace.PomeriumCoreTracer)
   // No metrics handler because we have one in the control plane. Add one
   // if we no longer register with that grpc Server
   localGRPCServer := grpc.NewServer(
+    grpc.StatsHandler(trace.NewServerStatsHandler(otelgrpc.NewServerHandler(otelgrpc.WithTracerProvider(tracerProvider)))),
     grpc.ChainStreamInterceptor(log.StreamServerInterceptor(log.Ctx(ctx)), si),
     grpc.ChainUnaryInterceptor(log.UnaryServerInterceptor(log.Ctx(ctx)), ui),
   )
@@ -100,12 +107,11 @@ func New(
   }
 
   sharedKeyValue := atomicutil.NewValue(sharedKey)
-  clientStatsHandler := telemetry.NewGRPCClientStatsHandler(cfg.Options.Services)
   clientDialOptions := []grpc.DialOption{
     grpc.WithInsecure(),
-    grpc.WithChainUnaryInterceptor(clientStatsHandler.UnaryInterceptor, grpcutil.WithUnarySignedJWT(sharedKeyValue.Load)),
+    grpc.WithChainUnaryInterceptor(grpcutil.WithUnarySignedJWT(sharedKeyValue.Load)),
     grpc.WithChainStreamInterceptor(grpcutil.WithStreamSignedJWT(sharedKeyValue.Load)),
-    grpc.WithStatsHandler(clientStatsHandler.Handler),
+    grpc.WithStatsHandler(otelgrpc.NewClientHandler(otelgrpc.WithTracerProvider(tracerProvider))),
   }
 
   ctx = log.WithContext(ctx, func(c zerolog.Context) zerolog.Context {
@@ -120,7 +126,7 @@ func New(
     return nil, err
   }
 
-  dataBrokerServer, err := newDataBrokerServer(ctx, cfg)
+  dataBrokerServer, err := newDataBrokerServer(ctx, tracerProvider, cfg)
   if err != nil {
     return nil, err
   }
@@ -133,6 +139,8 @@ func New(
     localGRPCConnection: localGRPCConnection,
     sharedKey:           sharedKeyValue,
     eventsMgr:           eventsMgr,
+    tracerProvider:      tracerProvider,
+    tracer:              tracer,
   }
   c.Register(c.localGRPCServer)
 
@@ -202,7 +210,7 @@ func (c *DataBroker) update(ctx context.Context, cfg *config.Config) error {
   }, c.legacyManagerOptions...)
 
   if cfg.Options.SupportsUserRefresh() {
-    authenticator, err := identity.NewAuthenticator(oauthOptions)
+    authenticator, err := identity.NewAuthenticator(ctx, c.tracerProvider, oauthOptions)
     if err != nil {
       log.Ctx(ctx).Error().Err(err).Msg("databroker: failed to create authenticator")
     } else {
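Editor's note: the hunks above swap the OpenCensus gRPC stats handler for otelgrpc handlers on both the local gRPC server and the client dial options. A minimal, self-contained sketch of that pattern follows; the function and target names are illustrative, and grpc.NewClient assumes grpc-go >= 1.63.

package example

import (
  "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
  oteltrace "go.opentelemetry.io/otel/trace"
  "google.golang.org/grpc"
  "google.golang.org/grpc/credentials/insecure"
)

// newInstrumentedServer attaches an otelgrpc server stats handler so every
// handled RPC produces a span on the given tracer provider.
func newInstrumentedServer(tp oteltrace.TracerProvider) *grpc.Server {
  return grpc.NewServer(
    grpc.StatsHandler(otelgrpc.NewServerHandler(otelgrpc.WithTracerProvider(tp))),
  )
}

// dialInstrumented does the same on the client side, so outgoing RPCs carry
// trace context to the server.
func dialInstrumented(target string, tp oteltrace.TracerProvider) (*grpc.ClientConn, error) {
  return grpc.NewClient(target,
    grpc.WithTransportCredentials(insecure.NewCredentials()),
    grpc.WithStatsHandler(otelgrpc.NewClientHandler(otelgrpc.WithTracerProvider(tp))),
  )
}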
@@ -5,6 +5,7 @@ import (
   "context"
   "fmt"
 
+  oteltrace "go.opentelemetry.io/otel/trace"
   "google.golang.org/protobuf/types/known/emptypb"
 
   "github.com/pomerium/pomerium/config"
@@ -23,7 +24,7 @@ type dataBrokerServer struct {
 }
 
 // newDataBrokerServer creates a new databroker service server.
-func newDataBrokerServer(ctx context.Context, cfg *config.Config) (*dataBrokerServer, error) {
+func newDataBrokerServer(ctx context.Context, tracerProvider oteltrace.TracerProvider, cfg *config.Config) (*dataBrokerServer, error) {
   srv := &dataBrokerServer{
     sharedKey: atomicutil.NewValue([]byte{}),
   }
@@ -33,7 +34,7 @@
     return nil, err
   }
 
-  srv.server = databroker.New(ctx, opts...)
+  srv.server = databroker.New(ctx, tracerProvider, opts...)
   srv.setKey(cfg)
   return srv, nil
 }
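Editor's note: newDataBrokerServer now threads an oteltrace.TracerProvider through to databroker.New. A small sketch of the general pattern, with illustrative names only (not the actual Pomerium types):

package example

import (
  "context"

  oteltrace "go.opentelemetry.io/otel/trace"
)

// server demonstrates the plumbing used above: constructors accept a
// TracerProvider, derive a named Tracer once, and use it to start spans.
type server struct {
  tracer oteltrace.Tracer
}

func newServer(tp oteltrace.TracerProvider) *server {
  return &server{tracer: tp.Tracer("example/databroker")}
}

func (s *server) handle(ctx context.Context) {
  ctx, span := s.tracer.Start(ctx, "server.handle")
  defer span.End()
  _ = ctx // pass ctx to downstream calls so child spans join this trace
}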
@@ -8,6 +8,7 @@ import (
   "github.com/stretchr/testify/assert"
   "github.com/stretchr/testify/require"
+  "go.opentelemetry.io/otel/trace"
   "google.golang.org/grpc"
   "google.golang.org/grpc/codes"
   "google.golang.org/grpc/status"
@@ -29,7 +30,7 @@ var lis *bufconn.Listener
 func init() {
   lis = bufconn.Listen(bufSize)
   s := grpc.NewServer()
-  internalSrv := internal_databroker.New(context.Background())
+  internalSrv := internal_databroker.New(context.Background(), trace.NewNoopTracerProvider())
   srv := &dataBrokerServer{server: internalSrv, sharedKey: atomicutil.NewValue([]byte{})}
   databroker.RegisterDataBrokerServiceServer(s, srv)
go.mod (29 lines changed)
@@ -4,11 +4,8 @@ go 1.23.0
 require (
   cloud.google.com/go/storage v1.49.0
-  contrib.go.opencensus.io/exporter/jaeger v0.2.1
   contrib.go.opencensus.io/exporter/prometheus v0.4.2
-  contrib.go.opencensus.io/exporter/zipkin v0.1.2
   github.com/CAFxX/httpcompression v0.0.9
-  github.com/DataDog/opencensus-go-exporter-datadog v0.0.0-20200406135749-5c268882acf0
   github.com/VictoriaMetrics/fastcache v1.12.2
   github.com/aws/aws-sdk-go-v2 v1.32.7
   github.com/aws/aws-sdk-go-v2/config v1.28.7
@@ -46,7 +43,6 @@ require (
   github.com/natefinch/atomic v1.0.1
   github.com/oapi-codegen/runtime v1.1.1
   github.com/open-policy-agent/opa v1.0.0
-  github.com/openzipkin/zipkin-go v0.4.3
   github.com/peterbourgon/ff/v3 v3.4.0
   github.com/pires/go-proxyproto v0.8.0
   github.com/pomerium/csrf v1.7.0
@@ -70,15 +66,20 @@ require (
   github.com/volatiletech/null/v9 v9.0.0
   github.com/yuin/gopher-lua v1.1.1
   go.opencensus.io v0.24.0
+  go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.57.0
+  go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0
+  go.opentelemetry.io/contrib/propagators/autoprop v0.57.0
   go.opentelemetry.io/otel v1.33.0
-  go.opentelemetry.io/otel/bridge/opencensus v1.33.0
+  go.opentelemetry.io/otel/bridge/opencensus v1.32.0
-  go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.33.0
+  go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0
   go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0
   go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0
+  go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0
   go.opentelemetry.io/otel/metric v1.33.0
   go.opentelemetry.io/otel/sdk v1.33.0
-  go.opentelemetry.io/otel/sdk/metric v1.33.0
+  go.opentelemetry.io/otel/sdk/metric v1.32.0
   go.opentelemetry.io/otel/trace v1.33.0
+  go.opentelemetry.io/proto/otlp v1.4.0
   go.uber.org/automaxprocs v1.6.0
   go.uber.org/mock v0.5.0
   go.uber.org/zap v1.27.0
@@ -107,7 +108,6 @@ require (
   cloud.google.com/go/monitoring v1.21.2 // indirect
   dario.cat/mergo v1.0.0 // indirect
   github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
-  github.com/DataDog/datadog-go v3.5.0+incompatible // indirect
   github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 // indirect
   github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect
   github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect
@@ -195,7 +195,7 @@ require (
   github.com/opencontainers/go-digest v1.0.0 // indirect
   github.com/opencontainers/image-spec v1.1.0 // indirect
   github.com/pelletier/go-toml/v2 v2.2.2 // indirect
-  github.com/philhofer/fwd v1.1.2 // indirect
+  github.com/pierrec/lz4/v4 v4.1.21 // indirect
   github.com/pkg/errors v0.9.1 // indirect
   github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
   github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@@ -218,10 +218,8 @@ require (
   github.com/tchap/go-patricia/v2 v2.3.1 // indirect
   github.com/tidwall/match v1.1.1 // indirect
   github.com/tidwall/pretty v1.2.0 // indirect
-  github.com/tinylib/msgp v1.1.8 // indirect
   github.com/tklauser/go-sysconf v0.3.14 // indirect
   github.com/tklauser/numcpus v0.8.0 // indirect
-  github.com/uber/jaeger-client-go v2.25.0+incompatible // indirect
   github.com/x448/float16 v0.8.4 // indirect
   github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
   github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
@@ -232,10 +230,10 @@ require (
   github.com/zeebo/xxh3 v1.0.2 // indirect
   go.opentelemetry.io/auto/sdk v1.1.0 // indirect
   go.opentelemetry.io/contrib/detectors/gcp v1.31.0 // indirect
-  go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect
-  go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
-  go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 // indirect
-  go.opentelemetry.io/proto/otlp v1.4.0 // indirect
+  go.opentelemetry.io/contrib/propagators/aws v1.32.0 // indirect
+  go.opentelemetry.io/contrib/propagators/b3 v1.32.0 // indirect
+  go.opentelemetry.io/contrib/propagators/jaeger v1.32.0 // indirect
+  go.opentelemetry.io/contrib/propagators/ot v1.32.0 // indirect
   go.uber.org/multierr v1.11.0 // indirect
   golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect
   golang.org/x/mod v0.20.0 // indirect
@@ -243,7 +241,6 @@ require (
   golang.org/x/tools v0.24.0 // indirect
   google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect
   google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect
-  gopkg.in/DataDog/dd-trace-go.v1 v1.22.0 // indirect
   gopkg.in/ini.v1 v1.67.0 // indirect
   gopkg.in/yaml.v2 v2.4.0 // indirect
 )
go.sum
77
go.sum
|
@ -52,12 +52,8 @@ cloud.google.com/go/storage v1.49.0 h1:zenOPBOWHCnojRd9aJZAyQXBYqkJkdQS42dxL55CI
|
||||||
cloud.google.com/go/storage v1.49.0/go.mod h1:k1eHhhpLvrPjVGfo0mOUPEJ4Y2+a/Hv5PiwehZI9qGU=
|
cloud.google.com/go/storage v1.49.0/go.mod h1:k1eHhhpLvrPjVGfo0mOUPEJ4Y2+a/Hv5PiwehZI9qGU=
|
||||||
cloud.google.com/go/trace v1.11.2 h1:4ZmaBdL8Ng/ajrgKqY5jfvzqMXbrDcBsUGXOT9aqTtI=
|
cloud.google.com/go/trace v1.11.2 h1:4ZmaBdL8Ng/ajrgKqY5jfvzqMXbrDcBsUGXOT9aqTtI=
|
||||||
cloud.google.com/go/trace v1.11.2/go.mod h1:bn7OwXd4pd5rFuAnTrzBuoZ4ax2XQeG3qNgYmfCy0Io=
|
cloud.google.com/go/trace v1.11.2/go.mod h1:bn7OwXd4pd5rFuAnTrzBuoZ4ax2XQeG3qNgYmfCy0Io=
|
||||||
contrib.go.opencensus.io/exporter/jaeger v0.2.1 h1:yGBYzYMewVL0yO9qqJv3Z5+IRhPdU7e9o/2oKpX4YvI=
|
|
||||||
contrib.go.opencensus.io/exporter/jaeger v0.2.1/go.mod h1:Y8IsLgdxqh1QxYxPC5IgXVmBaeLUeQFfBeBi9PbeZd0=
|
|
||||||
contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg=
|
contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg=
|
||||||
contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9fpw1KeYcjrnC1J8B+JKjsZyRQ=
|
contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9fpw1KeYcjrnC1J8B+JKjsZyRQ=
|
||||||
contrib.go.opencensus.io/exporter/zipkin v0.1.2 h1:YqE293IZrKtqPnpwDPH/lOqTWD/s3Iwabycam74JV3g=
|
|
||||||
contrib.go.opencensus.io/exporter/zipkin v0.1.2/go.mod h1:mP5xM3rrgOjpn79MM8fZbj3gsxcuytSqtH0dxSWW1RE=
|
|
||||||
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
|
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
|
||||||
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||||
|
@ -69,10 +65,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
|
||||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||||
github.com/CAFxX/httpcompression v0.0.9 h1:0ue2X8dOLEpxTm8tt+OdHcgA+gbDge0OqFQWGKSqgrg=
|
github.com/CAFxX/httpcompression v0.0.9 h1:0ue2X8dOLEpxTm8tt+OdHcgA+gbDge0OqFQWGKSqgrg=
|
||||||
github.com/CAFxX/httpcompression v0.0.9/go.mod h1:XX8oPZA+4IDcfZ0A71Hz0mZsv/YJOgYygkFhizVPilM=
|
github.com/CAFxX/httpcompression v0.0.9/go.mod h1:XX8oPZA+4IDcfZ0A71Hz0mZsv/YJOgYygkFhizVPilM=
|
||||||
github.com/DataDog/datadog-go v3.5.0+incompatible h1:AShr9cqkF+taHjyQgcBcQUt/ZNK+iPq4ROaZwSX5c/U=
|
|
||||||
github.com/DataDog/datadog-go v3.5.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
|
||||||
github.com/DataDog/opencensus-go-exporter-datadog v0.0.0-20200406135749-5c268882acf0 h1:Y6HFfo8UuntPOpfmUmLb0o3MNYKfUuH2aNmvypsDbY4=
|
|
||||||
github.com/DataDog/opencensus-go-exporter-datadog v0.0.0-20200406135749-5c268882acf0/go.mod h1:/VV3EFO/hTNQZHAqaj+CPGy2+ioFrP4EX3iRwozubhQ=
|
|
||||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 h1:3c8yed4lgqTt+oTQ+JNMDo+F4xprBf+O/il4ZC0nRLw=
|
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 h1:3c8yed4lgqTt+oTQ+JNMDo+F4xprBf+O/il4ZC0nRLw=
|
||||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM=
|
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM=
|
||||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s=
|
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s=
|
||||||
|
@ -86,8 +78,6 @@ github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA
|
||||||
github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
|
github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
|
||||||
github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
|
github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
|
||||||
github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk=
|
github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk=
|
||||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
|
||||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
|
||||||
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
|
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
|
||||||
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
|
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
|
||||||
github.com/agnivade/levenshtein v1.2.0 h1:U9L4IOT0Y3i0TIlUIDJ7rVUziKi/zPbrJGaFrtYH3SY=
|
github.com/agnivade/levenshtein v1.2.0 h1:U9L4IOT0Y3i0TIlUIDJ7rVUziKi/zPbrJGaFrtYH3SY=
|
||||||
|
@ -206,10 +196,6 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4
|
||||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
|
||||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
|
||||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
|
||||||
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
|
|
||||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||||
|
@ -277,9 +263,7 @@ github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJA
|
||||||
github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
|
github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
|
||||||
github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
||||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||||
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
|
|
||||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
|
||||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||||
|
@ -316,7 +300,6 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS
|
||||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
|
||||||
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
|
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
|
||||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||||
github.com/google/brotli/go/cbrotli v0.0.0-20230829110029-ed738e842d2f h1:jopqB+UTSdJGEJT8tEqYyE29zN91fi2827oLET8tl7k=
|
github.com/google/brotli/go/cbrotli v0.0.0-20230829110029-ed738e842d2f h1:jopqB+UTSdJGEJT8tEqYyE29zN91fi2827oLET8tl7k=
|
||||||
|
@ -372,8 +355,6 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
|
||||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||||
github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o=
|
github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o=
|
||||||
github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk=
|
github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk=
|
||||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
|
||||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
|
||||||
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||||
github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=
|
github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=
|
||||||
|
@ -450,7 +431,6 @@ github.com/libdns/libdns v0.2.2 h1:O6ws7bAfRPaBsgAYt8MDe2HcNBGC29hkZ9MX2eUSX3s=
|
||||||
github.com/libdns/libdns v0.2.2/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ=
|
github.com/libdns/libdns v0.2.2/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ=
|
||||||
github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae h1:dIZY4ULFcto4tAFlj1FYZl8ztUZ13bdq+PLY+NOfbyI=
|
github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae h1:dIZY4ULFcto4tAFlj1FYZl8ztUZ13bdq+PLY+NOfbyI=
|
||||||
github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
|
github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
|
||||||
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
|
|
||||||
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
|
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
|
||||||
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
||||||
github.com/martinlindhe/base36 v1.1.1 h1:1F1MZ5MGghBXDZ2KJ3QfxmiydlWOGB8HCEtkap5NkVg=
|
github.com/martinlindhe/base36 v1.1.1 h1:1F1MZ5MGghBXDZ2KJ3QfxmiydlWOGB8HCEtkap5NkVg=
|
||||||
|
@ -505,13 +485,11 @@ github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+
|
||||||
github.com/oapi-codegen/runtime v1.1.1 h1:EXLHh0DXIJnWhdRPN2w4MXAzFyE4CskzhNLUmtpMYro=
|
github.com/oapi-codegen/runtime v1.1.1 h1:EXLHh0DXIJnWhdRPN2w4MXAzFyE4CskzhNLUmtpMYro=
|
||||||
github.com/oapi-codegen/runtime v1.1.1/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg=
|
github.com/oapi-codegen/runtime v1.1.1/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg=
|
||||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
|
||||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||||
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
||||||
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
||||||
github.com/onsi/ginkgo/v2 v2.19.1 h1:QXgq3Z8Crl5EL1WBAC98A5sEBHARrAJNzAmMxzLcRF0=
|
github.com/onsi/ginkgo/v2 v2.19.1 h1:QXgq3Z8Crl5EL1WBAC98A5sEBHARrAJNzAmMxzLcRF0=
|
||||||
github.com/onsi/ginkgo/v2 v2.19.1/go.mod h1:O3DtEWQkPa/F7fBMgmZQKKsluAy8pd3rEQdrjkPb9zA=
|
github.com/onsi/ginkgo/v2 v2.19.1/go.mod h1:O3DtEWQkPa/F7fBMgmZQKKsluAy8pd3rEQdrjkPb9zA=
|
||||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
|
||||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||||
github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
|
github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
|
||||||
|
@ -522,18 +500,10 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
|
||||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||||
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
|
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
|
||||||
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
|
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
|
||||||
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
|
|
||||||
github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7sjsSdg=
|
|
||||||
github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c=
|
|
||||||
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
|
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
|
||||||
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
|
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
|
||||||
github.com/peterbourgon/ff/v3 v3.4.0 h1:QBvM/rizZM1cB0p0lGMdmR7HxZeI/ZrBWB4DqLkMUBc=
|
github.com/peterbourgon/ff/v3 v3.4.0 h1:QBvM/rizZM1cB0p0lGMdmR7HxZeI/ZrBWB4DqLkMUBc=
|
||||||
github.com/peterbourgon/ff/v3 v3.4.0/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ=
|
github.com/peterbourgon/ff/v3 v3.4.0/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ=
|
||||||
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
|
|
||||||
github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
|
|
||||||
github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
|
|
||||||
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1 h1:VGcrWe3yk6o+t7BdVNy5UDPWa4OZuDWtE1W1ZbS7Kyw=
|
|
||||||
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
|
|
||||||
github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||||
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
|
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
|
||||||
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||||
|
@ -543,7 +513,6 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
|
||||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
|
|
||||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
|
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
|
||||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
|
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
@ -598,7 +567,6 @@ github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
|
||||||
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
|
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
|
||||||
github.com/quic-go/quic-go v0.48.2 h1:wsKXZPeGWpMpCGSWqOcqpW2wZYic/8T3aqiOID0/KWE=
|
github.com/quic-go/quic-go v0.48.2 h1:wsKXZPeGWpMpCGSWqOcqpW2wZYic/8T3aqiOID0/KWE=
|
||||||
github.com/quic-go/quic-go v0.48.2/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs=
|
github.com/quic-go/quic-go v0.48.2/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs=
|
||||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
|
||||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
|
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
|
||||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||||
|
@ -644,7 +612,6 @@ github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+
|
||||||
github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0=
|
github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0=
|
||||||
github.com/sryoya/protorand v0.0.0-20240429201223-e7440656b2a4 h1:/jKH9ivHOUkahZs3zPfJfOmkXDFB6OdsHZ4W8gyDb/c=
|
github.com/sryoya/protorand v0.0.0-20240429201223-e7440656b2a4 h1:/jKH9ivHOUkahZs3zPfJfOmkXDFB6OdsHZ4W8gyDb/c=
|
||||||
github.com/sryoya/protorand v0.0.0-20240429201223-e7440656b2a4/go.mod h1:9a23nlv6vzBeVlQq6JQCjljZ6sfzsB6aha1m5Ly1W2Y=
|
github.com/sryoya/protorand v0.0.0-20240429201223-e7440656b2a4/go.mod h1:9a23nlv6vzBeVlQq6JQCjljZ6sfzsB6aha1m5Ly1W2Y=
|
||||||
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||||
|
@ -677,17 +644,12 @@ github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
|
||||||
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
||||||
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
|
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
|
||||||
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||||
github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
|
||||||
github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0=
|
|
||||||
github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw=
|
|
||||||
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
|
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
|
||||||
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
|
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
|
||||||
github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
|
github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
|
||||||
github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
|
github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
|
||||||
github.com/tniswong/go.rfcx v0.0.0-20181019234604-07783c52761f h1:C43EMGXFtvYf/zunHR6ivZV7Z6ytg73t0GXwYyicXMQ=
|
github.com/tniswong/go.rfcx v0.0.0-20181019234604-07783c52761f h1:C43EMGXFtvYf/zunHR6ivZV7Z6ytg73t0GXwYyicXMQ=
|
||||||
github.com/tniswong/go.rfcx v0.0.0-20181019234604-07783c52761f/go.mod h1:N+sR0vLSCTtI6o06PMWsjMB4TVqqDttKNq4iC9wvxVY=
|
github.com/tniswong/go.rfcx v0.0.0-20181019234604-07783c52761f/go.mod h1:N+sR0vLSCTtI6o06PMWsjMB4TVqqDttKNq4iC9wvxVY=
|
||||||
github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U=
|
|
||||||
github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
|
|
||||||
github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
|
github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
|
||||||
github.com/valyala/gozstd v1.20.1 h1:xPnnnvjmaDDitMFfDxmQ4vpx0+3CdTg2o3lALvXTU/g=
|
github.com/valyala/gozstd v1.20.1 h1:xPnnnvjmaDDitMFfDxmQ4vpx0+3CdTg2o3lALvXTU/g=
|
||||||
github.com/valyala/gozstd v1.20.1/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ=
|
github.com/valyala/gozstd v1.20.1/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ=
|
||||||
|
@ -730,30 +692,40 @@ go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJyS
|
||||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||||
go.opentelemetry.io/contrib/detectors/gcp v1.31.0 h1:G1JQOreVrfhRkner+l4mrGxmfqYCAuy76asTDAo0xsA=
|
go.opentelemetry.io/contrib/detectors/gcp v1.31.0 h1:G1JQOreVrfhRkner+l4mrGxmfqYCAuy76asTDAo0xsA=
|
||||||
go.opentelemetry.io/contrib/detectors/gcp v1.31.0/go.mod h1:tzQL6E1l+iV44YFTkcAeNQqzXUiekSYP9jjJjXwEd00=
|
go.opentelemetry.io/contrib/detectors/gcp v1.31.0/go.mod h1:tzQL6E1l+iV44YFTkcAeNQqzXUiekSYP9jjJjXwEd00=
|
||||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc=
|
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.57.0 h1:qtFISDHKolvIxzSs0gIaiPUPR0Cucb0F2coHC7ZLdps=
|
||||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI=
|
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.57.0/go.mod h1:Y+Pop1Q6hCOnETWTW4NROK/q1hv50hM7yDaUTjG8lp8=
|
||||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
|
||||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
|
||||||
|
go.opentelemetry.io/contrib/propagators/autoprop v0.57.0 h1:bNPJOdT5154XxzeFmrh8R+PXnV4t3TZEczy8gHEpcpg=
|
||||||
|
go.opentelemetry.io/contrib/propagators/autoprop v0.57.0/go.mod h1:Tb0j0mK+QatKdCxCKPN7CSzc7kx/q34/KaohJx/N96s=
|
||||||
|
go.opentelemetry.io/contrib/propagators/aws v1.32.0 h1:NELzr8bW7a7aHVZj5gaep1PfkvoSCGx+1qNGZx/uhhU=
|
||||||
|
go.opentelemetry.io/contrib/propagators/aws v1.32.0/go.mod h1:XKMrzHNka3eOA+nGEcNKYVL9s77TAhkwQEynYuaRFnQ=
|
||||||
|
go.opentelemetry.io/contrib/propagators/b3 v1.32.0 h1:MazJBz2Zf6HTN/nK/s3Ru1qme+VhWU5hm83QxEP+dvw=
|
||||||
|
go.opentelemetry.io/contrib/propagators/b3 v1.32.0/go.mod h1:B0s70QHYPrJwPOwD1o3V/R8vETNOG9N3qZf4LDYvA30=
|
||||||
|
go.opentelemetry.io/contrib/propagators/jaeger v1.32.0 h1:K/fOyTMD6GELKTIJBaJ9k3ppF2Njt8MeUGBOwfaWXXA=
|
||||||
|
go.opentelemetry.io/contrib/propagators/jaeger v1.32.0/go.mod h1:ISE6hda//MTWvtngG7p4et3OCngsrTVfl7c6DjN17f8=
|
||||||
|
go.opentelemetry.io/contrib/propagators/ot v1.32.0 h1:Poy02A4wOZubHyd2hpHPDgZW+rn6EIq0vCwTZJ6Lmu8=
|
||||||
|
go.opentelemetry.io/contrib/propagators/ot v1.32.0/go.mod h1:cbhaURV+VR3NIMarzDYZU1RDEkXG1fNd1WMP1XCcGkY=
|
||||||
go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
|
go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
|
||||||
go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
|
go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
|
||||||
go.opentelemetry.io/otel/bridge/opencensus v1.33.0 h1:sGcK0Wif7sPG3GZG9z8b+tpRZiUHwv27WgmsaZ1wgzM=
|
go.opentelemetry.io/otel/bridge/opencensus v1.32.0 h1:OVbbFgPG60UolI8ZUs+Z75NnKiO0C9QltXBrqUDImS0=
|
||||||
go.opentelemetry.io/otel/bridge/opencensus v1.33.0/go.mod h1:LXJy68HiJRu+2yJmVnbDn/F9JS9Kxfsj5WpA5t5NfRY=
|
go.opentelemetry.io/otel/bridge/opencensus v1.32.0/go.mod h1:J5SEiJNu6zzqpcA6+AVpxUKzxNocUMsefgHRpS8zdW8=
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.33.0 h1:7F29RDmnlqk6B5d+sUqemt8TBfDqxryYW5gX6L74RFA=
|
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0=
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.33.0/go.mod h1:ZiGDq7xwDMKmWDrN1XsXAj0iC7hns+2DhxBFSncNHSE=
|
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8=
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM=
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM=
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA=
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA=
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU=
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw=
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk=
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI=
|
||||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc=
|
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc=
|
||||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I=
|
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I=
|
||||||
go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
|
go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
|
||||||
go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
|
go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
|
||||||
go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
|
go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
|
||||||
go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
|
go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
|
||||||
go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU=
|
go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU=
|
||||||
go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q=
|
go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ=
|
||||||
go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
|
go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
|
||||||
go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
|
go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
|
||||||
go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
|
go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
|
||||||
|
@ -811,7 +783,6 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
|
||||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||||
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
|
||||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||||
golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
|
golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
|
||||||
golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||||
|
@ -852,7 +823,6 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx
|
||||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||||
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
|
||||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||||
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
|
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
|
||||||
|
@ -935,7 +905,6 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
@ -946,7 +915,6 @@ golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
|
||||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
|
|
||||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||||
|
@ -959,7 +927,6 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||||
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
|
||||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||||
|
@ -1014,7 +981,6 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc
|
||||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||||
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
|
|
||||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||||
golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
|
golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
|
||||||
golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
|
golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
|
||||||
|
@ -1082,7 +1048,6 @@ google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY=
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY=
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
|
||||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
|
||||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||||
|
@ -1113,8 +1078,6 @@ google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw
|
||||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||||
google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
|
google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
|
||||||
google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||||
gopkg.in/DataDog/dd-trace-go.v1 v1.22.0 h1:gpWsqqkwUldNZXGJqT69NU9MdEDhLboK1C4nMgR0MWw=
|
|
||||||
gopkg.in/DataDog/dd-trace-go.v1 v1.22.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg=
|
|
||||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
|
|
@@ -33,3 +33,14 @@ func (v *Value[T]) Load() T {
 func (v *Value[T]) Store(val T) {
 	v.value.Store(val)
 }
+
+// Swap swaps the value atomically.
+func (v *Value[T]) Swap(val T) T {
+	old, _ := v.value.Swap(val).(T)
+	return old
+}
+
+// CompareAndSwap executes the compare-and-swap operation for the value atomically.
+func (v *Value[T]) CompareAndSwap(old, new T) bool {
+	return v.value.CompareAndSwap(old, new)
+}
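The two new methods round out the generic atomic wrapper. As a rough sketch of how they behave, written as a test in the same package so it can use the existing NewValue constructor (the values are illustrative only):

package atomicutil

import "testing"

func TestSwapAndCompareAndSwap_sketch(t *testing.T) {
	v := NewValue("old")

	// Swap stores the new value and returns whatever was there before.
	if prev := v.Swap("new"); prev != "old" {
		t.Fatalf("expected previous value %q, got %q", "old", prev)
	}

	// CompareAndSwap only succeeds while the current value still matches.
	if !v.CompareAndSwap("new", "newer") {
		t.Fatal("expected compare-and-swap to succeed")
	}
	if v.CompareAndSwap("new", "unused") {
		t.Fatal("expected compare-and-swap with a stale value to fail")
	}
	if got := v.Load(); got != "newer" {
		t.Fatalf("expected %q, got %q", "newer", got)
	}
}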
@@ -4,11 +4,17 @@
 package authenticateflow

 import (
+	"context"
 	"fmt"
 	"time"

+	oteltrace "go.opentelemetry.io/otel/trace"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
 	"google.golang.org/protobuf/types/known/structpb"

+	"github.com/pomerium/pomerium/internal/telemetry/trace"
 	"github.com/pomerium/pomerium/pkg/grpc"
 	"github.com/pomerium/pomerium/pkg/grpc/user"
 	"github.com/pomerium/pomerium/pkg/identity"
@@ -33,3 +39,23 @@ func populateUserFromClaims(u *user.User, claims map[string]any) {
 		u.Claims[k] = vs
 	}
 }
+
+var outboundDatabrokerTraceClientOpts = []trace.ClientStatsHandlerOption{
+	trace.WithStatsInterceptor(ignoreNotFoundErrors),
+}
+
+func ignoreNotFoundErrors(ctx context.Context, rs stats.RPCStats) stats.RPCStats {
+	if end, ok := rs.(*stats.End); ok && end.IsClient() {
+		if status.Code(end.Error) == codes.NotFound {
+			oteltrace.SpanFromContext(ctx).AddEvent("status code: NotFound")
+			return &stats.End{
+				Client:    end.Client,
+				BeginTime: end.BeginTime,
+				EndTime:   end.EndTime,
+				Trailer:   end.Trailer,
+				Error:     nil,
+			}
+		}
+	}
+	return rs
+}
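The interceptor above rewrites the client-side End stats for outbound databroker RPCs so that NotFound, an expected outcome for record lookups, is recorded as a span event rather than an RPC error. A minimal sketch of exercising it directly, as a unit test in the same package (the messages and test name are illustrative):

package authenticateflow

import (
	"context"
	"testing"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/stats"
	"google.golang.org/grpc/status"
)

func TestIgnoreNotFoundErrors_sketch(t *testing.T) {
	// A client-side End carrying a NotFound error comes back with the error cleared,
	// so downstream tracing handlers will not mark the span as failed.
	in := &stats.End{Client: true, Error: status.Error(codes.NotFound, "record not found")}
	out, ok := ignoreNotFoundErrors(context.Background(), in).(*stats.End)
	if !ok || out.Error != nil {
		t.Fatalf("expected NotFound to be stripped, got %+v", out)
	}

	// Any other error passes through untouched.
	in = &stats.End{Client: true, Error: status.Error(codes.Internal, "boom")}
	if got := ignoreNotFoundErrors(context.Background(), in); got != in {
		t.Fatal("expected non-NotFound stats to pass through unchanged")
	}
}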
@@ -9,7 +9,11 @@ import (
 	"net/url"
 	"time"

+	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+	"go.opentelemetry.io/otel"
+	oteltrace "go.opentelemetry.io/otel/trace"
 	"golang.org/x/oauth2"
+	googlegrpc "google.golang.org/grpc"
 	"google.golang.org/protobuf/types/known/timestamppb"

 	"github.com/pomerium/pomerium/config"
@@ -19,6 +23,7 @@ import (
 	"github.com/pomerium/pomerium/internal/httputil"
 	"github.com/pomerium/pomerium/internal/log"
 	"github.com/pomerium/pomerium/internal/sessions"
+	"github.com/pomerium/pomerium/internal/telemetry/trace"
 	"github.com/pomerium/pomerium/internal/urlutil"
 	"github.com/pomerium/pomerium/pkg/cryptutil"
 	"github.com/pomerium/pomerium/pkg/grpc"
@@ -56,7 +61,7 @@ type Stateful struct {

 // NewStateful initializes the authentication flow for the given configuration
 // and session store.
-func NewStateful(ctx context.Context, cfg *config.Config, sessionStore sessions.SessionStore) (*Stateful, error) {
+func NewStateful(ctx context.Context, tracerProvider oteltrace.TracerProvider, cfg *config.Config, sessionStore sessions.SessionStore) (*Stateful, error) {
 	s := &Stateful{
 		sessionDuration: cfg.Options.CookieExpire,
 		sessionStore:    sessionStore,
@@ -94,7 +99,10 @@ func NewStateful(ctx context.Context, cfg *config.Config, sessionStore sessions.
 		InstallationID: cfg.Options.InstallationID,
 		ServiceName:    cfg.Options.Services,
 		SignedJWTKey:   s.sharedKey,
-	})
+	}, googlegrpc.WithStatsHandler(trace.NewClientStatsHandler(
+		otelgrpc.NewClientHandler(otelgrpc.WithTracerProvider(tracerProvider)),
+		outboundDatabrokerTraceClientOpts...,
+	)))
 	if err != nil {
 		return nil, err
 	}
@@ -316,7 +324,7 @@ func (s *Stateful) LogAuthenticateEvent(*http.Request) {}
 // AuthenticateSignInURL returns a URL to redirect the user to the authenticate
 // domain.
 func (s *Stateful) AuthenticateSignInURL(
-	_ context.Context, queryParams url.Values, redirectURL *url.URL, idpID string,
+	ctx context.Context, queryParams url.Values, redirectURL *url.URL, idpID string,
 ) (string, error) {
 	signinURL := s.authenticateURL.ResolveReference(&url.URL{
 		Path: "/.pomerium/sign_in",
@@ -327,6 +335,7 @@ func (s *Stateful) AuthenticateSignInURL(
 	}
 	queryParams.Set(urlutil.QueryRedirectURI, redirectURL.String())
 	queryParams.Set(urlutil.QueryIdentityProviderID, idpID)
+	otel.GetTextMapPropagator().Inject(ctx, trace.PomeriumURLQueryCarrier(queryParams))
 	signinURL.RawQuery = queryParams.Encode()
 	redirectTo := urlutil.NewSignedURL(s.sharedKey, signinURL).String()
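Both sign-in flows now inject the current trace context into the redirect's query parameters, so the authenticate service can continue the same trace when the signed sign-in URL arrives. A rough sketch of what the receiving side could look like, assuming PomeriumURLQueryCarrier wraps url.Values as a propagation.TextMapCarrier (mirroring the Inject call above); the helper name here is hypothetical:

package authenticateflow

import (
	"context"
	"net/http"

	"go.opentelemetry.io/otel"

	"github.com/pomerium/pomerium/internal/telemetry/trace"
)

// withPropagatedTrace extracts whatever trace context the proxy injected into
// the sign-in URL's query parameters and returns a context carrying it, so
// spans started while handling the request join the original trace.
func withPropagatedTrace(r *http.Request) context.Context {
	return otel.GetTextMapPropagator().Extract(r.Context(), trace.PomeriumURLQueryCarrier(r.URL.Query()))
}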
@@ -15,6 +15,7 @@ import (
 	"github.com/go-jose/go-jose/v3/jwt"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	"go.opentelemetry.io/otel/trace"
 	"go.uber.org/mock/gomock"
 	"golang.org/x/oauth2"
 	"google.golang.org/grpc"
@@ -69,7 +70,7 @@ func TestStatefulSignIn(t *testing.T) {
 		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
 			sessionStore := &mstore.Store{SaveError: tt.saveError}
-			flow, err := NewStateful(context.Background(), &config.Config{Options: opts}, sessionStore)
+			flow, err := NewStateful(context.Background(), trace.NewNoopTracerProvider(), &config.Config{Options: opts}, sessionStore)
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -123,12 +124,12 @@ func TestStatefulAuthenticateSignInURL(t *testing.T) {
 	opts.AuthenticateURLString = "https://authenticate.example.com"
 	key := cryptutil.NewKey()
 	opts.SharedKey = base64.StdEncoding.EncodeToString(key)
-	flow, err := NewStateful(context.Background(), &config.Config{Options: opts}, nil)
+	flow, err := NewStateful(context.Background(), trace.NewNoopTracerProvider(), &config.Config{Options: opts}, nil)
 	require.NoError(t, err)

 	t.Run("NilQueryParams", func(t *testing.T) {
 		redirectURL := &url.URL{Scheme: "https", Host: "example.com"}
-		u, err := flow.AuthenticateSignInURL(nil, nil, redirectURL, "fake-idp-id")
+		u, err := flow.AuthenticateSignInURL(context.Background(), nil, redirectURL, "fake-idp-id")
 		assert.NoError(t, err)
 		parsed, _ := url.Parse(u)
 		assert.NoError(t, urlutil.NewSignedURL(key, parsed).Validate())
@@ -143,7 +144,7 @@ func TestStatefulAuthenticateSignInURL(t *testing.T) {
 		redirectURL := &url.URL{Scheme: "https", Host: "example.com"}
 		q := url.Values{}
 		q.Set("foo", "bar")
-		u, err := flow.AuthenticateSignInURL(nil, q, redirectURL, "fake-idp-id")
+		u, err := flow.AuthenticateSignInURL(context.Background(), q, redirectURL, "fake-idp-id")
 		assert.NoError(t, err)
 		parsed, _ := url.Parse(u)
 		assert.NoError(t, urlutil.NewSignedURL(key, parsed).Validate())
@@ -238,7 +239,7 @@ func TestStatefulCallback(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			flow, err := NewStateful(context.Background(), &config.Config{Options: opts}, tt.sessionStore)
+			flow, err := NewStateful(context.Background(), trace.NewNoopTracerProvider(), &config.Config{Options: opts}, tt.sessionStore)
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -289,7 +290,7 @@ func TestStatefulCallback(t *testing.T) {

 func TestStatefulRevokeSession(t *testing.T) {
 	opts := config.NewDefaultOptions()
-	flow, err := NewStateful(context.Background(), &config.Config{Options: opts}, nil)
+	flow, err := NewStateful(context.Background(), trace.NewNoopTracerProvider(), &config.Config{Options: opts}, nil)
 	require.NoError(t, err)

 	ctrl := gomock.NewController(t)
@@ -367,7 +368,7 @@ func TestPersistSession(t *testing.T) {

 	opts := config.NewDefaultOptions()
 	opts.CookieExpire = 4 * time.Hour
-	flow, err := NewStateful(context.Background(), &config.Config{Options: opts}, nil)
+	flow, err := NewStateful(context.Background(), trace.NewNoopTracerProvider(), &config.Config{Options: opts}, nil)
 	require.NoError(t, err)

 	ctrl := gomock.NewController(t)
@@ -10,6 +10,7 @@ import (

 	"github.com/go-jose/go-jose/v3"
 	"golang.org/x/oauth2"
+	googlegrpc "google.golang.org/grpc"
 	"google.golang.org/protobuf/encoding/protojson"

 	"github.com/pomerium/pomerium/authenticate/events"
@@ -20,6 +21,7 @@ import (
 	"github.com/pomerium/pomerium/internal/httputil"
 	"github.com/pomerium/pomerium/internal/log"
 	"github.com/pomerium/pomerium/internal/sessions"
+	"github.com/pomerium/pomerium/internal/telemetry/trace"
 	"github.com/pomerium/pomerium/internal/urlutil"
 	"github.com/pomerium/pomerium/pkg/cryptutil"
 	"github.com/pomerium/pomerium/pkg/grpc"
@@ -29,6 +31,9 @@ import (
 	"github.com/pomerium/pomerium/pkg/grpc/user"
 	"github.com/pomerium/pomerium/pkg/hpke"
 	"github.com/pomerium/pomerium/pkg/identity"
+	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+	"go.opentelemetry.io/otel"
+	oteltrace "go.opentelemetry.io/otel/trace"
 )

 // Stateless implements the stateless authentication flow. In this flow, the
@@ -56,18 +61,21 @@ type Stateless struct {

 	dataBrokerClient databroker.DataBrokerServiceClient

-	getIdentityProvider func(options *config.Options, idpID string) (identity.Authenticator, error)
+	getIdentityProvider func(ctx context.Context, tracerProvider oteltrace.TracerProvider, options *config.Options, idpID string) (identity.Authenticator, error)
 	profileTrimFn       func(*identitypb.Profile)
 	authEventFn         events.AuthEventFn
+
+	tracerProvider oteltrace.TracerProvider
 }

 // NewStateless initializes the authentication flow for the given
 // configuration, session store, and additional options.
 func NewStateless(
 	ctx context.Context,
+	tracerProvider oteltrace.TracerProvider,
 	cfg *config.Config,
 	sessionStore sessions.SessionStore,
-	getIdentityProvider func(options *config.Options, idpID string) (identity.Authenticator, error),
+	getIdentityProvider func(ctx context.Context, tracerProvider oteltrace.TracerProvider, options *config.Options, idpID string) (identity.Authenticator, error),
 	profileTrimFn func(*identitypb.Profile),
 	authEventFn events.AuthEventFn,
 ) (*Stateless, error) {
@@ -77,6 +85,7 @@ func NewStateless(
 		getIdentityProvider: getIdentityProvider,
 		profileTrimFn:       profileTrimFn,
 		authEventFn:         authEventFn,
+		tracerProvider:      tracerProvider,
 	}

 	var err error
@@ -137,7 +146,10 @@ func NewStateless(
 		InstallationID: cfg.Options.InstallationID,
 		ServiceName:    cfg.Options.Services,
 		SignedJWTKey:   sharedKey,
-	})
+	}, googlegrpc.WithStatsHandler(trace.NewClientStatsHandler(
+		otelgrpc.NewClientHandler(otelgrpc.WithTracerProvider(tracerProvider)),
+		outboundDatabrokerTraceClientOpts...,
+	)))
 	if err != nil {
 		return nil, err
 	}
@@ -154,7 +166,7 @@ func (s *Stateless) VerifySession(ctx context.Context, r *http.Request, _ *sessi
 		return fmt.Errorf("identity profile load error: %w", err)
 	}

-	authenticator, err := s.getIdentityProvider(s.options, profile.GetProviderId())
+	authenticator, err := s.getIdentityProvider(ctx, s.tracerProvider, s.options, profile.GetProviderId())
 	if err != nil {
 		return fmt.Errorf("couldn't get identity provider: %w", err)
 	}
@@ -355,6 +367,7 @@ func (s *Stateless) AuthenticateSignInURL(
 	for k, v := range queryParams {
 		q[k] = v
 	}
+	otel.GetTextMapPropagator().Inject(ctx, trace.PomeriumURLQueryCarrier(q))
 	authenticateURLWithParams.RawQuery = q.Encode()

 	return urlutil.SignInURL(
@@ -9,6 +9,7 @@ import (
 	"testing"
 	"time"

+	"github.com/pomerium/pomerium/config"
 	"github.com/pomerium/pomerium/internal/testenv"
 	"github.com/pomerium/pomerium/internal/testenv/envutil"
 	"github.com/pomerium/pomerium/internal/testenv/scenarios"
@@ -48,7 +49,8 @@ func TestRequestLatency(t *testing.T) {
 	for i := range numRoutes {
 		routes[i] = up.Route().
 			From(env.SubdomainURL(fmt.Sprintf("from-%d", i))).
-			PPL(fmt.Sprintf(`{"allow":{"and":["email":{"is":"user%d@example.com"}]}}`, i))
+			Policy(func(p *config.Policy) { p.AllowPublicUnauthenticatedAccess = true })
+		// PPL(fmt.Sprintf(`{"allow":{"and":["email":{"is":"user%d@example.com"}]}}`, i))
 	}
 	env.AddUpstream(up)
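The benchmark routes now grant public unauthenticated access through the policy builder instead of an inline PPL document. The PPL string left behind in the comment also appears malformed: the "and" array holds a bare key/value pair rather than an object. If the per-user policy is ever restored, a well-formed variant would look roughly like the string below; this sketch only checks that the corrected form parses as JSON and is not taken from the repository:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Each criterion inside the "and" list must itself be a JSON object.
	ppl := fmt.Sprintf(`{"allow":{"and":[{"email":{"is":"user%d@example.com"}}]}}`, 0)
	var v any
	if err := json.Unmarshal([]byte(ppl), &v); err != nil {
		panic(err) // the original commented-out string would fail here
	}
	fmt.Println("valid JSON:", ppl)
}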
@@ -7,6 +7,8 @@ import (

 	"github.com/cenkalti/backoff/v4"
 	"github.com/google/uuid"
+	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+	googlegrpc "google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 	"google.golang.org/protobuf/proto"
@@ -76,7 +78,7 @@ func (srv *Server) getDataBrokerClient(ctx context.Context) (databrokerpb.DataBr
 		InstallationID: cfg.Options.InstallationID,
 		ServiceName:    cfg.Options.Services,
 		SignedJWTKey:   sharedKey,
-	})
+	}, googlegrpc.WithStatsHandler(otelgrpc.NewClientHandler(otelgrpc.WithTracerProvider(srv.tracerProvider))))
 	if err != nil {
 		return nil, fmt.Errorf("controlplane: error creating databroker connection: %w", err)
 	}
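Here the databroker connection is instrumented by attaching otelgrpc's client stats handler at dial time, so every RPC on the connection produces a client span. A minimal standalone sketch of the same wiring against a plain gRPC client (the target address and credentials are placeholders, not values from the repository):

package main

import (
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"go.opentelemetry.io/otel"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// The stats handler reports RPC lifecycle events to the tracer provider,
	// which is how client spans get created for each outbound call.
	conn, err := grpc.NewClient(
		"localhost:5443", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(otelgrpc.NewClientHandler(
			otelgrpc.WithTracerProvider(otel.GetTracerProvider()),
		)),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}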
@ -7,6 +7,8 @@ import (
|
||||||
envoy_data_accesslog_v3 "github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3"
|
envoy_data_accesslog_v3 "github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3"
|
||||||
envoy_service_accesslog_v3 "github.com/envoyproxy/go-control-plane/envoy/service/accesslog/v3"
|
envoy_service_accesslog_v3 "github.com/envoyproxy/go-control-plane/envoy/service/accesslog/v3"
|
||||||
"github.com/rs/zerolog"
|
"github.com/rs/zerolog"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
|
||||||
"github.com/pomerium/pomerium/internal/log"
|
"github.com/pomerium/pomerium/internal/log"
|
||||||
)
|
)
|
||||||
|
@ -21,7 +23,11 @@ func (srv *Server) StreamAccessLogs(stream envoy_service_accesslog_v3.AccessLogS
|
||||||
for {
|
for {
|
||||||
msg, err := stream.Recv()
|
msg, err := stream.Recv()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
if status.Code(err) == codes.Canceled {
|
||||||
|
log.Ctx(stream.Context()).Debug().Err(err).Msg("access log stream canceled")
|
||||||
|
} else {
|
||||||
log.Ctx(stream.Context()).Error().Err(err).Msg("access log stream error, disconnecting")
|
log.Ctx(stream.Context()).Error().Err(err).Msg("access log stream error, disconnecting")
|
||||||
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@@ -2,6 +2,7 @@
 package controlplane

 import (
+	"context"
 	"fmt"
 	"net/http"
 	"time"
@@ -9,18 +10,21 @@ import (
 	"github.com/CAFxX/httpcompression"
 	"github.com/gorilla/mux"
 	"github.com/rs/zerolog"
+	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"

 	"github.com/pomerium/pomerium/config"
 	"github.com/pomerium/pomerium/internal/handlers"
 	"github.com/pomerium/pomerium/internal/log"
 	"github.com/pomerium/pomerium/internal/middleware"
 	"github.com/pomerium/pomerium/internal/telemetry"
+	"github.com/pomerium/pomerium/internal/telemetry/trace"
 	"github.com/pomerium/pomerium/internal/urlutil"
 	hpke_handlers "github.com/pomerium/pomerium/pkg/hpke/handlers"
 	"github.com/pomerium/pomerium/pkg/telemetry/requestid"
 )

-func (srv *Server) addHTTPMiddleware(root *mux.Router, logger *zerolog.Logger, _ *config.Config) {
+func (srv *Server) addHTTPMiddleware(ctx context.Context, root *mux.Router, _ *config.Config) {
+	logger := log.Ctx(ctx)
 	compressor, err := httpcompression.DefaultAdapter()
 	if err != nil {
 		panic(err)
@@ -48,6 +52,7 @@ func (srv *Server) addHTTPMiddleware(root *mux.Router, logger *zerolog.Logger, _
 	root.Use(telemetry.HTTPStatsHandler(func() string {
 		return srv.currentConfig.Load().Options.InstallationID
 	}, srv.name))
+	root.Use(trace.NewHTTPMiddleware(otelhttp.WithTracerProvider(srv.tracerProvider)))
 }

 func (srv *Server) mountCommonEndpoints(root *mux.Router, cfg *config.Config) error {
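trace.NewHTTPMiddleware is new in this change and its body is not shown here; it presumably builds on the otelhttp contrib instrumentation that is now imported. For reference, a sketch of attaching the stock contrib middleware to a gorilla/mux router, assuming otelhttp.NewMiddleware (the router-middleware form of otelhttp.NewHandler); the operation name is illustrative:

package example

import (
	"github.com/gorilla/mux"
	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	oteltrace "go.opentelemetry.io/otel/trace"
)

func addTracingMiddleware(root *mux.Router, tp oteltrace.TracerProvider) {
	// Wraps every handler so each incoming request gets a server span.
	root.Use(otelhttp.NewMiddleware("Server", otelhttp.WithTracerProvider(tp)))
}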
@@ -11,6 +11,8 @@ import (
 	envoy_service_discovery_v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
 	"github.com/gorilla/mux"
 	"github.com/rs/zerolog"
+	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+	coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
 	"golang.org/x/sync/errgroup"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/health/grpc_health_v1"
@@ -25,7 +27,6 @@ import (
 	"github.com/pomerium/pomerium/internal/events"
 	"github.com/pomerium/pomerium/internal/httputil/reproxy"
 	"github.com/pomerium/pomerium/internal/log"
-	"github.com/pomerium/pomerium/internal/telemetry"
 	"github.com/pomerium/pomerium/internal/telemetry/trace"
 	"github.com/pomerium/pomerium/internal/urlutil"
 	"github.com/pomerium/pomerium/internal/version"
@@ -34,6 +35,7 @@ import (
 	"github.com/pomerium/pomerium/pkg/grpcutil"
 	"github.com/pomerium/pomerium/pkg/httputil"
 	"github.com/pomerium/pomerium/pkg/telemetry/requestid"
+	oteltrace "go.opentelemetry.io/otel/trace"
 )

 // A Service can be mounted on the control plane.
@@ -43,6 +45,7 @@ type Service interface {

 // A Server is the control-plane gRPC and HTTP servers.
 type Server struct {
+	coltracepb.UnimplementedTraceServiceServer
 	GRPCListener net.Listener
 	GRPCServer   *grpc.Server
 	HTTPListener net.Listener
@@ -66,6 +69,9 @@ type Server struct {
 	proxySvc Service

 	haveSetCapacity map[string]bool
+
+	tracerProvider oteltrace.TracerProvider
+	tracer         oteltrace.Tracer
 }

 // NewServer creates a new Server. Listener ports are chosen by the OS.
@@ -76,7 +82,10 @@ func NewServer(
 	eventsMgr *events.Manager,
 	fileMgr *filemgr.Manager,
 ) (*Server, error) {
+	tracerProvider := trace.NewTracerProvider(ctx, "Control Plane")
 	srv := &Server{
+		tracerProvider: tracerProvider,
+		tracer:         tracerProvider.Tracer(trace.PomeriumCoreTracer),
 		metricsMgr: metricsMgr,
 		EventsMgr:  eventsMgr,
 		filemgr:    fileMgr,
@@ -105,7 +114,7 @@ func NewServer(
 		),
 	)
 	srv.GRPCServer = grpc.NewServer(
-		grpc.StatsHandler(telemetry.NewGRPCServerStatsHandler(cfg.Options.Services)),
+		grpc.StatsHandler(trace.NewServerStatsHandler(otelgrpc.NewServerHandler(otelgrpc.WithTracerProvider(tracerProvider)))),
 		grpc.ChainUnaryInterceptor(
 			log.UnaryServerInterceptor(log.Ctx(ctx)),
 			requestid.UnaryServerInterceptor(),
@@ -177,7 +186,9 @@ func NewServer(

 	srv.xdsmgr = xdsmgr.NewManager(res)
 	envoy_service_discovery_v3.RegisterAggregatedDiscoveryServiceServer(srv.GRPCServer, srv.xdsmgr)
+	if exp := trace.ExporterServerFromContext(ctx); exp != nil {
+		coltracepb.RegisterTraceServiceServer(srv.GRPCServer, exp)
+	}
 	return srv, nil
 }

@@ -241,7 +252,7 @@ func (srv *Server) Run(ctx context.Context) error {

 // OnConfigChange updates the pomerium config options.
 func (srv *Server) OnConfigChange(ctx context.Context, cfg *config.Config) error {
-	ctx, span := trace.StartSpan(ctx, "controlplane.Server.OnConfigChange")
+	ctx, span := srv.tracer.Start(ctx, "controlplane.Server.OnConfigChange")
 	defer span.End()

 	select {
@@ -265,7 +276,7 @@ func (srv *Server) EnableProxy(ctx context.Context, svc Service) error {
 }

 func (srv *Server) update(ctx context.Context, cfg *config.Config) error {
-	ctx, span := trace.StartSpan(ctx, "controlplane.Server.update")
+	ctx, span := srv.tracer.Start(ctx, "controlplane.Server.update")
 	defer span.End()

 	if err := srv.updateRouter(ctx, cfg); err != nil {
@@ -283,7 +294,7 @@ func (srv *Server) update(ctx context.Context, cfg *config.Config) error {

 func (srv *Server) updateRouter(ctx context.Context, cfg *config.Config) error {
 	httpRouter := mux.NewRouter()
-	srv.addHTTPMiddleware(httpRouter, log.Ctx(ctx), cfg)
+	srv.addHTTPMiddleware(ctx, httpRouter, cfg)
 	if err := srv.mountCommonEndpoints(httpRouter, cfg); err != nil {
 		return err
 	}
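The recurring change in this file, and in the databroker files below, swaps the old package-level trace.StartSpan helper for a tracer held on the server and created from an explicit TracerProvider. A minimal sketch of that pattern using only the standard OpenTelemetry API; the type and tracer names are illustrative:

package example

import (
	"context"

	oteltrace "go.opentelemetry.io/otel/trace"
)

type server struct {
	tracer oteltrace.Tracer
}

func newServer(tp oteltrace.TracerProvider) *server {
	// One tracer per component, scoped to the provider it was given
	// instead of the global one.
	return &server{tracer: tp.Tracer("pomerium.io/example")}
}

func (s *server) update(ctx context.Context) {
	ctx, span := s.tracer.Start(ctx, "example.Server.update")
	defer span.End()
	_ = ctx // do work with the span-carrying context
}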
@@ -9,7 +9,6 @@ import (
 	"golang.org/x/sync/errgroup"

 	"github.com/pomerium/pomerium/internal/log"
-	"github.com/pomerium/pomerium/internal/telemetry/trace"
 	"github.com/pomerium/pomerium/pkg/cryptutil"
 	"github.com/pomerium/pomerium/pkg/protoutil"
 )
@@ -21,7 +20,7 @@ const (
 )

 func (srv *Server) buildDiscoveryResources(ctx context.Context) (map[string][]*envoy_service_discovery_v3.Resource, error) {
-	ctx, span := trace.StartSpan(ctx, "controlplane.Server.buildDiscoveryResources")
+	ctx, span := srv.tracer.Start(ctx, "controlplane.Server.buildDiscoveryResources")
 	defer span.End()

 	cfg := srv.currentConfig.Load()
@@ -22,6 +22,9 @@ import (
 	"github.com/pomerium/pomerium/pkg/grpc/databroker"
 	"github.com/pomerium/pomerium/pkg/grpcutil"
 	"github.com/pomerium/pomerium/pkg/health"
+	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+	oteltrace "go.opentelemetry.io/otel/trace"
+	googlegrpc "google.golang.org/grpc"
 )

 // ConfigSource provides a new Config source that decorates an underlying config with
@@ -35,6 +38,7 @@ type ConfigSource struct {
 	updaterHash      uint64
 	cancel           func()
 	enableValidation bool
+	tracerProvider   oteltrace.TracerProvider

 	config.ChangeDispatcher
 }
@@ -50,11 +54,13 @@ type EnableConfigValidation bool
 // NewConfigSource creates a new ConfigSource.
 func NewConfigSource(
 	ctx context.Context,
+	tracerProvider oteltrace.TracerProvider,
 	underlying config.Source,
 	enableValidation EnableConfigValidation,
 	listeners ...config.ChangeListener,
 ) *ConfigSource {
 	src := &ConfigSource{
+		tracerProvider:         tracerProvider,
 		enableValidation:       bool(enableValidation),
 		dbConfigs:              map[string]dbConfig{},
 		outboundGRPCConnection: new(grpc.CachedOutboundGRPClientConn),
@@ -85,7 +91,7 @@ func (src *ConfigSource) GetConfig() *config.Config {
 type firstTime bool

 func (src *ConfigSource) rebuild(ctx context.Context, firstTime firstTime) {
-	_, span := trace.StartSpan(ctx, "databroker.config_source.rebuild")
+	_, span := trace.Continue(ctx, "databroker.config_source.rebuild")
 	defer span.End()

 	now := time.Now()
@@ -259,7 +265,8 @@ func (src *ConfigSource) runUpdater(ctx context.Context, cfg *config.Config) {

 	ctx, src.cancel = context.WithCancel(ctx)

-	cc, err := src.outboundGRPCConnection.Get(ctx, connectionOptions)
+	cc, err := src.outboundGRPCConnection.Get(ctx, connectionOptions,
+		googlegrpc.WithStatsHandler(otelgrpc.NewClientHandler(otelgrpc.WithTracerProvider(src.tracerProvider))))
 	if err != nil {
 		log.Ctx(ctx).Error().Err(err).Msg("databroker: failed to create gRPC connection to data broker")
 		return

@@ -10,6 +10,7 @@ import (

 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	"go.opentelemetry.io/otel/trace"
 	"google.golang.org/grpc"
 	"google.golang.org/protobuf/proto"

@@ -41,7 +42,7 @@ func TestConfigSource(t *testing.T) {
 	defer func() { _ = li.Close() }()
 	_, outboundPort, _ := net.SplitHostPort(li.Addr().String())

-	dataBrokerServer := New(ctx)
+	dataBrokerServer := New(ctx, trace.NewNoopTracerProvider())
 	srv := grpc.NewServer()
 	databroker.RegisterDataBrokerServiceServer(srv, dataBrokerServer)
 	go func() { _ = srv.Serve(li) }()
@@ -65,7 +66,7 @@ func TestConfigSource(t *testing.T) {
 		OutboundPort: outboundPort,
 		Options:      base,
 	})
-	src := NewConfigSource(ctx, baseSource, EnableConfigValidation(true), func(_ context.Context, cfg *config.Config) {
+	src := NewConfigSource(ctx, trace.NewNoopTracerProvider(), baseSource, EnableConfigValidation(true), func(_ context.Context, cfg *config.Config) {
 		cfgs <- cfg
 	})
 	cfgs <- src.GetConfig()
@@ -9,7 +9,6 @@ import (
 	"github.com/pomerium/pomerium/internal/log"
 	"github.com/pomerium/pomerium/internal/registry"
 	"github.com/pomerium/pomerium/internal/registry/inmemory"
-	"github.com/pomerium/pomerium/internal/telemetry/trace"
 	registrypb "github.com/pomerium/pomerium/pkg/grpc/registry"
 	"github.com/pomerium/pomerium/pkg/storage"
 )
@@ -25,7 +24,7 @@ func (stream registryWatchServer) Context() context.Context {

 // Report calls the registry Report method.
 func (srv *Server) Report(ctx context.Context, req *registrypb.RegisterRequest) (*registrypb.RegisterResponse, error) {
-	ctx, span := trace.StartSpan(ctx, "databroker.grpc.Report")
+	ctx, span := srv.tracer.Start(ctx, "databroker.grpc.Report")
 	defer span.End()

 	r, err := srv.getRegistry(ctx)
@@ -38,7 +37,7 @@ func (srv *Server) Report(ctx context.Context, req *registrypb.RegisterRequest)

 // List calls the registry List method.
 func (srv *Server) List(ctx context.Context, req *registrypb.ListRequest) (*registrypb.ServiceList, error) {
-	ctx, span := trace.StartSpan(ctx, "databroker.grpc.List")
+	ctx, span := srv.tracer.Start(ctx, "databroker.grpc.List")
 	defer span.End()

 	r, err := srv.getRegistry(ctx)
@@ -52,7 +51,7 @@ func (srv *Server) List(ctx context.Context, req *registrypb.ListRequest) (*regi
 // Watch calls the registry Watch method.
 func (srv *Server) Watch(req *registrypb.ListRequest, stream registrypb.Registry_WatchServer) error {
 	ctx := stream.Context()
-	ctx, span := trace.StartSpan(ctx, "databroker.grpc.Watch")
+	ctx, span := srv.tracer.Start(ctx, "databroker.grpc.Watch")
 	defer span.End()

 	r, err := srv.getRegistry(ctx)
@@ -22,6 +22,7 @@ import (
 	"github.com/pomerium/pomerium/pkg/storage"
 	"github.com/pomerium/pomerium/pkg/storage/inmemory"
 	"github.com/pomerium/pomerium/pkg/storage/postgres"
+	oteltrace "go.opentelemetry.io/otel/trace"
 )

 // Server implements the databroker service using an in memory database.
@@ -32,12 +33,17 @@ type Server struct {
 	backend    storage.Backend
 	backendCtx context.Context
 	registry   registry.Interface
+	tracerProvider oteltrace.TracerProvider
+	tracer         oteltrace.Tracer
 }

 // New creates a new server.
-func New(ctx context.Context, options ...ServerOption) *Server {
+func New(ctx context.Context, tracerProvider oteltrace.TracerProvider, options ...ServerOption) *Server {
+	tracer := tracerProvider.Tracer(trace.PomeriumCoreTracer)
 	srv := &Server{
 		backendCtx: ctx,
+		tracerProvider: tracerProvider,
+		tracer:         tracer,
 	}
 	srv.UpdateConfig(ctx, options...)
 	return srv
@@ -74,7 +80,7 @@ func (srv *Server) UpdateConfig(ctx context.Context, options ...ServerOption) {

 // AcquireLease acquires a lease.
 func (srv *Server) AcquireLease(ctx context.Context, req *databroker.AcquireLeaseRequest) (*databroker.AcquireLeaseResponse, error) {
-	ctx, span := trace.StartSpan(ctx, "databroker.grpc.AcquireLease")
+	ctx, span := srv.tracer.Start(ctx, "databroker.grpc.AcquireLease")
 	defer span.End()
 	log.Ctx(ctx).Debug().
 		Str("name", req.GetName()).
@@ -101,7 +107,7 @@ func (srv *Server) AcquireLease(ctx context.Context, req *databroker.AcquireLeas

 // Get gets a record from the in-memory list.
 func (srv *Server) Get(ctx context.Context, req *databroker.GetRequest) (*databroker.GetResponse, error) {
-	ctx, span := trace.StartSpan(ctx, "databroker.grpc.Get")
+	ctx, span := srv.tracer.Start(ctx, "databroker.grpc.Get")
 	defer span.End()
 	log.Ctx(ctx).Debug().
 		Str("type", req.GetType()).
@@ -128,7 +134,7 @@ func (srv *Server) Get(ctx context.Context, req *databroker.GetRequest) (*databr

 // ListTypes lists all the record types.
 func (srv *Server) ListTypes(ctx context.Context, _ *emptypb.Empty) (*databroker.ListTypesResponse, error) {
-	ctx, span := trace.StartSpan(ctx, "databroker.grpc.ListTypes")
+	ctx, span := srv.tracer.Start(ctx, "databroker.grpc.ListTypes")
 	defer span.End()
 	log.Ctx(ctx).Debug().Msg("list types")

@@ -145,7 +151,7 @@ func (srv *Server) ListTypes(ctx context.Context, _ *emptypb.Empty) (*databroker

 // Query queries for records.
 func (srv *Server) Query(ctx context.Context, req *databroker.QueryRequest) (*databroker.QueryResponse, error) {
-	ctx, span := trace.StartSpan(ctx, "databroker.grpc.Query")
+	ctx, span := srv.tracer.Start(ctx, "databroker.grpc.Query")
 	defer span.End()
 	log.Ctx(ctx).Debug().
 		Str("type", req.GetType()).
@@ -198,7 +204,7 @@ func (srv *Server) Query(ctx context.Context, req *databroker.QueryRequest) (*da

 // Put updates an existing record or adds a new one.
 func (srv *Server) Put(ctx context.Context, req *databroker.PutRequest) (*databroker.PutResponse, error) {
-	ctx, span := trace.StartSpan(ctx, "databroker.grpc.Put")
+	ctx, span := srv.tracer.Start(ctx, "databroker.grpc.Put")
 	defer span.End()

 	records := req.GetRecords()
@@ -237,7 +243,7 @@ func (srv *Server) Put(ctx context.Context, req *databroker.PutRequest) (*databr

 // Patch updates specific fields of an existing record.
 func (srv *Server) Patch(ctx context.Context, req *databroker.PatchRequest) (*databroker.PatchResponse, error) {
-	ctx, span := trace.StartSpan(ctx, "databroker.grpc.Patch")
+	ctx, span := srv.tracer.Start(ctx, "databroker.grpc.Patch")
 	defer span.End()

 	records := req.GetRecords()
@@ -276,9 +282,9 @@ func (srv *Server) Patch(ctx context.Context, req *databroker.PatchRequest) (*da

 // ReleaseLease releases a lease.
 func (srv *Server) ReleaseLease(ctx context.Context, req *databroker.ReleaseLeaseRequest) (*emptypb.Empty, error) {
-	ctx, span := trace.StartSpan(ctx, "databroker.grpc.ReleaseLease")
+	ctx, span := srv.tracer.Start(ctx, "databroker.grpc.ReleaseLease")
 	defer span.End()
-	log.Ctx(ctx).Debug().
+	log.Ctx(ctx).Trace().
 		Str("name", req.GetName()).
 		Str("id", req.GetId()).
 		Msg("release lease")
@@ -298,9 +304,9 @@ func (srv *Server) ReleaseLease(ctx context.Context, req *databroker.ReleaseLeas

 // RenewLease releases a lease.
 func (srv *Server) RenewLease(ctx context.Context, req *databroker.RenewLeaseRequest) (*emptypb.Empty, error) {
-	ctx, span := trace.StartSpan(ctx, "databroker.grpc.RenewLease")
+	ctx, span := srv.tracer.Start(ctx, "databroker.grpc.RenewLease")
 	defer span.End()
-	log.Ctx(ctx).Debug().
+	log.Ctx(ctx).Trace().
 		Str("name", req.GetName()).
 		Str("id", req.GetId()).
 		Dur("duration", req.GetDuration().AsDuration()).
@@ -323,7 +329,7 @@ func (srv *Server) RenewLease(ctx context.Context, req *databroker.RenewLeaseReq

 // SetOptions sets options for a type in the databroker.
 func (srv *Server) SetOptions(ctx context.Context, req *databroker.SetOptionsRequest) (*databroker.SetOptionsResponse, error) {
-	ctx, span := trace.StartSpan(ctx, "databroker.grpc.SetOptions")
+	ctx, span := srv.tracer.Start(ctx, "databroker.grpc.SetOptions")
 	defer span.End()

 	backend, err := srv.getBackend(ctx)
@@ -346,7 +352,7 @@ func (srv *Server) SetOptions(ctx context.Context, req *databroker.SetOptionsReq
 // Sync streams updates for the given record type.
 func (srv *Server) Sync(req *databroker.SyncRequest, stream databroker.DataBrokerService_SyncServer) error {
 	ctx := stream.Context()
-	ctx, span := trace.StartSpan(ctx, "databroker.grpc.Sync")
+	ctx, span := srv.tracer.Start(ctx, "databroker.grpc.Sync")
 	defer span.End()

 	ctx, cancel := context.WithCancel(ctx)
@@ -384,7 +390,7 @@ func (srv *Server) Sync(req *databroker.SyncRequest, stream databroker.DataBroke
 // SyncLatest returns the latest value of every record in the databroker as a stream of records.
 func (srv *Server) SyncLatest(req *databroker.SyncLatestRequest, stream databroker.DataBrokerService_SyncLatestServer) error {
 	ctx := stream.Context()
-	ctx, span := trace.StartSpan(ctx, "databroker.grpc.SyncLatest")
+	ctx, span := srv.tracer.Start(ctx, "databroker.grpc.SyncLatest")
 	defer span.End()

 	ctx, cancel := context.WithCancel(ctx)

@@ -14,6 +14,7 @@ import (

 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	"go.opentelemetry.io/otel/trace/noop"
 	"golang.org/x/sync/errgroup"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
@@ -49,10 +50,9 @@ func (h testSyncerHandler) UpdateRecords(ctx context.Context, serverVersion uint
 }

 func newServer(cfg *serverConfig) *Server {
-	return &Server{
-		cfg:        cfg,
-		backendCtx: context.Background(),
-	}
+	srv := New(context.Background(), noop.NewTracerProvider())
+	srv.cfg = cfg
+	return srv
 }

 func TestServer_Get(t *testing.T) {
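Because constructors such as the databroker's New now take a TracerProvider, the tests above pass a no-op provider instead of wiring up exporters. A minimal sketch of that test wiring, assuming only the otel noop package; the component type here is a stand-in for any constructor with the new signature:

package example

import (
	"context"

	oteltrace "go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/noop"
)

type component struct{ tracer oteltrace.Tracer }

func newComponent(_ context.Context, tp oteltrace.TracerProvider) *component {
	return &component{tracer: tp.Tracer("test")}
}

func newTestComponent() *component {
	// Spans are still created, but never recorded or exported.
	return newComponent(context.Background(), noop.NewTracerProvider())
}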
@@ -14,7 +14,7 @@ import (
 func SetHeaders(headers map[string]string) func(next http.Handler) http.Handler {
 	return func(next http.Handler) http.Handler {
 		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-			ctx, span := trace.StartSpan(r.Context(), "middleware.SetHeaders")
+			ctx, span := trace.Continue(r.Context(), "middleware.SetHeaders")
 			defer span.End()
 			for key, val := range headers {
 				w.Header().Set(key, val)
@@ -29,7 +29,7 @@ func SetHeaders(headers map[string]string) func(next http.Handler) http.Handler
 func ValidateSignature(sharedKey []byte) func(next http.Handler) http.Handler {
 	return func(next http.Handler) http.Handler {
 		return httputil.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
-			ctx, span := trace.StartSpan(r.Context(), "middleware.ValidateSignature")
+			ctx, span := trace.Continue(r.Context(), "middleware.ValidateSignature")
 			defer span.End()
 			if err := ValidateRequestURL(r, sharedKey); err != nil {
 				return httputil.NewError(http.StatusBadRequest, err)
@@ -15,18 +15,23 @@ import (
 	"github.com/pomerium/pomerium/internal/log"
 	"github.com/pomerium/pomerium/pkg/grpc"
 	pb "github.com/pomerium/pomerium/pkg/grpc/registry"
+	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+	oteltrace "go.opentelemetry.io/otel/trace"
+	googlegrpc "google.golang.org/grpc"
 )

 // Reporter periodically submits a list of services available on this instance to the service registry
 type Reporter struct {
 	cancel                 func()
 	outboundGRPCConnection *grpc.CachedOutboundGRPClientConn
+	tracerProvider         oteltrace.TracerProvider
 }

 // NewReporter creates a new Reporter.
-func NewReporter() *Reporter {
+func NewReporter(tracerProvider oteltrace.TracerProvider) *Reporter {
 	return &Reporter{
 		outboundGRPCConnection: new(grpc.CachedOutboundGRPClientConn),
+		tracerProvider:         tracerProvider,
 	}
 }

@@ -52,7 +57,7 @@ func (r *Reporter) OnConfigChange(ctx context.Context, cfg *config.Config) {
 		InstallationID: cfg.Options.InstallationID,
 		ServiceName:    cfg.Options.Services,
 		SignedJWTKey:   sharedKey,
-	})
+	}, googlegrpc.WithStatsHandler(otelgrpc.NewClientHandler(otelgrpc.WithTracerProvider(r.tracerProvider))))
 	if err != nil {
 		log.Ctx(ctx).Error().Err(err).Msg("connecting to registry")
 		return
@@ -9,6 +9,7 @@ import (
 	"time"

 	"github.com/rs/zerolog"
+	"go.opentelemetry.io/otel/trace"

 	"github.com/pomerium/pomerium/internal/log"
 )
@@ -47,7 +48,10 @@ restart:
backoff:
 	for {
 		interval := backoff.NextBackOff()
-		log.Ctx(ctx).Info().Msgf("backing off for %s...", interval.String())
+		span := trace.SpanFromContext(ctx)
+		msg := fmt.Sprintf("backing off for %s...", interval.String())
+		span.AddEvent(msg)
+		log.Ctx(ctx).Info().Msg(msg)
 		timer := time.NewTimer(interval)
 		s := makeSelect(ctx, watches, name, timer.C, fn)
 		next, err := s.Exec(ctx)
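The backoff change above records the wait as a span event in addition to the log line. A minimal sketch of the SpanFromContext/AddEvent pattern with the standard OpenTelemetry API; when the context carries no active span, a no-op span is returned and AddEvent does nothing:

package example

import (
	"context"
	"fmt"
	"time"

	"go.opentelemetry.io/otel/trace"
)

func reportBackoff(ctx context.Context, interval time.Duration) {
	// Attach the event to whatever span is currently active in ctx.
	span := trace.SpanFromContext(ctx)
	msg := fmt.Sprintf("backing off for %s...", interval.String())
	span.AddEvent(msg)
}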
@ -1,100 +0,0 @@
|
||||||
package telemetry
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"go.opencensus.io/plugin/ocgrpc"
|
|
||||||
"go.opencensus.io/plugin/ochttp/propagation/b3"
|
|
||||||
"go.opencensus.io/trace"
|
|
||||||
"go.opencensus.io/trace/propagation"
|
|
||||||
"google.golang.org/grpc"
|
|
||||||
"google.golang.org/grpc/metadata"
|
|
||||||
grpcstats "google.golang.org/grpc/stats"
|
|
||||||
|
|
||||||
"github.com/pomerium/pomerium/internal/telemetry/metrics"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
grpcTraceBinHeader = "grpc-trace-bin"
|
|
||||||
b3TraceIDHeader = "x-b3-traceid"
|
|
||||||
b3SpanIDHeader = "x-b3-spanid"
|
|
||||||
)
|
|
||||||
|
|
||||||
type tagRPCHandler interface {
|
|
||||||
TagRPC(context.Context, *grpcstats.RPCTagInfo) context.Context
|
|
||||||
}
|
|
||||||
|
|
||||||
// GRPCServerStatsHandler provides a grpc stats.Handler for metrics and tracing for a pomerium service
|
|
||||||
type GRPCServerStatsHandler struct {
|
|
||||||
service string
|
|
||||||
metricsHandler tagRPCHandler
|
|
||||||
grpcstats.Handler
|
|
||||||
}
|
|
||||||
|
|
||||||
// TagRPC implements grpc.stats.Handler and adds metrics and tracing metadata to the context of a given RPC
|
|
||||||
func (h *GRPCServerStatsHandler) TagRPC(ctx context.Context, tagInfo *grpcstats.RPCTagInfo) context.Context {
|
|
||||||
// the opencensus trace handler only supports grpc-trace-bin, so we use that code and support b3 too
|
|
||||||
|
|
||||||
md, _ := metadata.FromIncomingContext(ctx)
|
|
||||||
name := strings.TrimPrefix(tagInfo.FullMethodName, "/")
|
|
||||||
name = strings.Replace(name, "/", ".", -1)
|
|
||||||
|
|
||||||
var parent trace.SpanContext
|
|
||||||
hasParent := false
|
|
||||||
if traceBin := md[grpcTraceBinHeader]; len(traceBin) > 0 {
|
|
||||||
parent, hasParent = propagation.FromBinary([]byte(traceBin[0]))
|
|
||||||
}
|
|
||||||
|
|
||||||
if hdr := md[b3TraceIDHeader]; len(hdr) > 0 {
|
|
||||||
if tid, ok := b3.ParseTraceID(hdr[0]); ok {
|
|
||||||
parent.TraceID = tid
|
|
||||||
hasParent = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if hdr := md[b3SpanIDHeader]; len(hdr) > 0 {
|
|
||||||
if sid, ok := b3.ParseSpanID(hdr[0]); ok {
|
|
||||||
parent.SpanID = sid
|
|
||||||
hasParent = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if hasParent {
|
|
||||||
ctx, _ = trace.StartSpanWithRemoteParent(ctx, name, parent,
|
|
||||||
trace.WithSpanKind(trace.SpanKindServer))
|
|
||||||
} else {
|
|
||||||
ctx, _ = trace.StartSpan(ctx, name,
|
|
||||||
trace.WithSpanKind(trace.SpanKindServer))
|
|
||||||
}
|
|
||||||
|
|
||||||
// ocgrpc's TagRPC must be called to attach the context rpcDataKey correctly
|
|
||||||
// https://github.com/census-instrumentation/opencensus-go/blob/bf52d9df8bb2d44cad934587ab946794456cf3c8/plugin/ocgrpc/server_stats_handler.go#L45
|
|
||||||
metricCtx := h.metricsHandler.TagRPC(h.Handler.TagRPC(ctx, tagInfo), tagInfo)
|
|
||||||
return metricCtx
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewGRPCServerStatsHandler creates a new GRPCServerStatsHandler for a pomerium service
|
|
||||||
func NewGRPCServerStatsHandler(service string) grpcstats.Handler {
|
|
||||||
return &GRPCServerStatsHandler{
|
|
||||||
service: ServiceName(service),
|
|
||||||
Handler: &ocgrpc.ServerHandler{},
|
|
||||||
metricsHandler: metrics.NewGRPCServerMetricsHandler(ServiceName(service)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GRPCClientStatsHandler provides DialOptions for grpc clients to instrument network calls with
|
|
||||||
// both metrics and tracing
|
|
||||||
type GRPCClientStatsHandler struct {
|
|
||||||
UnaryInterceptor grpc.UnaryClientInterceptor
|
|
||||||
// TODO: we should have a streaming interceptor too
|
|
||||||
grpcstats.Handler
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewGRPCClientStatsHandler returns a new GRPCClientStatsHandler used to create
|
|
||||||
// telemetry related client DialOptions
|
|
||||||
func NewGRPCClientStatsHandler(service string) *GRPCClientStatsHandler {
|
|
||||||
return &GRPCClientStatsHandler{
|
|
||||||
Handler: &ocgrpc.ClientHandler{},
|
|
||||||
UnaryInterceptor: metrics.GRPCClientInterceptor(ServiceName(service)),
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,47 +0,0 @@
|
||||||
package telemetry
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"go.opencensus.io/plugin/ocgrpc"
|
|
||||||
"go.opencensus.io/plugin/ochttp/propagation/b3"
|
|
||||||
"go.opencensus.io/trace"
|
|
||||||
"google.golang.org/grpc/metadata"
|
|
||||||
grpcstats "google.golang.org/grpc/stats"
|
|
||||||
)
|
|
||||||
|
|
||||||
type mockTagHandler struct {
|
|
||||||
called bool
|
|
||||||
}
|
|
||||||
|
|
||||||
type mockCtxTag string
|
|
||||||
|
|
||||||
func (m *mockTagHandler) TagRPC(ctx context.Context, _ *grpcstats.RPCTagInfo) context.Context {
|
|
||||||
m.called = true
|
|
||||||
return context.WithValue(ctx, mockCtxTag("added"), "true")
|
|
||||||
}
|
|
||||||
|
|
||||||
func Test_GRPCServerStatsHandler(t *testing.T) {
|
|
||||||
metricsHandler := &mockTagHandler{}
|
|
||||||
h := &GRPCServerStatsHandler{
|
|
||||||
metricsHandler: metricsHandler,
|
|
||||||
Handler: &ocgrpc.ServerHandler{},
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := context.WithValue(context.Background(), mockCtxTag("original"), "true")
|
|
||||||
ctx = metadata.NewIncomingContext(ctx, metadata.MD{
|
|
||||||
b3TraceIDHeader: {"9de3f6756f315fef"},
|
|
||||||
b3SpanIDHeader: {"b4f83d3096b6bf9c"},
|
|
||||||
})
|
|
||||||
ctx = h.TagRPC(ctx, &grpcstats.RPCTagInfo{})
|
|
||||||
|
|
||||||
assert.True(t, metricsHandler.called)
|
|
||||||
assert.Equal(t, ctx.Value(mockCtxTag("added")), "true")
|
|
||||||
assert.Equal(t, ctx.Value(mockCtxTag("original")), "true")
|
|
||||||
|
|
||||||
span := trace.FromContext(ctx)
|
|
||||||
expectedTraceID, _ := b3.ParseTraceID("9de3f6756f315fef")
|
|
||||||
assert.Equal(t, expectedTraceID, span.SpanContext().TraceID)
|
|
||||||
}
|
|
266
internal/telemetry/trace/buffer.go
Normal file
266
internal/telemetry/trace/buffer.go
Normal file
|
@ -0,0 +1,266 @@
|
||||||
|
package trace
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cmp"
|
||||||
|
"encoding/base64"
|
||||||
|
"maps"
|
||||||
|
"slices"
|
||||||
|
"sync"
|
||||||
|
"unique"
|
||||||
|
|
||||||
|
"github.com/pomerium/pomerium/internal/hashutil"
|
||||||
|
oteltrace "go.opentelemetry.io/otel/trace"
|
||||||
|
commonv1 "go.opentelemetry.io/proto/otlp/common/v1"
|
||||||
|
resourcev1 "go.opentelemetry.io/proto/otlp/resource/v1"
|
||||||
|
tracev1 "go.opentelemetry.io/proto/otlp/trace/v1"
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ScopeBuffer struct {
|
||||||
|
scope *ScopeInfo
|
||||||
|
spans []*tracev1.Span
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sb *ScopeBuffer) Insert(spans ...*tracev1.Span) {
|
||||||
|
sb.spans = append(sb.spans, spans...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewScopeBuffer(scope *ScopeInfo) *ScopeBuffer {
|
||||||
|
return &ScopeBuffer{
|
||||||
|
scope: scope,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type ResourceBuffer struct {
|
||||||
|
resource *ResourceInfo
|
||||||
|
spansByScope map[string]*ScopeBuffer
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewResourceBuffer(resource *ResourceInfo) *ResourceBuffer {
|
||||||
|
return &ResourceBuffer{
|
||||||
|
resource: resource,
|
||||||
|
spansByScope: make(map[string]*ScopeBuffer),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rb *ResourceBuffer) Insert(scope *ScopeInfo, span *tracev1.Span) {
|
||||||
|
var spans *ScopeBuffer
|
||||||
|
if sp, ok := rb.spansByScope[scope.ID()]; ok {
|
||||||
|
spans = sp
|
||||||
|
} else {
|
||||||
|
spans = NewScopeBuffer(scope)
|
||||||
|
rb.spansByScope[scope.ID()] = spans
|
||||||
|
}
|
||||||
|
spans.Insert(span)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rb *ResourceBuffer) Flush() []*tracev1.ScopeSpans {
|
||||||
|
out := make([]*tracev1.ScopeSpans, 0, len(rb.spansByScope))
|
||||||
|
for _, key := range slices.Sorted(maps.Keys(rb.spansByScope)) {
|
||||||
|
spans := rb.spansByScope[key]
|
||||||
|
slices.SortStableFunc(spans.spans, func(a, b *tracev1.Span) int {
|
||||||
|
return cmp.Compare(a.StartTimeUnixNano, b.StartTimeUnixNano)
|
||||||
|
})
|
||||||
|
scopeSpans := &tracev1.ScopeSpans{
|
||||||
|
Scope: spans.scope.Scope,
|
||||||
|
SchemaUrl: spans.scope.Schema,
|
||||||
|
Spans: spans.spans,
|
||||||
|
}
|
||||||
|
out = append(out, scopeSpans)
|
||||||
|
}
|
||||||
|
clear(rb.spansByScope)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rb *ResourceBuffer) FlushAs(rewriteTraceID unique.Handle[oteltrace.TraceID]) []*tracev1.ScopeSpans {
|
||||||
|
out := make([]*tracev1.ScopeSpans, 0, len(rb.spansByScope))
|
||||||
|
for _, key := range slices.Sorted(maps.Keys(rb.spansByScope)) {
|
||||||
|
spans := rb.spansByScope[key]
|
||||||
|
{
|
||||||
|
id := rewriteTraceID.Value()
|
||||||
|
for _, span := range spans.spans {
|
||||||
|
copy(span.TraceId, id[:])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
slices.SortStableFunc(spans.spans, func(a, b *tracev1.Span) int {
|
||||||
|
return cmp.Compare(a.StartTimeUnixNano, b.StartTimeUnixNano)
|
||||||
|
})
|
||||||
|
scopeSpans := &tracev1.ScopeSpans{
|
||||||
|
Scope: spans.scope.Scope,
|
||||||
|
SchemaUrl: spans.scope.Schema,
|
||||||
|
Spans: spans.spans,
|
||||||
|
}
|
||||||
|
out = append(out, scopeSpans)
|
||||||
|
}
|
||||||
|
clear(rb.spansByScope)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rb *ResourceBuffer) Merge(other *ResourceBuffer) {
|
||||||
|
for scope, otherSpans := range other.spansByScope {
|
||||||
|
if ourSpans, ok := rb.spansByScope[scope]; !ok {
|
||||||
|
rb.spansByScope[scope] = otherSpans
|
||||||
|
} else {
|
||||||
|
ourSpans.Insert(otherSpans.spans...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
clear(other.spansByScope)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rb *ResourceBuffer) MergeAs(other *ResourceBuffer, rewriteTraceID unique.Handle[oteltrace.TraceID]) {
|
||||||
|
for scope, otherSpans := range other.spansByScope {
|
||||||
|
{
|
||||||
|
id := rewriteTraceID.Value()
|
||||||
|
for _, span := range otherSpans.spans {
|
||||||
|
copy(span.TraceId, id[:])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ourSpans, ok := rb.spansByScope[scope]; !ok {
|
||||||
|
rb.spansByScope[scope] = otherSpans
|
||||||
|
} else {
|
||||||
|
ourSpans.Insert(otherSpans.spans...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
clear(other.spansByScope)
|
||||||
|
}
|
||||||
|
|
||||||
|
type Buffer struct {
|
||||||
|
scopesByResourceID map[string]*ResourceBuffer
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewBuffer() *Buffer {
|
||||||
|
return &Buffer{
|
||||||
|
scopesByResourceID: make(map[string]*ResourceBuffer),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *Buffer) Insert(resource *ResourceInfo, scope *ScopeInfo, span *tracev1.Span) {
|
||||||
|
resourceEq := resource.ID()
|
||||||
|
var scopes *ResourceBuffer
|
||||||
|
if sc, ok := b.scopesByResourceID[resourceEq]; ok {
|
||||||
|
scopes = sc
|
||||||
|
} else {
|
||||||
|
scopes = NewResourceBuffer(resource)
|
||||||
|
b.scopesByResourceID[resourceEq] = scopes
|
||||||
|
}
|
||||||
|
scopes.Insert(scope, span)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *Buffer) Flush() []*tracev1.ResourceSpans {
|
||||||
|
out := make([]*tracev1.ResourceSpans, 0, len(b.scopesByResourceID))
|
||||||
|
for _, key := range slices.Sorted(maps.Keys(b.scopesByResourceID)) {
|
||||||
|
scopes := b.scopesByResourceID[key]
|
||||||
|
resourceSpans := &tracev1.ResourceSpans{
|
||||||
|
Resource: scopes.resource.Resource,
|
||||||
|
ScopeSpans: scopes.Flush(),
|
||||||
|
SchemaUrl: scopes.resource.Schema,
|
||||||
|
}
|
||||||
|
out = append(out, resourceSpans)
|
||||||
|
}
|
||||||
|
clear(b.scopesByResourceID)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *Buffer) FlushAs(rewriteTraceID unique.Handle[oteltrace.TraceID]) []*tracev1.ResourceSpans {
|
||||||
|
out := make([]*tracev1.ResourceSpans, 0, len(b.scopesByResourceID))
|
||||||
|
for _, key := range slices.Sorted(maps.Keys(b.scopesByResourceID)) {
|
||||||
|
scopes := b.scopesByResourceID[key]
|
||||||
|
resourceSpans := &tracev1.ResourceSpans{
|
||||||
|
Resource: scopes.resource.Resource,
|
||||||
|
ScopeSpans: scopes.FlushAs(rewriteTraceID),
|
||||||
|
SchemaUrl: scopes.resource.Schema,
|
||||||
|
}
|
||||||
|
out = append(out, resourceSpans)
|
||||||
|
}
|
||||||
|
clear(b.scopesByResourceID)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *Buffer) Merge(other *Buffer) {
|
||||||
|
if b != nil {
|
||||||
|
for k, otherV := range other.scopesByResourceID {
|
||||||
|
if v, ok := b.scopesByResourceID[k]; !ok {
|
||||||
|
b.scopesByResourceID[k] = otherV
|
||||||
|
} else {
|
||||||
|
v.Merge(otherV)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
clear(other.scopesByResourceID)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *Buffer) MergeAs(other *Buffer, rewriteTraceID unique.Handle[oteltrace.TraceID]) {
|
||||||
|
if b != nil {
|
||||||
|
for k, otherV := range other.scopesByResourceID {
|
||||||
|
if v, ok := b.scopesByResourceID[k]; !ok {
|
||||||
|
newRb := NewResourceBuffer(otherV.resource)
|
||||||
|
newRb.MergeAs(otherV, rewriteTraceID)
|
||||||
|
b.scopesByResourceID[k] = newRb
|
||||||
|
} else {
|
||||||
|
v.MergeAs(otherV, rewriteTraceID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
clear(other.scopesByResourceID)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *Buffer) IsEmpty() bool {
|
||||||
|
return len(b.scopesByResourceID) == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
type ResourceInfo struct {
|
||||||
|
Resource *resourcev1.Resource
|
||||||
|
Schema string
|
||||||
|
ID func() string
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewResourceInfo(resource *resourcev1.Resource, resourceSchema string) *ResourceInfo {
|
||||||
|
ri := &ResourceInfo{
|
||||||
|
Resource: resource,
|
||||||
|
Schema: resourceSchema,
|
||||||
|
}
|
||||||
|
ri.ID = sync.OnceValue(ri.computeID)
|
||||||
|
return ri
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ri *ResourceInfo) computeID() string {
|
||||||
|
hash := hashutil.NewDigest()
|
||||||
|
tmp := resourcev1.Resource{
|
||||||
|
Attributes: ri.Resource.Attributes,
|
||||||
|
}
|
||||||
|
bytes, _ := proto.Marshal(&tmp)
|
||||||
|
hash.WriteStringWithLen(ri.Schema)
|
||||||
|
hash.WriteWithLen(bytes)
|
||||||
|
return base64.StdEncoding.EncodeToString(hash.Sum(nil))
|
||||||
|
}
|
||||||
|
|
||||||
|
type ScopeInfo struct {
|
||||||
|
Scope *commonv1.InstrumentationScope
|
||||||
|
Schema string
|
||||||
|
ID func() string
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewScopeInfo(scope *commonv1.InstrumentationScope, scopeSchema string) *ScopeInfo {
|
||||||
|
si := &ScopeInfo{
|
||||||
|
Scope: scope,
|
||||||
|
Schema: scopeSchema,
|
||||||
|
}
|
||||||
|
si.ID = sync.OnceValue(si.computeID)
|
||||||
|
return si
|
||||||
|
}
|
||||||
|
|
||||||
|
func (si *ScopeInfo) computeID() string {
|
||||||
|
if si.Scope == nil {
|
||||||
|
return "(unknown)"
|
||||||
|
}
|
||||||
|
hash := hashutil.NewDigest()
|
||||||
|
tmp := commonv1.InstrumentationScope{
|
||||||
|
Name: si.Scope.Name,
|
||||||
|
Version: si.Scope.Version,
|
||||||
|
Attributes: si.Scope.Attributes,
|
||||||
|
}
|
||||||
|
bytes, _ := proto.Marshal(&tmp)
|
||||||
|
hash.WriteStringWithLen(si.Schema)
|
||||||
|
hash.WriteWithLen(bytes)
|
||||||
|
return base64.StdEncoding.EncodeToString(hash.Sum(nil))
|
||||||
|
}
|
229
internal/telemetry/trace/buffer_test.go
Normal file
229
internal/telemetry/trace/buffer_test.go
Normal file
|
@ -0,0 +1,229 @@
|
||||||
|
package trace_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
"unique"
|
||||||
|
|
||||||
|
"github.com/pomerium/pomerium/internal/telemetry/trace"
|
||||||
|
"github.com/pomerium/pomerium/internal/testutil"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
oteltrace "go.opentelemetry.io/otel/trace"
|
||||||
|
commonv1 "go.opentelemetry.io/proto/otlp/common/v1"
|
||||||
|
resourcev1 "go.opentelemetry.io/proto/otlp/resource/v1"
|
||||||
|
tracev1 "go.opentelemetry.io/proto/otlp/trace/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
Trace uint32
|
||||||
|
Span uint32
|
||||||
|
Scope uint32
|
||||||
|
Schema uint32
|
||||||
|
Resource uint32
|
||||||
|
)
|
||||||
|
|
||||||
|
func (n Trace) String() string { return fmt.Sprintf("Trace %d", n) }
|
||||||
|
func (n Span) String() string { return fmt.Sprintf("Span %d", n) }
|
||||||
|
func (n Scope) String() string { return fmt.Sprintf("Scope %d", n) }
|
||||||
|
func (n Schema) String() string { return fmt.Sprintf("Schema %d", n) }
|
||||||
|
func (n Resource) String() string { return fmt.Sprintf("Resource %d", n) }
|
||||||
|
|
||||||
|
func (n Trace) ID() unique.Handle[oteltrace.TraceID] {
|
||||||
|
id, _ := trace.ToTraceID(n.B())
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Trace) B() []byte {
|
||||||
|
var id oteltrace.TraceID
|
||||||
|
binary.BigEndian.PutUint32(id[12:], uint32(n))
|
||||||
|
return id[:]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Span) ID() oteltrace.SpanID {
|
||||||
|
id, _ := trace.ToSpanID(n.B())
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Span) B() []byte {
|
||||||
|
var id oteltrace.SpanID
|
||||||
|
binary.BigEndian.PutUint32(id[4:], uint32(n))
|
||||||
|
return id[:]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Scope) Make(s ...Schema) *trace.ScopeInfo {
|
||||||
|
if len(s) == 0 {
|
||||||
|
s = append(s, Schema(0))
|
||||||
|
}
|
||||||
|
return trace.NewScopeInfo(&commonv1.InstrumentationScope{
|
||||||
|
Name: n.String(),
|
||||||
|
Version: "v1",
|
||||||
|
Attributes: []*commonv1.KeyValue{
|
||||||
|
{
|
||||||
|
Key: "id",
|
||||||
|
Value: &commonv1.AnyValue{
|
||||||
|
Value: &commonv1.AnyValue_IntValue{
|
||||||
|
IntValue: int64(n),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}, s[0].String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Resource) Make(s ...Schema) *trace.ResourceInfo {
|
||||||
|
if len(s) == 0 {
|
||||||
|
s = append(s, Schema(0))
|
||||||
|
}
|
	return trace.NewResourceInfo(&resourcev1.Resource{
		Attributes: []*commonv1.KeyValue{
			{
				Key: "name",
				Value: &commonv1.AnyValue{
					Value: &commonv1.AnyValue_StringValue{
						StringValue: n.String(),
					},
				},
			},
			{
				Key: "id",
				Value: &commonv1.AnyValue{
					Value: &commonv1.AnyValue_IntValue{
						IntValue: int64(n),
					},
				},
			},
		},
	}, s[0].String())
}

func Traceparent(trace Trace, span Span, sampled bool) string {
	sampledStr := "00"
	if sampled {
		sampledStr = "01"
	}
	return fmt.Sprintf("00-%s-%s-%s", trace.ID().Value(), span.ID(), sampledStr)
}

func TestBuffer(t *testing.T) {
	t.Parallel()

	// start time determines sort order of spans within a resource+scope group
	s := []*tracev1.Span{
		{TraceId: Trace(1).B(), SpanId: Span(1).B(), StartTimeUnixNano: 1},
		{TraceId: Trace(1).B(), SpanId: Span(2).B(), StartTimeUnixNano: 2},
		{TraceId: Trace(2).B(), SpanId: Span(3).B(), StartTimeUnixNano: 3},
		{TraceId: Trace(2).B(), SpanId: Span(4).B(), StartTimeUnixNano: 4},
		{TraceId: Trace(1).B(), SpanId: Span(5).B(), StartTimeUnixNano: 5},
		{TraceId: Trace(1).B(), SpanId: Span(6).B(), StartTimeUnixNano: 6},
		{TraceId: Trace(2).B(), SpanId: Span(7).B(), StartTimeUnixNano: 7},
		{TraceId: Trace(2).B(), SpanId: Span(8).B(), StartTimeUnixNano: 8},
		{TraceId: Trace(1).B(), SpanId: Span(9).B(), StartTimeUnixNano: 9},
		{TraceId: Trace(1).B(), SpanId: Span(10).B(), StartTimeUnixNano: 10},
		{TraceId: Trace(2).B(), SpanId: Span(11).B(), StartTimeUnixNano: 11},
		{TraceId: Trace(2).B(), SpanId: Span(12).B(), StartTimeUnixNano: 12},
		{TraceId: Trace(1).B(), SpanId: Span(13).B(), StartTimeUnixNano: 13},
		{TraceId: Trace(1).B(), SpanId: Span(14).B(), StartTimeUnixNano: 14},
		{TraceId: Trace(2).B(), SpanId: Span(15).B(), StartTimeUnixNano: 15},
		{TraceId: Trace(2).B(), SpanId: Span(16).B(), StartTimeUnixNano: 16},
	}

	newTestBuffer := func() *trace.Buffer {
		b := trace.NewBuffer()
		b.Insert(Resource(1).Make(), Scope(1).Make(), s[0])
		b.Insert(Resource(1).Make(), Scope(1).Make(), s[1])
		b.Insert(Resource(1).Make(), Scope(1).Make(), s[2])
		b.Insert(Resource(1).Make(), Scope(1).Make(), s[3])
		b.Insert(Resource(1).Make(), Scope(2).Make(), s[4])
		b.Insert(Resource(1).Make(), Scope(2).Make(), s[5])
		b.Insert(Resource(1).Make(), Scope(2).Make(), s[6])
		b.Insert(Resource(1).Make(), Scope(2).Make(), s[7])
		b.Insert(Resource(2).Make(), Scope(1).Make(), s[8])
		b.Insert(Resource(2).Make(), Scope(1).Make(), s[9])
		b.Insert(Resource(2).Make(), Scope(1).Make(), s[10])
		b.Insert(Resource(2).Make(), Scope(1).Make(), s[11])
		b.Insert(Resource(2).Make(), Scope(2).Make(), s[12])
		b.Insert(Resource(2).Make(), Scope(2).Make(), s[13])
		b.Insert(Resource(2).Make(), Scope(2).Make(), s[14])
		b.Insert(Resource(2).Make(), Scope(2).Make(), s[15])
		return b
	}

	newExpectedSpans := func() []*tracev1.ResourceSpans {
		return []*tracev1.ResourceSpans{
			{
				Resource: Resource(1).Make().Resource,
				ScopeSpans: []*tracev1.ScopeSpans{
					{
						Scope:     Scope(1).Make().Scope,
						Spans:     []*tracev1.Span{s[0], s[1], s[2], s[3]},
						SchemaUrl: Schema(0).String(),
					},
					{
						Scope:     Scope(2).Make().Scope,
						Spans:     []*tracev1.Span{s[4], s[5], s[6], s[7]},
						SchemaUrl: Schema(0).String(),
					},
				},
				SchemaUrl: Schema(0).String(),
			},
			{
				Resource: Resource(2).Make().Resource,
				ScopeSpans: []*tracev1.ScopeSpans{
					{
						Scope:     Scope(1).Make().Scope,
						Spans:     []*tracev1.Span{s[8], s[9], s[10], s[11]},
						SchemaUrl: Schema(0).String(),
					},
					{
						Scope:     Scope(2).Make().Scope,
						Spans:     []*tracev1.Span{s[12], s[13], s[14], s[15]},
						SchemaUrl: Schema(0).String(),
					},
				},
				SchemaUrl: Schema(0).String(),
			},
		}
	}
	t.Run("Flush", func(t *testing.T) {
		b := newTestBuffer()
		actual := b.Flush()
		assert.True(t, b.IsEmpty())
		testutil.AssertProtoEqual(t, newExpectedSpans(), actual)
	})
	t.Run("FlushAs", func(t *testing.T) {
		b := newTestBuffer()
		actual := b.FlushAs(Trace(100).ID())
		assert.True(t, b.IsEmpty())
		expected := newExpectedSpans()
		for _, resourceSpans := range expected {
			for _, scopeSpans := range resourceSpans.ScopeSpans {
				for _, span := range scopeSpans.Spans {
					span.TraceId = Trace(100).B()
				}
			}
		}
		testutil.AssertProtoEqual(t, expected, actual)
	})

	t.Run("Default scope", func(t *testing.T) {
		b := trace.NewBuffer()
		b.Insert(Resource(1).Make(Schema(2)), trace.NewScopeInfo(nil, ""), s[0])
		b.Insert(Resource(1).Make(Schema(2)), trace.NewScopeInfo(nil, ""), s[1])
		b.Insert(Resource(1).Make(Schema(2)), trace.NewScopeInfo(nil, ""), s[2])
		actual := b.Flush()
		testutil.AssertProtoEqual(t, []*tracev1.ResourceSpans{
			{
				Resource: Resource(1).Make(Schema(2)).Resource,
				ScopeSpans: []*tracev1.ScopeSpans{
					{
						Scope:     nil,
						Spans:     []*tracev1.Span{s[0], s[1], s[2]},
						SchemaUrl: "",
					},
				},
				SchemaUrl: Schema(2).String(),
			},
		}, actual)
	})
}
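The buffer exercised above groups spans by resource and scope before export. A minimal usage sketch, not part of the commit: it assumes the constructors return pointer types named trace.ResourceInfo and trace.ScopeInfo, which is an inference from how the tests use them.

package trace_test

import (
	"github.com/pomerium/pomerium/internal/telemetry/trace"
	tracev1 "go.opentelemetry.io/proto/otlp/trace/v1"
)

// bufferSketch inserts a span under a resource+scope pair and drains the
// buffer; Flush returns the spans grouped by resource and scope.
func bufferSketch(res *trace.ResourceInfo, scope *trace.ScopeInfo, span *tracev1.Span) []*tracev1.ResourceSpans {
	b := trace.NewBuffer()
	b.Insert(res, scope, span)
	return b.Flush()
}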
28  internal/telemetry/trace/carriers.go  Normal file
@ -0,0 +1,28 @@
package trace

import (
	"net/url"

	"go.opentelemetry.io/otel/propagation"
)

type PomeriumURLQueryCarrier url.Values

// Get implements propagation.TextMapCarrier.
func (q PomeriumURLQueryCarrier) Get(key string) string {
	return url.Values(q).Get("pomerium_" + key)
}

// Set implements propagation.TextMapCarrier.
func (q PomeriumURLQueryCarrier) Set(key string, value string) {
	url.Values(q).Set("pomerium_"+key, value)
}

// Keys implements propagation.TextMapCarrier.
func (q PomeriumURLQueryCarrier) Keys() []string {
	// this function is never called in otel, and the way it would be
	// implemented in this instance is unclear.
	panic("unimplemented")
}

var _ propagation.TextMapCarrier = PomeriumURLQueryCarrier{}
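A minimal sketch of how a carrier like this could be paired with an OpenTelemetry propagator to write trace context into prefixed URL query parameters. The propagator choice (the W3C TraceContext propagator) is illustrative and not taken from this commit.

package trace_test

import (
	"context"
	"net/url"

	"github.com/pomerium/pomerium/internal/telemetry/trace"
	"go.opentelemetry.io/otel/propagation"
)

// carrierSketch injects the span context from ctx into the query values;
// the carrier prefixes each key, producing e.g. "pomerium_traceparent".
func carrierSketch(ctx context.Context) url.Values {
	q := url.Values{}
	carrier := trace.PomeriumURLQueryCarrier(q)
	propagation.TraceContext{}.Inject(ctx, carrier)
	return q
}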
30  internal/telemetry/trace/carriers_test.go  Normal file
@ -0,0 +1,30 @@
package trace_test

import (
	"net/url"
	"testing"

	"github.com/pomerium/pomerium/internal/telemetry/trace"
	"github.com/stretchr/testify/assert"
)

func TestPomeriumURLQueryCarrier(t *testing.T) {
	t.Parallel()
	values := url.Values{}
	carrier := trace.PomeriumURLQueryCarrier(values)
	assert.Empty(t, carrier.Get("foo"))
	carrier.Set("foo", "bar")
	assert.Equal(t, url.Values{
		"pomerium_foo": []string{"bar"},
	}, values)
	assert.Equal(t, "bar", carrier.Get("foo"))
	carrier.Set("foo", "bar2")
	assert.Equal(t, url.Values{
		"pomerium_foo": []string{"bar2"},
	}, values)
	assert.Equal(t, "bar2", carrier.Get("foo"))

	assert.Panics(t, func() {
		carrier.Keys()
	})
}
227  internal/telemetry/trace/client.go  Normal file
@ -0,0 +1,227 @@
package trace

import (
	"context"
	"errors"
	"fmt"
	"net/url"
	"os"
	"strings"
	"sync"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
	v1 "go.opentelemetry.io/proto/otlp/trace/v1"
)

var (
	ErrNoClient      = errors.New("no client")
	ErrClientStopped = errors.New("client is stopped")
)

type SyncClient interface {
	otlptrace.Client

	Update(ctx context.Context, newClient otlptrace.Client) error
}

func NewSyncClient(client otlptrace.Client) SyncClient {
	return &syncClient{
		client: client,
	}
}

type syncClient struct {
	mu               sync.Mutex
	client           otlptrace.Client
	waitForNewClient chan struct{}
}

var _ SyncClient = (*syncClient)(nil)

// Start implements otlptrace.Client.
func (ac *syncClient) Start(ctx context.Context) error {
	ac.mu.Lock()
	defer ac.mu.Unlock()
	if ac.waitForNewClient != nil {
		panic("bug: Start called during Stop or Update")
	}
	if ac.client == nil {
		return ErrNoClient
	}
	return ac.client.Start(ctx)
}

// Stop implements otlptrace.Client.
func (ac *syncClient) Stop(ctx context.Context) error {
	ac.mu.Lock()
	defer ac.mu.Unlock()
	if ac.waitForNewClient != nil {
		panic("bug: Stop called concurrently")
	}
	return ac.resetLocked(ctx, nil)
}

func (ac *syncClient) resetLocked(ctx context.Context, newClient otlptrace.Client) error {
	if ac.client == nil {
		return ErrNoClient
	}
	ac.waitForNewClient = make(chan struct{})
	ac.mu.Unlock()

	err := ac.client.Stop(ctx)

	ac.mu.Lock()
	close(ac.waitForNewClient)
	ac.waitForNewClient = nil
	ac.client = newClient
	return err
}

// UploadTraces implements otlptrace.Client.
func (ac *syncClient) UploadTraces(ctx context.Context, protoSpans []*v1.ResourceSpans) error {
	ac.mu.Lock()
	if ac.waitForNewClient != nil {
		wait := ac.waitForNewClient
		ac.mu.Unlock()
		select {
		case <-wait:
			ac.mu.Lock()
		case <-ctx.Done():
			return context.Cause(ctx)
		}
	} else if ac.client == nil {
		ac.mu.Unlock()
		return ErrNoClient
	}
	client := ac.client
	ac.mu.Unlock()
	if client == nil {
		return ErrClientStopped
	}
	return client.UploadTraces(ctx, protoSpans)
}

func (ac *syncClient) Update(ctx context.Context, newClient otlptrace.Client) error {
	if newClient != nil {
		if err := newClient.Start(ctx); err != nil {
			return fmt.Errorf("error starting new client: %w", err)
		}
	}
	ac.mu.Lock()
	defer ac.mu.Unlock()
	if ac.waitForNewClient != nil {
		panic("bug: Update called during Stop")
	}
	if newClient == ac.client {
		return nil
	}
	return ac.resetLocked(ctx, newClient)
}

// NewRemoteClientFromEnv creates an otlp trace client using the well-known
// environment variables defined in the [OpenTelemetry documentation].
//
// [OpenTelemetry documentation]: https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/
func NewRemoteClientFromEnv() otlptrace.Client {
	if os.Getenv("OTEL_SDK_DISABLED") == "true" {
		return NoopClient{}
	}

	exporter, ok := os.LookupEnv("OTEL_TRACES_EXPORTER")
	if !ok {
		exporter = "none"
	}

	switch strings.ToLower(strings.TrimSpace(exporter)) {
	case "none", "noop", "":
		return NoopClient{}
	case "otlp":
		var protocol string
		if v, ok := os.LookupEnv("OTEL_EXPORTER_OTLP_TRACES_PROTOCOL"); ok {
			protocol = v
		} else if v, ok := os.LookupEnv("OTEL_EXPORTER_OTLP_PROTOCOL"); ok {
			protocol = v
		} else {
			// try to guess the expected protocol from the port number
			var endpoint string
			var specific bool
			if v, ok := os.LookupEnv("OTEL_EXPORTER_OTLP_TRACES_ENDPOINT"); ok {
				endpoint = v
				specific = true
			} else if v, ok := os.LookupEnv("OTEL_EXPORTER_OTLP_ENDPOINT"); ok {
				endpoint = v
			}
			protocol = BestEffortProtocolFromOTLPEndpoint(endpoint, specific)
		}
		switch strings.ToLower(strings.TrimSpace(protocol)) {
		case "grpc":
			return otlptracegrpc.NewClient()
		case "http/protobuf", "":
			return otlptracehttp.NewClient()
		default:
			otel.Handle(fmt.Errorf(`unknown otlp trace exporter protocol %q, expected "grpc" or "http/protobuf"`, protocol))
			return NoopClient{}
		}
	default:
		otel.Handle(fmt.Errorf(`unknown otlp trace exporter %q, expected "otlp" or "none"`, exporter))
		return NoopClient{}
	}
}

func BestEffortProtocolFromOTLPEndpoint(endpoint string, specificEnv bool) string {
	if endpoint == "" {
		return ""
	}
	u, err := url.Parse(endpoint)
	if err != nil {
		return ""
	}
	switch u.Port() {
	case "4318":
		return "http/protobuf"
	case "4317":
		return "grpc"
	default:
		// For http, if the signal-specific form of the endpoint env variable
		// (e.g. $OTEL_EXPORTER_OTLP_TRACES_ENDPOINT) is used, the /v1/<signal>
		//                              ^^^^^^
		// path must be present. Otherwise, the path must _not_ be present,
		// because the sdk will add it.
		// This doesn't apply to grpc endpoints, so assume grpc if there is a
		// conflict here.
		hasPath := len(strings.Trim(u.Path, "/")) > 0
		switch {
		case hasPath && specificEnv:
			return "http/protobuf"
		case !hasPath && specificEnv:
			return "grpc"
		case hasPath && !specificEnv:
			// would be invalid for http, so assume it's grpc on a subpath
			return "grpc"
		case !hasPath && !specificEnv:
			// could be either, but default to http
			return "http/protobuf"
		}
		panic("unreachable")
	}
}

type NoopClient struct{}

// Start implements otlptrace.Client.
func (n NoopClient) Start(context.Context) error {
	return nil
}

// Stop implements otlptrace.Client.
func (n NoopClient) Stop(context.Context) error {
	return nil
}

// UploadTraces implements otlptrace.Client.
func (n NoopClient) UploadTraces(context.Context, []*v1.ResourceSpans) error {
	return nil
}
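A hedged sketch of the intended wiring: build a client from the environment, wrap it in a SyncClient so it can be swapped at runtime, and later replace it without failing concurrent UploadTraces calls. Swapping in otlptracehttp.NewClient() here is an illustrative assumption, not something this commit does.

package trace_test

import (
	"context"

	"github.com/pomerium/pomerium/internal/telemetry/trace"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
)

// syncClientSketch shows the Start -> Update -> Stop lifecycle; Start, Stop,
// and UploadTraces return ErrNoClient only if the SyncClient was built with nil.
func syncClientSketch(ctx context.Context) error {
	sc := trace.NewSyncClient(trace.NewRemoteClientFromEnv())
	if err := sc.Start(ctx); err != nil {
		return err
	}
	// e.g. after a configuration change, swap in a different exporter client;
	// Update starts the new client, then stops and replaces the old one.
	if err := sc.Update(ctx, otlptracehttp.NewClient()); err != nil {
		return err
	}
	return sc.Stop(ctx)
}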
468  internal/telemetry/trace/client_test.go  Normal file
@ -0,0 +1,468 @@
package trace_test

import (
	"context"
	"fmt"
	"regexp"
	"runtime"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"github.com/pomerium/pomerium/internal/log"
	"github.com/pomerium/pomerium/internal/telemetry/trace"
	"github.com/pomerium/pomerium/internal/telemetry/trace/mock_otlptrace"
	"github.com/pomerium/pomerium/internal/testenv"
	"github.com/pomerium/pomerium/internal/testenv/scenarios"
	"github.com/pomerium/pomerium/internal/testenv/snippets"
	"github.com/pomerium/pomerium/internal/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.opentelemetry.io/otel"
	tracev1 "go.opentelemetry.io/proto/otlp/trace/v1"
	"go.uber.org/mock/gomock"
)

func TestSyncClient(t *testing.T) {
	t.Run("No client", func(t *testing.T) {
		sc := trace.NewSyncClient(nil)
		assert.ErrorIs(t, sc.Start(context.Background()), trace.ErrNoClient)
		assert.ErrorIs(t, sc.UploadTraces(context.Background(), nil), trace.ErrNoClient)
		assert.ErrorIs(t, sc.Stop(context.Background()), trace.ErrNoClient)
	})

	t.Run("Valid client", func(t *testing.T) {
		ctrl := gomock.NewController(t)
		mockClient := mock_otlptrace.NewMockClient(ctrl)
		start := mockClient.EXPECT().
			Start(gomock.Any()).
			Return(nil)
		upload := mockClient.EXPECT().
			UploadTraces(gomock.Any(), gomock.Any()).
			Return(nil).
			After(start)
		mockClient.EXPECT().
			Stop(gomock.Any()).
			Return(nil).
			After(upload)
		sc := trace.NewSyncClient(mockClient)
		assert.NoError(t, sc.Start(context.Background()))
		assert.NoError(t, sc.UploadTraces(context.Background(), []*tracev1.ResourceSpans{}))
		assert.NoError(t, sc.Stop(context.Background()))
	})
	t.Run("Update", func(t *testing.T) {
		ctrl := gomock.NewController(t)
		mockClient1 := mock_otlptrace.NewMockClient(ctrl)
		mockClient2 := mock_otlptrace.NewMockClient(ctrl)

		start1 := mockClient1.EXPECT().
			Start(gomock.Any()).
			Return(nil)
		upload1 := mockClient1.EXPECT().
			UploadTraces(gomock.Any(), gomock.Any()).
			Return(nil).
			After(start1)
		start2 := mockClient2.EXPECT().
			Start(gomock.Any()).
			Return(nil).
			After(upload1)
		stop1 := mockClient1.EXPECT().
			Stop(gomock.Any()).
			Return(nil).
			After(start2)
		upload2 := mockClient2.EXPECT().
			UploadTraces(gomock.Any(), gomock.Any()).
			Return(nil).
			After(stop1)
		mockClient2.EXPECT().
			Stop(gomock.Any()).
			Return(nil).
			After(upload2)
		sc := trace.NewSyncClient(mockClient1)
		assert.NoError(t, sc.Start(context.Background()))
		assert.NoError(t, sc.UploadTraces(context.Background(), []*tracev1.ResourceSpans{}))
		assert.NoError(t, sc.Update(context.Background(), mockClient2))
		assert.NoError(t, sc.UploadTraces(context.Background(), []*tracev1.ResourceSpans{}))
		assert.NoError(t, sc.Stop(context.Background()))
	})

	spinWait := func(counter *atomic.Int32, until int32) error {
		startTime := time.Now()
		for counter.Load() != until {
			if time.Since(startTime) > 1*time.Second {
				return fmt.Errorf("timed out waiting for counter to equal %d", until)
			}
		}
		return nil
	}

	t.Run("Concurrent UploadTraces", func(t *testing.T) {
		ctrl := gomock.NewController(t)
		mockClient1 := mock_otlptrace.NewMockClient(ctrl)
		count := atomic.Int32{}
		unlock := make(chan struct{})
		concurrency := min(runtime.NumCPU(), 4)
		mockClient1.EXPECT().
			UploadTraces(gomock.Any(), gomock.Any()).
			DoAndReturn(func(context.Context, []*tracev1.ResourceSpans) error {
				count.Add(1)
				defer count.Add(-1)
				<-unlock
				return nil
			}).
			Times(concurrency)
		sc := trace.NewSyncClient(mockClient1)
		start := make(chan struct{})
		for range concurrency {
			go func() {
				runtime.LockOSThread()
				defer runtime.UnlockOSThread()
				<-start
				require.NoError(t, sc.UploadTraces(context.Background(), []*tracev1.ResourceSpans{}))
			}()
		}

		runtime.LockOSThread()
		defer runtime.UnlockOSThread()
		close(start)
		assert.NoError(t, spinWait(&count, int32(concurrency)))
	})

	t.Run("Concurrent Update/UploadTraces", func(t *testing.T) {
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()

		ctrl := gomock.NewController(t)
		mockClient1 := mock_otlptrace.NewMockClient(ctrl)
		mockClient2 := mock_otlptrace.NewMockClient(ctrl)
		uploadTracesCount1 := atomic.Int32{}
		uploadTracesCount2 := atomic.Int32{}
		unlock1 := make(chan struct{})
		unlock2 := make(chan struct{})
		waitForStop := make(chan struct{})
		concurrency := min(runtime.NumCPU(), 4)

		// start 1 -> upload 1 -> start 2 -> stop 1 -> upload 2 -> stop 2
		fStart1 := mockClient1.EXPECT().
			Start(gomock.Any()).
			Return(nil)
		fUpload1 := mockClient1.EXPECT().
			UploadTraces(gomock.Any(), gomock.Any()).
			DoAndReturn(func(context.Context, []*tracev1.ResourceSpans) error {
				// called from non-test threads
				uploadTracesCount1.Add(1)
				defer uploadTracesCount1.Add(-1)
				<-unlock1
				return nil
			}).
			Times(concurrency).
			After(fStart1)
		fStart2 := mockClient2.EXPECT().
			Start(gomock.Any()).
			Return(nil).
			After(fUpload1)
		fStop1 := mockClient1.EXPECT().
			Stop(gomock.Any()).
			DoAndReturn(func(context.Context) error {
				// called from test thread
				close(unlock1)
				assert.NoError(t, spinWait(&uploadTracesCount1, 0))
				return nil
			}).
			After(fStart2)
		fUpload2 := mockClient2.EXPECT().
			UploadTraces(gomock.Any(), gomock.Any()).
			DoAndReturn(func(context.Context, []*tracev1.ResourceSpans) error {
				// called from non-test threads
				uploadTracesCount2.Add(1)
				defer uploadTracesCount2.Add(-1)
				<-unlock2
				return nil
			}).
			Times(concurrency).
			After(fStop1)
		mockClient2.EXPECT().
			Stop(gomock.Any()).
			DoAndReturn(func(context.Context) error {
				// called from test thread
				close(unlock2)
				assert.NoError(t, spinWait(&uploadTracesCount2, 0))
				close(waitForStop)
				// no way around sleeping here - we have to give the other threads time
				// to call UploadTraces and block waiting on waitForNewClient to be
				// closed, which happens after this function returns
				time.Sleep(10 * time.Millisecond)
				return nil
			}).
			After(fUpload2)
		sc := trace.NewSyncClient(mockClient1)
		require.NoError(t, sc.Start(context.Background()))

		for range concurrency {
			go func() {
				require.NoError(t, sc.UploadTraces(context.Background(), []*tracev1.ResourceSpans{}))
			}()
		}
		require.NoError(t, spinWait(&uploadTracesCount1, int32(concurrency)))
		// at this point, all calls to UploadTraces for client1 are blocked

		for range concurrency {
			go func() {
				<-unlock1 // wait for client1.Stop
				// after this, calls to UploadTraces will block waiting for the
				// new client, instead of using the old one we're about to close
				require.NoError(t, sc.UploadTraces(context.Background(), []*tracev1.ResourceSpans{}))
			}()
		}
		require.NoError(t, sc.Update(context.Background(), mockClient2))
		require.NoError(t, spinWait(&uploadTracesCount2, int32(concurrency)))
		// at this point, all calls to UploadTraces for client2 are blocked.

		// while SyncClient is waiting for the underlying client to stop during
		// sc.Stop(), *new* calls to sc.UploadTraces will wait for it to stop, then
		// error with trace.ErrClientStopped, but the previous calls blocked in
		// client2 will complete without error.
		for range concurrency {
			go func() {
				<-waitForStop
				assert.ErrorIs(t, sc.UploadTraces(context.Background(), []*tracev1.ResourceSpans{}), trace.ErrClientStopped)
			}()
		}
		assert.NoError(t, sc.Stop(context.Background()))

		// sanity checks
		assert.ErrorIs(t, sc.UploadTraces(context.Background(), []*tracev1.ResourceSpans{}), trace.ErrNoClient)
		assert.ErrorIs(t, sc.Start(context.Background()), trace.ErrNoClient)
		assert.ErrorIs(t, sc.Stop(context.Background()), trace.ErrNoClient)
		assert.NoError(t, sc.Update(context.Background(), nil))
	})
}

type errHandler struct {
	err error
}

var _ otel.ErrorHandler = (*errHandler)(nil)

func (h *errHandler) Handle(err error) {
	h.err = err
}

func TestNewRemoteClientFromEnv(t *testing.T) {
	env := testenv.New(t, testenv.WithTraceDebugFlags(testenv.StandardTraceDebugFlags))

	receiver := scenarios.NewOTLPTraceReceiver()
	env.Add(receiver)

	grpcEndpoint := receiver.GRPCEndpointURL()
	httpEndpoint := receiver.HTTPEndpointURL()

	env.Start()
	snippets.WaitStartupComplete(env)

	for _, tc := range []struct {
		name          string
		env           map[string]string
		newClientErr  string
		uploadErr     string
		expectNoSpans bool
	}{
		{
			name: "GRPC endpoint, auto protocol",
			env: map[string]string{
				"OTEL_TRACES_EXPORTER":               "otlp",
				"OTEL_EXPORTER_OTLP_TRACES_ENDPOINT": grpcEndpoint.Value(),
			},
		},
		{
			name: "GRPC endpoint, alternate env, auto protocol",
			env: map[string]string{
				"OTEL_TRACES_EXPORTER":        "otlp",
				"OTEL_EXPORTER_OTLP_ENDPOINT": grpcEndpoint.Value(),
			},
			uploadErr: "net/http: HTTP/1.x transport connection broken",
		},
		{
			name: "HTTP endpoint, auto protocol",
			env: map[string]string{
				"OTEL_TRACES_EXPORTER":               "otlp",
				"OTEL_EXPORTER_OTLP_TRACES_ENDPOINT": httpEndpoint.Value(),
			},
		},
		{
			name: "HTTP endpoint, alternate env, auto protocol",
			env: map[string]string{
				"OTEL_TRACES_EXPORTER":        "otlp",
				"OTEL_EXPORTER_OTLP_ENDPOINT": strings.TrimSuffix(httpEndpoint.Value(), "/v1/traces"), // path is added automatically by the sdk here
			},
		},
		{
			name: "GRPC endpoint, explicit protocol",
			env: map[string]string{
				"OTEL_TRACES_EXPORTER":               "otlp",
				"OTEL_EXPORTER_OTLP_TRACES_ENDPOINT": grpcEndpoint.Value(),
				"OTEL_EXPORTER_OTLP_TRACES_PROTOCOL": "grpc",
			},
		},
		{
			name: "HTTP endpoint, explicit protocol",
			env: map[string]string{
				"OTEL_TRACES_EXPORTER":               "otlp",
				"OTEL_EXPORTER_OTLP_TRACES_ENDPOINT": httpEndpoint.Value(),
				"OTEL_EXPORTER_OTLP_TRACES_PROTOCOL": "http/protobuf",
			},
		},
		{
			name: "no exporter",
			env: map[string]string{
				"OTEL_TRACES_EXPORTER":               "",
				"OTEL_EXPORTER_OTLP_TRACES_ENDPOINT": httpEndpoint.Value(),
				"OTEL_EXPORTER_OTLP_TRACES_PROTOCOL": "http/protobuf",
			},
			expectNoSpans: true,
		},
		{
			name: "no exporter",
			env: map[string]string{
				"OTEL_TRACES_EXPORTER":               "noop",
				"OTEL_EXPORTER_OTLP_TRACES_ENDPOINT": httpEndpoint.Value(),
				"OTEL_EXPORTER_OTLP_TRACES_PROTOCOL": "http/protobuf",
			},
			expectNoSpans: true,
		},
		{
			name: "no exporter",
			env: map[string]string{
				"OTEL_TRACES_EXPORTER":               "none",
				"OTEL_EXPORTER_OTLP_TRACES_ENDPOINT": httpEndpoint.Value(),
				"OTEL_EXPORTER_OTLP_TRACES_PROTOCOL": "http/protobuf",
			},
			expectNoSpans: true,
		},
		{
			name: "invalid exporter",
			env: map[string]string{
				"OTEL_TRACES_EXPORTER": "invalid",
			},
			newClientErr: `unknown otlp trace exporter "invalid", expected "otlp" or "none"`,
		},
		{
			name: "invalid protocol",
			env: map[string]string{
				"OTEL_TRACES_EXPORTER":               "otlp",
				"OTEL_EXPORTER_OTLP_TRACES_ENDPOINT": grpcEndpoint.Value(),
				"OTEL_EXPORTER_OTLP_TRACES_PROTOCOL": "invalid",
			},
			newClientErr: `unknown otlp trace exporter protocol "invalid", expected "grpc" or "http/protobuf"`,
		},
		{
			name: "valid configuration, but sdk disabled",
			env: map[string]string{
				"OTEL_TRACES_EXPORTER":               "otlp",
				"OTEL_EXPORTER_OTLP_TRACES_ENDPOINT": grpcEndpoint.Value(),
				"OTEL_EXPORTER_OTLP_TRACES_PROTOCOL": "grpc",
				"OTEL_SDK_DISABLED":                  "true",
			},
			expectNoSpans: true,
		},
		{
			name: "valid configuration, wrong value for sdk disabled env",
			env: map[string]string{
				"OTEL_TRACES_EXPORTER":               "otlp",
				"OTEL_EXPORTER_OTLP_TRACES_ENDPOINT": grpcEndpoint.Value(),
				"OTEL_EXPORTER_OTLP_TRACES_PROTOCOL": "grpc",
				"OTEL_SDK_DISABLED":                  "1", // only "true" works according to the spec
			},
		},
		{
			name: "endpoint variable precedence",
			env: map[string]string{
				"OTEL_TRACES_EXPORTER":               "otlp",
				"OTEL_EXPORTER_OTLP_ENDPOINT":        "invalid",
				"OTEL_EXPORTER_OTLP_TRACES_ENDPOINT": grpcEndpoint.Value(), // should take precedence
				"OTEL_EXPORTER_OTLP_PROTOCOL":        "grpc",
			},
		},
		{
			name: "protocol variable precedence",
			env: map[string]string{
				"OTEL_TRACES_EXPORTER":               "otlp",
				"OTEL_EXPORTER_OTLP_PROTOCOL":        "invalid",
				"OTEL_EXPORTER_OTLP_TRACES_PROTOCOL": "grpc", // should take precedence
				"OTEL_EXPORTER_OTLP_ENDPOINT":        grpcEndpoint.Value(),
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			for k, v := range tc.env {
				t.Setenv(k, v)
			}
			handler := &errHandler{}
			oldErrHandler := otel.GetErrorHandler()
			otel.SetErrorHandler(handler)
			t.Cleanup(func() { otel.SetErrorHandler(oldErrHandler) })

			if tc.uploadErr != "" {
				recorder := env.NewLogRecorder(testenv.WithSkipCloseDelay())
				defer func() {
					recorder.Match([]map[string]any{
						{
							"level":   "error",
							"error":   regexp.MustCompile(`^Post "[^"]+": net/http: HTTP/1.x transport connection broken: malformed HTTP response.*$`),
							"message": "error uploading traces",
						},
					})
				}()
			}

			remoteClient := trace.NewRemoteClientFromEnv()
			ctx := trace.Options{
				RemoteClient: remoteClient,
			}.NewContext(log.Ctx(env.Context()).WithContext(context.Background()))

			if tc.newClientErr != "" {
				assert.ErrorContains(t, handler.err, tc.newClientErr)
				return
			}

			tp := trace.NewTracerProvider(ctx, t.Name())

			_, span := tp.Tracer(trace.PomeriumCoreTracer).Start(ctx, "test span")
			span.End()

			assert.NoError(t, trace.ForceFlush(ctx))
			assert.NoError(t, trace.ShutdownContext(ctx))

			if tc.uploadErr != "" {
				return
			}

			results := testutil.NewTraceResults(receiver.FlushResourceSpans())
			if tc.expectNoSpans {
				results.MatchTraces(t, testutil.MatchOptions{Exact: true})
			} else {
				results.MatchTraces(t, testutil.MatchOptions{
					Exact: true,
				}, testutil.Match{Name: t.Name() + ": test span", TraceCount: 1, Services: []string{t.Name()}})
			}
		})
	}
}

func TestBestEffortProtocolFromOTLPEndpoint(t *testing.T) {
	t.Run("Well-known port numbers", func(t *testing.T) {
		assert.Equal(t, "grpc", trace.BestEffortProtocolFromOTLPEndpoint("http://127.0.0.1:4317", true))
		assert.Equal(t, "http/protobuf", trace.BestEffortProtocolFromOTLPEndpoint("http://127.0.0.1:4318", true))
	})
	t.Run("path presence", func(t *testing.T) {
		assert.Equal(t, "http/protobuf", trace.BestEffortProtocolFromOTLPEndpoint("http://127.0.0.1:12345", false))
		assert.Equal(t, "grpc", trace.BestEffortProtocolFromOTLPEndpoint("http://127.0.0.1:12345", true))
		assert.Equal(t, "grpc", trace.BestEffortProtocolFromOTLPEndpoint("http://127.0.0.1:12345/v1/traces", false))
		assert.Equal(t, "http/protobuf", trace.BestEffortProtocolFromOTLPEndpoint("http://127.0.0.1:12345/v1/traces", true))
	})
	t.Run("invalid inputs", func(t *testing.T) {
		assert.Equal(t, "", trace.BestEffortProtocolFromOTLPEndpoint("", false))
		assert.Equal(t, "", trace.BestEffortProtocolFromOTLPEndpoint("http://\x7f", false))
	})
}
@ -1,34 +0,0 @@
package trace

import (
	datadog "github.com/DataDog/opencensus-go-exporter-datadog"
	octrace "go.opencensus.io/trace"
)

type datadogProvider struct {
	exporter *datadog.Exporter
}

func (provider *datadogProvider) Register(opts *TracingOptions) error {
	dOpts := datadog.Options{
		Service:   opts.Service,
		TraceAddr: opts.DatadogAddress,
	}
	dex, err := datadog.NewExporter(dOpts)
	if err != nil {
		return err
	}
	octrace.RegisterExporter(dex)
	provider.exporter = dex
	return nil
}

func (provider *datadogProvider) Unregister() error {
	if provider.exporter == nil {
		return nil
	}
	octrace.UnregisterExporter(provider.exporter)
	provider.exporter.Stop()
	provider.exporter = nil
	return nil
}
119  internal/telemetry/trace/debug.go  Normal file
@ -0,0 +1,119 @@
package trace

import (
	"context"
	"fmt"
	"io"
	"os"
	"runtime"
	"strings"

	"go.opentelemetry.io/otel/attribute"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

type DebugFlags uint32

const (
	// If set, adds the "caller" attribute to each trace with the source location
	// where the trace was started.
	TrackSpanCallers = (1 << iota)

	// If set, keeps track of all span references and will attempt to wait for
	// all traces to complete when shutting down a trace context.
	// Use with caution, this will cause increasing memory usage over time.
	TrackSpanReferences = (1 << iota)

	// If set, keeps track of all observed spans, including span context and
	// all attributes.
	// Use with caution, this will cause significantly increasing memory usage
	// over time.
	TrackAllSpans = (1 << iota) | TrackSpanCallers

	// If set, will log all trace ID mappings on close.
	LogTraceIDMappings = (1 << iota)

	// If set, will log all spans observed by the exporter on close. These spans
	// may belong to incomplete traces.
	//
	// Enables [TrackAllSpans]
	LogAllSpans = (1 << iota) | TrackAllSpans

	// If set, will log the raw json payloads and timestamps of export requests
	// on close.
	// Use with caution, this will cause significantly increasing memory usage
	// over time.
	LogAllEvents = (1 << iota)

	// If set, will log all exported spans when a warning is issued on close
	// (requires warning flags to also be set)
	//
	// Enables [TrackAllSpans]
	LogAllSpansOnWarn = (1 << iota) | TrackAllSpans

	// If set, will log all trace ID mappings when a warning is issued on close.
	// (requires warning flags to also be set)
	LogTraceIDMappingsOnWarn = (1 << iota)

	// If set, will print a warning to stderr on close if there are any incomplete
	// traces (traces with no observed root spans)
	WarnOnIncompleteTraces = (1 << iota)

	// If set, will print a warning to stderr on close if there are any incomplete
	// spans (spans started, but not ended)
	WarnOnIncompleteSpans = (1 << iota)

	// If set, will print a warning to stderr on close if there are any spans
	// which reference unknown parent spans.
	//
	// Enables [TrackSpanReferences]
	WarnOnUnresolvedReferences = (1 << iota) | TrackSpanReferences

	// If set, configures Envoy to flush every span individually, disabling its
	// internal buffer.
	EnvoyFlushEverySpan = (1 << iota)
)

func (df DebugFlags) Check(flags DebugFlags) bool {
	return (df & flags) == flags
}

type stackTraceProcessor struct{}

// ForceFlush implements trace.SpanProcessor.
func (s *stackTraceProcessor) ForceFlush(context.Context) error {
	return nil
}

// OnEnd implements trace.SpanProcessor.
func (*stackTraceProcessor) OnEnd(sdktrace.ReadOnlySpan) {
}

// OnStart implements trace.SpanProcessor.
func (*stackTraceProcessor) OnStart(_ context.Context, s sdktrace.ReadWriteSpan) {
	_, file, line, _ := runtime.Caller(2)
	s.SetAttributes(attribute.String("caller", fmt.Sprintf("%s:%d", file, line)))
}

// Shutdown implements trace.SpanProcessor.
func (s *stackTraceProcessor) Shutdown(context.Context) error {
	return nil
}

var debugMessageWriter io.Writer

func startMsg(title string) *strings.Builder {
	msg := &strings.Builder{}
	msg.WriteString("\n==================================================\n")
	msg.WriteString(title)
	return msg
}

func endMsg(msg *strings.Builder) {
	msg.WriteString("==================================================\n")
	w := debugMessageWriter
	if w == nil {
		w = os.Stderr
	}
	fmt.Fprint(w, msg.String())
}
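The debug flags form a bitmask, and several values embed their prerequisites (LogAllSpans implies TrackAllSpans, which in turn implies TrackSpanCallers). A small illustrative sketch of the Check semantics, not code from this commit:

package trace_test

import "github.com/pomerium/pomerium/internal/telemetry/trace"

// debugFlagsSketch demonstrates that Check passes for any flag that is set
// directly or implied by another set flag.
func debugFlagsSketch() (bool, bool, bool) {
	var flags trace.DebugFlags = trace.LogAllSpans | trace.WarnOnIncompleteSpans
	return flags.Check(trace.TrackAllSpans), // true: implied by LogAllSpans
		flags.Check(trace.TrackSpanCallers), // true: implied by TrackAllSpans
		flags.Check(trace.TrackSpanReferences) // false: neither set nor implied
}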
@ -1,2 +0,0 @@
// Package trace contains support for OpenCensus distributed tracing.
package trace
46  internal/telemetry/trace/global.go  Normal file
@ -0,0 +1,46 @@
package trace

import (
	"context"

	"go.opentelemetry.io/contrib/propagators/autoprop"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/embedded"
)

// PomeriumCoreTracer should be used for all tracers created in pomerium core.
const PomeriumCoreTracer = "pomerium.io/core"

func init() {
	otel.SetTextMapPropagator(autoprop.NewTextMapPropagator())
}

// UseGlobalPanicTracer sets the global tracer provider to one whose tracers
// panic when starting spans. This can be used to locate errant usages of the
// global tracer, and is enabled automatically in some tests. It is otherwise
// not used by default, since pomerium is used as a library in some places that
// might use the global tracer provider.
func UseGlobalPanicTracer() {
	otel.SetTracerProvider(panicTracerProvider{})
}

type panicTracerProvider struct {
	embedded.TracerProvider
}

// Tracer implements trace.TracerProvider.
func (w panicTracerProvider) Tracer(string, ...trace.TracerOption) trace.Tracer {
	return panicTracer{}
}

type panicTracer struct {
	embedded.Tracer
}

var _ trace.Tracer = panicTracer{}

// Start implements trace.Tracer.
func (p panicTracer) Start(context.Context, string, ...trace.SpanStartOption) (context.Context, trace.Span) {
	panic("global tracer used")
}
22  internal/telemetry/trace/global_test.go  Normal file
@ -0,0 +1,22 @@
package trace_test

import (
	"context"
	"testing"

	"github.com/pomerium/pomerium/internal/telemetry/trace"
	"github.com/stretchr/testify/assert"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace/noop"
)

func TestUseGlobalPanicTracer(t *testing.T) {
	t.Cleanup(func() {
		otel.SetTracerProvider(noop.NewTracerProvider())
	})
	trace.UseGlobalPanicTracer()
	tracer := otel.GetTracerProvider().Tracer("test")
	assert.Panics(t, func() {
		tracer.Start(context.Background(), "span")
	})
}
@ -1,37 +0,0 @@
package trace

import (
	"contrib.go.opencensus.io/exporter/jaeger"
	octrace "go.opencensus.io/trace"
)

type jaegerProvider struct {
	exporter *jaeger.Exporter
}

func (provider *jaegerProvider) Register(opts *TracingOptions) error {
	jOpts := jaeger.Options{
		ServiceName:   opts.Service,
		AgentEndpoint: opts.JaegerAgentEndpoint,
	}
	if opts.JaegerCollectorEndpoint != nil {
		jOpts.CollectorEndpoint = opts.JaegerCollectorEndpoint.String()
	}
	jex, err := jaeger.NewExporter(jOpts)
	if err != nil {
		return err
	}
	octrace.RegisterExporter(jex)
	provider.exporter = jex
	return nil
}

func (provider *jaegerProvider) Unregister() error {
	if provider.exporter == nil {
		return nil
	}
	octrace.UnregisterExporter(provider.exporter)
	provider.exporter.Flush()
	provider.exporter = nil
	return nil
}
13  internal/telemetry/trace/main_test.go  Normal file
@ -0,0 +1,13 @@
package trace_test

import (
	"os"
	"testing"

	"github.com/pomerium/pomerium/internal/telemetry/trace"
)

func TestMain(m *testing.M) {
	trace.UseGlobalPanicTracer()
	os.Exit(m.Run())
}
162  internal/telemetry/trace/middleware.go  Normal file
@ -0,0 +1,162 @@
package trace

import (
	"context"
	"fmt"
	"net/http"
	"reflect"

	"github.com/gorilla/mux"
	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/stats"
)

func NewHTTPMiddleware(opts ...otelhttp.Option) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			routeStr := ""
			route := mux.CurrentRoute(r)
			if route != nil {
				var err error
				routeStr, err = route.GetPathTemplate()
				if err != nil {
					routeStr, err = route.GetPathRegexp()
					if err != nil {
						routeStr = ""
					}
				}
			}
			traceparent := r.Header.Get("Traceparent")
			if traceparent != "" {
				xPomeriumTraceparent := r.Header.Get("X-Pomerium-Traceparent")
				if xPomeriumTraceparent != "" {
					sc, err := ParseTraceparent(xPomeriumTraceparent)
					if err == nil {
						r.Header.Set("Traceparent", WithTraceFromSpanContext(traceparent, sc))
						ctx := otel.GetTextMapPropagator().Extract(r.Context(), propagation.HeaderCarrier(r.Header))
						r = r.WithContext(ctx)
					}
				}
			}
			otelhttp.NewHandler(next, fmt.Sprintf("Server: %s %s", r.Method, routeStr), opts...).ServeHTTP(w, r)
		})
	}
}

func NewServerStatsHandler(base stats.Handler) stats.Handler {
	return &serverStatsHandlerWrapper{
		base: base,
	}
}

type serverStatsHandlerWrapper struct {
	base stats.Handler
}

func (w *serverStatsHandlerWrapper) wrapContext(ctx context.Context) context.Context {
	md, ok := metadata.FromIncomingContext(ctx)
	if !ok {
		return ctx
	}
	traceparent := md.Get("traceparent")
	xPomeriumTraceparent := md.Get("x-pomerium-traceparent")
	if len(traceparent) > 0 && traceparent[0] != "" && len(xPomeriumTraceparent) > 0 && xPomeriumTraceparent[0] != "" {
		newTracectx, err := ParseTraceparent(xPomeriumTraceparent[0])
		if err != nil {
			return ctx
		}

		md.Set("traceparent", WithTraceFromSpanContext(traceparent[0], newTracectx))
		return metadata.NewIncomingContext(ctx, md)
	}
	return ctx
}

// HandleConn implements stats.Handler.
func (w *serverStatsHandlerWrapper) HandleConn(ctx context.Context, stats stats.ConnStats) {
	w.base.HandleConn(w.wrapContext(ctx), stats)
}

// HandleRPC implements stats.Handler.
func (w *serverStatsHandlerWrapper) HandleRPC(ctx context.Context, stats stats.RPCStats) {
	w.base.HandleRPC(w.wrapContext(ctx), stats)
}

// TagConn implements stats.Handler.
func (w *serverStatsHandlerWrapper) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
	return w.base.TagConn(w.wrapContext(ctx), info)
}

// TagRPC implements stats.Handler.
func (w *serverStatsHandlerWrapper) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
	return w.base.TagRPC(w.wrapContext(ctx), info)
}

type clientStatsHandlerWrapper struct {
	ClientStatsHandlerOptions
	base stats.Handler
}

type ClientStatsHandlerOptions struct {
	statsInterceptor func(ctx context.Context, rs stats.RPCStats) stats.RPCStats
}

type ClientStatsHandlerOption func(*ClientStatsHandlerOptions)

func (o *ClientStatsHandlerOptions) apply(opts ...ClientStatsHandlerOption) {
	for _, op := range opts {
		op(o)
	}
}

// WithStatsInterceptor calls the given function to modify the rpc stats before
// passing it to the stats handler during HandleRPC events.
//
// The interceptor MUST NOT modify the RPCStats it is given. It should instead
// return a copy of the underlying object with the same type, with any
// modifications made to the copy.
func WithStatsInterceptor(statsInterceptor func(ctx context.Context, rs stats.RPCStats) stats.RPCStats) ClientStatsHandlerOption {
	return func(o *ClientStatsHandlerOptions) {
		o.statsInterceptor = statsInterceptor
	}
}

func NewClientStatsHandler(base stats.Handler, opts ...ClientStatsHandlerOption) stats.Handler {
	options := ClientStatsHandlerOptions{}
	options.apply(opts...)
	return &clientStatsHandlerWrapper{
		ClientStatsHandlerOptions: options,
		base:                      base,
	}
}

// HandleConn implements stats.Handler.
func (w *clientStatsHandlerWrapper) HandleConn(ctx context.Context, stats stats.ConnStats) {
	w.base.HandleConn(ctx, stats)
}

// HandleRPC implements stats.Handler.
func (w *clientStatsHandlerWrapper) HandleRPC(ctx context.Context, stats stats.RPCStats) {
	if w.statsInterceptor != nil {
		modified := w.statsInterceptor(ctx, stats)
		if reflect.TypeOf(stats) != reflect.TypeOf(modified) {
			panic("bug: stats interceptor returned a message of a different type")
		}
		w.base.HandleRPC(ctx, modified)
	} else {
		w.base.HandleRPC(ctx, stats)
	}
}

// TagConn implements stats.Handler.
func (w *clientStatsHandlerWrapper) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
	return w.base.TagConn(ctx, info)
}

// TagRPC implements stats.Handler.
func (w *clientStatsHandlerWrapper) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
	return w.base.TagRPC(ctx, info)
}
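A hedged sketch of where these wrappers are meant to be attached: the HTTP middleware in front of a gorilla/mux router, and the server stats handler wrapping the otelgrpc handler on a gRPC server. The function and variable names here are placeholders for illustration, not code from this commit.

package trace_test

import (
	"github.com/gorilla/mux"
	"github.com/pomerium/pomerium/internal/telemetry/trace"
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	oteltrace "go.opentelemetry.io/otel/trace"
	"google.golang.org/grpc"
)

// middlewareSketch wires both wrappers against whatever TracerProvider the
// caller owns (tp is a placeholder).
func middlewareSketch(tp oteltrace.TracerProvider) (*mux.Router, *grpc.Server) {
	router := mux.NewRouter()
	router.Use(trace.NewHTTPMiddleware(otelhttp.WithTracerProvider(tp)))

	srv := grpc.NewServer(
		grpc.StatsHandler(trace.NewServerStatsHandler(
			otelgrpc.NewServerHandler(otelgrpc.WithTracerProvider(tp)),
		)),
	)
	return router, srv
}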
336  internal/telemetry/trace/middleware_test.go  Normal file
@ -0,0 +1,336 @@
package trace_test

import (
	"context"
	"errors"
	"net"
	"net/http"
	"net/http/httptest"
	"testing"
	"time"

	"github.com/pomerium/pomerium/internal/telemetry/trace"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	oteltrace "go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/noop"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/interop/grpc_testing"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/stats"
	"google.golang.org/grpc/test/bufconn"
)

var cases = []struct {
	name                   string
	setTraceparent         string
	setPomeriumTraceparent string
	check                  func(t testing.TB, ctx context.Context)
}{
	{
		name:           "x-pomerium-traceparent not present",
		setTraceparent: Traceparent(Trace(1), Span(1), true),
		check: func(t testing.TB, ctx context.Context) {
			span := oteltrace.SpanFromContext(ctx)
			assert.Equal(t, Trace(1).ID().Value(), span.SpanContext().TraceID())
			assert.Equal(t, Span(1).ID(), span.SpanContext().SpanID())
			assert.True(t, span.SpanContext().IsSampled())
		},
	},
	{
		name:                   "x-pomerium-traceparent present",
		setTraceparent:         Traceparent(Trace(2), Span(2), true),
		setPomeriumTraceparent: Traceparent(Trace(1), Span(1), true),
		check: func(t testing.TB, ctx context.Context) {
			span := oteltrace.SpanFromContext(ctx)
			assert.Equal(t, Trace(1).ID().Value(), span.SpanContext().TraceID())
			assert.Equal(t, Span(2).ID(), span.SpanContext().SpanID())
			assert.True(t, span.SpanContext().IsSampled())
		},
	},
	{
		name:                   "x-pomerium-traceparent present, force sampling off",
		setTraceparent:         Traceparent(Trace(2), Span(2), true),
		setPomeriumTraceparent: Traceparent(Trace(1), Span(1), false),
		check: func(t testing.TB, ctx context.Context) {
			span := oteltrace.SpanFromContext(ctx)
			assert.Equal(t, Trace(1).ID().Value(), span.SpanContext().TraceID())
			assert.Equal(t, Span(2).ID(), span.SpanContext().SpanID())
			assert.Equal(t, false, span.SpanContext().IsSampled())
		},
	},
	{
		name:                   "x-pomerium-traceparent present, force sampling on",
		setTraceparent:         Traceparent(Trace(2), Span(2), false),
		setPomeriumTraceparent: Traceparent(Trace(1), Span(1), true),
		check: func(t testing.TB, ctx context.Context) {
			span := oteltrace.SpanFromContext(ctx)
			assert.Equal(t, Trace(1).ID().Value(), span.SpanContext().TraceID())
			assert.Equal(t, Span(2).ID(), span.SpanContext().SpanID())
			assert.Equal(t, true, span.SpanContext().IsSampled())
		},
	},
	{
		name:                   "malformed x-pomerium-traceparent",
		setTraceparent:         Traceparent(Trace(2), Span(2), false),
		setPomeriumTraceparent: "00-xxxxxx-yyyyyy-03",
		check: func(t testing.TB, ctx context.Context) {
			span := oteltrace.SpanFromContext(ctx)
			assert.Equal(t, Trace(2).ID().Value(), span.SpanContext().TraceID())
			assert.Equal(t, Span(2).ID(), span.SpanContext().SpanID())
			assert.Equal(t, false, span.SpanContext().IsSampled())
		},
	},
}

func TestHTTPMiddleware(t *testing.T) {
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			r := httptest.NewRequest(http.MethodGet, "/foo", nil)
			if tc.setTraceparent != "" {
				r.Header.Add("Traceparent", tc.setTraceparent)
			}
			if tc.setPomeriumTraceparent != "" {
				r.Header.Add("X-Pomerium-Traceparent", tc.setPomeriumTraceparent)
			}
			w := httptest.NewRecorder()
			trace.NewHTTPMiddleware(
				otelhttp.WithTracerProvider(noop.NewTracerProvider()),
			)(http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {
				tc.check(t, r.Context())
			})).ServeHTTP(w, r)
		})
	}
}

func TestGRPCMiddleware(t *testing.T) {
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			srv := grpc.NewServer(
				grpc.StatsHandler(trace.NewServerStatsHandler(otelgrpc.NewServerHandler(
					otelgrpc.WithTracerProvider(noop.NewTracerProvider())))),
				grpc.Creds(insecure.NewCredentials()),
			)
			lis := bufconn.Listen(4096)
			grpc_testing.RegisterTestServiceServer(srv, &testServer{
				fn: func(ctx context.Context) {
					tc.check(t, ctx)
				},
			})
			go srv.Serve(lis)
			t.Cleanup(srv.Stop)

			client, err := grpc.NewClient("passthrough://ignore",
				grpc.WithTransportCredentials(insecure.NewCredentials()),
				grpc.WithStatsHandler(otelgrpc.NewClientHandler(
					otelgrpc.WithTracerProvider(noop.NewTracerProvider()))),
				grpc.WithContextDialer(func(ctx context.Context, _ string) (net.Conn, error) {
					return lis.DialContext(ctx)
				}),
			)
			require.NoError(t, err)

			ctx := context.Background()
			if tc.setTraceparent != "" {
				ctx = metadata.AppendToOutgoingContext(ctx,
					"traceparent", tc.setTraceparent,
				)
			}
			if tc.setPomeriumTraceparent != "" {
				ctx = metadata.AppendToOutgoingContext(ctx,
					"x-pomerium-traceparent", tc.setPomeriumTraceparent,
				)
			}
			_, err = grpc_testing.NewTestServiceClient(client).EmptyCall(ctx, &grpc_testing.Empty{})
			assert.NoError(t, err)
		})
	}
}

type testServer struct {
	grpc_testing.UnimplementedTestServiceServer
	fn func(ctx context.Context)
}

func (ts *testServer) EmptyCall(ctx context.Context, _ *grpc_testing.Empty) (*grpc_testing.Empty, error) {
	ts.fn(ctx)
	return &grpc_testing.Empty{}, nil
}

type mockHandler struct {
	handleConn func(ctx context.Context, stats stats.ConnStats)
	handleRPC  func(ctx context.Context, stats stats.RPCStats)
	tagConn    func(ctx context.Context, info *stats.ConnTagInfo) context.Context
	tagRPC     func(ctx context.Context, info *stats.RPCTagInfo) context.Context
}

// HandleConn implements stats.Handler.
func (m *mockHandler) HandleConn(ctx context.Context, stats stats.ConnStats) {
	m.handleConn(ctx, stats)
}

// HandleRPC implements stats.Handler.
func (m *mockHandler) HandleRPC(ctx context.Context, stats stats.RPCStats) {
	m.handleRPC(ctx, stats)
}

// TagConn implements stats.Handler.
func (m *mockHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
	return m.tagConn(ctx, info)
}

// TagRPC implements stats.Handler.
func (m *mockHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
	return m.tagRPC(ctx, info)
}

var _ stats.Handler = (*mockHandler)(nil)

func TestStatsInterceptor(t *testing.T) {
	var outBegin *stats.Begin
	var outEnd *stats.End
	base := &mockHandler{
		handleRPC: func(_ context.Context, rs stats.RPCStats) {
			switch rs := rs.(type) {
			case *stats.Begin:
				outBegin = rs
			case *stats.End:
				outEnd = rs
			}
		},
	}
	interceptor := func(_ context.Context, rs stats.RPCStats) stats.RPCStats {
		switch rs := rs.(type) {
		case *stats.Begin:
			return &stats.Begin{
				Client:                    rs.Client,
				BeginTime:                 rs.BeginTime.Add(-1 * time.Minute),
				FailFast:                  rs.FailFast,
				IsClientStream:            rs.IsClientStream,
				IsServerStream:            rs.IsServerStream,
				IsTransparentRetryAttempt: rs.IsTransparentRetryAttempt,
			}
		case *stats.End:
			return &stats.End{
				Client:    rs.Client,
				BeginTime: rs.BeginTime,
				EndTime:   rs.EndTime,
				Trailer:   rs.Trailer,
				Error:     errors.New("modified"),
			}
		}
		return rs
	}
	handler := trace.NewClientStatsHandler(
		base,
		trace.WithStatsInterceptor(interceptor),
	)
	inBegin := &stats.Begin{
		Client:                    true,
		BeginTime:                 time.Now(),
		FailFast:                  true,
		IsClientStream:            true,
		IsServerStream:            false,
		IsTransparentRetryAttempt: false,
	}
	handler.HandleRPC(context.Background(), inBegin)
	assert.NotNil(t, outBegin)
	assert.NotSame(t, inBegin, outBegin)
	assert.Equal(t, inBegin.BeginTime.Add(-1*time.Minute), outBegin.BeginTime)
	assert.Equal(t, inBegin.Client, outBegin.Client)
	assert.Equal(t, inBegin.FailFast, outBegin.FailFast)
	assert.Equal(t, inBegin.IsClientStream, outBegin.IsClientStream)
	assert.Equal(t, inBegin.IsServerStream, outBegin.IsServerStream)
	assert.Equal(t, inBegin.IsTransparentRetryAttempt, outBegin.IsTransparentRetryAttempt)

	inEnd := &stats.End{
		Client:    true,
		BeginTime: time.Now(),
		EndTime:   time.Now().Add(1 * time.Minute),
		Trailer:   metadata.Pairs("a", "b", "c", "d"),
		Error:     errors.New("input"),
	}
	handler.HandleRPC(context.Background(), inEnd)
	assert.NotNil(t, outEnd)
	assert.NotSame(t, inEnd, outEnd)
	assert.Equal(t, inEnd.Client, outEnd.Client)
	assert.Equal(t, inEnd.BeginTime, outEnd.BeginTime)
	assert.Equal(t, inEnd.EndTime, outEnd.EndTime)
	assert.Equal(t, inEnd.Trailer, outEnd.Trailer)
	assert.Equal(t, "input", inEnd.Error.Error())
	assert.Equal(t, "modified", outEnd.Error.Error())
}

func TestStatsInterceptor_Nil(t *testing.T) {
	var outCtx context.Context
	var outConnStats stats.ConnStats
	var outRPCStats stats.RPCStats
	var outConnTagInfo *stats.ConnTagInfo
	var outRPCTagInfo *stats.RPCTagInfo
||||||
|
base := &mockHandler{
|
||||||
|
handleConn: func(ctx context.Context, stats stats.ConnStats) {
|
||||||
|
outCtx = ctx
|
||||||
|
outConnStats = stats
|
||||||
|
},
|
||||||
|
handleRPC: func(ctx context.Context, stats stats.RPCStats) {
|
||||||
|
outCtx = ctx
|
||||||
|
outRPCStats = stats
|
||||||
|
},
|
||||||
|
tagConn: func(ctx context.Context, info *stats.ConnTagInfo) context.Context {
|
||||||
|
outCtx = ctx
|
||||||
|
outConnTagInfo = info
|
||||||
|
return ctx
|
||||||
|
},
|
||||||
|
tagRPC: func(ctx context.Context, info *stats.RPCTagInfo) context.Context {
|
||||||
|
outCtx = ctx
|
||||||
|
outRPCTagInfo = info
|
||||||
|
return ctx
|
||||||
|
},
|
||||||
|
}
|
||||||
|
handler := trace.NewClientStatsHandler(
|
||||||
|
base,
|
||||||
|
trace.WithStatsInterceptor(nil),
|
||||||
|
)
|
||||||
|
|
||||||
|
inCtx := context.Background()
|
||||||
|
inConnStats := &stats.ConnBegin{}
|
||||||
|
inRPCStats := &stats.Begin{}
|
||||||
|
inConnTagInfo := &stats.ConnTagInfo{}
|
||||||
|
inRPCTagInfo := &stats.RPCTagInfo{}
|
||||||
|
|
||||||
|
handler.HandleConn(inCtx, inConnStats)
|
||||||
|
assert.Equal(t, inCtx, outCtx)
|
||||||
|
assert.Same(t, inConnStats, outConnStats)
|
||||||
|
|
||||||
|
handler.HandleRPC(inCtx, inRPCStats)
|
||||||
|
assert.Equal(t, inCtx, outCtx)
|
||||||
|
assert.Same(t, inRPCStats, outRPCStats)
|
||||||
|
|
||||||
|
handler.TagConn(inCtx, inConnTagInfo)
|
||||||
|
assert.Equal(t, inCtx, outCtx)
|
||||||
|
assert.Same(t, inConnTagInfo, outConnTagInfo)
|
||||||
|
|
||||||
|
handler.TagRPC(inCtx, inRPCTagInfo)
|
||||||
|
assert.Equal(t, inCtx, outCtx)
|
||||||
|
assert.Same(t, inRPCTagInfo, outRPCTagInfo)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStatsInterceptor_Bug(t *testing.T) {
|
||||||
|
handler := trace.NewClientStatsHandler(
|
||||||
|
&mockHandler{
|
||||||
|
handleRPC: func(_ context.Context, _ stats.RPCStats) {
|
||||||
|
t.Error("should not be reached")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
trace.WithStatsInterceptor(func(_ context.Context, rs stats.RPCStats) stats.RPCStats {
|
||||||
|
_ = rs.(*stats.Begin)
|
||||||
|
return &stats.End{}
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
assert.PanicsWithValue(t, "bug: stats interceptor returned a message of a different type", func() {
|
||||||
|
handler.HandleRPC(context.Background(), &stats.Begin{})
|
||||||
|
})
|
||||||
|
}
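The interceptor passed to trace.WithStatsInterceptor is a plain func(context.Context, stats.RPCStats) stats.RPCStats and, per TestStatsInterceptor_Bug above, must return the same concrete stats type it was given. A minimal sketch of wiring it into a dialed client, assuming a hypothetical redactTrailers interceptor with that signature and an illustrative target address:

	conn, err := grpc.NewClient("collector.example:4317",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(trace.NewClientStatsHandler(
			otelgrpc.NewClientHandler(),
			// redactTrailers is hypothetical: func(context.Context, stats.RPCStats) stats.RPCStats
			trace.WithStatsInterceptor(redactTrailers),
		)),
	)
	if err != nil {
		// handle dial configuration error
	}
	_ = conn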
84	internal/telemetry/trace/mock_otlptrace/mock_client.go	Normal file
@@ -0,0 +1,84 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: go.opentelemetry.io/otel/exporters/otlp/otlptrace (interfaces: Client)
//
// Generated by this command:
//
//	mockgen go.opentelemetry.io/otel/exporters/otlp/otlptrace Client
//

// Package mock_otlptrace is a generated GoMock package.
package mock_otlptrace

import (
	context "context"
	reflect "reflect"

	v1 "go.opentelemetry.io/proto/otlp/trace/v1"
	gomock "go.uber.org/mock/gomock"
)

// MockClient is a mock of Client interface.
type MockClient struct {
	ctrl     *gomock.Controller
	recorder *MockClientMockRecorder
	isgomock struct{}
}

// MockClientMockRecorder is the mock recorder for MockClient.
type MockClientMockRecorder struct {
	mock *MockClient
}

// NewMockClient creates a new mock instance.
func NewMockClient(ctrl *gomock.Controller) *MockClient {
	mock := &MockClient{ctrl: ctrl}
	mock.recorder = &MockClientMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockClient) EXPECT() *MockClientMockRecorder {
	return m.recorder
}

// Start mocks base method.
func (m *MockClient) Start(ctx context.Context) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Start", ctx)
	ret0, _ := ret[0].(error)
	return ret0
}

// Start indicates an expected call of Start.
func (mr *MockClientMockRecorder) Start(ctx any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockClient)(nil).Start), ctx)
}

// Stop mocks base method.
func (m *MockClient) Stop(ctx context.Context) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Stop", ctx)
	ret0, _ := ret[0].(error)
	return ret0
}

// Stop indicates an expected call of Stop.
func (mr *MockClientMockRecorder) Stop(ctx any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockClient)(nil).Stop), ctx)
}

// UploadTraces mocks base method.
func (m *MockClient) UploadTraces(ctx context.Context, protoSpans []*v1.ResourceSpans) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "UploadTraces", ctx, protoSpans)
	ret0, _ := ret[0].(error)
	return ret0
}

// UploadTraces indicates an expected call of UploadTraces.
func (mr *MockClientMockRecorder) UploadTraces(ctx, protoSpans any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadTraces", reflect.TypeOf((*MockClient)(nil).UploadTraces), ctx, protoSpans)
}
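The generated mock stands in for the real otlptrace.Client in the queue tests later in this change; a typical expectation follows the same pattern those tests use:

	ctrl := gomock.NewController(t)
	client := mock_otlptrace.NewMockClient(ctrl)
	client.EXPECT().
		UploadTraces(gomock.Any(), gomock.Any()).
		Return(nil).
		AnyTimes()
	queue := trace.NewSpanExportQueue(ctx, client) // ctx carries trace.Options debug flags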
817	internal/telemetry/trace/queue.go	Normal file
@@ -0,0 +1,817 @@
package trace

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"slices"
	"strconv"
	"sync"
	"sync/atomic"
	"time"
	"unique"

	lru "github.com/hashicorp/golang-lru/v2"
	"github.com/pomerium/pomerium/internal/log"
	"github.com/rs/zerolog"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	oteltrace "go.opentelemetry.io/otel/trace"
	coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
	tracev1 "go.opentelemetry.io/proto/otlp/trace/v1"
	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/proto"
)

var (
	maxPendingTraces  atomic.Int32
	maxCachedTraceIDs atomic.Int32
)

func init() {
	envOrDefault := func(envName string, def int32) int32 {
		if val, ok := os.LookupEnv(envName); ok {
			if num, err := strconv.ParseInt(val, 10, 32); err == nil {
				return int32(num)
			}
		}
		return def
	}
	maxPendingTraces.Store(envOrDefault("POMERIUM_OTEL_MAX_PENDING_TRACES", 8192))
	maxCachedTraceIDs.Store(envOrDefault("POMERIUM_OTEL_MAX_CACHED_TRACE_IDS", 16384))
}

func SetMaxPendingTraces(num int32) {
	maxPendingTraces.Store(max(num, 0))
}

func SetMaxCachedTraceIDs(num int32) {
	maxCachedTraceIDs.Store(max(num, 0))
}
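// Example (editor's sketch, not part of this file): the limits above can be
// tuned from the environment before startup, e.g.
//
//	POMERIUM_OTEL_MAX_PENDING_TRACES=16384 POMERIUM_OTEL_MAX_CACHED_TRACE_IDS=32768 pomerium
//
// or programmatically; the setters only affect queues created afterwards,
// since NewSpanExportQueue reads the values when sizing its LRU caches:
//
//	trace.SetMaxPendingTraces(16384)
//	trace.SetMaxCachedTraceIDs(32768)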
type SpanExportQueue struct {
	mu                        sync.Mutex
	logger                    *zerolog.Logger
	client                    otlptrace.Client
	pendingResourcesByTraceID *lru.Cache[unique.Handle[oteltrace.TraceID], *Buffer]
	knownTraceIDMappings      *lru.Cache[unique.Handle[oteltrace.TraceID], unique.Handle[oteltrace.TraceID]]
	uploadC                   chan []*tracev1.ResourceSpans
	closing                   bool
	closed                    chan struct{}
	debugFlags                DebugFlags
	debugAllEnqueuedSpans     map[oteltrace.SpanID]*tracev1.Span
	tracker                   *spanTracker
	observer                  *spanObserver
	debugEvents               []DebugEvent
}

func NewSpanExportQueue(ctx context.Context, client otlptrace.Client) *SpanExportQueue {
	debug := DebugFlagsFromContext(ctx)
	var observer *spanObserver
	if debug.Check(TrackSpanReferences) {
		observer = newSpanObserver()
	}
	q := &SpanExportQueue{
		logger:                log.Ctx(ctx),
		client:                client,
		uploadC:               make(chan []*tracev1.ResourceSpans, 64),
		closed:                make(chan struct{}),
		debugFlags:            debug,
		debugAllEnqueuedSpans: make(map[oteltrace.SpanID]*tracev1.Span),
		tracker:               newSpanTracker(observer, debug),
		observer:              observer,
	}
	var err error
	q.pendingResourcesByTraceID, err = lru.NewWithEvict(int(maxPendingTraces.Load()), q.onEvict)
	if err != nil {
		panic(err)
	}
	q.knownTraceIDMappings, err = lru.New[unique.Handle[oteltrace.TraceID], unique.Handle[oteltrace.TraceID]](int(maxCachedTraceIDs.Load()))
	if err != nil {
		panic(err)
	}
	go q.runUploader()
	return q
}
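// Example (editor's sketch, not part of this file): constructing a queue in
// front of a stock OTLP gRPC exporter client; debug flags are picked up from
// the context, as the tests in this change do with trace.Options.NewContext.
// The otlptracegrpc endpoint shown here is an assumption.
//
//	client := otlptracegrpc.NewClient(otlptracegrpc.WithEndpoint("localhost:4317"))
//	ctx := trace.Options{DebugFlags: trace.TrackAllSpans}.NewContext(context.Background())
//	queue := trace.NewSpanExportQueue(ctx, client)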
func (q *SpanExportQueue) runUploader() {
	defer close(q.closed)
	for resourceSpans := range q.uploadC {
		ctx, ca := context.WithTimeout(context.Background(), 10*time.Second)
		if err := q.client.UploadTraces(ctx, resourceSpans); err != nil {
			q.logger.Err(err).Msg("error uploading traces")
		}
		ca()
	}
}

func (q *SpanExportQueue) onEvict(traceID unique.Handle[oteltrace.TraceID], buf *Buffer) {
	if buf.IsEmpty() {
		// if the buffer is not empty, it was evicted automatically
		return
	} else if mapping, ok := q.knownTraceIDMappings.Get(traceID); ok && mapping == zeroTraceID {
		q.logger.Debug().
			Str("traceID", traceID.Value().String()).
			Msg("dropping unsampled trace")
		return
	}

	select {
	case q.uploadC <- buf.Flush():
		q.logger.Warn().
			Str("traceID", traceID.Value().String()).
			Msg("trace export buffer is full, uploading oldest incomplete trace")
	default:
		q.logger.Warn().
			Str("traceID", traceID.Value().String()).
			Msg("trace export buffer and upload queues are full, dropping trace")
	}
}

func (q *SpanExportQueue) insertPendingSpanLocked(
	resource *ResourceInfo,
	scope *ScopeInfo,
	traceID unique.Handle[oteltrace.TraceID],
	span *tracev1.Span,
) {
	var pendingTraceResources *Buffer

	if ptr, ok := q.pendingResourcesByTraceID.Get(traceID); ok {
		pendingTraceResources = ptr
	} else {
		pendingTraceResources = NewBuffer()
		q.pendingResourcesByTraceID.Add(traceID, pendingTraceResources)
	}
	pendingTraceResources.Insert(resource, scope, span)
}

func (q *SpanExportQueue) resolveTraceIDMappingLocked(out *Buffer, original, target unique.Handle[oteltrace.TraceID]) {
	q.knownTraceIDMappings.Add(original, target)

	if target == zeroTraceID && original != zeroTraceID {
		// mapping a trace id to zero indicates we should drop the trace
		q.pendingResourcesByTraceID.Remove(original)
		return
	}

	if originalPending, ok := q.pendingResourcesByTraceID.Peek(original); ok {
		if original == target {
			out.Merge(originalPending)
		} else {
			// check if the target id is also pending
			if targetPending, ok := q.pendingResourcesByTraceID.Peek(target); ok {
				targetPending.MergeAs(originalPending, target)
			} else {
				out.MergeAs(originalPending, target)
			}
		}
		q.pendingResourcesByTraceID.Remove(original)
	}
}

func (q *SpanExportQueue) getTraceIDMappingLocked(id unique.Handle[oteltrace.TraceID]) (unique.Handle[oteltrace.TraceID], bool) {
	v, ok := q.knownTraceIDMappings.Get(id)
	return v, ok
}

func (q *SpanExportQueue) isKnownTracePendingLocked(id unique.Handle[oteltrace.TraceID]) bool {
	_, ok := q.pendingResourcesByTraceID.Get(id) // will update the key's recent-ness in the lru
	return ok
}

var ErrShuttingDown = errors.New("exporter is shutting down")

func (q *SpanExportQueue) Enqueue(ctx context.Context, req *coltracepb.ExportTraceServiceRequest) error {
	q.mu.Lock()
	defer q.mu.Unlock()
	if q.closing {
		return ErrShuttingDown
	}

	if q.debugFlags.Check(LogAllEvents) {
		q.debugEvents = append(q.debugEvents, DebugEvent{
			Timestamp: time.Now(),
			Request:   proto.Clone(req).(*coltracepb.ExportTraceServiceRequest),
		})
	}

	// Spans are processed in two passes:
	// 1. Look through each span to check if we have not yet seen its trace ID.
	//    If we haven't, and the span is a root span (no parent, or marked as such
	//    by us), mark the trace as observed, and (if indicated) keep track of the
	//    trace ID we need to rewrite it as, so that other spans we see later in
	//    this trace can also be rewritten the same way.
	//    If we find a new trace ID for which there are pending non-root spans,
	//    collect them and rewrite their trace IDs (if necessary), and prepare
	//    them to be uploaded.
	//
	//    At this point, all trace IDs for the spans in the request are known.
	//
	// 2. Look through each span again, this time to filter out any spans in
	//    the request which belong to "pending" traces (known trace IDs for which
	//    we have not yet seen a root span), adding them to the list of pending
	//    spans for their corresponding trace IDs. They will be uploaded in the
	//    future once we have observed a root span for those traces, or if they
	//    are evicted by the queue.

	// Pass 1
	toUpload := NewBuffer()
	for _, resource := range req.ResourceSpans {
		for _, scope := range resource.ScopeSpans {
		SPANS:
			for _, span := range scope.Spans {
				FormatSpanName(span)
				spanID, ok := ToSpanID(span.SpanId)
				if !ok {
					continue
				}
				if q.debugFlags.Check(TrackAllSpans) {
					q.debugAllEnqueuedSpans[spanID] = span
				}
				trackSpanReferences := q.debugFlags.Check(TrackSpanReferences)
				parentSpanID, ok := ToSpanID(span.ParentSpanId)
				if !ok {
					continue
				}
				traceID, ok := ToTraceID(span.TraceId)
				if !ok {
					continue
				}
				if trackSpanReferences {
					q.observer.Observe(spanID)
				}
				if mapping, ok := q.getTraceIDMappingLocked(traceID); ok {
					if trackSpanReferences && mapping != zeroTraceID && parentSpanID.IsValid() {
						q.observer.ObserveReference(parentSpanID, spanID)
					}
				} else {
					// Observed a new trace ID. Check if the span is a root span
					isRootSpan := !parentSpanID.IsValid() // no parent == root span

					// Assume the trace is sampled, because it was exported. span.Flags
					// is an unreliable way to detect whether the span was sampled,
					// because neither envoy nor opentelemetry-go encode the sampling
					// decision there, assuming unsampled spans would not be exported
					// (this was not taking into account tail-based sampling strategies)
					// https://github.com/open-telemetry/opentelemetry-proto/issues/166
					isSampled := true

					mappedTraceID := traceID
					for _, attr := range span.Attributes {
						switch attr.Key {
						case "pomerium.traceparent":
							tp, err := ParseTraceparent(attr.GetValue().GetStringValue())
							if err != nil {
								data, _ := protojson.Marshal(span)
								log.Ctx(ctx).
									Err(err).
									Str("span", string(data)).
									Msg("error processing span")
								continue SPANS
							}
							mappedTraceID = unique.Make(tp.TraceID())
							// use the sampling decision from pomerium.traceparent instead
							isSampled = tp.IsSampled()
						case "pomerium.external-parent-span":
							// This is a non-root span whose parent we do not expect to see
							// here. For example, if a request originated externally from a
							// system that is uploading its own spans out-of-band from us,
							// we will never observe a root span for this trace and it would
							// otherwise get stuck in the queue.
							if !isRootSpan && q.debugFlags.Check(TrackSpanReferences) {
								value, err := oteltrace.SpanIDFromHex(attr.GetValue().GetStringValue())
								if err != nil {
									data, _ := protojson.Marshal(span)
									log.Ctx(ctx).
										Err(err).
										Str("span", string(data)).
										Msg("error processing span: invalid value for pomerium.external-parent-span")
								} else {
									q.observer.Observe(value) // mark this id as observed
								}
							}
							isRootSpan = true
						}
					}

					if q.debugFlags.Check(TrackSpanReferences) {
						if isSampled && parentSpanID.IsValid() {
							q.observer.ObserveReference(parentSpanID, spanID)
						}
					}

					if !isSampled {
						// We have observed a new trace that is not sampled (regardless of
						// whether or not it is a root span). Resolve it using the zero
						// trace ID to indicate that all spans for this trace should be
						// dropped.
						q.resolveTraceIDMappingLocked(toUpload, traceID, zeroTraceID)
					} else if isRootSpan {
						// We have observed a new trace that is sampled and is a root span.
						// Resolve it using the mapped trace ID (if present), or its own
						// trace ID (indicating it does not need to be rewritten).
						// If the mapped trace is pending, this does not flush pending
						// spans to the output buffer (toUpload), but instead merges them
						// into the mapped trace's pending buffer.
						q.resolveTraceIDMappingLocked(toUpload, traceID, mappedTraceID)
					}
				}
			}
		}
	}

	// Pass 2
	for _, resource := range req.ResourceSpans {
		resourceInfo := NewResourceInfo(resource.Resource, resource.SchemaUrl)
		for _, scope := range resource.ScopeSpans {
			scopeInfo := NewScopeInfo(scope.Scope, scope.SchemaUrl)
			for _, span := range scope.Spans {
				traceID, ok := ToTraceID(span.TraceId)
				if !ok {
					continue
				}

				if mapping, hasMapping := q.getTraceIDMappingLocked(traceID); hasMapping {
					if mapping == zeroTraceID {
						continue // the trace has been dropped
					}
					id := mapping.Value()
					copy(span.TraceId, id[:])
					// traceID = mapping
					if q.isKnownTracePendingLocked(mapping) {
						q.insertPendingSpanLocked(resourceInfo, scopeInfo, mapping, span)
					} else {
						toUpload.Insert(resourceInfo, scopeInfo, span)
					}
				} else {
					q.insertPendingSpanLocked(resourceInfo, scopeInfo, traceID, span)
				}
			}
		}
	}
	if resourceSpans := toUpload.Flush(); len(resourceSpans) > 0 {
		q.uploadC <- resourceSpans
	}
	return nil
}
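// Example (editor's sketch, not part of this file): an OTLP trace service
// receiver can hand incoming export requests straight to the queue, which is
// what the replay test in queue_test.go does:
//
//	type traceService struct {
//		coltracepb.UnimplementedTraceServiceServer
//		queue *SpanExportQueue
//	}
//
//	func (s *traceService) Export(ctx context.Context, req *coltracepb.ExportTraceServiceRequest) (*coltracepb.ExportTraceServiceResponse, error) {
//		return &coltracepb.ExportTraceServiceResponse{}, s.queue.Enqueue(ctx, req)
//	}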
var (
	ErrIncompleteTraces   = errors.New("exporter shut down with incomplete traces")
	ErrIncompleteSpans    = errors.New("exporter shut down with incomplete spans")
	ErrIncompleteUploads  = errors.New("exporter shut down with pending trace uploads")
	ErrMissingParentSpans = errors.New("exporter shut down with missing parent spans")
)

func (q *SpanExportQueue) WaitForSpans(maxDuration time.Duration) error {
	if !q.debugFlags.Check(TrackSpanReferences) {
		return nil
	}
	done := make(chan struct{})
	go func() {
		defer close(done)
		q.observer.wait(q.debugAllEnqueuedSpans, 10*time.Second)
	}()
	select {
	case <-done:
		return nil
	case <-time.After(maxDuration):
		return ErrMissingParentSpans
	}
}

func (q *SpanExportQueue) Close(ctx context.Context) error {
	q.mu.Lock()
	q.closing = true
	close(q.uploadC)
	q.mu.Unlock()
	select {
	case <-ctx.Done():
		log.Ctx(ctx).Error().Msg("exporter stopped before all traces could be exported")
		// drain uploadC
		for range q.uploadC {
		}
		return context.Cause(ctx)
	case <-q.closed:
		q.mu.Lock()
		defer q.mu.Unlock()
		err := q.runOnCloseChecksLocked()
		log.Ctx(ctx).Debug().Err(err).Msg("exporter stopped")
		return err
	}
}
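// Example (editor's sketch, not part of this file): shutdown typically gives
// the uploader a bounded window to drain before abandoning pending uploads:
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	if err := queue.Close(ctx); err != nil {
//		log.Ctx(ctx).Error().Err(err).Msg("trace export queue did not shut down cleanly")
//	}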
func (q *SpanExportQueue) runOnCloseChecksLocked() error {
	didWarn := false
	if q.debugFlags.Check(TrackSpanReferences) {
		var unknownParentIDs []string
		for id, via := range q.observer.referencedIDs {
			if via.IsValid() {
				if q.debugFlags.Check(TrackAllSpans) {
					if viaSpan, ok := q.debugAllEnqueuedSpans[via]; ok {
						unknownParentIDs = append(unknownParentIDs, fmt.Sprintf("%s via %s (%s)", id, via, viaSpan.Name))
					} else {
						unknownParentIDs = append(unknownParentIDs, fmt.Sprintf("%s via %s", id, via))
					}
				}
			}
		}
		if len(unknownParentIDs) > 0 {
			didWarn = true
			msg := startMsg("WARNING: parent spans referenced but never seen:\n")
			for _, str := range unknownParentIDs {
				msg.WriteString(str)
				msg.WriteString("\n")
			}
			endMsg(msg)
		}
	}
	incomplete := q.pendingResourcesByTraceID.Len() > 0
	if incomplete && q.debugFlags.Check(WarnOnIncompleteTraces) {
		didWarn = true
		msg := startMsg("WARNING: exporter shut down with incomplete traces\n")
		keys := q.pendingResourcesByTraceID.Keys()
		values := q.pendingResourcesByTraceID.Values()
		for i, k := range keys {
			v := values[i]
			fmt.Fprintf(msg, "- Trace: %s\n", k.Value())
			for _, pendingScope := range v.scopesByResourceID {
				msg.WriteString("  - Resource:\n")
				for _, v := range pendingScope.resource.Resource.Attributes {
					fmt.Fprintf(msg, "     %s=%s\n", v.Key, v.Value.String())
				}
				for _, spanBuffer := range pendingScope.spansByScope {
					if spanBuffer.scope != nil {
						fmt.Fprintf(msg, "    Scope: %s\n", spanBuffer.scope.ID())
					} else {
						msg.WriteString("    Scope: (unknown)\n")
					}
					msg.WriteString("    Spans:\n")
					longestName := 0
					for _, span := range spanBuffer.spans {
						longestName = max(longestName, len(span.Name)+2)
					}
					for _, span := range spanBuffer.spans {
						spanID, ok := ToSpanID(span.SpanId)
						if !ok {
							continue
						}
						traceID, ok := ToTraceID(span.TraceId)
						if !ok {
							continue
						}
						parentSpanID, ok := ToSpanID(span.ParentSpanId)
						if !ok {
							continue
						}
						_, seenParent := q.debugAllEnqueuedSpans[parentSpanID]
						var missing string
						if !seenParent {
							missing = " [missing]"
						}
						fmt.Fprintf(msg, "    - %-*s (trace: %s | span: %s | parent:%s %s)\n", longestName,
							"'"+span.Name+"'", traceID.Value(), spanID, missing, parentSpanID)
						for _, attr := range span.Attributes {
							if attr.Key == "caller" {
								fmt.Fprintf(msg, "      => caller: '%s'\n", attr.Value.GetStringValue())
								break
							}
						}
					}
				}
			}
		}
		endMsg(msg)
	}

	if q.debugFlags.Check(LogTraceIDMappings) || (didWarn && q.debugFlags.Check(LogTraceIDMappingsOnWarn)) {
		msg := startMsg("Known trace ids:\n")
		keys := q.knownTraceIDMappings.Keys()
		values := q.knownTraceIDMappings.Values()
		for i, k := range keys {
			v := values[i]
			if k != v {
				if v == zeroTraceID {
					fmt.Fprintf(msg, "%s (dropped)\n", k.Value())
				} else {
					fmt.Fprintf(msg, "%s => %s\n", k.Value(), v.Value())
				}
			} else {
				fmt.Fprintf(msg, "%s (no change)\n", k.Value())
			}
		}
		endMsg(msg)
	}
	if q.debugFlags.Check(LogAllSpans) || (didWarn && q.debugFlags.Check(LogAllSpansOnWarn)) {
		msg := startMsg("All exported spans:\n")
		longestName := 0
		for _, span := range q.debugAllEnqueuedSpans {
			longestName = max(longestName, len(span.Name)+2)
		}
		for _, span := range q.debugAllEnqueuedSpans {
			spanID, ok := ToSpanID(span.SpanId)
			if !ok {
				continue
			}
			traceID, ok := ToTraceID(span.TraceId)
			if !ok {
				continue
			}
			parentSpanID, ok := ToSpanID(span.ParentSpanId)
			if !ok {
				continue
			}
			fmt.Fprintf(msg, "%-*s (trace: %s | span: %s | parent: %s)", longestName,
				"'"+span.Name+"'", traceID.Value(), spanID, parentSpanID)
			var foundCaller bool
			for _, attr := range span.Attributes {
				if attr.Key == "caller" {
					fmt.Fprintf(msg, " => %s\n", attr.Value.GetStringValue())
					foundCaller = true
					break
				}
			}
			if !foundCaller {
				msg.WriteString("\n")
			}
		}
		endMsg(msg)
	}
	if q.debugFlags.Check(LogAllEvents) {
		msg := startMsg("All Events:\n")
		msg.WriteByte('[')
		for i, event := range q.debugEvents {
			msg.WriteString("\n  ")
			eventData, _ := json.Marshal(event)
			msg.Write(eventData)
			if i < len(q.debugEvents)-1 {
				msg.WriteByte(',')
			} else {
				msg.WriteString("\n]")
			}
		}
		msg.WriteByte('\n')
		endMsg(msg)
	}
	if incomplete {
		return ErrIncompleteTraces
	}
	return nil
}

type DebugEvent struct {
	Timestamp time.Time                             `json:"timestamp"`
	Request   *coltracepb.ExportTraceServiceRequest `json:"request"`
}

func (e DebugEvent) MarshalJSON() ([]byte, error) {
	type debugEvent struct {
		Timestamp time.Time       `json:"timestamp"`
		Request   json.RawMessage `json:"request"`
	}
	reqData, _ := protojson.Marshal(e.Request)
	return json.Marshal(debugEvent{
		Timestamp: e.Timestamp,
		Request:   reqData,
	})
}

func (e *DebugEvent) UnmarshalJSON(b []byte) error {
	type debugEvent struct {
		Timestamp time.Time       `json:"timestamp"`
		Request   json.RawMessage `json:"request"`
	}
	var ev debugEvent
	if err := json.Unmarshal(b, &ev); err != nil {
		return err
	}
	e.Timestamp = ev.Timestamp
	var msg coltracepb.ExportTraceServiceRequest
	if err := protojson.Unmarshal(ev.Request, &msg); err != nil {
		return err
	}
	e.Request = &msg
	return nil
}

type spanTracker struct {
	inflightSpans sync.Map
	allSpans      sync.Map
	debugFlags    DebugFlags
	observer      *spanObserver
	shutdownOnce  sync.Once
}

func newSpanTracker(observer *spanObserver, debugFlags DebugFlags) *spanTracker {
	return &spanTracker{
		observer:   observer,
		debugFlags: debugFlags,
	}
}

type spanInfo struct {
	Name        string
	SpanContext oteltrace.SpanContext
	Parent      oteltrace.SpanContext
	caller      string
	startTime   time.Time
}

// ForceFlush implements trace.SpanProcessor.
func (t *spanTracker) ForceFlush(context.Context) error {
	return nil
}

// OnEnd implements trace.SpanProcessor.
func (t *spanTracker) OnEnd(s sdktrace.ReadOnlySpan) {
	id := s.SpanContext().SpanID()
	t.inflightSpans.Delete(id)
}

// OnStart implements trace.SpanProcessor.
func (t *spanTracker) OnStart(_ context.Context, s sdktrace.ReadWriteSpan) {
	id := s.SpanContext().SpanID()
	t.inflightSpans.Store(id, struct{}{})
	if t.debugFlags.Check(TrackSpanReferences) {
		t.observer.Observe(id)
	}
	if t.debugFlags.Check(TrackAllSpans) {
		var caller string
		for _, attr := range s.Attributes() {
			if attr.Key == "caller" {
				caller = attr.Value.AsString()
				break
			}
		}
		t.allSpans.Store(id, &spanInfo{
			Name:        s.Name(),
			SpanContext: s.SpanContext(),
			Parent:      s.Parent(),
			caller:      caller,
			startTime:   s.StartTime(),
		})
	}
}

// Shutdown implements trace.SpanProcessor.
func (t *spanTracker) Shutdown(_ context.Context) error {
	if t.debugFlags == 0 {
		return nil
	}
	didWarn := false
	t.shutdownOnce.Do(func() {
		if t.debugFlags.Check(WarnOnIncompleteSpans) {
			if t.debugFlags.Check(TrackAllSpans) {
				incompleteSpans := []*spanInfo{}
				t.inflightSpans.Range(func(key, _ any) bool {
					if info, ok := t.allSpans.Load(key); ok {
						incompleteSpans = append(incompleteSpans, info.(*spanInfo))
					}
					return true
				})
				if len(incompleteSpans) > 0 {
					didWarn = true
					msg := startMsg("WARNING: spans not ended:\n")
					longestName := 0
					for _, span := range incompleteSpans {
						longestName = max(longestName, len(span.Name)+2)
					}
					for _, span := range incompleteSpans {
						var startedAt string
						if span.caller != "" {
							startedAt = " | started at: " + span.caller
						}
						fmt.Fprintf(msg, "%-*s (trace: %s | span: %s | parent: %s%s)\n", longestName, "'"+span.Name+"'",
							span.SpanContext.TraceID(), span.SpanContext.SpanID(), span.Parent.SpanID(), startedAt)
					}
					endMsg(msg)
				}
			} else {
				incompleteSpans := []oteltrace.SpanID{}
				t.inflightSpans.Range(func(key, _ any) bool {
					incompleteSpans = append(incompleteSpans, key.(oteltrace.SpanID))
					return true
				})
				if len(incompleteSpans) > 0 {
					didWarn = true
					msg := startMsg("WARNING: spans not ended:\n")
					for _, span := range incompleteSpans {
						fmt.Fprintf(msg, "%s\n", span)
					}
					msg.WriteString("Note: set TrackAllSpans flag for more info\n")
					endMsg(msg)
				}
			}
		}

		if t.debugFlags.Check(LogAllSpans) || (t.debugFlags.Check(LogAllSpansOnWarn) && didWarn) {
			allSpans := []*spanInfo{}
			t.allSpans.Range(func(_, value any) bool {
				allSpans = append(allSpans, value.(*spanInfo))
				return true
			})
			slices.SortFunc(allSpans, func(a, b *spanInfo) int {
				return a.startTime.Compare(b.startTime)
			})
			msg := startMsg("All observed spans:\n")
			longestName := 0
			for _, span := range allSpans {
				longestName = max(longestName, len(span.Name)+2)
			}
			for _, span := range allSpans {
				var startedAt string
				if span.caller != "" {
					startedAt = " | started at: " + span.caller
				}
				fmt.Fprintf(msg, "%-*s (trace: %s | span: %s | parent: %s%s)\n", longestName, "'"+span.Name+"'",
					span.SpanContext.TraceID(), span.SpanContext.SpanID(), span.Parent.SpanID(), startedAt)
			}
			endMsg(msg)
		}
	})
	if didWarn {
		return ErrIncompleteSpans
	}
	return nil
}

func newSpanObserver() *spanObserver {
	return &spanObserver{
		referencedIDs: map[oteltrace.SpanID]oteltrace.SpanID{},
		cond:          sync.NewCond(&sync.Mutex{}),
	}
}

type spanObserver struct {
	cond          *sync.Cond
	referencedIDs map[oteltrace.SpanID]oteltrace.SpanID
	unobservedIDs int
}

func (obs *spanObserver) ObserveReference(id oteltrace.SpanID, via oteltrace.SpanID) {
	obs.cond.L.Lock()
	defer obs.cond.L.Unlock()
	if _, referenced := obs.referencedIDs[id]; !referenced {
		obs.referencedIDs[id] = via // referenced, but not observed
		// It is possible for new unobserved references to come in while waiting,
		// but incrementing the counter wouldn't satisfy the condition so we don't
		// need to signal the waiters
		obs.unobservedIDs++
	}
}

func (obs *spanObserver) Observe(id oteltrace.SpanID) {
	obs.cond.L.Lock()
	defer obs.cond.L.Unlock()
	if observed, referenced := obs.referencedIDs[id]; !referenced || observed.IsValid() { // NB: subtle condition
		obs.referencedIDs[id] = zeroSpanID
		if referenced {
			obs.unobservedIDs--
			obs.cond.Broadcast()
		}
	}
}

func (obs *spanObserver) wait(debugAllEnqueuedSpans map[oteltrace.SpanID]*tracev1.Span, warnAfter time.Duration) {
	done := make(chan struct{})
	defer close(done)
	go func() {
		select {
		case <-done:
			return
		case <-time.After(warnAfter):
			obs.debugWarnWaiting(debugAllEnqueuedSpans)
		}
	}()

	obs.cond.L.Lock()
	for obs.unobservedIDs > 0 {
		obs.cond.Wait()
	}
	obs.cond.L.Unlock()
}

func (obs *spanObserver) debugWarnWaiting(debugAllEnqueuedSpans map[oteltrace.SpanID]*tracev1.Span) {
	obs.cond.L.Lock()
	msg := startMsg(fmt.Sprintf("Waiting on %d unobserved spans:\n", obs.unobservedIDs))
	for id, via := range obs.referencedIDs {
		if via.IsValid() {
			fmt.Fprintf(msg, "%s via %s", id, via)
			if span := debugAllEnqueuedSpans[id]; span != nil {
				createdAt := "(unknown)"
				for _, attr := range span.Attributes {
					if attr.Key == "caller" {
						createdAt = attr.Value.GetStringValue()
						break
					}
				}
				fmt.Fprintf(msg, "'%s' (trace: %s | created: %s)\n", span.GetName(), span.TraceId, createdAt)
			} else {
				msg.WriteString("\n")
			}
		}
	}
	endMsg(msg)
	obs.cond.L.Unlock()
}
798	internal/telemetry/trace/queue_test.go	Normal file
@@ -0,0 +1,798 @@
package trace_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"embed"
|
||||||
|
"fmt"
|
||||||
|
"io/fs"
|
||||||
|
"regexp"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/pomerium/pomerium/internal/telemetry/trace"
|
||||||
|
"github.com/pomerium/pomerium/internal/telemetry/trace/mock_otlptrace"
|
||||||
|
"github.com/pomerium/pomerium/internal/testutil"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
sdktrace "go.opentelemetry.io/otel/sdk/trace"
|
||||||
|
oteltrace "go.opentelemetry.io/otel/trace"
|
||||||
|
coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
|
||||||
|
commonv1 "go.opentelemetry.io/proto/otlp/common/v1"
|
||||||
|
tracev1 "go.opentelemetry.io/proto/otlp/trace/v1"
|
||||||
|
"go.uber.org/mock/gomock"
|
||||||
|
"google.golang.org/protobuf/encoding/protojson"
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
//go:embed testdata
|
||||||
|
var testdata embed.FS
|
||||||
|
|
||||||
|
func TestSpanExportQueue_Replay(t *testing.T) {
|
||||||
|
for _, tc := range []struct {
|
||||||
|
name string
|
||||||
|
file string
|
||||||
|
check func(t testing.TB, inputs, outputs *testutil.TraceResults)
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "single trace",
|
||||||
|
file: "testdata/recording_01_single_trace.json",
|
||||||
|
check: func(t testing.TB, inputs, outputs *testutil.TraceResults) {
|
||||||
|
inputs.AssertEqual(t, outputs)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "rewriting multiple traces",
|
||||||
|
file: "testdata/recording_02_multi_trace.json",
|
||||||
|
check: func(t testing.TB, inputs, outputs *testutil.TraceResults) {
|
||||||
|
inputTraces := inputs.GetTraces().WithoutErrors()
|
||||||
|
outputTraces := outputs.GetTraces().WithoutErrors()
|
||||||
|
|
||||||
|
// find upstream trace
|
||||||
|
var inputUpstreamTrace, outputUpstreamTrace *testutil.TraceDetails
|
||||||
|
isUpstreamTrace := func(v *testutil.TraceDetails) bool {
|
||||||
|
if strings.HasPrefix(v.Name, "Envoy: ingress:") {
|
||||||
|
for _, attr := range v.Spans[0].Raw.Attributes {
|
||||||
|
if attr.Key == "http.url" {
|
||||||
|
if regexp.MustCompile(`https://127\.0\.0\.1:\d+/foo`).MatchString(attr.Value.GetStringValue()) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for _, v := range inputTraces.ByID {
|
||||||
|
if isUpstreamTrace(v) {
|
||||||
|
inputUpstreamTrace = v
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, v := range outputTraces.ByID {
|
||||||
|
if isUpstreamTrace(v) {
|
||||||
|
outputUpstreamTrace = v
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
equal, diff := inputUpstreamTrace.Equal(outputUpstreamTrace)
|
||||||
|
if !equal {
|
||||||
|
assert.Failf(t, "upstream traces not equal:\n%s", diff)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// find downstream traces
|
||||||
|
// should be composed of:
|
||||||
|
// - 'ingress: GET foo.localhost.pomerium.io:<port>/foo'
|
||||||
|
// - 'internal: GET authenticate.localhost.pomerium.io:<port>/.pomerium/sign_in' (unauthorized)
|
||||||
|
// - 'internal: GET authenticate.localhost.pomerium.io:<port>/oauth2/callback'
|
||||||
|
// - 'internal: GET authenticate.localhost.pomerium.io:<port>/.pomerium/sign_in' (authorized)
|
||||||
|
// - 'internal: GET foo.localhost.pomerium.io:<port>/.pomerium/callback/'
|
||||||
|
envoyOutputTraces := outputTraces.ByParticipant["Envoy"]
|
||||||
|
// there should be two
|
||||||
|
require.Len(t, envoyOutputTraces, 2)
|
||||||
|
// find which one is not the upstream trace
|
||||||
|
var downstreamTrace *testutil.TraceDetails
|
||||||
|
if envoyOutputTraces[0].ID == outputUpstreamTrace.ID {
|
||||||
|
downstreamTrace = envoyOutputTraces[1]
|
||||||
|
} else {
|
||||||
|
downstreamTrace = envoyOutputTraces[0]
|
||||||
|
}
|
||||||
|
tree := downstreamTrace.SpanTree()
|
||||||
|
require.Empty(t, tree.DetachedParents)
|
||||||
|
parts := tree.Root.Children
|
||||||
|
require.Len(t, parts, 5)
|
||||||
|
assert.True(t, regexp.MustCompile(`ingress: GET foo\.localhost\.pomerium\.io:\d+/foo`).MatchString(parts[0].Span.Raw.Name))
|
||||||
|
assert.True(t, regexp.MustCompile(`internal: GET authenticate\.localhost\.pomerium\.io:\d+/.pomerium/sign_in`).MatchString(parts[1].Span.Raw.Name))
|
||||||
|
assert.True(t, regexp.MustCompile(`internal: GET authenticate\.localhost\.pomerium\.io:\d+/oauth2/callback`).MatchString(parts[2].Span.Raw.Name))
|
||||||
|
assert.True(t, regexp.MustCompile(`internal: GET authenticate\.localhost\.pomerium\.io:\d+/.pomerium/sign_in`).MatchString(parts[3].Span.Raw.Name))
|
||||||
|
assert.True(t, regexp.MustCompile(`internal: GET foo\.localhost\.pomerium\.io:\d+/.pomerium/callback`).MatchString(parts[4].Span.Raw.Name))
|
||||||
|
},
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
ctrl := gomock.NewController(t)
|
||||||
|
mockClient := mock_otlptrace.NewMockClient(ctrl)
|
||||||
|
var resultsMu sync.Mutex
|
||||||
|
outputSpans := [][]*tracev1.ResourceSpans{}
|
||||||
|
mockClient.EXPECT().
|
||||||
|
UploadTraces(gomock.Any(), gomock.Any()).
|
||||||
|
DoAndReturn(func(_ context.Context, protoSpans []*tracev1.ResourceSpans) error {
|
||||||
|
resultsMu.Lock()
|
||||||
|
defer resultsMu.Unlock()
|
||||||
|
outputSpans = append(outputSpans, protoSpans)
|
||||||
|
return nil
|
||||||
|
}).
|
||||||
|
AnyTimes()
|
||||||
|
recording1, err := fs.ReadFile(testdata, tc.file)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
rec, err := testutil.LoadEventRecording(recording1)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
ctx := trace.Options{
|
||||||
|
DebugFlags: trace.TrackAllSpans | trace.WarnOnIncompleteSpans | trace.WarnOnIncompleteTraces | trace.WarnOnUnresolvedReferences,
|
||||||
|
}.NewContext(context.Background())
|
||||||
|
queue := trace.NewSpanExportQueue(ctx, mockClient)
|
||||||
|
|
||||||
|
recCloned := rec.Clone()
|
||||||
|
|
||||||
|
err = rec.Replay(func(ctx context.Context, req *coltracepb.ExportTraceServiceRequest) (*coltracepb.ExportTraceServiceResponse, error) {
|
||||||
|
return &coltracepb.ExportTraceServiceResponse{}, queue.Enqueue(ctx, req)
|
||||||
|
})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
// wait for all calls to UploadTraces to complete
|
||||||
|
ctx, ca := context.WithTimeout(context.Background(), 1*time.Second)
|
||||||
|
defer ca()
|
||||||
|
assert.NoError(t, queue.Close(ctx))
|
||||||
|
|
||||||
|
recCloned.Normalize(rec.NormalizedTo())
|
||||||
|
|
||||||
|
inputRequests := []*coltracepb.ExportTraceServiceRequest{}
|
||||||
|
for _, ev := range recCloned.Events() {
|
||||||
|
inputRequests = append(inputRequests, ev.Request)
|
||||||
|
}
|
||||||
|
inputs := testutil.NewTraceResults(testutil.FlattenExportRequests(inputRequests))
|
||||||
|
outputs := testutil.NewTraceResults(testutil.FlattenResourceSpans(outputSpans))
|
||||||
|
tc.check(t, inputs, outputs)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSpanExportQueue_Enqueue(t *testing.T) {
|
||||||
|
type (
|
||||||
|
mapped struct {
|
||||||
|
s Span
|
||||||
|
t Trace
|
||||||
|
}
|
||||||
|
action struct {
|
||||||
|
exports []Span
|
||||||
|
uploads []any // int|mapped|*tracev1.Span
|
||||||
|
}
|
||||||
|
testCase struct {
|
||||||
|
name string
|
||||||
|
spans []*tracev1.Span // note: span ids are set automatically by index
|
||||||
|
actions []action
|
||||||
|
// if actionSets is present, repeats the same test case for each entry
|
||||||
|
actionSets [][]action
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
traceparent := func(trace Trace, span Span, sampled ...bool) *commonv1.KeyValue {
|
||||||
|
if len(sampled) == 0 {
|
||||||
|
sampled = append(sampled, true)
|
||||||
|
}
|
||||||
|
return &commonv1.KeyValue{
|
||||||
|
Key: "pomerium.traceparent",
|
||||||
|
Value: &commonv1.AnyValue{Value: &commonv1.AnyValue_StringValue{
|
||||||
|
StringValue: Traceparent(trace, span, sampled[0]),
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
externalParent := func(span Span) *commonv1.KeyValue {
|
||||||
|
return &commonv1.KeyValue{
|
||||||
|
Key: "pomerium.external-parent-span",
|
||||||
|
Value: &commonv1.AnyValue{Value: &commonv1.AnyValue_StringValue{
|
||||||
|
StringValue: span.ID().String(),
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
attrs := func(kvs ...*commonv1.KeyValue) []*commonv1.KeyValue { return kvs }
|
||||||
|
|
||||||
|
cases := []testCase{
|
||||||
|
{
|
||||||
|
name: "single trace",
|
||||||
|
spans: []*tracev1.Span{
|
||||||
|
// |<========>| Span 1
|
||||||
|
// | <======> | Span 2
|
||||||
|
// | <====> | Span 3
|
||||||
|
// T123456789A-
|
||||||
|
Span(1): {
|
||||||
|
TraceId: Trace(1).B(),
|
||||||
|
ParentSpanId: nil,
|
||||||
|
StartTimeUnixNano: 1,
|
||||||
|
EndTimeUnixNano: 0xA,
|
||||||
|
},
|
||||||
|
Span(2): {
|
||||||
|
TraceId: Trace(1).B(),
|
||||||
|
ParentSpanId: Span(1).B(),
|
||||||
|
StartTimeUnixNano: 2,
|
||||||
|
EndTimeUnixNano: 9,
|
||||||
|
},
|
||||||
|
Span(3): {
|
||||||
|
TraceId: Trace(1).B(),
|
||||||
|
ParentSpanId: Span(2).B(),
|
||||||
|
StartTimeUnixNano: 3,
|
||||||
|
EndTimeUnixNano: 8,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
actionSets: [][]action{
|
||||||
|
// root span first
|
||||||
|
{
|
||||||
|
{exports: []Span{1}, uploads: []any{1}},
|
||||||
|
{exports: []Span{2, 3}, uploads: []any{2, 3}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{exports: []Span{1, 2}, uploads: []any{1, 2}},
|
||||||
|
{exports: []Span{3}, uploads: []any{3}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{exports: []Span{1, 2, 3}, uploads: []any{1, 2, 3}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{exports: []Span{1, 3, 2}, uploads: []any{1, 2, 3}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{exports: []Span{1}, uploads: []any{1}},
|
||||||
|
{exports: []Span{2}, uploads: []any{2}},
|
||||||
|
{exports: []Span{3}, uploads: []any{3}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{exports: []Span{1}, uploads: []any{1}},
|
||||||
|
{exports: []Span{3}, uploads: []any{3}},
|
||||||
|
{exports: []Span{2}, uploads: []any{2}},
|
||||||
|
},
|
||||||
|
// root span last
|
||||||
|
{
|
||||||
|
{exports: []Span{2}, uploads: []any{}},
|
||||||
|
{exports: []Span{3}, uploads: []any{}},
|
||||||
|
{exports: []Span{1}, uploads: []any{1, 2, 3}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{exports: []Span{3}, uploads: []any{}},
|
||||||
|
{exports: []Span{2}, uploads: []any{}},
|
||||||
|
{exports: []Span{1}, uploads: []any{1, 2, 3}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{exports: []Span{2, 3}, uploads: []any{}},
|
||||||
|
{exports: []Span{1}, uploads: []any{1, 2, 3}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{exports: []Span{3, 2}, uploads: []any{}},
|
||||||
|
{exports: []Span{1}, uploads: []any{1, 2, 3}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{exports: []Span{3}, uploads: []any{}},
|
||||||
|
{exports: []Span{2, 1}, uploads: []any{1, 2, 3}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{exports: []Span{2, 3, 1}, uploads: []any{1, 2, 3}},
|
||||||
|
},
|
||||||
|
// root span in the middle
|
||||||
|
{
|
||||||
|
{exports: []Span{2}, uploads: []any{}},
|
||||||
|
{exports: []Span{1}, uploads: []any{1, 2}},
|
||||||
|
{exports: []Span{3}, uploads: []any{3}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{exports: []Span{3}, uploads: []any{}},
|
||||||
|
{exports: []Span{1}, uploads: []any{1, 3}},
|
||||||
|
{exports: []Span{2}, uploads: []any{2}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{exports: []Span{3}, uploads: []any{}},
|
||||||
|
{exports: []Span{1, 2}, uploads: []any{1, 2, 3}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{exports: []Span{2}, uploads: []any{}},
|
||||||
|
{exports: []Span{1, 3}, uploads: []any{1, 2, 3}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "two correlated traces",
|
||||||
|
spans: []*tracev1.Span{
|
||||||
|
// |<=====> | Span 1 (Trace 1)
|
||||||
|
// | <===> | Span 2 (Trace 1)
|
||||||
|
// | <=> | Span 3 (Trace 1)
|
||||||
|
// | <======>| Span 4 (Trace 2)
|
||||||
|
// | <====> | Span 5 (Trace 2)
|
||||||
|
// T123456789ABCDEF-
|
||||||
|
Span(1): {
|
||||||
|
TraceId: Trace(1).B(),
|
||||||
|
ParentSpanId: nil,
|
||||||
|
StartTimeUnixNano: 1,
|
||||||
|
EndTimeUnixNano: 7,
|
||||||
|
},
|
||||||
|
Span(2): {
|
||||||
|
TraceId: Trace(1).B(),
|
||||||
|
ParentSpanId: Span(1).B(),
|
||||||
|
StartTimeUnixNano: 2,
|
||||||
|
EndTimeUnixNano: 6,
|
||||||
|
},
|
||||||
|
Span(3): {
|
||||||
|
TraceId: Trace(1).B(),
|
||||||
|
ParentSpanId: Span(2).B(),
|
||||||
|
StartTimeUnixNano: 3,
|
||||||
|
EndTimeUnixNano: 5,
|
||||||
|
},
|
||||||
|
Span(4): {
|
||||||
|
TraceId: Trace(2).B(),
|
||||||
|
ParentSpanId: nil,
|
||||||
|
Attributes: attrs(traceparent(Trace(1), Span(1))),
|
||||||
|
StartTimeUnixNano: 8,
|
||||||
|
EndTimeUnixNano: 0xF,
|
||||||
|
},
|
||||||
|
Span(5): {
|
||||||
|
TraceId: Trace(2).B(),
|
||||||
|
ParentSpanId: Span(4).B(),
|
||||||
|
Attributes: attrs(traceparent(Trace(1), Span(1))),
|
||||||
|
StartTimeUnixNano: 9,
|
||||||
|
EndTimeUnixNano: 0xE,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
actionSets: [][]action{
|
||||||
|
0: {
|
||||||
|
{
|
||||||
|
exports: []Span{1, 2, 3, 4, 5},
|
||||||
|
uploads: []any{1, 2, 3, mapped{4, Trace(1)}, mapped{5, Trace(1)}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
1: {
|
||||||
|
{exports: []Span{2, 3, 5}, uploads: []any{}},
|
||||||
|
{
|
||||||
|
exports: []Span{1, 4},
|
||||||
|
uploads: []any{1, 2, 3, mapped{4, Trace(1)}, mapped{5, Trace(1)}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
2: {
|
||||||
|
{exports: []Span{2, 3, 5}, uploads: []any{}},
|
||||||
|
{
|
||||||
|
exports: []Span{1},
|
||||||
|
uploads: []any{1, 2, 3},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
exports: []Span{4},
|
||||||
|
uploads: []any{mapped{4, Trace(1)}, mapped{5, Trace(1)}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
3: {
|
||||||
|
{exports: []Span{2, 3, 5}, uploads: []any{}},
|
||||||
|
{
|
||||||
|
exports: []Span{4, 1},
|
||||||
|
uploads: []any{1, 2, 3, mapped{4, Trace(1)}, mapped{5, Trace(1)}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
4: {
|
||||||
|
{exports: []Span{2, 3, 5}, uploads: []any{}},
|
||||||
|
{exports: []Span{4}, uploads: []any{}}, // root span, but mapped to a pending trace
|
||||||
|
{
|
||||||
|
exports: []Span{1},
|
||||||
|
uploads: []any{1, 2, 3, mapped{4, Trace(1)}, mapped{5, Trace(1)}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "external parent",
|
||||||
|
spans: []*tracev1.Span{
|
||||||
|
// |??????????| Span 1 (external)
|
||||||
|
// | <======> | Span 2 (internal)
|
||||||
|
// | <====> | Span 3
|
||||||
|
// T123456789A-
|
||||||
|
Span(2): {
|
||||||
|
TraceId: Trace(1).B(),
|
||||||
|
ParentSpanId: Span(1).B(),
|
||||||
|
StartTimeUnixNano: 2,
|
||||||
|
EndTimeUnixNano: 9,
|
||||||
|
Attributes: attrs(externalParent(Span(1))),
|
||||||
|
},
|
||||||
|
Span(3): {
|
||||||
|
TraceId: Trace(1).B(),
|
||||||
|
ParentSpanId: Span(2).B(),
|
||||||
|
StartTimeUnixNano: 3,
|
||||||
|
EndTimeUnixNano: 8,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
actionSets: [][]action{
|
||||||
|
{
|
||||||
|
{exports: []Span{3}, uploads: []any{}},
|
||||||
|
{exports: []Span{2}, uploads: []any{2, 3}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{exports: []Span{2, 3}, uploads: []any{2, 3}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{exports: []Span{3, 2}, uploads: []any{3, 2}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
generatedCases := []testCase{}
|
||||||
|
for _, tc := range cases {
|
||||||
|
for i, s := range tc.spans {
|
||||||
|
if s == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
s.SpanId = Span(i).B()
|
||||||
|
}
|
||||||
|
if len(tc.actionSets) > 0 {
|
||||||
|
generated := []testCase{}
|
||||||
|
for i, actions := range tc.actionSets {
|
||||||
|
generated = append(generated, testCase{
|
||||||
|
name: fmt.Sprintf("%s (action set %d of %d)", tc.name, i+1, len(tc.actionSets)),
|
||||||
|
spans: tc.spans,
|
||||||
|
actions: actions,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
generatedCases = append(generatedCases, generated...)
|
||||||
|
} else {
|
||||||
|
generatedCases = append(generatedCases, tc)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, tc := range generatedCases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
ctrl := gomock.NewController(t)
|
||||||
|
mockClient := mock_otlptrace.NewMockClient(ctrl)
|
||||||
|
var resultsMu sync.Mutex
|
||||||
|
outputSpans := make(chan []*tracev1.ResourceSpans, 64)
|
||||||
|
mockClient.EXPECT().
|
||||||
|
UploadTraces(gomock.Any(), gomock.Any()).
|
||||||
|
DoAndReturn(func(_ context.Context, protoSpans []*tracev1.ResourceSpans) error {
|
||||||
|
resultsMu.Lock()
|
||||||
|
defer resultsMu.Unlock()
|
||||||
|
outputSpans <- protoSpans
|
||||||
|
return nil
|
||||||
|
}).
|
||||||
|
AnyTimes()
|
||||||
|
|
||||||
|
ctx := trace.Options{
|
||||||
|
DebugFlags: trace.TrackAllSpans | trace.WarnOnIncompleteSpans | trace.WarnOnIncompleteTraces | trace.WarnOnUnresolvedReferences,
|
||||||
|
}.NewContext(context.Background())
|
||||||
|
queue := trace.NewSpanExportQueue(ctx, mockClient)
|
||||||
|
|
||||||
|
for actionIdx, action := range tc.actions {
|
||||||
|
spans := []*tracev1.Span{}
|
||||||
|
for _, idx := range action.exports {
|
||||||
|
spans = append(spans, proto.Clone(tc.spans[idx]).(*tracev1.Span))
|
||||||
|
}
|
||||||
|
assert.NoError(t, queue.Enqueue(ctx, &coltracepb.ExportTraceServiceRequest{
|
||||||
|
ResourceSpans: []*tracev1.ResourceSpans{
|
||||||
|
{
|
||||||
|
Resource: Resource(1).Make().Resource,
|
||||||
|
ScopeSpans: []*tracev1.ScopeSpans{{Scope: Scope(1).Make().Scope, Spans: spans}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}))
|
||||||
|
if len(action.uploads) == 0 {
|
||||||
|
for range 5 {
|
||||||
|
runtime.Gosched()
|
||||||
|
require.Empty(t, outputSpans)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
expectedSpans := &tracev1.ResourceSpans{
|
||||||
|
Resource: Resource(1).Make().Resource,
|
||||||
|
ScopeSpans: []*tracev1.ScopeSpans{{Scope: Scope(1).Make().Scope}},
|
||||||
|
}
|
||||||
|
for _, expectedUpload := range action.uploads {
|
||||||
|
switch up := expectedUpload.(type) {
|
||||||
|
case int:
|
||||||
|
expectedSpans.ScopeSpans[0].Spans = append(expectedSpans.ScopeSpans[0].Spans, tc.spans[up])
|
||||||
|
case mapped:
|
||||||
|
clone := proto.Clone(tc.spans[up.s]).(*tracev1.Span)
|
||||||
|
clone.TraceId = up.t.B()
|
||||||
|
expectedSpans.ScopeSpans[0].Spans = append(expectedSpans.ScopeSpans[0].Spans, clone)
|
||||||
|
case *tracev1.Span:
|
||||||
|
expectedSpans.ScopeSpans[0].Spans = append(expectedSpans.ScopeSpans[0].Spans, up)
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("test bug: unexpected type: %T", up))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case resourceSpans := <-outputSpans:
|
||||||
|
expected := testutil.NewTraceResults([]*tracev1.ResourceSpans{expectedSpans})
|
||||||
|
actual := testutil.NewTraceResults(resourceSpans)
|
||||||
|
actual.AssertEqual(t, expected, "action %d/%d", actionIdx+1, len(tc.actions))
|
||||||
|
case <-time.After(1 * time.Second):
|
||||||
|
t.Fatalf("timed out waiting for upload (action %d/%d)", actionIdx+1, len(tc.actions))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !t.Failed() {
|
||||||
|
close(outputSpans)
|
||||||
|
// verify no unexpected uploads remain unread in the queue
|
||||||
|
if !assert.Empty(t, outputSpans) {
|
||||||
|
for _, out := range <-outputSpans {
|
||||||
|
t.Log(protojson.Format(out))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSpanObserver(t *testing.T) {
|
||||||
|
t.Run("observe single reference", func(t *testing.T) {
|
||||||
|
obs := trace.NewSpanObserver()
|
||||||
|
assert.Equal(t, []oteltrace.SpanID{}, obs.XUnobservedIDs())
|
||||||
|
|
||||||
|
obs.ObserveReference(Span(1).ID(), Span(2).ID())
|
||||||
|
assert.Equal(t, []oteltrace.SpanID{Span(1).ID()}, obs.XUnobservedIDs())
|
||||||
|
obs.Observe(Span(1).ID())
|
||||||
|
assert.Equal(t, []oteltrace.SpanID{}, obs.XUnobservedIDs())
|
||||||
|
})
|
||||||
|
t.Run("observe multiple references", func(t *testing.T) {
|
||||||
|
obs := trace.NewSpanObserver()
|
||||||
|
|
||||||
|
obs.ObserveReference(Span(1).ID(), Span(2).ID())
|
||||||
|
obs.ObserveReference(Span(1).ID(), Span(3).ID())
|
||||||
|
obs.ObserveReference(Span(1).ID(), Span(4).ID())
|
||||||
|
assert.Equal(t, []oteltrace.SpanID{Span(1).ID()}, obs.XUnobservedIDs())
|
||||||
|
obs.Observe(Span(1).ID())
|
||||||
|
assert.Equal(t, []oteltrace.SpanID{}, obs.XUnobservedIDs())
|
||||||
|
})
|
||||||
|
t.Run("observe before reference", func(t *testing.T) {
|
||||||
|
obs := trace.NewSpanObserver()
|
||||||
|
|
||||||
|
obs.Observe(Span(1).ID())
|
||||||
|
assert.Equal(t, []oteltrace.SpanID{}, obs.XUnobservedIDs())
|
||||||
|
obs.ObserveReference(Span(1).ID(), Span(2).ID())
|
||||||
|
assert.Equal(t, []oteltrace.SpanID{}, obs.XUnobservedIDs())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("wait", func(t *testing.T) {
|
||||||
|
obs := trace.NewSpanObserver()
|
||||||
|
obs.ObserveReference(Span(1).ID(), Span(2).ID())
|
||||||
|
obs.Observe(Span(2).ID())
|
||||||
|
obs.ObserveReference(Span(3).ID(), Span(4).ID())
|
||||||
|
obs.Observe(Span(4).ID())
|
||||||
|
obs.ObserveReference(Span(5).ID(), Span(6).ID())
|
||||||
|
obs.Observe(Span(6).ID())
|
||||||
|
waitOkToExit := atomic.Bool{}
|
||||||
|
waitExited := atomic.Bool{}
|
||||||
|
go func() {
|
||||||
|
defer waitExited.Store(true)
|
||||||
|
obs.XWait()
|
||||||
|
assert.True(t, waitOkToExit.Load(), "wait exited early")
|
||||||
|
}()
|
||||||
|
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
assert.False(t, waitExited.Load())
|
||||||
|
|
||||||
|
obs.Observe(Span(1).ID())
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
assert.False(t, waitExited.Load())
|
||||||
|
|
||||||
|
obs.Observe(Span(3).ID())
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
assert.False(t, waitExited.Load())
|
||||||
|
|
||||||
|
waitOkToExit.Store(true)
|
||||||
|
obs.Observe(Span(5).ID())
|
||||||
|
assert.Eventually(t, waitExited.Load, 10*time.Millisecond, 1*time.Millisecond)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("new references observed during wait", func(t *testing.T) {
|
||||||
|
obs := trace.NewSpanObserver()
|
||||||
|
obs.ObserveReference(Span(1).ID(), Span(2).ID())
|
||||||
|
obs.Observe(Span(2).ID())
|
||||||
|
obs.ObserveReference(Span(3).ID(), Span(4).ID())
|
||||||
|
obs.Observe(Span(4).ID())
|
||||||
|
obs.ObserveReference(Span(5).ID(), Span(6).ID())
|
||||||
|
obs.Observe(Span(6).ID())
|
||||||
|
waitOkToExit := atomic.Bool{}
|
||||||
|
waitExited := atomic.Bool{}
|
||||||
|
go func() {
|
||||||
|
defer waitExited.Store(true)
|
||||||
|
obs.XWait()
|
||||||
|
assert.True(t, waitOkToExit.Load(), "wait exited early")
|
||||||
|
}()
|
||||||
|
|
||||||
|
assert.Equal(t, []oteltrace.SpanID{Span(1).ID(), Span(3).ID(), Span(5).ID()}, obs.XUnobservedIDs())
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
assert.False(t, waitExited.Load())
|
||||||
|
|
||||||
|
obs.Observe(Span(1).ID())
|
||||||
|
assert.Equal(t, []oteltrace.SpanID{Span(3).ID(), Span(5).ID()}, obs.XUnobservedIDs())
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
assert.False(t, waitExited.Load())
|
||||||
|
|
||||||
|
obs.Observe(Span(3).ID())
|
||||||
|
assert.Equal(t, []oteltrace.SpanID{Span(5).ID()}, obs.XUnobservedIDs())
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
assert.False(t, waitExited.Load())
|
||||||
|
|
||||||
|
// observe a new reference
|
||||||
|
obs.ObserveReference(Span(7).ID(), Span(8).ID())
|
||||||
|
obs.Observe(Span(8).ID())
|
||||||
|
assert.Equal(t, []oteltrace.SpanID{Span(5).ID(), Span(7).ID()}, obs.XUnobservedIDs())
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
assert.False(t, waitExited.Load())
|
||||||
|
|
||||||
|
obs.Observe(Span(5).ID())
|
||||||
|
assert.Equal(t, []oteltrace.SpanID{Span(7).ID()}, obs.XUnobservedIDs())
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
assert.False(t, waitExited.Load())
|
||||||
|
|
||||||
|
waitOkToExit.Store(true)
|
||||||
|
obs.Observe(Span(7).ID())
|
||||||
|
assert.Equal(t, []oteltrace.SpanID{}, obs.XUnobservedIDs())
|
||||||
|
assert.Eventually(t, waitExited.Load, 10*time.Millisecond, 1*time.Millisecond)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("multiple waiters", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
obs := trace.NewSpanObserver()
|
||||||
|
obs.ObserveReference(Span(1).ID(), Span(2).ID())
|
||||||
|
obs.Observe(Span(2).ID())
|
||||||
|
|
||||||
|
waitersExited := atomic.Int32{}
|
||||||
|
for range 10 {
|
||||||
|
go func() {
|
||||||
|
defer waitersExited.Add(1)
|
||||||
|
obs.XWait()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Equal(t, []oteltrace.SpanID{Span(1).ID()}, obs.XUnobservedIDs())
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
assert.Equal(t, int32(0), waitersExited.Load())
|
||||||
|
|
||||||
|
obs.Observe(Span(1).ID())
|
||||||
|
|
||||||
|
startTime := time.Now()
|
||||||
|
for waitersExited.Load() != 10 {
|
||||||
|
if time.Since(startTime) > 1*time.Millisecond {
|
||||||
|
t.Fatal("timed out")
|
||||||
|
}
|
||||||
|
runtime.Gosched()
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSpanTracker(t *testing.T) {
|
||||||
|
t.Run("no debug flags", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
obs := trace.NewSpanObserver()
|
||||||
|
tracker := trace.NewSpanTracker(obs, 0)
|
||||||
|
tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(tracker))
|
||||||
|
tracer := tp.Tracer("test")
|
||||||
|
assert.Equal(t, []oteltrace.SpanID{}, tracker.XInflightSpans())
|
||||||
|
_, span1 := tracer.Start(context.Background(), "span 1")
|
||||||
|
assert.Equal(t, []oteltrace.SpanID{span1.SpanContext().SpanID()}, tracker.XInflightSpans())
|
||||||
|
assert.Equal(t, []oteltrace.SpanID{}, obs.XObservedIDs())
|
||||||
|
span1.End()
|
||||||
|
assert.Equal(t, []oteltrace.SpanID{}, tracker.XInflightSpans())
|
||||||
|
assert.Equal(t, []oteltrace.SpanID{}, obs.XObservedIDs())
|
||||||
|
})
|
||||||
|
t.Run("with TrackSpanReferences debug flag", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
obs := trace.NewSpanObserver()
|
||||||
|
tracker := trace.NewSpanTracker(obs, trace.TrackSpanReferences)
|
||||||
|
tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(tracker))
|
||||||
|
tracer := tp.Tracer("test")
|
||||||
|
assert.Equal(t, []oteltrace.SpanID{}, tracker.XInflightSpans())
|
||||||
|
_, span1 := tracer.Start(context.Background(), "span 1")
|
||||||
|
assert.Equal(t, []oteltrace.SpanID{span1.SpanContext().SpanID()}, tracker.XInflightSpans())
|
||||||
|
assert.Equal(t, []oteltrace.SpanID{span1.SpanContext().SpanID()}, obs.XObservedIDs())
|
||||||
|
span1.End()
|
||||||
|
assert.Equal(t, []oteltrace.SpanID{}, tracker.XInflightSpans())
|
||||||
|
assert.Equal(t, []oteltrace.SpanID{span1.SpanContext().SpanID()}, obs.XObservedIDs())
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSpanTrackerWarnings(t *testing.T) {
|
||||||
|
t.Run("WarnOnIncompleteSpans", func(t *testing.T) {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
trace.SetDebugMessageWriterForTest(t, &buf)
|
||||||
|
|
||||||
|
obs := trace.NewSpanObserver()
|
||||||
|
tracker := trace.NewSpanTracker(obs, trace.WarnOnIncompleteSpans)
|
||||||
|
tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(tracker))
|
||||||
|
tracer := tp.Tracer("test")
|
||||||
|
_, span1 := tracer.Start(context.Background(), "span 1")
|
||||||
|
|
||||||
|
assert.ErrorIs(t, tp.Shutdown(context.Background()), trace.ErrIncompleteSpans)
|
||||||
|
|
||||||
|
assert.Equal(t, fmt.Sprintf(`
|
||||||
|
==================================================
|
||||||
|
WARNING: spans not ended:
|
||||||
|
%s
|
||||||
|
Note: set TrackAllSpans flag for more info
|
||||||
|
==================================================
|
||||||
|
`, span1.SpanContext().SpanID()), buf.String())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("WarnOnIncompleteSpans with TrackAllSpans", func(t *testing.T) {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
trace.SetDebugMessageWriterForTest(t, &buf)
|
||||||
|
|
||||||
|
obs := trace.NewSpanObserver()
|
||||||
|
tracker := trace.NewSpanTracker(obs, trace.WarnOnIncompleteSpans|trace.TrackAllSpans)
|
||||||
|
tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(tracker))
|
||||||
|
tracer := tp.Tracer("test")
|
||||||
|
_, span1 := tracer.Start(context.Background(), "span 1")
|
||||||
|
|
||||||
|
assert.ErrorIs(t, tp.Shutdown(context.Background()), trace.ErrIncompleteSpans)
|
||||||
|
|
||||||
|
assert.Equal(t, fmt.Sprintf(`
|
||||||
|
==================================================
|
||||||
|
WARNING: spans not ended:
|
||||||
|
'span 1' (trace: %s | span: %s | parent: 0000000000000000)
|
||||||
|
==================================================
|
||||||
|
`, span1.SpanContext().TraceID(), span1.SpanContext().SpanID()), buf.String())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("WarnOnIncompleteSpans with TrackAllSpans and stackTraceProcessor", func(t *testing.T) {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
trace.SetDebugMessageWriterForTest(t, &buf)
|
||||||
|
|
||||||
|
obs := trace.NewSpanObserver()
|
||||||
|
tracker := trace.NewSpanTracker(obs, trace.WarnOnIncompleteSpans|trace.TrackAllSpans)
|
||||||
|
tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(&trace.XStackTraceProcessor{}), sdktrace.WithSpanProcessor(tracker))
|
||||||
|
tracer := tp.Tracer("test")
|
||||||
|
_, span1 := tracer.Start(context.Background(), "span 1")
|
||||||
|
_, file, line, _ := runtime.Caller(0)
|
||||||
|
line--
|
||||||
|
|
||||||
|
assert.ErrorIs(t, tp.Shutdown(context.Background()), trace.ErrIncompleteSpans)
|
||||||
|
|
||||||
|
assert.Equal(t, fmt.Sprintf(`
|
||||||
|
==================================================
|
||||||
|
WARNING: spans not ended:
|
||||||
|
'span 1' (trace: %s | span: %s | parent: 0000000000000000 | started at: %s:%d)
|
||||||
|
==================================================
|
||||||
|
`, span1.SpanContext().TraceID(), span1.SpanContext().SpanID(), file, line), buf.String())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("LogAllSpansOnWarn", func(t *testing.T) {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
trace.SetDebugMessageWriterForTest(t, &buf)
|
||||||
|
|
||||||
|
obs := trace.NewSpanObserver()
|
||||||
|
tracker := trace.NewSpanTracker(obs, trace.WarnOnIncompleteSpans|trace.TrackAllSpans|trace.LogAllSpansOnWarn)
|
||||||
|
tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(&trace.XStackTraceProcessor{}), sdktrace.WithSpanProcessor(tracker))
|
||||||
|
tracer := tp.Tracer("test")
|
||||||
|
_, span1 := tracer.Start(context.Background(), "span 1")
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
span1.End()
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
_, span2 := tracer.Start(context.Background(), "span 2")
|
||||||
|
_, file, line, _ := runtime.Caller(0)
|
||||||
|
line--
|
||||||
|
|
||||||
|
tp.Shutdown(context.Background())
|
||||||
|
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf(`
|
||||||
|
==================================================
|
||||||
|
WARNING: spans not ended:
|
||||||
|
'span 2' (trace: %[1]s | span: %[2]s | parent: 0000000000000000 | started at: %[3]s:%[4]d)
|
||||||
|
==================================================
|
||||||
|
|
||||||
|
==================================================
|
||||||
|
All observed spans:
|
||||||
|
'span 1' (trace: %[5]s | span: %[6]s | parent: 0000000000000000 | started at: %[3]s:%[7]d)
|
||||||
|
'span 2' (trace: %[1]s | span: %[2]s | parent: 0000000000000000 | started at: %[3]s:%[4]d)
|
||||||
|
==================================================
|
||||||
|
`,
|
||||||
|
span2.SpanContext().TraceID(), span2.SpanContext().SpanID(), file, line,
|
||||||
|
span1.SpanContext().TraceID(), span1.SpanContext().SpanID(), line-4,
|
||||||
|
), buf.String())
|
||||||
|
})
|
||||||
|
}
|
96
internal/telemetry/trace/server.go
Normal file
|
@ -0,0 +1,96 @@
|
||||||
|
package trace
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"net"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/credentials/insecure"
|
||||||
|
"google.golang.org/grpc/test/bufconn"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
|
||||||
|
sdktrace "go.opentelemetry.io/otel/sdk/trace"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Export implements ptraceotlp.GRPCServer.
|
||||||
|
func (srv *ExporterServer) Export(ctx context.Context, req *coltracepb.ExportTraceServiceRequest) (*coltracepb.ExportTraceServiceResponse, error) {
|
||||||
|
if err := srv.spanExportQueue.Enqueue(ctx, req); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &coltracepb.ExportTraceServiceResponse{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type ExporterServer struct {
|
||||||
|
coltracepb.UnimplementedTraceServiceServer
|
||||||
|
spanExportQueue *SpanExportQueue
|
||||||
|
server *grpc.Server
|
||||||
|
remoteClient otlptrace.Client
|
||||||
|
cc *grpc.ClientConn
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewServer(ctx context.Context, remoteClient otlptrace.Client) *ExporterServer {
|
||||||
|
ex := &ExporterServer{
|
||||||
|
spanExportQueue: NewSpanExportQueue(ctx, remoteClient),
|
||||||
|
remoteClient: remoteClient,
|
||||||
|
server: grpc.NewServer(grpc.Creds(insecure.NewCredentials())),
|
||||||
|
}
|
||||||
|
coltracepb.RegisterTraceServiceServer(ex.server, ex)
|
||||||
|
return ex
|
||||||
|
}
|
||||||
|
|
||||||
|
func (srv *ExporterServer) Start(ctx context.Context) {
|
||||||
|
lis := bufconn.Listen(4096)
|
||||||
|
go func() {
|
||||||
|
if err := srv.remoteClient.Start(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
_ = srv.server.Serve(lis)
|
||||||
|
}()
|
||||||
|
cc, err := grpc.NewClient("passthrough://ignore",
|
||||||
|
grpc.WithContextDialer(func(context.Context, string) (net.Conn, error) {
|
||||||
|
return lis.Dial()
|
||||||
|
}), grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
srv.cc = cc
|
||||||
|
}
|
||||||
|
|
||||||
|
func (srv *ExporterServer) NewClient() otlptrace.Client {
|
||||||
|
return otlptracegrpc.NewClient(
|
||||||
|
otlptracegrpc.WithGRPCConn(srv.cc),
|
||||||
|
otlptracegrpc.WithTimeout(1*time.Minute),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (srv *ExporterServer) SpanProcessors() []sdktrace.SpanProcessor {
|
||||||
|
return []sdktrace.SpanProcessor{srv.spanExportQueue.tracker}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (srv *ExporterServer) Shutdown(ctx context.Context) error {
|
||||||
|
stopped := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
srv.server.GracefulStop()
|
||||||
|
close(stopped)
|
||||||
|
}()
|
||||||
|
select {
|
||||||
|
case <-stopped:
|
||||||
|
case <-ctx.Done():
|
||||||
|
return context.Cause(ctx)
|
||||||
|
}
|
||||||
|
var errs []error
|
||||||
|
if err := srv.spanExportQueue.WaitForSpans(30 * time.Second); err != nil {
|
||||||
|
errs = append(errs, err)
|
||||||
|
}
|
||||||
|
if err := srv.spanExportQueue.Close(ctx); err != nil {
|
||||||
|
errs = append(errs, err)
|
||||||
|
}
|
||||||
|
if err := srv.remoteClient.Stop(ctx); err != nil {
|
||||||
|
errs = append(errs, err)
|
||||||
|
}
|
||||||
|
return errors.Join(errs...)
|
||||||
|
}
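A minimal usage sketch of the exporter server above, assuming a stock otlptracegrpc client as the remote (collector endpoint taken from the usual OTEL_* environment variables); in normal operation trace.NewContext performs this wiring automatically, so this is illustrative only.

func newTracing(ctx context.Context) (*sdktrace.TracerProvider, *trace.ExporterServer) {
	remote := otlptracegrpc.NewClient() // assumption: collector endpoint via OTEL_* env vars
	srv := trace.NewServer(ctx, remote) // in-process server that buffers and correlates spans
	srv.Start(ctx)
	exp, err := otlptrace.New(ctx, srv.NewClient()) // SDK exporter pointed at the local server
	if err != nil {
		panic(err)
	}
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	return tp, srv // call srv.Shutdown(ctx) after tp.Shutdown(ctx) when done
}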
|
9
internal/telemetry/trace/testdata/README.md
vendored
Normal file
|
@ -0,0 +1,9 @@
|
||||||
|
These trace recordings are generated as follows:
|
||||||
|
|
||||||
|
- recording_01_single_trace.json:
|
||||||
|
|
||||||
|
`go test -v -run "^TestOTLPTracing$" -env.trace-debug-flags=+32 github.com/pomerium/pomerium/internal/testenv/selftests | grep -ozP "(?s)(?<=All Events:\n).*?(?=\n=====)"`
|
||||||
|
|
||||||
|
- recording_02_multi_trace.json:
|
||||||
|
|
||||||
|
`go test -v -run "^TestOTLPTracing_TraceCorrelation$" -env.trace-debug-flags=+32 github.com/pomerium/pomerium/internal/testenv/selftests | grep -ozP "(?s)(?<=All Events:\n).*?(?=\n=====)"`
|
38
internal/telemetry/trace/testdata/recording_01_single_trace.json
vendored
Normal file
File diff suppressed because one or more lines are too long
20
internal/telemetry/trace/testdata/recording_02_multi_trace.json
vendored
Normal file
File diff suppressed because one or more lines are too long
|
@ -2,87 +2,238 @@ package trace
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/url"
|
"runtime"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
octrace "go.opencensus.io/trace"
|
"go.opentelemetry.io/otel/attribute"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
|
||||||
"github.com/pomerium/pomerium/internal/log"
|
"go.opentelemetry.io/otel/sdk/resource"
|
||||||
|
sdktrace "go.opentelemetry.io/otel/sdk/trace"
|
||||||
|
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
|
||||||
|
"go.opentelemetry.io/otel/trace"
|
||||||
|
"go.opentelemetry.io/otel/trace/noop"
|
||||||
|
coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
type Options struct {
|
||||||
// DatadogTracingProviderName is the name of the tracing provider Datadog.
|
DebugFlags DebugFlags
|
||||||
DatadogTracingProviderName = "datadog"
|
RemoteClient otlptrace.Client
|
||||||
// JaegerTracingProviderName is the name of the tracing provider Jaeger.
|
|
||||||
JaegerTracingProviderName = "jaeger"
|
|
||||||
// ZipkinTracingProviderName is the name of the tracing provider Zipkin.
|
|
||||||
ZipkinTracingProviderName = "zipkin"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Provider is a trace provider.
|
|
||||||
type Provider interface {
|
|
||||||
Register(options *TracingOptions) error
|
|
||||||
Unregister() error
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// TracingOptions contains the configurations settings for a http server.
|
func (op Options) NewContext(parent context.Context) context.Context {
|
||||||
type TracingOptions struct {
|
if systemContextFromContext(parent) != nil {
|
||||||
// Shared
|
panic("parent already contains trace system context")
|
||||||
Provider string
|
}
|
||||||
Service string
|
if op.RemoteClient == nil {
|
||||||
Debug bool
|
op.RemoteClient = NewRemoteClientFromEnv()
|
||||||
|
}
|
||||||
// Datadog
|
sys := &systemContext{
|
||||||
DatadogAddress string
|
options: op,
|
||||||
|
tpm: &tracerProviderManager{},
|
||||||
// Jaeger
|
}
|
||||||
|
ctx := context.WithValue(parent, systemContextKey, sys)
|
||||||
// CollectorEndpoint is the full url to the Jaeger HTTP Thrift collector.
|
sys.exporterServer = NewServer(ctx, op.RemoteClient)
|
||||||
// For example, http://localhost:14268/api/traces
|
sys.exporterServer.Start(ctx)
|
||||||
JaegerCollectorEndpoint *url.URL
|
return ctx
|
||||||
// AgentEndpoint instructs exporter to send spans to jaeger-agent at this address.
|
|
||||||
// For example, localhost:6831.
|
|
||||||
JaegerAgentEndpoint string
|
|
||||||
|
|
||||||
// Zipkin
|
|
||||||
|
|
||||||
// ZipkinEndpoint configures the zipkin collector URI
|
|
||||||
// Example: http://zipkin:9411/api/v2/spans
|
|
||||||
ZipkinEndpoint *url.URL
|
|
||||||
|
|
||||||
// SampleRate is percentage of requests which are sampled
|
|
||||||
SampleRate float64
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Enabled indicates whether tracing is enabled on a given TracingOptions
|
// NewContext creates a new top-level background context with tracing machinery
|
||||||
func (t *TracingOptions) Enabled() bool {
|
// and configuration that will be used when creating new tracer providers.
|
||||||
return t.Provider != ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetProvider creates a new trace provider from TracingOptions.
|
|
||||||
func GetProvider(opts *TracingOptions) (Provider, error) {
|
|
||||||
var provider Provider
|
|
||||||
switch opts.Provider {
|
|
||||||
case DatadogTracingProviderName:
|
|
||||||
provider = new(datadogProvider)
|
|
||||||
case JaegerTracingProviderName:
|
|
||||||
provider = new(jaegerProvider)
|
|
||||||
case ZipkinTracingProviderName:
|
|
||||||
provider = new(zipkinProvider)
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("telemetry/trace: provider %s unknown", opts.Provider)
|
|
||||||
}
|
|
||||||
octrace.ApplyConfig(octrace.Config{DefaultSampler: octrace.ProbabilitySampler(opts.SampleRate)})
|
|
||||||
|
|
||||||
log.Debug().Interface("Opts", opts).Msg("telemetry/trace: provider created")
|
|
||||||
return provider, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// StartSpan starts a new child span of the current span in the context. If
|
|
||||||
// there is no span in the context, creates a new trace and span.
|
|
||||||
//
|
//
|
||||||
// Returned context contains the newly created span. You can use it to
|
// Any context created with NewContext should eventually be shut down by calling
|
||||||
// propagate the returned span in process.
|
// [ShutdownContext] to ensure all traces are exported.
|
||||||
func StartSpan(ctx context.Context, name string, o ...octrace.StartOption) (context.Context, *octrace.Span) {
|
//
|
||||||
return octrace.StartSpan(ctx, name, o...)
|
// The parent context should be context.Background(), or a background context
|
||||||
|
// containing a logger. If any context in the parent's hierarchy was created
|
||||||
|
// by NewContext, this will panic.
|
||||||
|
func NewContext(parent context.Context) context.Context {
|
||||||
|
return Options{}.NewContext(parent)
|
||||||
|
}
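A hedged sketch of the lifecycle these comments describe; the service name and debug flag below are placeholders, not project defaults.

func main() {
	// Create the tracing context once, near the top of the program.
	ctx := trace.Options{DebugFlags: trace.TrackSpanCallers}.NewContext(context.Background())
	// Always shut it down before exiting so buffered spans are exported.
	defer trace.ShutdownContext(ctx)

	tp := trace.NewTracerProvider(ctx, "ExampleService")
	_, span := tp.Tracer(trace.PomeriumCoreTracer).Start(ctx, "startup")
	span.End()
}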
|
||||||
|
|
||||||
|
// NewTracerProvider creates a new [trace.TracerProvider] with the given service
|
||||||
|
// name and options.
|
||||||
|
//
|
||||||
|
// A context returned by [NewContext] must exist somewhere in the hierarchy of
|
||||||
|
// ctx, otherwise a no-op TracerProvider is returned. The configuration embedded
|
||||||
|
// within that context will be used to configure its resource attributes and
|
||||||
|
// exporter automatically.
|
||||||
|
func NewTracerProvider(ctx context.Context, serviceName string, opts ...sdktrace.TracerProviderOption) trace.TracerProvider {
|
||||||
|
sys := systemContextFromContext(ctx)
|
||||||
|
if sys == nil {
|
||||||
|
return noop.NewTracerProvider()
|
||||||
|
}
|
||||||
|
_, file, line, _ := runtime.Caller(1)
|
||||||
|
exp, err := otlptrace.New(ctx, sys.exporterServer.NewClient())
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
r, err := resource.Merge(
|
||||||
|
resource.Default(),
|
||||||
|
resource.NewWithAttributes(
|
||||||
|
semconv.SchemaURL,
|
||||||
|
semconv.ServiceName(serviceName),
|
||||||
|
attribute.String("provider.created_at", fmt.Sprintf("%s:%d", file, line)),
|
||||||
|
),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
options := []sdktrace.TracerProviderOption{}
|
||||||
|
if sys.options.DebugFlags.Check(TrackSpanCallers) {
|
||||||
|
options = append(options, sdktrace.WithSpanProcessor(&stackTraceProcessor{}))
|
||||||
|
}
|
||||||
|
options = append(append(options,
|
||||||
|
sdktrace.WithBatcher(exp),
|
||||||
|
sdktrace.WithResource(r),
|
||||||
|
), opts...)
|
||||||
|
for _, proc := range sys.exporterServer.SpanProcessors() {
|
||||||
|
options = append(options, sdktrace.WithSpanProcessor(proc))
|
||||||
|
}
|
||||||
|
tp := sdktrace.NewTracerProvider(options...)
|
||||||
|
sys.tpm.Add(tp)
|
||||||
|
return tp
|
||||||
|
}
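For illustration, callers can append their own SDK options, and a context that was not created by NewContext simply yields a no-op provider; the sampler shown here is an arbitrary example, not a Pomerium default.

// Extra TracerProviderOptions are applied on top of the exporter and resource
// configured above; with a plain context.Background() this would be a noop provider.
tp := trace.NewTracerProvider(ctx, "ExampleService",
	sdktrace.WithSampler(sdktrace.TraceIDRatioBased(0.25)),
)
_, span := tp.Tracer(trace.PomeriumCoreTracer).Start(ctx, "sampled work")
span.End()
// Provider shutdown is handled by trace.ShutdownContext on the root context.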
|
||||||
|
|
||||||
|
// Continue starts a new span using the tracer provider of the span in the given
|
||||||
|
// context.
|
||||||
|
//
|
||||||
|
// In most cases, it is better to start spans directly from a specific tracer,
|
||||||
|
// obtained via dependency injection or some other mechanism. This function is
|
||||||
|
// useful in shared code where the tracer used to start the span is not
|
||||||
|
// necessarily the same every time, but can change based on the call site.
|
||||||
|
func Continue(ctx context.Context, name string, o ...trace.SpanStartOption) (context.Context, trace.Span) {
|
||||||
|
return trace.SpanFromContext(ctx).
|
||||||
|
TracerProvider().
|
||||||
|
Tracer(PomeriumCoreTracer).
|
||||||
|
Start(ctx, name, o...)
|
||||||
|
}
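A short sketch of the call pattern this enables in shared helpers (the function name is hypothetical):

// The span's own tracer provider is reused, so no tracer needs to be passed in.
func lookupSession(ctx context.Context, id string) error {
	ctx, span := trace.Continue(ctx, "lookupSession")
	defer span.End()
	_ = ctx // ... perform the lookup using ctx ...
	return nil
}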
|
||||||
|
|
||||||
|
// ShutdownContext will gracefully shut down all tracing resources created with
|
||||||
|
// a context returned by [NewContext], including all tracer providers and the
|
||||||
|
// underlying exporter and remote client.
|
||||||
|
//
|
||||||
|
// This should only be called once before exiting, but subsequent calls are
|
||||||
|
// a no-op.
|
||||||
|
//
|
||||||
|
// The provided context does not necessarily need to be the exact context
|
||||||
|
// returned by [NewContext]; it can be anywhere in its context hierarchy and
|
||||||
|
// this function will have the same effect.
|
||||||
|
func ShutdownContext(ctx context.Context) error {
|
||||||
|
sys := systemContextFromContext(ctx)
|
||||||
|
if sys == nil {
|
||||||
|
panic("context was not created with trace.NewContext")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !sys.shutdown.CompareAndSwap(false, true) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var errs []error
|
||||||
|
if err := sys.tpm.ShutdownAll(context.Background()); err != nil {
|
||||||
|
errs = append(errs, fmt.Errorf("error shutting down tracer providers: %w", err))
|
||||||
|
}
|
||||||
|
if err := sys.exporterServer.Shutdown(context.Background()); err != nil {
|
||||||
|
errs = append(errs, fmt.Errorf("error shutting down trace exporter: %w", err))
|
||||||
|
}
|
||||||
|
return errors.Join(errs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func ExporterServerFromContext(ctx context.Context) coltracepb.TraceServiceServer {
|
||||||
|
if sys := systemContextFromContext(ctx); sys != nil {
|
||||||
|
return sys.exporterServer
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func RemoteClientFromContext(ctx context.Context) otlptrace.Client {
|
||||||
|
if sys := systemContextFromContext(ctx); sys != nil {
|
||||||
|
return sys.options.RemoteClient
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func DebugFlagsFromContext(ctx context.Context) DebugFlags {
|
||||||
|
if sys := systemContextFromContext(ctx); sys != nil {
|
||||||
|
return sys.options.DebugFlags
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// WaitForSpans will block up to the given max duration and wait for all
|
||||||
|
// in-flight spans from tracers created with the given context to end. This
|
||||||
|
// function can be called more than once, and is safe to call from multiple
|
||||||
|
// goroutines in parallel.
|
||||||
|
//
|
||||||
|
// This requires the [TrackSpanReferences] debug flag to have been set with
|
||||||
|
// [Options.NewContext]. Otherwise, this function is a no-op and will return
|
||||||
|
// immediately.
|
||||||
|
//
|
||||||
|
// If this function blocks for more than 10 seconds, it will print a warning
|
||||||
|
// to stderr containing a list of span IDs it is waiting for, and the IDs of
|
||||||
|
// their parents (if known). Additionally, if the [TrackAllSpans] debug flag
|
||||||
|
// is set, details about parent spans will be displayed, including call site
|
||||||
|
// and trace ID.
|
||||||
|
func WaitForSpans(ctx context.Context, maxDuration time.Duration) error {
|
||||||
|
if sys := systemContextFromContext(ctx); sys != nil {
|
||||||
|
return sys.exporterServer.spanExportQueue.WaitForSpans(maxDuration)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
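A sketch of draining spans at shutdown, assuming the context was created with the TrackSpanReferences debug flag set (without it this call returns immediately); the logging shown is illustrative.

// Give in-flight spans up to 10 seconds to end before flushing and shutting down.
if err := trace.WaitForSpans(ctx, 10*time.Second); err != nil {
	fmt.Fprintln(os.Stderr, "warning: incomplete spans:", err) // assumption: plain stderr logging
}
_ = trace.ForceFlush(ctx)
_ = trace.ShutdownContext(ctx)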
|
||||||
|
|
||||||
|
// ForceFlush immediately exports all spans that have not yet been exported for
|
||||||
|
// all tracer providers created using the given context.
|
||||||
|
func ForceFlush(ctx context.Context) error {
|
||||||
|
if sys := systemContextFromContext(ctx); sys != nil {
|
||||||
|
var errs []error
|
||||||
|
for _, tp := range sys.tpm.tracerProviders {
|
||||||
|
errs = append(errs, tp.ForceFlush(ctx))
|
||||||
|
}
|
||||||
|
return errors.Join(errs...)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type systemContextKeyType struct{}
|
||||||
|
|
||||||
|
var systemContextKey systemContextKeyType
|
||||||
|
|
||||||
|
type systemContext struct {
|
||||||
|
options Options
|
||||||
|
tpm *tracerProviderManager
|
||||||
|
exporterServer *ExporterServer
|
||||||
|
shutdown atomic.Bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func systemContextFromContext(ctx context.Context) *systemContext {
|
||||||
|
sys, _ := ctx.Value(systemContextKey).(*systemContext)
|
||||||
|
return sys
|
||||||
|
}
|
||||||
|
|
||||||
|
type tracerProviderManager struct {
|
||||||
|
mu sync.Mutex
|
||||||
|
tracerProviders []*sdktrace.TracerProvider
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tpm *tracerProviderManager) ShutdownAll(ctx context.Context) error {
|
||||||
|
tpm.mu.Lock()
|
||||||
|
defer tpm.mu.Unlock()
|
||||||
|
var errs []error
|
||||||
|
for _, tp := range tpm.tracerProviders {
|
||||||
|
errs = append(errs, tp.ForceFlush(ctx))
|
||||||
|
}
|
||||||
|
for _, tp := range tpm.tracerProviders {
|
||||||
|
errs = append(errs, tp.Shutdown(ctx))
|
||||||
|
}
|
||||||
|
clear(tpm.tracerProviders)
|
||||||
|
return errors.Join(errs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tpm *tracerProviderManager) Add(tp *sdktrace.TracerProvider) {
|
||||||
|
tpm.mu.Lock()
|
||||||
|
defer tpm.mu.Unlock()
|
||||||
|
tpm.tracerProviders = append(tpm.tracerProviders, tp)
|
||||||
}
|
}
|
||||||
|
|
71
internal/telemetry/trace/trace_export_test.go
Normal file
|
@ -0,0 +1,71 @@
|
||||||
|
package trace
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cmp"
|
||||||
|
"io"
|
||||||
|
"slices"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
oteltrace "go.opentelemetry.io/otel/trace"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
NewSpanObserver = newSpanObserver
|
||||||
|
NewSpanTracker = newSpanTracker
|
||||||
|
)
|
||||||
|
|
||||||
|
type XStackTraceProcessor = stackTraceProcessor
|
||||||
|
|
||||||
|
func (obs *spanObserver) XWait() {
|
||||||
|
obs.wait(nil, 5*time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (obs *spanObserver) XUnobservedIDs() []oteltrace.SpanID {
|
||||||
|
obs.cond.L.Lock()
|
||||||
|
defer obs.cond.L.Unlock()
|
||||||
|
ids := []oteltrace.SpanID{}
|
||||||
|
for k, v := range obs.referencedIDs {
|
||||||
|
if v.IsValid() {
|
||||||
|
ids = append(ids, k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
slices.SortFunc(ids, func(a, b oteltrace.SpanID) int {
|
||||||
|
return cmp.Compare(a.String(), b.String())
|
||||||
|
})
|
||||||
|
return ids
|
||||||
|
}
|
||||||
|
|
||||||
|
func (obs *spanObserver) XObservedIDs() []oteltrace.SpanID {
|
||||||
|
obs.cond.L.Lock()
|
||||||
|
defer obs.cond.L.Unlock()
|
||||||
|
ids := []oteltrace.SpanID{}
|
||||||
|
for k, v := range obs.referencedIDs {
|
||||||
|
if !v.IsValid() {
|
||||||
|
ids = append(ids, k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
slices.SortFunc(ids, func(a, b oteltrace.SpanID) int {
|
||||||
|
return cmp.Compare(a.String(), b.String())
|
||||||
|
})
|
||||||
|
return ids
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *spanTracker) XInflightSpans() []oteltrace.SpanID {
|
||||||
|
ids := []oteltrace.SpanID{}
|
||||||
|
t.inflightSpans.Range(func(key, _ any) bool {
|
||||||
|
ids = append(ids, key.(oteltrace.SpanID))
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
slices.SortFunc(ids, func(a, b oteltrace.SpanID) int {
|
||||||
|
return cmp.Compare(a.String(), b.String())
|
||||||
|
})
|
||||||
|
return ids
|
||||||
|
}
|
||||||
|
|
||||||
|
func SetDebugMessageWriterForTest(t testing.TB, w io.Writer) {
|
||||||
|
debugMessageWriter = w
|
||||||
|
t.Cleanup(func() {
|
||||||
|
debugMessageWriter = nil
|
||||||
|
})
|
||||||
|
}
|
|
@ -1,27 +0,0 @@
|
||||||
package trace
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/url"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestGetProvider(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
opts *TracingOptions
|
|
||||||
wantErr bool
|
|
||||||
}{
|
|
||||||
{"jaeger", &TracingOptions{JaegerAgentEndpoint: "localhost:6831", Service: "all", Provider: "jaeger"}, false},
|
|
||||||
{"jaeger with debug", &TracingOptions{JaegerAgentEndpoint: "localhost:6831", Service: "all", Provider: "jaeger", Debug: true}, false},
|
|
||||||
{"jaeger no endpoint", &TracingOptions{JaegerAgentEndpoint: "", Service: "all", Provider: "jaeger"}, false},
|
|
||||||
{"unknown provider", &TracingOptions{JaegerAgentEndpoint: "localhost:0", Service: "all", Provider: "Lucius Cornelius Sulla"}, true},
|
|
||||||
{"zipkin with debug", &TracingOptions{ZipkinEndpoint: &url.URL{Host: "localhost"}, Service: "all", Provider: "zipkin", Debug: true}, false},
|
|
||||||
}
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
if _, err := GetProvider(tt.opts); (err != nil) != tt.wantErr {
|
|
||||||
t.Errorf("RegisterTracing() error = %v, wantErr %v", err, tt.wantErr)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
136
internal/telemetry/trace/util.go
Normal file
|
@ -0,0 +1,136 @@
|
||||||
|
package trace
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
"unique"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/attribute"
|
||||||
|
oteltrace "go.opentelemetry.io/otel/trace"
|
||||||
|
commonv1 "go.opentelemetry.io/proto/otlp/common/v1"
|
||||||
|
tracev1 "go.opentelemetry.io/proto/otlp/trace/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
func ParseTraceparent(traceparent string) (oteltrace.SpanContext, error) {
|
||||||
|
parts := strings.Split(traceparent, "-")
|
||||||
|
if len(parts) != 4 {
|
||||||
|
return oteltrace.SpanContext{}, fmt.Errorf("malformed traceparent: expected 4 segments, found %d", len(parts))
|
||||||
|
}
|
||||||
|
traceID, err := oteltrace.TraceIDFromHex(parts[1])
|
||||||
|
if err != nil {
|
||||||
|
return oteltrace.SpanContext{}, fmt.Errorf("malformed traceparent: invalid trace ID: %w", err)
|
||||||
|
}
|
||||||
|
spanID, err := oteltrace.SpanIDFromHex(parts[2])
|
||||||
|
if err != nil {
|
||||||
|
return oteltrace.SpanContext{}, fmt.Errorf("malformed traceparent: invalid span ID: %w", err)
|
||||||
|
}
|
||||||
|
var traceFlags oteltrace.TraceFlags
|
||||||
|
if flags, err := hex.DecodeString(parts[3]); err != nil {
|
||||||
|
return oteltrace.SpanContext{}, fmt.Errorf("malformed traceparent: invalid trace flags: %w", err)
|
||||||
|
} else if len(flags) == 1 {
|
||||||
|
traceFlags = oteltrace.TraceFlags(flags[0])
|
||||||
|
} else {
|
||||||
|
return oteltrace.SpanContext{}, fmt.Errorf("malformed traceparent: invalid trace flags of size %d", len(flags))
|
||||||
|
}
|
||||||
|
if len(traceID) != 16 {
|
||||||
|
return oteltrace.SpanContext{}, fmt.Errorf("malformed traceparent: invalid trace ID of size %d", len(traceID))
|
||||||
|
}
|
||||||
|
if len(spanID) != 8 {
|
||||||
|
return oteltrace.SpanContext{}, fmt.Errorf("malformed traceparent: invalid span ID of size %d", len(spanID))
|
||||||
|
}
|
||||||
|
return oteltrace.NewSpanContext(oteltrace.SpanContextConfig{
|
||||||
|
TraceID: traceID,
|
||||||
|
SpanID: spanID,
|
||||||
|
TraceFlags: traceFlags,
|
||||||
|
}), nil
|
||||||
|
}
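An illustrative parse of a W3C traceparent value (the hex digits below are the example from the Trace Context spec, not real IDs):

// "version-traceid-spanid-flags"; the version segment is currently ignored here.
sc, err := trace.ParseTraceparent("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01")
if err != nil {
	panic(err)
}
fmt.Println(sc.TraceID(), sc.SpanID(), sc.IsSampled()) // flags 01 => sampled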
|
||||||
|
|
||||||
|
// WithTraceFromSpanContext returns a copy of traceparent with the trace ID
|
||||||
|
// (2nd segment) and trace flags (4th segment) replaced with the corresponding
|
||||||
|
// values from spanContext.
|
||||||
|
func WithTraceFromSpanContext(traceparent string, spanContext oteltrace.SpanContext) string {
|
||||||
|
parts := strings.Split(traceparent, "-")
|
||||||
|
if len(parts) != 4 {
|
||||||
|
return traceparent
|
||||||
|
}
|
||||||
|
parts[1] = spanContext.TraceID().String()
|
||||||
|
parts[3] = spanContext.TraceFlags().String()
|
||||||
|
return strings.Join(parts, "-")
|
||||||
|
}
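In the same vein, a hedged example of re-parenting an incoming traceparent onto a locally observed trace:

// Only segments 2 (trace ID) and 4 (flags) change; the parent span ID is kept.
local := oteltrace.SpanContextFromContext(ctx) // span context of a locally started span
out := trace.WithTraceFromSpanContext("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01", local)
fmt.Println(out)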
|
||||||
|
|
||||||
|
func FormatSpanName(span *tracev1.Span) {
|
||||||
|
hasVariables := strings.Contains(span.GetName(), "${")
|
||||||
|
if hasVariables {
|
||||||
|
replacements := make([]string, 0, 6)
|
||||||
|
for _, attr := range span.Attributes {
|
||||||
|
switch attr.Key {
|
||||||
|
case "http.url":
|
||||||
|
u, _ := url.Parse(attr.Value.GetStringValue())
|
||||||
|
replacements = append(replacements,
|
||||||
|
"${path}", u.Path,
|
||||||
|
"${host}", u.Host,
|
||||||
|
)
|
||||||
|
case "http.method":
|
||||||
|
replacements = append(replacements, "${method}", attr.Value.GetStringValue())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
span.Name = strings.NewReplacer(replacements...).Replace(span.Name)
|
||||||
|
}
|
||||||
|
}
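For illustration, a templated span name is expanded from the http.* attributes roughly as follows; the attribute values are made up, and tracev1/commonv1 refer to the OTLP proto packages imported above.

// Expands to "GET example.com/foo"; names without "${" are left untouched.
span := &tracev1.Span{
	Name: "${method} ${host}${path}",
	Attributes: []*commonv1.KeyValue{
		{Key: "http.method", Value: &commonv1.AnyValue{Value: &commonv1.AnyValue_StringValue{StringValue: "GET"}}},
		{Key: "http.url", Value: &commonv1.AnyValue{Value: &commonv1.AnyValue_StringValue{StringValue: "https://example.com/foo"}}},
	},
}
trace.FormatSpanName(span)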
|
||||||
|
|
||||||
|
var (
|
||||||
|
zeroSpanID oteltrace.SpanID
|
||||||
|
zeroTraceID = unique.Make(oteltrace.TraceID([16]byte{}))
|
||||||
|
)
|
||||||
|
|
||||||
|
func ToSpanID(bytes []byte) (oteltrace.SpanID, bool) {
|
||||||
|
switch len(bytes) {
|
||||||
|
case 0:
|
||||||
|
return zeroSpanID, true
|
||||||
|
case 8:
|
||||||
|
return oteltrace.SpanID(bytes), true
|
||||||
|
}
|
||||||
|
return zeroSpanID, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func ToTraceID(bytes []byte) (unique.Handle[oteltrace.TraceID], bool) {
|
||||||
|
switch len(bytes) {
|
||||||
|
case 0:
|
||||||
|
return zeroTraceID, true
|
||||||
|
case 16:
|
||||||
|
return unique.Make(oteltrace.TraceID(bytes)), true
|
||||||
|
}
|
||||||
|
return zeroTraceID, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewAttributeSet(kvs ...*commonv1.KeyValue) attribute.Set {
|
||||||
|
attrs := make([]attribute.KeyValue, len(kvs))
|
||||||
|
for i, kv := range kvs {
|
||||||
|
var value attribute.Value
|
||||||
|
switch v := kv.Value.Value.(type) {
|
||||||
|
case *commonv1.AnyValue_BoolValue:
|
||||||
|
value = attribute.BoolValue(v.BoolValue)
|
||||||
|
case *commonv1.AnyValue_BytesValue:
|
||||||
|
value = attribute.StringValue(string(v.BytesValue))
|
||||||
|
case *commonv1.AnyValue_DoubleValue:
|
||||||
|
value = attribute.Float64Value(v.DoubleValue)
|
||||||
|
case *commonv1.AnyValue_IntValue:
|
||||||
|
value = attribute.Int64Value(v.IntValue)
|
||||||
|
case *commonv1.AnyValue_StringValue:
|
||||||
|
value = attribute.StringValue(v.StringValue)
|
||||||
|
case *commonv1.AnyValue_ArrayValue:
|
||||||
|
panic("unimplemented")
|
||||||
|
case *commonv1.AnyValue_KvlistValue:
|
||||||
|
panic("unimplemented")
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("unexpected v1.isAnyValue_Value: %#v", v))
|
||||||
|
}
|
||||||
|
attrs[i] = attribute.KeyValue{
|
||||||
|
Key: attribute.Key(kv.Key),
|
||||||
|
Value: value,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return attribute.NewSet(attrs...)
|
||||||
|
}
|
|
@ -1,49 +0,0 @@
|
||||||
package trace
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
stdlog "log"
|
|
||||||
|
|
||||||
oczipkin "contrib.go.opencensus.io/exporter/zipkin"
|
|
||||||
"github.com/openzipkin/zipkin-go"
|
|
||||||
"github.com/openzipkin/zipkin-go/reporter"
|
|
||||||
zipkinHTTP "github.com/openzipkin/zipkin-go/reporter/http"
|
|
||||||
octrace "go.opencensus.io/trace"
|
|
||||||
|
|
||||||
"github.com/pomerium/pomerium/internal/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
type zipkinProvider struct {
|
|
||||||
reporter reporter.Reporter
|
|
||||||
exporter *oczipkin.Exporter
|
|
||||||
}
|
|
||||||
|
|
||||||
func (provider *zipkinProvider) Register(opts *TracingOptions) error {
|
|
||||||
localEndpoint, err := zipkin.NewEndpoint(opts.Service, "")
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("telemetry/trace: could not create local endpoint: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
logger := log.With().Str("service", "zipkin").Logger()
|
|
||||||
logWriter := &log.StdLogWrapper{Logger: &logger}
|
|
||||||
stdLogger := stdlog.New(logWriter, "", 0)
|
|
||||||
|
|
||||||
provider.reporter = zipkinHTTP.NewReporter(opts.ZipkinEndpoint.String(), zipkinHTTP.Logger(stdLogger))
|
|
||||||
provider.exporter = oczipkin.NewExporter(provider.reporter, localEndpoint)
|
|
||||||
octrace.RegisterExporter(provider.exporter)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (provider *zipkinProvider) Unregister() error {
|
|
||||||
if provider.exporter != nil {
|
|
||||||
octrace.UnregisterExporter(provider.exporter)
|
|
||||||
provider.exporter = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var err error
|
|
||||||
if provider.reporter != nil {
|
|
||||||
err = provider.reporter.Close()
|
|
||||||
provider.reporter = nil
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
|
@ -17,7 +17,6 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"math/big"
|
"math/big"
|
||||||
"math/bits"
|
|
||||||
"net"
|
"net"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
|
@ -26,25 +25,34 @@ import (
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
"syscall"
|
"syscall"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/pomerium/pomerium/config"
|
"github.com/pomerium/pomerium/config"
|
||||||
"github.com/pomerium/pomerium/config/envoyconfig/filemgr"
|
"github.com/pomerium/pomerium/config/envoyconfig/filemgr"
|
||||||
|
databroker_service "github.com/pomerium/pomerium/databroker"
|
||||||
"github.com/pomerium/pomerium/internal/log"
|
"github.com/pomerium/pomerium/internal/log"
|
||||||
|
"github.com/pomerium/pomerium/internal/telemetry/trace"
|
||||||
"github.com/pomerium/pomerium/internal/testenv/envutil"
|
"github.com/pomerium/pomerium/internal/testenv/envutil"
|
||||||
"github.com/pomerium/pomerium/internal/testenv/values"
|
"github.com/pomerium/pomerium/internal/testenv/values"
|
||||||
"github.com/pomerium/pomerium/pkg/cmd/pomerium"
|
"github.com/pomerium/pomerium/pkg/cmd/pomerium"
|
||||||
"github.com/pomerium/pomerium/pkg/envoy"
|
"github.com/pomerium/pomerium/pkg/envoy"
|
||||||
"github.com/pomerium/pomerium/pkg/grpc/databroker"
|
"github.com/pomerium/pomerium/pkg/grpc/databroker"
|
||||||
"github.com/pomerium/pomerium/pkg/health"
|
"github.com/pomerium/pomerium/pkg/health"
|
||||||
|
"github.com/pomerium/pomerium/pkg/identity/legacymanager"
|
||||||
|
"github.com/pomerium/pomerium/pkg/identity/manager"
|
||||||
"github.com/pomerium/pomerium/pkg/netutil"
|
"github.com/pomerium/pomerium/pkg/netutil"
|
||||||
"github.com/pomerium/pomerium/pkg/slices"
|
"github.com/pomerium/pomerium/pkg/slices"
|
||||||
"github.com/rs/zerolog"
|
"github.com/rs/zerolog"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
"go.opentelemetry.io/otel/attribute"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
|
||||||
|
oteltrace "go.opentelemetry.io/otel/trace"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
"google.golang.org/grpc/grpclog"
|
"google.golang.org/grpc/grpclog"
|
||||||
)
|
)
|
||||||
|
@ -56,6 +64,7 @@ type Environment interface {
|
||||||
// top-level logger scoped to this environment. It will be canceled when
|
// top-level logger scoped to this environment. It will be canceled when
|
||||||
// Stop() is called, or during test cleanup.
|
// Stop() is called, or during test cleanup.
|
||||||
Context() context.Context
|
Context() context.Context
|
||||||
|
Tracer() oteltrace.Tracer
|
||||||
|
|
||||||
Assert() *assert.Assertions
|
Assert() *assert.Assertions
|
||||||
Require() *require.Assertions
|
Require() *require.Assertions
|
||||||
|
@ -133,10 +142,29 @@ type Environment interface {
|
||||||
// the Pomerium server and Envoy.
|
// the Pomerium server and Envoy.
|
||||||
NewLogRecorder(opts ...LogRecorderOption) *LogRecorder
|
NewLogRecorder(opts ...LogRecorderOption) *LogRecorder
|
||||||
|
|
||||||
|
// GetState returns the current state of the test environment.
|
||||||
|
GetState() EnvironmentState
|
||||||
|
|
||||||
// OnStateChanged registers a callback to be invoked when the environment's
|
// OnStateChanged registers a callback to be invoked when the environment's
|
||||||
// state changes to the given state. The callback is invoked in a separate
|
// state changes to the given state. Each callback is invoked in a separate
|
||||||
// goroutine.
|
// goroutine, but the test environment will wait for all callbacks to return
|
||||||
OnStateChanged(state EnvironmentState, callback func())
|
// before continuing, after triggering the state change.
|
||||||
|
//
|
||||||
|
// Calling the returned stop function will prevent the callback from being
|
||||||
|
// run. Returns true if it stopped the callback from being run, or false if
|
||||||
|
// it already ran or is currently running.
|
||||||
|
//
|
||||||
|
// If the environment is already in the given state, the callback will be run
|
||||||
|
// in a separate goroutine immediately and the returned stop function will
|
||||||
|
// have no effect. A callback run in this way will prevent the state from
|
||||||
|
// advancing until the callback returns.
|
||||||
|
//
|
||||||
|
// State changes are triggered in the following places:
|
||||||
|
// - NotRunning->Starting: in Start(), as the first operation
|
||||||
|
// - Starting->Running: in Start(), just before returning
|
||||||
|
// - Running->Stopping: in Stop(), just before the env context is canceled
|
||||||
|
// - Stopping->Stopped: in Stop(), after all tasks have completed
|
||||||
|
OnStateChanged(state EnvironmentState, callback func()) (stop func() bool)
|
||||||
}
|
}
|
||||||
|
|
||||||
type Certificate tls.Certificate
|
type Certificate tls.Certificate
|
||||||
|
@ -153,10 +181,9 @@ func (c *Certificate) SPKIHash() string {
|
||||||
|
|
||||||
type EnvironmentState uint32
|
type EnvironmentState uint32
|
||||||
|
|
||||||
const NotRunning EnvironmentState = 0
|
|
||||||
|
|
||||||
const (
|
const (
|
||||||
Starting EnvironmentState = 1 << iota
|
NotRunning EnvironmentState = iota
|
||||||
|
Starting
|
||||||
Running
|
Running
|
||||||
Stopping
|
Stopping
|
||||||
Stopped
|
Stopped
|
||||||
|
@ -196,6 +223,9 @@ type environment struct {
|
||||||
cancel context.CancelCauseFunc
|
cancel context.CancelCauseFunc
|
||||||
cleanupOnce sync.Once
|
cleanupOnce sync.Once
|
||||||
logWriter *log.MultiWriter
|
logWriter *log.MultiWriter
|
||||||
|
tracerProvider oteltrace.TracerProvider
|
||||||
|
tracer oteltrace.Tracer
|
||||||
|
rootSpan oteltrace.Span
|
||||||
|
|
||||||
mods []WithCaller[Modifier]
|
mods []WithCaller[Modifier]
|
||||||
tasks []WithCaller[Task]
|
tasks []WithCaller[Task]
|
||||||
|
@ -204,6 +234,7 @@ type environment struct {
|
||||||
stateMu sync.Mutex
|
stateMu sync.Mutex
|
||||||
state EnvironmentState
|
state EnvironmentState
|
||||||
stateChangeListeners map[EnvironmentState][]func()
|
stateChangeListeners map[EnvironmentState][]func()
|
||||||
|
stateChangeBlockers sync.WaitGroup
|
||||||
|
|
||||||
src *configSource
|
src *configSource
|
||||||
}
|
}
|
||||||
|
@ -212,6 +243,8 @@ type EnvironmentOptions struct {
|
||||||
debug bool
|
debug bool
|
||||||
pauseOnFailure bool
|
pauseOnFailure bool
|
||||||
forceSilent bool
|
forceSilent bool
|
||||||
|
traceDebugFlags trace.DebugFlags
|
||||||
|
traceClient otlptrace.Client
|
||||||
}
|
}
|
||||||
|
|
||||||
type EnvironmentOption func(*EnvironmentOptions)
|
type EnvironmentOption func(*EnvironmentOptions)
|
||||||
|
@ -249,28 +282,57 @@ func Silent(silent ...bool) EnvironmentOption {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const StandardTraceDebugFlags = trace.TrackSpanCallers |
|
||||||
|
trace.WarnOnIncompleteSpans |
|
||||||
|
trace.WarnOnIncompleteTraces |
|
||||||
|
trace.WarnOnUnresolvedReferences |
|
||||||
|
trace.LogTraceIDMappingsOnWarn |
|
||||||
|
trace.LogAllSpansOnWarn
|
||||||
|
|
||||||
|
func WithTraceDebugFlags(flags trace.DebugFlags) EnvironmentOption {
|
||||||
|
return func(o *EnvironmentOptions) {
|
||||||
|
o.traceDebugFlags = flags
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithTraceClient(traceClient otlptrace.Client) EnvironmentOption {
|
||||||
|
return func(o *EnvironmentOptions) {
|
||||||
|
o.traceClient = traceClient
|
||||||
|
}
|
||||||
|
}
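A hedged sketch of a test opting into these options; the otlptracegrpc client is an assumption (any otlptrace.Client works), and environment cleanup is handled automatically via t.Cleanup.

func TestTracingExample(t *testing.T) {
	env := testenv.New(t,
		testenv.WithTraceDebugFlags(testenv.StandardTraceDebugFlags),
		testenv.WithTraceClient(otlptracegrpc.NewClient()), // assumes OTEL_* env vars point at a collector
	)
	env.Start()
	_, span := env.Tracer().Start(env.Context(), "example")
	span.End()
}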
|
||||||
|
|
||||||
var setGrpcLoggerOnce sync.Once
|
var setGrpcLoggerOnce sync.Once
|
||||||
|
|
||||||
|
const defaultTraceDebugFlags = trace.TrackSpanCallers
|
||||||
|
|
||||||
var (
|
var (
|
||||||
flagDebug = flag.Bool("env.debug", false, "enables test environment debug logging (equivalent to Debug() option)")
|
flagDebug = flag.Bool("env.debug", false, "enables test environment debug logging (equivalent to Debug() option)")
|
||||||
flagPauseOnFailure = flag.Bool("env.pause-on-failure", false, "enables pausing the test environment on failure (equivalent to PauseOnFailure() option)")
|
flagPauseOnFailure = flag.Bool("env.pause-on-failure", false, "enables pausing the test environment on failure (equivalent to PauseOnFailure() option)")
|
||||||
flagSilent = flag.Bool("env.silent", false, "suppresses all test environment output (equivalent to Silent() option)")
|
flagSilent = flag.Bool("env.silent", false, "suppresses all test environment output (equivalent to Silent() option)")
|
||||||
|
flagTraceDebugFlags = flag.String("env.trace-debug-flags", strconv.Itoa(defaultTraceDebugFlags), "trace debug flags (equivalent to TraceDebugFlags() option)")
|
||||||
)
|
)
|
||||||
|
|
||||||
func New(t testing.TB, opts ...EnvironmentOption) Environment {
|
func New(t testing.TB, opts ...EnvironmentOption) Environment {
|
||||||
if runtime.GOOS != "linux" {
|
addTraceDebugFlags := strings.HasPrefix(*flagTraceDebugFlags, "+")
|
||||||
t.Skip("test environment only supported on linux")
|
defaultTraceDebugFlags, err := strconv.Atoi(strings.TrimPrefix(*flagTraceDebugFlags, "+"))
|
||||||
|
if err != nil {
|
||||||
|
panic("malformed value for --env.trace-debug-flags: " + err.Error())
|
||||||
}
|
}
|
||||||
options := EnvironmentOptions{
|
options := EnvironmentOptions{
|
||||||
debug: *flagDebug,
|
debug: *flagDebug,
|
||||||
pauseOnFailure: *flagPauseOnFailure,
|
pauseOnFailure: *flagPauseOnFailure,
|
||||||
forceSilent: *flagSilent,
|
forceSilent: *flagSilent,
|
||||||
|
traceDebugFlags: trace.DebugFlags(defaultTraceDebugFlags),
|
||||||
}
|
}
|
||||||
options.apply(opts...)
|
options.apply(opts...)
|
||||||
if testing.Short() {
|
if testing.Short() {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
t.Skip("test environment disabled in short mode")
|
t.Skip("test environment disabled in short mode")
|
||||||
}
|
}
|
||||||
|
if addTraceDebugFlags {
|
||||||
|
options.traceDebugFlags |= trace.DebugFlags(defaultTraceDebugFlags)
|
||||||
|
}
|
||||||
|
trace.UseGlobalPanicTracer()
|
||||||
databroker.DebugUseFasterBackoff.Store(true)
|
databroker.DebugUseFasterBackoff.Store(true)
|
||||||
workspaceFolder, err := os.Getwd()
|
workspaceFolder, err := os.Getwd()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -305,7 +367,16 @@ func New(t testing.TB, opts ...EnvironmentOption) Environment {
|
||||||
})
|
})
|
||||||
logger := zerolog.New(writer).With().Timestamp().Logger().Level(zerolog.DebugLevel)
|
logger := zerolog.New(writer).With().Timestamp().Logger().Level(zerolog.DebugLevel)
|
||||||
|
|
||||||
ctx, cancel := context.WithCancelCause(logger.WithContext(context.Background()))
|
ctx := trace.Options{
|
||||||
|
DebugFlags: options.traceDebugFlags,
|
||||||
|
RemoteClient: options.traceClient,
|
||||||
|
}.NewContext(logger.WithContext(context.Background()))
|
||||||
|
tracerProvider := trace.NewTracerProvider(ctx, "Test Environment")
|
||||||
|
tracer := tracerProvider.Tracer(trace.PomeriumCoreTracer)
|
||||||
|
ctx, span := tracer.Start(ctx, t.Name(), oteltrace.WithNewRoot())
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancelCause(ctx)
|
||||||
taskErrGroup, ctx := errgroup.WithContext(ctx)
|
taskErrGroup, ctx := errgroup.WithContext(ctx)
|
||||||
|
|
||||||
e := &environment{
|
e := &environment{
|
||||||
|
@@ -313,7 +384,7 @@ func New(t testing.TB, opts ...EnvironmentOption) Environment {
 		t:       t,
 		assert:  assert.New(t),
 		require: require.New(t),
-		tempDir: t.TempDir(),
+		tempDir: tempDir(t),
 		ports: Ports{
 			ProxyHTTP: values.Deferred[int](),
 			ProxyGRPC: values.Deferred[int](),
@@ -329,9 +400,14 @@ func New(t testing.TB, opts ...EnvironmentOption) Environment {
 		silent:       silent,
 		ctx:          ctx,
 		cancel:       cancel,
+		tracerProvider: tracerProvider,
+		tracer:         tracer,
 		logWriter:    writer,
 		taskErrGroup: taskErrGroup,
+		stateChangeListeners: make(map[EnvironmentState][]func()),
+		rootSpan:             span,
 	}
 
 	_, err = rand.Read(e.sharedSecret[:])
 	require.NoError(t, err)
 	_, err = rand.Read(e.cookieSecret[:])
@@ -362,11 +438,13 @@ func New(t testing.TB, opts ...EnvironmentOption) Environment {
 
 func (e *environment) debugf(format string, args ...any) {
 	e.t.Helper()
+	if e.rootSpan.IsRecording() {
+		e.rootSpan.AddEvent(fmt.Sprintf(format, args...))
+	}
 	if !e.debug {
 		return
 	}
-	e.t.Logf("\x1b[34m[debug] "+format+"\x1b[0m", args...)
+	e.t.Logf("\x1b[34mDEBUG ["+e.t.Name()+"] "+format+"\x1b[0m", args...)
 }
 
 type WithCaller[T any] struct {
@@ -394,6 +472,10 @@ func (e *environment) Context() context.Context {
 	return ContextWithEnv(e.ctx, e)
 }
 
+func (e *environment) Tracer() oteltrace.Tracer {
+	return e.tracer
+}
+
 func (e *environment) Assert() *assert.Assertions {
 	return e.assert
 }
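With the new Tracer() accessor above, a test can hang its own spans and events under the per-test root span created in New(). A hedged usage sketch (not from the commit; the testenv import path is assumed, and it relies only on the Tracer(), Context(), and Start() behavior shown in these hunks plus the standard OpenTelemetry span API):

package example_test

import (
	"testing"

	"github.com/pomerium/pomerium/internal/testenv" // import path assumed
	"go.opentelemetry.io/otel/attribute"
)

func TestTracedRequest(t *testing.T) {
	env := testenv.New(t)
	env.Start()

	// Child span under the per-test root span created in New().
	_, span := env.Tracer().Start(env.Context(), "TestTracedRequest: issue request")
	span.SetAttributes(attribute.String("step", "request"))
	span.AddEvent("sending request")
	// ... exercise the environment here ...
	span.End()
}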
@@ -455,9 +537,11 @@ var ErrCauseTestCleanup = errors.New("test cleanup")
 var ErrCauseManualStop = errors.New("Stop() called")
 
 func (e *environment) Start() {
+	_, span := e.tracer.Start(e.Context(), "Start")
+	defer span.End()
 	e.debugf("Start()")
 	e.advanceState(Starting)
-	e.t.Cleanup(e.cleanup)
+	e.t.Cleanup(e.onTestCleanup)
 	e.t.Setenv("TMPDIR", e.TempDir())
 	e.debugf("temp dir: %s", e.TempDir())
 
@@ -515,6 +599,7 @@ func (e *environment) Start() {
 		log.AccessLogFieldUserAgent,
 		log.AccessLogFieldClientCertificate,
 	}
+	cfg.Options.TracingSampleRate = 1.0
 
 	e.src = &configSource{cfg: cfg}
 	e.AddTask(TaskFunc(func(ctx context.Context) error {
@@ -524,8 +609,13 @@ func (e *environment) Start() {
 			require.NoError(e.t, cfg.Options.Validate(), "invoking modifier resulted in an invalid configuration:\nadded by: "+mod.Caller)
 		}
 
-		opts := []pomerium.RunOption{
+		opts := []pomerium.Option{
 			pomerium.WithOverrideFileManager(fileMgr),
+			pomerium.WithEnvoyServerOptions(envoy.WithExitGracePeriod(30 * time.Second)),
+			pomerium.WithDataBrokerServerOptions(
+				databroker_service.WithManagerOptions(manager.WithLeaseTTL(1*time.Second)),
+				databroker_service.WithLegacyManagerOptions(legacymanager.WithLeaseTTL(1*time.Second)),
+			),
 		}
 		envoyBinaryPath := filepath.Join(e.workspaceFolder, fmt.Sprintf("pkg/envoy/files/envoy-%s-%s", runtime.GOOS, runtime.GOARCH))
 		if envutil.EnvoyProfilerAvailable(envoyBinaryPath) {
@@ -556,23 +646,29 @@ func (e *environment) Start() {
 			}
 			if len(envVars) > 0 {
 				e.debugf("adding envoy env vars: %v\n", envVars)
-				opts = append(opts, pomerium.WithEnvoyServerOptions(
-					envoy.WithExtraEnvVars(envVars...),
-					envoy.WithExitGracePeriod(10*time.Second), // allow envoy time to flush pprof data to disk
-				))
+				opts = append(opts, pomerium.WithEnvoyServerOptions(envoy.WithExtraEnvVars(envVars...)))
 			}
 		} else {
 			e.debugf("envoy profiling not available")
 		}
 
-		return pomerium.Run(ctx, e.src, opts...)
+		pom := pomerium.New(opts...)
+		e.OnStateChanged(Stopping, func() {
+			if err := pom.Shutdown(ctx); err != nil {
+				log.Ctx(ctx).Err(err).Msg("error shutting down pomerium server")
+			} else {
+				e.debugf("pomerium server shut down without error")
+			}
+		})
+		require.NoError(e.t, pom.Start(ctx, e.tracerProvider, e.src))
+		return pom.Wait()
 	}))
 
 	for i, task := range e.tasks {
-		log.Ctx(e.ctx).Debug().Str("caller", task.Caller).Msgf("starting task %d", i)
+		log.Ctx(e.Context()).Debug().Str("caller", task.Caller).Msgf("starting task %d", i)
 		e.taskErrGroup.Go(func() error {
-			defer log.Ctx(e.ctx).Debug().Str("caller", task.Caller).Msgf("task %d exited", i)
+			defer log.Ctx(e.Context()).Debug().Str("caller", task.Caller).Msgf("task %d exited", i)
 			return task.Value.Run(e.Context())
 		})
 	}
 
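The hunk above replaces the single blocking pomerium.Run call with an explicit lifecycle: construct with pomerium.New, hook Shutdown into the environment's Stopping state, Start with the environment's tracer provider, then block on Wait. A hedged sketch of that lifecycle outside the test environment (the import paths and the exact Start/Shutdown signatures beyond what the hunk shows are assumptions, as is the idea that "src" satisfies whatever config source interface Start expects):

package example

import (
	"context"

	"github.com/pomerium/pomerium/config"
	"github.com/pomerium/pomerium/internal/log"
	"github.com/pomerium/pomerium/pkg/cmd/pomerium" // import path assumed
	oteltrace "go.opentelemetry.io/otel/trace"
)

// runEmbedded mirrors the new lifecycle used by the test environment:
// construct with New, register a shutdown hook, Start, then block on Wait.
func runEmbedded(ctx context.Context, tp oteltrace.TracerProvider, src config.Source) error {
	pom := pomerium.New()
	go func() {
		<-ctx.Done() // stand-in for the environment's Stopping state change
		if err := pom.Shutdown(context.Background()); err != nil {
			log.Ctx(ctx).Err(err).Msg("error shutting down pomerium server")
		}
	}()
	if err := pom.Start(ctx, tp, src); err != nil {
		return err
	}
	return pom.Wait()
}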
@@ -695,14 +791,9 @@ func (e *environment) Stop() {
 		b.StopTimer()
 		defer b.StartTimer()
 	}
+	_, file, line, _ := runtime.Caller(1)
 	e.cleanupOnce.Do(func() {
-		e.debugf("stop: Stop() called manually")
-		e.advanceState(Stopping)
-		e.cancel(ErrCauseManualStop)
-		err := e.taskErrGroup.Wait()
-		e.advanceState(Stopped)
-		e.debugf("stop: done waiting")
-		assert.ErrorIs(e.t, err, ErrCauseManualStop)
+		e.cleanup(fmt.Errorf("%w (caller: %s:%d)", ErrCauseManualStop, file, line))
 	})
 }
 
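Stop() now records who stopped the environment by wrapping the ErrCauseManualStop sentinel with the caller's file and line; because the wrapping uses %w, the later assertion on the cancel cause still matches with errors.Is. A standard-library sketch of that wrapping, not the commit's code:

package main

import (
	"errors"
	"fmt"
	"runtime"
)

var errCauseManualStop = errors.New("Stop() called")

// stopCause mimics how Stop() decorates the sentinel with its caller while
// keeping it matchable with errors.Is.
func stopCause() error {
	_, file, line, _ := runtime.Caller(1)
	return fmt.Errorf("%w (caller: %s:%d)", errCauseManualStop, file, line)
}

func main() {
	err := stopCause()
	fmt.Println(err)                                // Stop() called (caller: .../main.go:NN)
	fmt.Println(errors.Is(err, errCauseManualStop)) // true: %w keeps the chain intact
}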
@@ -714,9 +805,14 @@ func (e *environment) Pause() {
 		e.t.Log("\x1b[31mctrl+c received, continuing\x1b[0m")
 	}
 
-func (e *environment) cleanup() {
+func (e *environment) onTestCleanup() {
 	e.cleanupOnce.Do(func() {
-		e.debugf("stop: test cleanup")
+		e.cleanup(ErrCauseTestCleanup)
+	})
+}
+
+func (e *environment) cleanup(cancelCause error) {
+	e.debugf("stop: %s", cancelCause.Error())
 	if e.t.Failed() {
 		if e.pauseOnFailure {
 			e.t.Log("\x1b[31m*** pausing on test failure; continue with ctrl+c ***\x1b[0m")
@@ -728,19 +824,32 @@ func (e *environment) cleanup() {
 		}
 	}
 	e.advanceState(Stopping)
-	e.cancel(ErrCauseTestCleanup)
-	err := e.taskErrGroup.Wait()
+	e.cancel(cancelCause)
+	errs := []error{}
+	if err := e.taskErrGroup.Wait(); err != nil {
+		errs = append(errs, fmt.Errorf("error waiting for tasks: %w", err))
+	}
+	e.rootSpan.End()
+	if err := trace.ShutdownContext(e.Context()); err != nil {
+		errs = append(errs, fmt.Errorf("error shutting down trace context: %w", err))
+	}
 	e.advanceState(Stopped)
-	e.debugf("stop: done waiting")
-	assert.ErrorIs(e.t, err, ErrCauseTestCleanup)
-	})
+	// Wait for any additional callbacks created during stopped callbacks
+	// (for consistency, we consider the stopped state to "end" here)
+	e.stateChangeBlockers.Wait()
+	e.debugf("stop: done")
+	// err can be nil if e.g. the only task is the internal pomerium task, which
+	// returns a nil error if it exits cleanly
+	if err := errors.Join(errs...); err != nil {
+		assert.ErrorIs(e.t, err, cancelCause)
+	}
 }
 
 func (e *environment) Add(m Modifier) {
 	e.t.Helper()
 	caller := getCaller()
 	e.debugf("Add: %T from %s", m, caller)
-	switch e.getState() {
+	switch e.GetState() {
 	case NotRunning:
 		for _, mod := range e.mods {
 			if mod.Value == m {
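A note on the cleanup hunk above: shutdown failures from the task group and from trace.ShutdownContext are collected and combined with errors.Join before asserting on the cancel cause. errors.Is searches every joined error, and joining zero (or all-nil) errors yields nil, which is why a fully clean shutdown skips the assertion. A standard-library sketch of that behavior:

package main

import (
	"errors"
	"fmt"
)

var errCauseTestCleanup = errors.New("test cleanup")

func main() {
	// Collect independent shutdown failures, then inspect them as one value,
	// the same pattern cleanup() uses above.
	errs := []error{}
	errs = append(errs, fmt.Errorf("error waiting for tasks: %w", errCauseTestCleanup))
	errs = append(errs, errors.New("error shutting down trace context: timed out"))

	joined := errors.Join(errs...)
	fmt.Println(errors.Is(joined, errCauseTestCleanup)) // true: Is searches every joined error

	// Joining zero (or all-nil) errors yields nil, so a fully clean shutdown
	// produces no error to assert against.
	fmt.Println(errors.Join() == nil) // true
}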
@@ -757,11 +866,11 @@ func (e *environment) Add(m Modifier) {
 		panic("test bug: cannot call Add() before Start() has returned")
 	case Running:
 		e.debugf("Add: state=Running; calling ModifyConfig")
-		e.src.ModifyConfig(e.ctx, m)
+		e.src.ModifyConfig(e.Context(), m)
 	case Stopped, Stopping:
 		panic("test bug: cannot call Add() after Stop()")
 	default:
-		panic(fmt.Sprintf("unexpected environment state: %s", e.getState()))
+		panic(fmt.Sprintf("unexpected environment state: %s", e.GetState()))
 	}
 }
 
@@ -803,34 +912,63 @@ func (e *environment) advanceState(newState EnvironmentState) {
 	if newState <= e.state {
 		panic(fmt.Sprintf("internal test environment bug: changed state to <= current: newState=%s, current=%s", newState, e.state))
 	}
+	e.stateChangeBlockers.Wait()
 	e.debugf("state %s -> %s", e.state.String(), newState.String())
 	e.state = newState
+	if len(e.stateChangeListeners[newState]) > 0 {
 		e.debugf("notifying %d listeners of state change", len(e.stateChangeListeners[newState]))
+		var wg sync.WaitGroup
 		for _, listener := range e.stateChangeListeners[newState] {
-			go listener()
+			wg.Add(1)
+			go func() {
+				_, span := e.tracer.Start(e.Context(), "State Change Callback")
+				span.SetAttributes(attribute.String("state", newState.String()))
+				defer span.End()
+				defer wg.Done()
+				listener()
+			}()
+		}
+		wg.Wait()
+		e.debugf("done notifying state change listeners")
 	}
 }
 
-func (e *environment) getState() EnvironmentState {
+func (e *environment) GetState() EnvironmentState {
 	e.stateMu.Lock()
 	defer e.stateMu.Unlock()
 	return e.state
 }
 
-func (e *environment) OnStateChanged(state EnvironmentState, callback func()) {
+func (e *environment) OnStateChanged(state EnvironmentState, callback func()) (cancel func() bool) {
 	e.stateMu.Lock()
 	defer e.stateMu.Unlock()
 
-	if e.state&state != 0 {
-		go callback()
-		return
+	_, file, line, _ := runtime.Caller(1)
+	switch {
+	case state < e.state:
+		panic(fmt.Sprintf("test bug: OnStateChanged called with state %s which is < current state (%s)", state, e.sharedSecret))
+	case state == e.state:
+		e.stateChangeBlockers.Add(1)
+		e.debugf("invoking callback for current state (state: %s, caller: %s:%d)", state.String(), file, line)
+		go func() {
+			defer func() {
+				e.stateChangeBlockers.Done()
+			}()
+			callback()
+		}()
+		return func() bool { return false }
+	default:
+		canceled := &atomic.Bool{}
+		e.stateChangeListeners[state] = append(e.stateChangeListeners[state], func() {
+			if canceled.CompareAndSwap(false, true) {
+				e.debugf("invoking state change callback (caller: %s:%d)", file, line)
+				callback()
+			}
+		})
+		return func() bool {
+			e.debugf("stopped state change callback (state: %s, caller: %s:%d)", state.String(), file, line)
+			return canceled.CompareAndSwap(false, true)
 		}
 
-	// add change listeners for all states, if there are multiple bits set
-	for state > 0 {
-		stateBit := EnvironmentState(bits.TrailingZeros32(uint32(state)))
-		state &= (state - 1)
-		e.stateChangeListeners[stateBit] = append(e.stateChangeListeners[stateBit], callback)
 	}
 }
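OnStateChanged now returns a cancel function and, when the requested state is already the current one, runs the callback asynchronously while registering itself with stateChangeBlockers so cleanup can wait for it. A hedged usage sketch of registering and withdrawing a Stopping hook from a test (the testenv import path, the exported Stopping constant, and OnStateChanged being part of the public Environment interface are assumptions based only on the identifiers visible in this diff):

package example_test

import (
	"testing"

	"github.com/pomerium/pomerium/internal/testenv" // import path assumed
)

func TestStoppingHook(t *testing.T) {
	env := testenv.New(t)

	// Runs when the environment transitions to Stopping, the same hook Start()
	// uses above to call pom.Shutdown.
	cancel := env.OnStateChanged(testenv.Stopping, func() {
		t.Log("environment is stopping; flushing test artifacts")
	})

	env.Start()
	// ... exercise the environment ...

	// A listener that is no longer needed can be withdrawn; cancel reports
	// whether the callback was prevented from running.
	if !cancel() {
		t.Log("stopping hook already ran or was already canceled")
	}
}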
Some files were not shown because too many files have changed in this diff.