Mirror of https://github.com/pomerium/pomerium.git

Merge branch 'main' into kenjenkins/databroker-patch-inmemory

Commit 0c9d51d426: 20 changed files with 1186 additions and 912 deletions

@@ -27,7 +27,6 @@ type Config struct {
    Options *Options
    AutoCertificates []tls.Certificate
    EnvoyVersion string
    Version int64

    // DerivedCertificates are TLS certificates derived from the shared secret
    DerivedCertificates []tls.Certificate

@@ -63,7 +62,6 @@ func (cfg *Config) Clone() *Config {
    _ = copy(endpoints, cfg.MetricsScrapeEndpoints)

    return &Config{
        Version: cfg.Version,
        Options: newOptions,
        AutoCertificates: cfg.AutoCertificates,
        EnvoyVersion: cfg.EnvoyVersion,
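The Clone hunk above supports a copy-then-publish update style: FileOrEnvironmentSource.check (in the next file) clones the current config, mutates the clone, and only then publishes the new pointer, so readers holding the previous *Config never observe a partially updated value. A minimal standalone sketch of that pattern, with an illustrative Config type rather than the real pomerium one:

package main

import "fmt"

// Config stands in for config.Config; only the fields needed for the sketch.
type Config struct {
    EnvoyVersion string
    Options      map[string]string
}

// Clone copies the struct so the published value can be replaced wholesale.
// (The real Clone also copies nested data such as certificates; this sketch
// keeps it minimal.)
func (cfg *Config) Clone() *Config {
    options := make(map[string]string, len(cfg.Options))
    for k, v := range cfg.Options {
        options[k] = v
    }
    return &Config{
        EnvoyVersion: cfg.EnvoyVersion,
        Options:      options,
    }
}

func main() {
    current := &Config{EnvoyVersion: "1.29", Options: map[string]string{"service": "all"}}

    // Build the next config from a clone, then swap the pointer; code that
    // already loaded `current` keeps seeing a consistent value.
    next := current.Clone()
    next.Options["service"] = "proxy"

    fmt.Println(current.Options["service"], next.Options["service"]) // all proxy
}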

@@ -10,6 +10,7 @@ import (
    "github.com/google/uuid"
    "github.com/rs/zerolog"

    "github.com/pomerium/pomerium/internal/events"
    "github.com/pomerium/pomerium/internal/fileutil"
    "github.com/pomerium/pomerium/internal/log"
    "github.com/pomerium/pomerium/internal/telemetry/metrics"

@@ -19,27 +20,27 @@ import (
// A ChangeListener is called when configuration changes.
type ChangeListener = func(context.Context, *Config)

type changeDispatcherEvent struct {
    cfg *Config
}

// A ChangeDispatcher manages listeners on config changes.
type ChangeDispatcher struct {
    sync.Mutex
    onConfigChangeListeners []ChangeListener
    target events.Target[changeDispatcherEvent]
}

// Trigger triggers a change.
func (dispatcher *ChangeDispatcher) Trigger(ctx context.Context, cfg *Config) {
    dispatcher.Lock()
    defer dispatcher.Unlock()

    for _, li := range dispatcher.onConfigChangeListeners {
        li(ctx, cfg)
    }
    dispatcher.target.Dispatch(ctx, changeDispatcherEvent{
        cfg: cfg,
    })
}

// OnConfigChange adds a listener.
func (dispatcher *ChangeDispatcher) OnConfigChange(_ context.Context, li ChangeListener) {
    dispatcher.Lock()
    defer dispatcher.Unlock()
    dispatcher.onConfigChangeListeners = append(dispatcher.onConfigChangeListeners, li)
    dispatcher.target.AddListener(func(ctx context.Context, evt changeDispatcherEvent) {
        li(ctx, evt.cfg)
    })
}

// A Source gets configuration.

@@ -114,7 +115,6 @@ func NewFileOrEnvironmentSource(
    cfg := &Config{
        Options: options,
        EnvoyVersion: envoyVersion,
        Version: 1,
    }

    ports, err := netutil.AllocatePorts(6)

@@ -152,7 +152,6 @@ func (src *FileOrEnvironmentSource) check(ctx context.Context) {
    options, err := newOptionsFromConfig(src.configFile)
    if err == nil {
        cfg = cfg.Clone()
        cfg.Version++
        cfg.Options = options
        metrics.SetConfigInfo(ctx, cfg.Options.Services, "local", cfg.Checksum(), true)
    } else {

@@ -162,7 +161,7 @@ func (src *FileOrEnvironmentSource) check(ctx context.Context) {
    src.config = cfg
    src.mu.Unlock()

    log.Info(ctx).Int64("config-version", cfg.Version).Msg("config: loaded configuration")
    log.Info(ctx).Msg("config: loaded configuration")

    src.Trigger(ctx, cfg)
}
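ChangeDispatcher, shown above, fans each new *Config out to every registered listener while holding a mutex, and after this change it also forwards the event through a generic events.Target (the new internal/events/target.go later in this diff). A minimal standalone sketch of the mutex-and-slice fan-out on its own, with illustrative types in place of the real pomerium ones:

package main

import (
    "context"
    "fmt"
    "sync"
)

// Config stands in for the real *config.Config.
type Config struct{ Name string }

// ChangeListener mirrors the listener signature used above.
type ChangeListener = func(context.Context, *Config)

// dispatcher is a minimal fan-out: a mutex guarding a slice of listeners.
type dispatcher struct {
    mu        sync.Mutex
    listeners []ChangeListener
}

// OnConfigChange registers a listener.
func (d *dispatcher) OnConfigChange(_ context.Context, li ChangeListener) {
    d.mu.Lock()
    defer d.mu.Unlock()
    d.listeners = append(d.listeners, li)
}

// Trigger calls every registered listener with the new config.
func (d *dispatcher) Trigger(ctx context.Context, cfg *Config) {
    d.mu.Lock()
    defer d.mu.Unlock()
    for _, li := range d.listeners {
        li(ctx, cfg)
    }
}

func main() {
    var d dispatcher
    d.OnConfigChange(context.Background(), func(_ context.Context, cfg *Config) {
        fmt.Println("config changed:", cfg.Name)
    })
    d.Trigger(context.Background(), &Config{Name: "example"})
}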

@@ -23,7 +23,7 @@ import (
func Test_buildPolicyTransportSocket(t *testing.T) {
    ctx := context.Background()
    cacheDir, _ := os.UserCacheDir()
    customCA := filepath.Join(cacheDir, "pomerium", "envoy", "files", "custom-ca-32484c314b584447463735303142374c31414145374650305a525539554938594d524855353757313942494d473847535231.pem")
    customCA := filepath.Join(cacheDir, "pomerium", "envoy", "files", "custom-ca-57394a4e5157303436544830.pem")

    b := New("local-grpc", "local-http", "local-metrics", filemgr.NewManager(), nil)
    rootCABytes, _ := getCombinedCertificateAuthority(&config.Config{Options: &config.Options{}})

@@ -406,10 +406,10 @@ func Test_buildPolicyTransportSocket(t *testing.T) {
    },
    "tlsCertificates": [{
        "certificateChain":{
            "filename": "`+filepath.Join(cacheDir, "pomerium", "envoy", "files", "tls-crt-354e49305a5a39414a545530374e58454e48334148524c4e324258463837364355564c4e4532464b54355139495547514a38.pem")+`"
            "filename": "`+filepath.Join(cacheDir, "pomerium", "envoy", "files", "tls-crt-32375a484d4f49594c4d374830.pem")+`"
        },
        "privateKey": {
            "filename": "`+filepath.Join(cacheDir, "pomerium", "envoy", "files", "tls-key-3350415a38414e4e4a4655424e55393430474147324651433949384e485341334b5157364f424b4c5856365a545937383735.pem")+`"
            "filename": "`+filepath.Join(cacheDir, "pomerium", "envoy", "files", "tls-key-33393156483053584631414836.pem")+`"
        }
    }],
    "validationContext": {

@@ -3,20 +3,21 @@ package filemgr

import (
    "context"
    "fmt"
    "os"
    "path/filepath"
    "sync"

    envoy_config_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
    "github.com/martinlindhe/base36"

    "github.com/pomerium/pomerium/internal/log"
    "github.com/pomerium/pomerium/pkg/cryptutil"
)

// A Manager manages files for envoy.
type Manager struct {
    cfg *config

    initOnce sync.Once
    initErr error
}

// NewManager creates a new Manager.

@@ -27,18 +28,23 @@ func NewManager(options ...Option) *Manager {
    }
}

func (mgr *Manager) init() {
    mgr.initOnce.Do(func() {
        mgr.initErr = os.MkdirAll(mgr.cfg.cacheDir, 0o700)
    })
}

// BytesDataSource returns an envoy config data source based on bytes.
func (mgr *Manager) BytesDataSource(fileName string, data []byte) *envoy_config_core_v3.DataSource {
    h := base36.EncodeBytes(cryptutil.Hash("filemgr", data))
    ext := filepath.Ext(fileName)
    fileName = fmt.Sprintf("%s-%x%s", fileName[:len(fileName)-len(ext)], h, ext)

    if err := os.MkdirAll(mgr.cfg.cacheDir, 0o700); err != nil {
        log.Error(context.TODO()).Err(err).Msg("filemgr: error creating cache directory, falling back to inline bytes")
    mgr.init()
    if mgr.initErr != nil {
        log.Error(context.Background()).Err(mgr.initErr).Msg("filemgr: error creating cache directory, falling back to inline bytes")
        return inlineBytes(data)
    }

    fileName = GetFileNameWithBytesHash(fileName, data)
    filePath := filepath.Join(mgr.cfg.cacheDir, fileName)

    if _, err := os.Stat(filePath); os.IsNotExist(err) {
        err = os.WriteFile(filePath, data, 0o600)
        if err != nil {
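The BytesDataSource hunk replaces an inline os.MkdirAll call with a one-time init guarded by sync.Once: the cache directory is created at most once, the resulting error is remembered in initErr, and later calls fall back to inline bytes if creation failed. A standalone sketch of that lazy-init-with-remembered-error pattern (the cacheWriter type and its methods are illustrative, not pomerium APIs):

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "sync"
)

// cacheWriter creates its cache directory at most once and remembers any
// error, mirroring the initOnce/initErr fields added to Manager above.
type cacheWriter struct {
    cacheDir string
    initOnce sync.Once
    initErr  error
}

func (w *cacheWriter) init() {
    w.initOnce.Do(func() {
        w.initErr = os.MkdirAll(w.cacheDir, 0o700)
    })
}

// Write stores data under the cache directory, returning the bytes unchanged
// (an "inline" fallback, as BytesDataSource does) when the directory could
// not be created.
func (w *cacheWriter) Write(name string, data []byte) ([]byte, error) {
    w.init()
    if w.initErr != nil {
        return data, nil
    }
    return nil, os.WriteFile(filepath.Join(w.cacheDir, name), data, 0o600)
}

func main() {
    w := &cacheWriter{cacheDir: filepath.Join(os.TempDir(), "filemgr-example")}
    if _, err := w.Write("example.txt", []byte("hello")); err != nil {
        fmt.Println("write failed:", err)
        return
    }
    fmt.Println("wrote to", w.cacheDir)
}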

@@ -17,7 +17,7 @@ func Test(t *testing.T) {
    ds := mgr.BytesDataSource("test.txt", []byte{1, 2, 3, 4, 5})
    assert.Equal(t, &envoy_config_core_v3.DataSource{
        Specifier: &envoy_config_core_v3.DataSource_Filename{
            Filename: filepath.Join(dir, "test-353354494b53534a5538435652584d594a5759394d43484f38514b34594b4b524b34515339593249344e4238474a5436414b.txt"),
            Filename: filepath.Join(dir, "test-32354837325a545944534a4537.txt"),
        },
    }, ds)
    mgr.ClearCache()

@@ -32,7 +32,7 @@ func Test(t *testing.T) {
    ds := mgr.FileDataSource(tmpFilePath)
    assert.Equal(t, &envoy_config_core_v3.DataSource{
        Specifier: &envoy_config_core_v3.DataSource_Filename{
            Filename: filepath.Join(dir, "test-34514f59593332445a5649504230484142544c515057383944383730554833564d32574836354654585954304e424f464336.txt"),
            Filename: filepath.Join(dir, "test-474136555958463735414951.txt"),
        },
    }, ds)

@@ -41,7 +41,7 @@ func Test(t *testing.T) {
    ds = mgr.FileDataSource(tmpFilePath)
    assert.Equal(t, &envoy_config_core_v3.DataSource{
        Specifier: &envoy_config_core_v3.DataSource_Filename{
            Filename: filepath.Join(dir, "test-32564e4457304430393559364b5747373138584f484f5a51334d365758584b47364b555a4c444849513241513457323259.txt"),
            Filename: filepath.Join(dir, "test-3331324c4a35574d5439444d4c.txt"),
        },
    }, ds)

config/envoyconfig/filemgr/name.go (new file, 18 lines)

@@ -0,0 +1,18 @@
package filemgr

import (
    "fmt"
    "path/filepath"

    "github.com/cespare/xxhash/v2"
    "github.com/martinlindhe/base36"
)

// GetFileNameWithBytesHash constructs a filename using a base filename and a hash of
// the data. For example: GetFileNameWithBytesHash("example.txt", []byte{...}) ==> "example-abcd1234.txt"
func GetFileNameWithBytesHash(base string, data []byte) string {
    h := xxhash.Sum64(data)
    he := base36.Encode(h)
    ext := filepath.Ext(base)
    return fmt.Sprintf("%s-%x%s", base[:len(base)-len(ext)], he, ext)
}
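GetFileNameWithBytesHash hashes the contents with xxhash, renders the 64-bit sum in base36, and then formats that string with the %x verb, so the final name embeds the hex encoding of the base36 string; that appears to be why the expected filenames in the tests elsewhere in this diff are long runs of hex digits. A rough standalone approximation using only the standard library (FNV-1a in place of xxhash, and strconv's base-36 formatting in place of github.com/martinlindhe/base36):

package main

import (
    "fmt"
    "hash/fnv"
    "path/filepath"
    "strconv"
    "strings"
)

// fileNameWithHash approximates the naming scheme above with stdlib pieces.
func fileNameWithHash(base string, data []byte) string {
    h := fnv.New64a()
    h.Write(data)
    enc := strings.ToUpper(strconv.FormatUint(h.Sum64(), 36))
    ext := filepath.Ext(base)
    // Note the %x verb: it hex-encodes the base-36 *string*, which is why the
    // cached filenames end up as long hex runs rather than base-36 text.
    return fmt.Sprintf("%s-%x%s", base[:len(base)-len(ext)], enc, ext)
}

func main() {
    fmt.Println(fileNameWithHash("example.crt", []byte("hello world")))
    // prints example-<hex of a base-36 string>.crt
}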

config/envoyconfig/filemgr/name_test.go (new file, 19 lines)

@@ -0,0 +1,19 @@
package filemgr

import (
    "crypto/rand"
    "testing"

    "github.com/stretchr/testify/require"
)

func BenchmarkGetFileNameWithBytesHash(b *testing.B) {
    bs := make([]byte, 1024*128)
    _, err := rand.Read(bs)
    require.NoError(b, err)

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        GetFileNameWithBytesHash("example.crt", bs)
    }
}

@@ -41,8 +41,8 @@ func testData(t *testing.T, name string, data interface{}) string {

func Test_buildMetricsHTTPConnectionManagerFilter(t *testing.T) {
    cacheDir, _ := os.UserCacheDir()
    certFileName := filepath.Join(cacheDir, "pomerium", "envoy", "files", "tls-crt-354e49305a5a39414a545530374e58454e48334148524c4e324258463837364355564c4e4532464b54355139495547514a38.pem")
    keyFileName := filepath.Join(cacheDir, "pomerium", "envoy", "files", "tls-key-3350415a38414e4e4a4655424e55393430474147324651433949384e485341334b5157364f424b4c5856365a545937383735.pem")
    certFileName := filepath.Join(cacheDir, "pomerium", "envoy", "files", "tls-crt-32375a484d4f49594c4d374830.pem")
    keyFileName := filepath.Join(cacheDir, "pomerium", "envoy", "files", "tls-key-33393156483053584631414836.pem")

    b := New("local-grpc", "local-http", "local-metrics", filemgr.NewManager(), nil)
    li, err := b.buildMetricsListener(&config.Config{

@@ -74,7 +74,7 @@ func Test_buildDownstreamTLSContext(t *testing.T) {
    b := New("local-grpc", "local-http", "local-metrics", filemgr.NewManager(), nil)

    cacheDir, _ := os.UserCacheDir()
    clientCAFileName := filepath.Join(cacheDir, "pomerium", "envoy", "files", "client-ca-3533485838304b593757424e3354425157494c4747433534384f474f3631364d5332554c3332485a483834334d50454c344a.pem")
    clientCAFileName := filepath.Join(cacheDir, "pomerium", "envoy", "files", "client-ca-313754424855313435355a5348.pem")

    t.Run("no-validation", func(t *testing.T) {
        downstreamTLSContext, err := b.buildDownstreamTLSContextMulti(context.Background(), &config.Config{Options: &config.Options{}}, nil)

@@ -207,8 +207,7 @@ func Test_buildDownstreamTLSContext(t *testing.T) {
    }}

    maxVerifyDepth = 10
    downstreamTLSContext, err :=
        b.buildDownstreamTLSContextMulti(context.Background(), config, nil)
    downstreamTLSContext, err := b.buildDownstreamTLSContextMulti(context.Background(), config, nil)
    require.NoError(t, err)
    testutil.AssertProtoJSONEqual(t, `{
        "maxVerifyDepth": 10,

@@ -220,8 +219,7 @@ func Test_buildDownstreamTLSContext(t *testing.T) {
    }`, downstreamTLSContext.GetCommonTlsContext().GetValidationContext())

    maxVerifyDepth = 0
    downstreamTLSContext, err =
        b.buildDownstreamTLSContextMulti(context.Background(), config, nil)
    downstreamTLSContext, err = b.buildDownstreamTLSContextMulti(context.Background(), config, nil)
    require.NoError(t, err)
    testutil.AssertProtoJSONEqual(t, `{
        "onlyVerifyLeafCertCrl": true,

@@ -243,8 +241,7 @@ func Test_buildDownstreamTLSContext(t *testing.T) {
            },
        },
    }}
    downstreamTLSContext, err :=
        b.buildDownstreamTLSContextMulti(context.Background(), config, nil)
    downstreamTLSContext, err := b.buildDownstreamTLSContextMulti(context.Background(), config, nil)
    require.NoError(t, err)
    testutil.AssertProtoJSONEqual(t, `{
        "maxVerifyDepth": 1,

@@ -3,7 +3,9 @@ package config_test
import (
    "context"
    "errors"
    "sync/atomic"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

@@ -13,6 +15,8 @@ import (
)

func TestLayeredConfig(t *testing.T) {
    t.Parallel()

    ctx := context.Background()

    t.Run("error on initial build", func(t *testing.T) {

@@ -33,12 +37,15 @@ func TestLayeredConfig(t *testing.T) {
        })
        require.NoError(t, err)

        var dst *config.Config
        var dst atomic.Pointer[config.Config]
        dst.Store(layered.GetConfig())
        layered.OnConfigChange(ctx, func(ctx context.Context, c *config.Config) {
            dst = c
            dst.Store(c)
        })

        underlying.SetConfig(ctx, &config.Config{Options: &config.Options{DeriveInternalDomainCert: proto.String("b.com")}})
        assert.Equal(t, "b.com", dst.Options.GetDeriveInternalDomain())
        assert.Eventually(t, func() bool {
            return dst.Load().Options.GetDeriveInternalDomain() == "b.com"
        }, 10*time.Second, time.Millisecond)
    })
}
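The test now keeps the latest config in an atomic.Pointer and polls with assert.Eventually: the OnConfigChange callback runs on a separate goroutine after this change, so a plain variable assignment would be a data race and the update might not be visible right away. A small standalone sketch of that publish-and-poll shape (the Config type is illustrative):

package main

import (
    "fmt"
    "sync/atomic"
    "time"
)

// Config stands in for *config.Config.
type Config struct{ Domain string }

func main() {
    var dst atomic.Pointer[Config]
    dst.Store(&Config{Domain: "a.com"})

    // A listener goroutine publishes a new value, as the OnConfigChange
    // callback does in the test above.
    go func() {
        time.Sleep(10 * time.Millisecond)
        dst.Store(&Config{Domain: "b.com"})
    }()

    // The reader polls until the update becomes visible, mirroring the
    // assert.Eventually call in the test.
    deadline := time.Now().Add(time.Second)
    for time.Now().Before(deadline) {
        if dst.Load().Domain == "b.com" {
            fmt.Println("saw update:", dst.Load().Domain)
            return
        }
        time.Sleep(time.Millisecond)
    }
    fmt.Println("timed out waiting for update")
}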

@@ -75,16 +75,15 @@ func TestEvents(t *testing.T) {

    srv := &Server{
        haveSetCapacity: make(map[string]bool),
        currentConfig: atomicutil.NewValue(versionedConfig{
            Config: &config.Config{
                OutboundPort: outboundPort,
                Options: &config.Options{
                    SharedKey: cryptutil.NewBase64Key(),
                    DataBrokerURLString: "http://" + li.Addr().String(),
                    GRPCInsecure: proto.Bool(true),
                },
        currentConfig: atomicutil.NewValue(&config.Config{
            OutboundPort: outboundPort,
            Options: &config.Options{
                SharedKey: cryptutil.NewBase64Key(),
                DataBrokerURLString: "http://" + li.Addr().String(),
                GRPCInsecure: proto.Bool(true),
            },
        }),
            },
        ),
    }
    err := srv.storeEvent(ctx, new(events.LastError))
    assert.NoError(t, err)

@@ -33,7 +33,7 @@ func (srv *Server) StreamAccessLogs(stream envoy_service_accesslog_v3.AccessLogS
    }
    evt = evt.Str("service", "envoy")

    fields := srv.currentConfig.Load().Config.Options.GetAccessLogFields()
    fields := srv.currentConfig.Load().Options.GetAccessLogFields()
    for _, field := range fields {
        evt = populateLogEvent(field, evt, entry)
    }

@@ -27,6 +27,7 @@ import (
    "github.com/pomerium/pomerium/internal/log"
    "github.com/pomerium/pomerium/internal/telemetry"
    "github.com/pomerium/pomerium/internal/telemetry/requestid"
    "github.com/pomerium/pomerium/internal/telemetry/trace"
    "github.com/pomerium/pomerium/internal/urlutil"
    "github.com/pomerium/pomerium/internal/version"
    "github.com/pomerium/pomerium/pkg/envoy/files"

@@ -34,11 +35,6 @@ import (
    "github.com/pomerium/pomerium/pkg/grpcutil"
)

type versionedConfig struct {
    *config.Config
    version int64
}

// A Service can be mounted on the control plane.
type Service interface {
    Mount(r *mux.Router)

@@ -56,7 +52,8 @@ type Server struct {
    Builder *envoyconfig.Builder
    EventsMgr *events.Manager

    currentConfig *atomicutil.Value[versionedConfig]
    updateConfig chan *config.Config
    currentConfig *atomicutil.Value[*config.Config]
    name string
    xdsmgr *xdsmgr.Manager
    filemgr *filemgr.Manager

@@ -77,10 +74,9 @@ func NewServer(cfg *config.Config, metricsMgr *config.MetricsManager, eventsMgr
        EventsMgr: eventsMgr,
        reproxy: reproxy.New(),
        haveSetCapacity: map[string]bool{},
        currentConfig: atomicutil.NewValue(versionedConfig{
            Config: cfg,
        }),
        httpRouter: atomicutil.NewValue(mux.NewRouter()),
        updateConfig: make(chan *config.Config, 1),
        currentConfig: atomicutil.NewValue(cfg),
        httpRouter: atomicutil.NewValue(mux.NewRouter()),
    }

    var err error

@@ -249,38 +245,65 @@ func (srv *Server) Run(ctx context.Context) error {
        })
    }

    // apply configuration changes
    eg.Go(func() error {
        for {
            select {
            case <-ctx.Done():
                return ctx.Err()
            case cfg := <-srv.updateConfig:
                err := srv.update(ctx, cfg)
                if err != nil {
                    log.Error(ctx).Err(err).
                        Msg("controlplane: error updating server with new config")
                }
            }
        }
    })

    return eg.Wait()
}

// OnConfigChange updates the pomerium config options.
func (srv *Server) OnConfigChange(ctx context.Context, cfg *config.Config) error {
    if err := srv.updateRouter(cfg); err != nil {
        return err
    ctx, span := trace.StartSpan(ctx, "controlplane.Server.OnConfigChange")
    defer span.End()

    select {
    case <-ctx.Done():
        return ctx.Err()
    case srv.updateConfig <- cfg:
    }
    srv.reproxy.Update(ctx, cfg)
    prev := srv.currentConfig.Load()
    srv.currentConfig.Store(versionedConfig{
        Config: cfg,
        version: prev.version + 1,
    })
    res, err := srv.buildDiscoveryResources(ctx)
    if err != nil {
        return err
    }
    srv.xdsmgr.Update(ctx, cfg.Version, res)
    return nil
}

// EnableAuthenticate enables the authenticate service.
func (srv *Server) EnableAuthenticate(svc Service) error {
    srv.authenticateSvc = svc
    return srv.updateRouter(srv.currentConfig.Load().Config)
    return srv.updateRouter(srv.currentConfig.Load())
}

// EnableProxy enables the proxy service.
func (srv *Server) EnableProxy(svc Service) error {
    srv.proxySvc = svc
    return srv.updateRouter(srv.currentConfig.Load().Config)
    return srv.updateRouter(srv.currentConfig.Load())
}

func (srv *Server) update(ctx context.Context, cfg *config.Config) error {
    ctx, span := trace.StartSpan(ctx, "controlplane.Server.update")
    defer span.End()

    if err := srv.updateRouter(cfg); err != nil {
        return err
    }
    srv.reproxy.Update(ctx, cfg)
    srv.currentConfig.Store(cfg)
    res, err := srv.buildDiscoveryResources(ctx)
    if err != nil {
        return err
    }
    srv.xdsmgr.Update(ctx, res)
    return nil
}

func (srv *Server) updateRouter(cfg *config.Config) error {
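After this change OnConfigChange no longer applies the configuration inline: it pushes the new *config.Config onto a buffered channel of size 1, and a goroutine started from Run drains that channel and calls update, so updates are serialized on one goroutine. A reduced standalone sketch of that hand-off, with stand-in types rather than the real Server:

package main

import (
    "context"
    "fmt"
    "time"

    "golang.org/x/sync/errgroup"
)

// Config stands in for *config.Config.
type Config struct{ Name string }

type server struct {
    updateConfig chan *Config
}

// OnConfigChange hands the config to the update goroutine, as in the hunk above.
func (s *server) OnConfigChange(ctx context.Context, cfg *Config) error {
    select {
    case <-ctx.Done():
        return ctx.Err()
    case s.updateConfig <- cfg:
        return nil
    }
}

// Run drains the channel and applies each config in order.
func (s *server) Run(ctx context.Context) error {
    eg, ctx := errgroup.WithContext(ctx)
    eg.Go(func() error {
        for {
            select {
            case <-ctx.Done():
                return ctx.Err()
            case cfg := <-s.updateConfig:
                fmt.Println("applying config:", cfg.Name)
            }
        }
    })
    return eg.Wait()
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
    defer cancel()

    s := &server{updateConfig: make(chan *Config, 1)}
    go func() { _ = s.Run(ctx) }()

    _ = s.OnConfigChange(ctx, &Config{Name: "v1"})
    <-ctx.Done()
}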

@@ -3,8 +3,10 @@ package controlplane
import (
    "context"
    "encoding/hex"
    "fmt"

    envoy_service_discovery_v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
    "golang.org/x/sync/errgroup"

    "github.com/pomerium/pomerium/internal/log"
    "github.com/pomerium/pomerium/internal/telemetry/trace"

@@ -24,56 +26,72 @@ func (srv *Server) buildDiscoveryResources(ctx context.Context) (map[string][]*e

    cfg := srv.currentConfig.Load()

    log.Info(ctx).Int64("config-version", cfg.Version).Msg("controlplane: building discovery resources")
    log.Info(ctx).Msg("controlplane: building discovery resources")

    resources := map[string][]*envoy_service_discovery_v3.Resource{}
    var clusterCount, listenerCount, routeConfigurationCount int
    eg, ctx := errgroup.WithContext(ctx)

    clusters, err := srv.Builder.BuildClusters(ctx, cfg.Config)
    var clusterResources []*envoy_service_discovery_v3.Resource
    eg.Go(func() error {
        clusters, err := srv.Builder.BuildClusters(ctx, cfg)
        if err != nil {
            return fmt.Errorf("error building clusters: %w", err)
        }
        for _, cluster := range clusters {
            clusterResources = append(clusterResources, &envoy_service_discovery_v3.Resource{
                Name: cluster.Name,
                Version: hex.EncodeToString(cryptutil.HashProto(cluster)),
                Resource: protoutil.NewAny(cluster),
            })
        }
        return nil
    })

    var listenerResources []*envoy_service_discovery_v3.Resource
    eg.Go(func() error {
        listeners, err := srv.Builder.BuildListeners(ctx, cfg, false)
        if err != nil {
            return fmt.Errorf("error building listeners: %w", err)
        }
        for _, listener := range listeners {
            listenerResources = append(listenerResources, &envoy_service_discovery_v3.Resource{
                Name: listener.Name,
                Version: hex.EncodeToString(cryptutil.HashProto(listener)),
                Resource: protoutil.NewAny(listener),
            })
        }
        return nil
    })

    var routeConfigurationResources []*envoy_service_discovery_v3.Resource
    eg.Go(func() error {
        routeConfigurations, err := srv.Builder.BuildRouteConfigurations(ctx, cfg)
        if err != nil {
            return fmt.Errorf("error building route configurations: %w", err)
        }
        for _, routeConfiguration := range routeConfigurations {
            routeConfigurationResources = append(routeConfigurationResources, &envoy_service_discovery_v3.Resource{
                Name: routeConfiguration.Name,
                Version: hex.EncodeToString(cryptutil.HashProto(routeConfiguration)),
                Resource: protoutil.NewAny(routeConfiguration),
            })
        }
        return nil
    })

    err := eg.Wait()
    if err != nil {
        return nil, err
    }
    for _, cluster := range clusters {
        clusterCount++
        resources[clusterTypeURL] = append(resources[clusterTypeURL], &envoy_service_discovery_v3.Resource{
            Name: cluster.Name,
            Version: hex.EncodeToString(cryptutil.HashProto(cluster)),
            Resource: protoutil.NewAny(cluster),
        })
    }

    listeners, err := srv.Builder.BuildListeners(ctx, cfg.Config, false)
    if err != nil {
        return nil, err
    }
    for _, listener := range listeners {
        listenerCount++
        resources[listenerTypeURL] = append(resources[listenerTypeURL], &envoy_service_discovery_v3.Resource{
            Name: listener.Name,
            Version: hex.EncodeToString(cryptutil.HashProto(listener)),
            Resource: protoutil.NewAny(listener),
        })
    }

    routeConfigurations, err := srv.Builder.BuildRouteConfigurations(ctx, cfg.Config)
    if err != nil {
        return nil, err
    }
    for _, routeConfiguration := range routeConfigurations {
        routeConfigurationCount++
        resources[routeConfigurationTypeURL] = append(resources[routeConfigurationTypeURL], &envoy_service_discovery_v3.Resource{
            Name: routeConfiguration.Name,
            Version: hex.EncodeToString(cryptutil.HashProto(routeConfiguration)),
            Resource: protoutil.NewAny(routeConfiguration),
        })
    }

    log.Info(ctx).
        Int64("config-version", cfg.Version).
        Int("cluster-count", clusterCount).
        Int("listener-count", listenerCount).
        Int("route-configuration-count", routeConfigurationCount).
        Int("cluster-count", len(clusterResources)).
        Int("listener-count", len(listenerResources)).
        Int("route-configuration-count", len(routeConfigurationResources)).
        Msg("controlplane: built discovery resources")

    return resources, nil
    return map[string][]*envoy_service_discovery_v3.Resource{
        clusterTypeURL: clusterResources,
        listenerTypeURL: listenerResources,
        routeConfigurationTypeURL: routeConfigurationResources,
    }, nil
}
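buildDiscoveryResources now builds clusters, listeners, and route configurations concurrently: each errgroup goroutine appends to its own slice, and the three slices are combined into one map only after eg.Wait returns. A reduced sketch of that fan-out-and-join shape, with stand-in builder bodies rather than the real envoyconfig.Builder calls:

package main

import (
    "context"
    "fmt"

    "golang.org/x/sync/errgroup"
)

// buildAll runs three builders concurrently and merges their results,
// mirroring the errgroup structure of buildDiscoveryResources.
func buildAll(ctx context.Context) (map[string][]string, error) {
    eg, ctx := errgroup.WithContext(ctx)

    var clusters, listeners, routes []string
    eg.Go(func() error {
        clusters = []string{"cluster-a", "cluster-b"}
        return nil
    })
    eg.Go(func() error {
        listeners = []string{"listener-http"}
        return nil
    })
    eg.Go(func() error {
        routes = []string{"route-main"}
        return nil
    })

    // Each goroutine writes only to its own slice, so no extra locking is
    // needed; eg.Wait establishes the happens-before edge for the reads below.
    if err := eg.Wait(); err != nil {
        return nil, err
    }
    return map[string][]string{
        "clusters":  clusters,
        "listeners": listeners,
        "routes":    routes,
    }, nil
}

func main() {
    res, err := buildAll(context.Background())
    if err != nil {
        panic(err)
    }
    fmt.Println(res)
}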

@@ -3,9 +3,6 @@ package xdsmgr

import (
    "context"
    "fmt"
    "strconv"
    "strings"
    "sync"

    envoy_service_discovery_v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"

@@ -40,7 +37,7 @@ func NewManager(resources map[string][]*envoy_service_discovery_v3.Resource) *Ma
    return &Manager{
        signal: signal.New(),

        nonce: toNonce(0),
        nonce: uuid.New().String(),
        resources: resources,
    }
}

@@ -111,8 +108,8 @@ func (mgr *Manager) DeltaAggregatedResources(
        // neither an ACK or a NACK
        case req.GetErrorDetail() != nil:
            log.Info(ctx).
                Str("type-url", req.GetTypeUrl()).
                Any("error-detail", req.GetErrorDetail()).
                Int64("config-version", versionFromNonce(req.GetResponseNonce())).
                Msg("xdsmgr: nack")
            // a NACK
            // - set the client resource versions to the current resource versions

@@ -122,7 +119,7 @@ func (mgr *Manager) DeltaAggregatedResources(
            }
        case req.GetResponseNonce() == mgr.nonce:
            log.Info(ctx).
                Int64("config-version", versionFromNonce(req.GetResponseNonce())).
                Str("type-url", req.GetTypeUrl()).
                Msg("xdsmgr: ack")
            // an ACK for the last response
            // - set the client resource versions to the current resource versions

@@ -133,7 +130,7 @@ func (mgr *Manager) DeltaAggregatedResources(
        default:
            // an ACK for a response that's not the last response
            log.Info(ctx).
                Int64("config-version", versionFromNonce(req.GetResponseNonce())).
                Str("type-url", req.GetTypeUrl()).
                Msg("xdsmgr: ack")
        }

@@ -215,7 +212,7 @@ func (mgr *Manager) DeltaAggregatedResources(
            return ctx.Err()
        case res := <-outgoing:
            log.Info(ctx).
                Int64("config-version", versionFromNonce(res.GetNonce())).
                Str("type-url", res.GetTypeUrl()).
                Int("resource-count", len(res.GetResources())).
                Int("removed-resource-count", len(res.GetRemovedResources())).
                Msg("xdsmgr: sending resources")

@@ -238,8 +235,8 @@ func (mgr *Manager) StreamAggregatedResources(

// Update updates the state of resources. If any changes are made they will be pushed to any listening
// streams. For each TypeURL the list of resources should be the complete list of resources.
func (mgr *Manager) Update(ctx context.Context, version int64, resources map[string][]*envoy_service_discovery_v3.Resource) {
    nonce := toNonce(version)
func (mgr *Manager) Update(ctx context.Context, resources map[string][]*envoy_service_discovery_v3.Resource) {
    nonce := uuid.New().String()

    mgr.mu.Lock()
    mgr.nonce = nonce

@@ -248,15 +245,3 @@ func (mgr *Manager) Update(ctx context.Context, version int64, resources map[str

    mgr.signal.Broadcast(ctx)
}

func toNonce(version int64) string {
    return fmt.Sprintf("%d/%s", version, uuid.New().String())
}

// versionFromNonce parses the version out of the nonce. A missing or invalid version will be returned as 0.
func versionFromNonce(nonce string) (version int64) {
    if idx := strings.Index(nonce, "/"); idx > 0 {
        version, _ = strconv.ParseInt(nonce[:idx], 10, 64)
    }
    return version
}
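With the version prefix gone, the manager's nonce is a bare UUID again, and an ACK is recognized purely by comparing the request's response nonce with the last nonce the manager sent; nothing is parsed back out of it. A small standalone sketch of that bookkeeping (the tracker type is illustrative):

package main

import (
    "fmt"

    "github.com/google/uuid"
)

// tracker matches ACKs by nonce equality only, as the Manager does after this
// change.
type tracker struct {
    nonce string
}

// push records a new nonce for an outgoing response and returns it.
func (t *tracker) push() string {
    t.nonce = uuid.New().String()
    return t.nonce
}

// classify reports how a request's response nonce relates to the last response.
func (t *tracker) classify(responseNonce string) string {
    switch {
    case responseNonce == "":
        return "initial request"
    case responseNonce == t.nonce:
        return "ack for the last response"
    default:
        return "ack for an earlier response"
    }
}

func main() {
    var t tracker
    fmt.Println(t.classify(""))
    n := t.push()
    fmt.Println(t.classify(n))
    t.push()
    fmt.Println(t.classify(n))
}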

@@ -94,7 +94,7 @@ func TestManager(t *testing.T) {
    }, msg.GetResources())
    ack(msg.Nonce)

    mgr.Update(ctx, 1, map[string][]*envoy_service_discovery_v3.Resource{
    mgr.Update(ctx, map[string][]*envoy_service_discovery_v3.Resource{
        typeURL: {{Name: "r1", Version: "2"}},
    })

@@ -105,7 +105,7 @@ func TestManager(t *testing.T) {
    }, msg.GetResources())
    ack(msg.Nonce)

    mgr.Update(ctx, 1, map[string][]*envoy_service_discovery_v3.Resource{
    mgr.Update(ctx, map[string][]*envoy_service_discovery_v3.Resource{
        typeURL: nil,
    })

@@ -109,9 +109,6 @@ func (src *ConfigSource) rebuild(ctx context.Context, firstTime firstTime) {
    // add all the config policies to the list
    for _, id := range ids {
        cfgpb := src.dbConfigs[id]
        if cfgpb.GetVersion() > 0 {
            cfg.Version = cfgpb.GetVersion()
        }

        cfg.Options.ApplySettings(ctx, certsIndex, cfgpb.Settings)
        var errCount uint64

@@ -171,7 +168,7 @@ func (src *ConfigSource) rebuild(ctx context.Context, firstTime firstTime) {
    // add the additional policies here since calling `Validate` will reset them.
    cfg.Options.AdditionalPolicies = append(cfg.Options.AdditionalPolicies, additionalPolicies...)

    log.Info(ctx).Int64("config-version", cfg.Version).Msg("databroker: built new config")
    log.Info(ctx).Msg("databroker: built new config")

    src.computedConfig = cfg
    if !firstTime {

internal/events/target.go (new file, 166 lines)

@@ -0,0 +1,166 @@
package events

import (
    "context"
    "errors"
    "sync"

    "github.com/google/uuid"
)

type (
    // A Listener is a function that listens for events of type T.
    Listener[T any] func(ctx context.Context, event T)
    // A Handle represents a listener.
    Handle string

    addListenerEvent[T any] struct {
        listener Listener[T]
        handle Handle
    }
    removeListenerEvent[T any] struct {
        handle Handle
    }
    dispatchEvent[T any] struct {
        ctx context.Context
        event T
    }
)

// A Target is a target for events.
//
// Listeners are added with AddListener with a function to be called when the event occurs.
// AddListener returns a Handle which can be used to remove a listener with RemoveListener.
//
// Dispatch dispatches events to all the registered listeners.
//
// Target is safe to use in its zero state.
//
// The first time any method of Target is called a background goroutine is started that handles
// any requests and maintains the state of the listeners. Each listener also starts a
// separate goroutine so that all listeners can be invoked concurrently.
//
// The channels to the main goroutine and to the listener goroutines have a size of 1 so typically
// methods and dispatches will return immediately. However a slow listener will cause the next event
// dispatch to block. This is the opposite behavior from Manager.
//
// Close will cancel all the goroutines. Subsequent calls to AddListener, RemoveListener, Close and
// Dispatch are no-ops.
type Target[T any] struct {
    initOnce sync.Once
    ctx context.Context
    cancel context.CancelCauseFunc
    addListenerCh chan addListenerEvent[T]
    removeListenerCh chan removeListenerEvent[T]
    dispatchCh chan dispatchEvent[T]
    listeners map[Handle]chan dispatchEvent[T]
}

// AddListener adds a listener to the target.
func (t *Target[T]) AddListener(listener Listener[T]) Handle {
    t.init()

    // using a handle is necessary because you can't use a function as a map key.
    handle := Handle(uuid.NewString())

    select {
    case <-t.ctx.Done():
    case t.addListenerCh <- addListenerEvent[T]{listener, handle}:
    }

    return handle
}

// Close closes the event target. This can be called multiple times safely.
// Once closed the target cannot be used.
func (t *Target[T]) Close() {
    t.init()

    t.cancel(errors.New("target closed"))
}

// Dispatch dispatches an event to all listeners.
func (t *Target[T]) Dispatch(ctx context.Context, evt T) {
    t.init()

    select {
    case <-t.ctx.Done():
    case t.dispatchCh <- dispatchEvent[T]{ctx: ctx, event: evt}:
    }
}

// RemoveListener removes a listener from the target.
func (t *Target[T]) RemoveListener(handle Handle) {
    t.init()

    select {
    case <-t.ctx.Done():
    case t.removeListenerCh <- removeListenerEvent[T]{handle}:
    }
}

func (t *Target[T]) init() {
    t.initOnce.Do(func() {
        t.ctx, t.cancel = context.WithCancelCause(context.Background())
        t.addListenerCh = make(chan addListenerEvent[T], 1)
        t.removeListenerCh = make(chan removeListenerEvent[T], 1)
        t.dispatchCh = make(chan dispatchEvent[T], 1)
        t.listeners = map[Handle]chan dispatchEvent[T]{}
        go t.run()
    })
}

func (t *Target[T]) run() {
    // listen for add/remove/dispatch events and call functions
    for {
        select {
        case <-t.ctx.Done():
            return
        case evt := <-t.addListenerCh:
            t.addListener(evt.listener, evt.handle)
        case evt := <-t.removeListenerCh:
            t.removeListener(evt.handle)
        case evt := <-t.dispatchCh:
            t.dispatch(evt.ctx, evt.event)
        }
    }
}

// these functions are not thread-safe. They are intended to be called only by "run".

func (t *Target[T]) addListener(listener Listener[T], handle Handle) {
    ch := make(chan dispatchEvent[T], 1)
    t.listeners[handle] = ch
    // start a goroutine to send events to the listener
    go func() {
        for {
            select {
            case <-t.ctx.Done():
            case evt := <-ch:
                listener(evt.ctx, evt.event)
            }
        }
    }()
}

func (t *Target[T]) removeListener(handle Handle) {
    ch, ok := t.listeners[handle]
    if !ok {
        // nothing to do since the listener doesn't exist
        return
    }
    // close the channel to kill the goroutine
    close(ch)
    delete(t.listeners, handle)
}

func (t *Target[T]) dispatch(ctx context.Context, evt T) {
    // loop over all the listeners and send the event to them
    for _, ch := range t.listeners {
        select {
        case <-t.ctx.Done():
            return
        case ch <- dispatchEvent[T]{ctx: ctx, event: evt}:
        }
    }
}
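The test file below exercises Target end to end; for quick reference, a minimal hypothetical use of the API looks like the sketch here. It assumes it is compiled inside the pomerium module (internal/events is an internal package), and the configChanged event type is made up for the example:

package main

import (
    "context"
    "fmt"

    "github.com/pomerium/pomerium/internal/events"
)

// configChanged is a made-up event type for this sketch.
type configChanged struct{ name string }

func main() {
    var target events.Target[configChanged]
    defer target.Close()

    done := make(chan struct{})
    handle := target.AddListener(func(_ context.Context, evt configChanged) {
        fmt.Println("config changed:", evt.name)
        close(done)
    })
    defer target.RemoveListener(handle)

    // Dispatch fans the event out to each listener on its own goroutine, so
    // the listener may run shortly after Dispatch returns.
    target.Dispatch(context.Background(), configChanged{name: "example"})
    <-done
}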

internal/events/target_test.go (new file, 53 lines)

@@ -0,0 +1,53 @@
package events_test

import (
    "context"
    "sync/atomic"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"

    "github.com/pomerium/pomerium/internal/events"
)

func TestTarget(t *testing.T) {
    t.Parallel()

    var target events.Target[int64]
    t.Cleanup(target.Close)

    var calls1, calls2, calls3 atomic.Int64
    h1 := target.AddListener(func(_ context.Context, i int64) {
        calls1.Add(i)
    })
    h2 := target.AddListener(func(_ context.Context, i int64) {
        calls2.Add(i)
    })
    h3 := target.AddListener(func(_ context.Context, i int64) {
        calls3.Add(i)
    })

    shouldBe := func(i1, i2, i3 int64) {
        t.Helper()

        assert.Eventually(t, func() bool { return calls1.Load() == i1 }, time.Second, time.Millisecond)
        assert.Eventually(t, func() bool { return calls2.Load() == i2 }, time.Second, time.Millisecond)
        assert.Eventually(t, func() bool { return calls3.Load() == i3 }, time.Second, time.Millisecond)
    }

    target.Dispatch(context.Background(), 1)
    shouldBe(1, 1, 1)

    target.RemoveListener(h2)
    target.Dispatch(context.Background(), 2)
    shouldBe(3, 1, 3)

    target.RemoveListener(h1)
    target.Dispatch(context.Background(), 3)
    shouldBe(3, 1, 6)

    target.RemoveListener(h3)
    target.Dispatch(context.Background(), 4)
    shouldBe(3, 1, 6)
}

One file's diff is suppressed because it is too large.

@@ -12,7 +12,6 @@ import "crypt/crypt.proto";

message Config {
    string name = 1;
    int64 version = 4;
    repeated Route routes = 2;
    Settings settings = 3;
}