pomerium/authorize/run.go
Caleb Doxsey dbd7f55b20
feature/databroker: user data and session refactor project (#926)
* databroker: add databroker, identity manager, update cache (#864)

* databroker: add databroker, identity manager, update cache

* fix cache tests

* directory service (#885)

* directory: add google and okta

* add onelogin

* add directory provider

* initialize before sync, update google provider, remove dead code

* add azure provider

* fix azure provider

* fix gitlab

* add gitlab test, fix azure test

* hook up okta

* remove dead code

* fix tests

* fix flaky test

* authorize: use databroker data for rego policy (#904)

* wip

* add directory provider

* initialize before sync, update google provider, remove dead code

* fix flaky test

* update authorize to use databroker data

* implement signed jwt

* wait for session and user to appear

* fix test

* directory service (#885)

* directory: add google and okta

* add onelogin

* add directory provider

* initialize before sync, update google provider, remove dead code

* add azure provider

* fix azure provider

* fix gitlab

* add gitlab test, fix azure test

* hook up okta

* remove dead code

* fix tests

* fix flaky test

* remove log line

* only redirect when no session id exists

* prepare rego query as part of create

* return on ctx done

* retry on disconnect for sync

* move jwt signing

* use !=

* use parent ctx for wait

* remove session state, remove logs

* rename function

* add log message

* pre-allocate slice

* use errgroup

* return nil on eof for sync

* move check

* disable timeout on gRPC requests in envoy

* fix gitlab test

* use v4 backoff

* authenticate: databroker changes (#914)

* wip

* add directory provider

* initialize before sync, update google provider, remove dead code

* fix flaky test

* update authorize to use databroker data

* implement signed jwt

* wait for session and user to appear

* fix test

* directory service (#885)

* directory: add google and okta

* add onelogin

* add directory provider

* initialize before sync, update google provider, remove dead code

* add azure provider

* fix azure provider

* fix gitlab

* add gitlab test, fix azure test

* hook up okta

* remove dead code

* fix tests

* fix flaky test

* remove log line

* only redirect when no session id exists

* prepare rego query as part of create

* return on ctx done

* retry on disconnect for sync

* move jwt signing

* use !=

* use parent ctx for wait

* remove session state, remove logs

* rename function

* add log message

* pre-allocate slice

* use errgroup

* return nil on eof for sync

* move check

* disable timeout on gRPC requests in envoy

* fix dashboard

* delete session on logout

* permanently delete sessions once they are marked as deleted

* remove permanent delete

* fix tests

* remove groups and refresh test

* databroker: remove dead code, rename cache url, move dashboard (#925)

* wip

* add directory provider

* initialize before sync, update google provider, remove dead code

* fix flaky test

* update authorize to use databroker data

* implement signed jwt

* wait for session and user to appear

* fix test

* directory service (#885)

* directory: add google and okta

* add onelogin

* add directory provider

* initialize before sync, update google provider, remove dead code

* add azure provider

* fix azure provider

* fix gitlab

* add gitlab test, fix azure test

* hook up okta

* remove dead code

* fix tests

* fix flaky test

* remove log line

* only redirect when no session id exists

* prepare rego query as part of create

* return on ctx done

* retry on disconnect for sync

* move jwt signing

* use !=

* use parent ctx for wait

* remove session state, remove logs

* rename function

* add log message

* pre-allocate slice

* use errgroup

* return nil on eof for sync

* move check

* disable timeout on gRPC requests in envoy

* fix dashboard

* delete session on logout

* permanently delete sessions once they are marked as deleted

* remove permanent delete

* fix tests

* remove cache service

* remove kv

* remove refresh docs

* remove obsolete cache docs

* add databroker url option

* cache: use memberlist to detect multiple instances

* add databroker service url

* remove cache service

* remove kv

* remove refresh docs

* remove obsolete cache docs

* add databroker url option

* cache: use memberlist to detect multiple instances

* add databroker service url

* wip

* remove groups and refresh test

* fix redirect, signout

* remove databroker client from proxy

* remove unused method

* remove user dashboard test

* handle missing session ids

* session: reject sessions with no id

* sessions: invalidate old sessions via databroker server version (#930)

* session: add a version field tied to the databroker server version that can be used to invalidate sessions

* fix tests

* add log

* authenticate: create user record immediately, call "get" directly in authorize (#931)
2020-06-19 07:52:44 -06:00

194 lines
4.4 KiB
Go

package authorize
import (
"context"
"io"
"time"
backoff "github.com/cenkalti/backoff/v4"
"golang.org/x/sync/errgroup"
"google.golang.org/protobuf/types/known/emptypb"
"github.com/pomerium/pomerium/internal/grpc/databroker"
"github.com/pomerium/pomerium/internal/log"
)
// Run starts the authorize server's background synchronization: a type
// syncer, a per-type data syncer, and a data updater. It blocks until the
// first of them fails or ctx is canceled, returning that error.
func (a *Authorize) Run(ctx context.Context) error {
	group, ctx := errgroup.WithContext(ctx)

	// Unbuffered channels connect the three stages of the pipeline.
	typeCh := make(chan []string)
	recordCh := make(chan *databroker.Record)

	group.Go(func() error { return a.runTypesSyncer(ctx, typeCh) })
	group.Go(func() error { return a.runDataSyncer(ctx, typeCh, recordCh) })
	group.Go(func() error { return a.runDataUpdater(ctx, recordCh) })

	return group.Wait()
}
// runTypesSyncer streams the set of known record types from the databroker
// and forwards each batch on updateTypes. It retries forever with
// exponential backoff, resetting the backoff after each successful receive.
func (a *Authorize) runTypesSyncer(ctx context.Context, updateTypes chan<- []string) error {
	log.Info().Msg("starting type sync")
	return tryForever(ctx, func(b interface{ Reset() }) error {
		stream, err := a.dataBrokerClient.SyncTypes(ctx, new(emptypb.Empty))
		if err != nil {
			return err
		}
		for {
			res, err := stream.Recv()
			switch {
			case err == io.EOF:
				// clean end of stream: let tryForever re-establish it
				return nil
			case err != nil:
				return err
			}
			b.Reset()
			select {
			case updateTypes <- res.GetTypes():
			case <-stream.Context().Done():
				return stream.Context().Err()
			}
		}
	})
}
// runDataSyncer listens for record-type announcements on updateTypes and
// launches one runDataTypeSyncer goroutine per distinct type, forwarding all
// records to updateRecord. Each type is started at most once.
func (a *Authorize) runDataSyncer(ctx context.Context, updateTypes <-chan []string, updateRecord chan<- *databroker.Record) error {
	group, ctx := errgroup.WithContext(ctx)
	group.Go(func() error {
		started := make(map[string]struct{})
		for {
			var types []string
			select {
			case <-ctx.Done():
				return ctx.Err()
			case types = <-updateTypes:
			}
			for _, t := range types {
				if _, ok := started[t]; ok {
					continue
				}
				started[t] = struct{}{}
				t := t // capture per-iteration value for the closure
				group.Go(func() error {
					return a.runDataTypeSyncer(ctx, t, updateRecord)
				})
			}
		}
	})
	return group.Wait()
}
// runDataTypeSyncer syncs all records of one type from the databroker in two
// phases: an initial full load via GetAll (retried with exponential backoff
// until it succeeds), then an endless incremental Sync stream resumed from
// the server/record versions observed so far. Every record is forwarded on
// updateRecord.
func (a *Authorize) runDataTypeSyncer(ctx context.Context, typeURL string, updateRecord chan<- *databroker.Record) error {
// serverVersion/recordVersion are the resume cursors carried from the
// initial load into (and across restarts of) the Sync stream below.
var serverVersion, recordVersion string
log.Info().Str("type_url", typeURL).Msg("starting data initial load")
backoff := backoff.NewExponentialBackOff()
for {
// Phase 1: full snapshot of all records of this type.
res, err := a.dataBrokerClient.GetAll(ctx, &databroker.GetAllRequest{
Type: typeURL,
})
if err != nil {
log.Warn().Err(err).Str("type_url", typeURL).Msg("error getting data")
// Wait out the backoff interval, but give up promptly on cancellation.
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(backoff.NextBackOff()):
}
continue
}
serverVersion = res.GetServerVersion()
if typeURL == sessionTypeURL {
// Publish the session server version so session validation elsewhere
// can detect a databroker restart; guarded by the shared data lock.
a.dataBrokerDataLock.Lock()
a.dataBrokerSessionServerVersion = serverVersion
a.dataBrokerDataLock.Unlock()
}
recordVersion = res.GetRecordVersion()
for _, record := range res.GetRecords() {
select {
case <-ctx.Done():
return ctx.Err()
case updateRecord <- record:
}
}
break
}
// Phase 2: incremental sync from the versions captured above.
log.Info().Str("type_url", typeURL).Msg("starting data syncer")
return tryForever(ctx, func(backoff interface{ Reset() }) error {
stream, err := a.dataBrokerClient.Sync(ctx, &databroker.SyncRequest{
ServerVersion: serverVersion,
RecordVersion: recordVersion,
Type: typeURL,
})
if err != nil {
return err
}
for {
res, err := stream.Recv()
if err == io.EOF {
return nil
} else if err != nil {
return err
}
backoff.Reset()
serverVersion = res.GetServerVersion()
// Advance the record cursor to the highest version seen in this batch.
// NOTE(review): this is a lexicographic string comparison — assumes
// version strings compare correctly as strings (e.g. fixed-width or
// lexically ordered); verify against the databroker's version format.
for _, record := range res.GetRecords() {
if record.GetVersion() > recordVersion {
recordVersion = record.GetVersion()
}
}
for _, record := range res.GetRecords() {
select {
case <-stream.Context().Done():
return stream.Context().Err()
case updateRecord <- record:
}
}
}
})
}
// runDataUpdater consumes records from updateRecord and applies each one to
// the shared databroker data set, holding the data lock only for the
// duration of a single update. It runs until ctx is canceled.
func (a *Authorize) runDataUpdater(ctx context.Context, updateRecord <-chan *databroker.Record) error {
	log.Info().Msg("starting data updater")
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case record := <-updateRecord:
			a.dataBrokerDataLock.Lock()
			a.dataBrokerData.Update(record)
			a.dataBrokerDataLock.Unlock()
		}
	}
}
// tryForever invokes callback repeatedly until ctx is canceled, sleeping an
// exponentially increasing interval between attempts. Errors are logged and
// swallowed; the callback is handed the backoff so it can Reset it once it
// makes progress.
func tryForever(ctx context.Context, callback func(onSuccess interface{ Reset() }) error) error {
	bo := backoff.NewExponentialBackOff()
	for {
		if err := callback(bo); err != nil {
			log.Warn().Err(err).Msg("sync error")
		}
		select {
		case <-time.After(bo.NextBackOff()):
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}