deployment: release v0.8.0 (#686)

Co-authored-by: Travis Groth <travisgroth@users.noreply.github.com>
Bobby DeSimone 2020-05-12 19:10:12 -07:00 committed by GitHub
parent e5e043ee12
commit 80166bcc40
19 changed files with 247 additions and 180 deletions

View file

@ -1 +1 @@
v0.7.5
v0.8.0

View file

@ -47,10 +47,11 @@ module.exports = {
{ text: "Enterprise", link: "/enterprise/" },
{
text: "v0.7.x", // current tagged version
text: "v0.8.x", // current tagged version
ariaLabel: "Version menu",
items: [
{ text: "🚧Dev", link: "https://master.docs.pomerium.io/docs" },
{ text: "v0.8.x", link: "https://0-8-0.docs.pomerium.io/docs" },
{ text: "v0.7.x", link: "https://0-7-0.docs.pomerium.io/docs" },
{ text: "v0.6.x", link: "https://0-6-0.docs.pomerium.io/docs" },
{ text: "v0.5.x", link: "https://0-5-0.docs.pomerium.io/docs" },

View file

@ -1,5 +1,6 @@
/docs/reference/reference /configuration/
/docs/reference/reference.html /configuration/
/docs/configuration/ /configuration/
/community/ /docs/community/
/community/index.html /docs/community/

View file

@ -0,0 +1,34 @@
---
title: Announcing Pomerium 0.8
date: 2020-5-11
tags:
- release
- pomerium
- announcement
author: Bobby DeSimone
---
# Announcing Pomerium 0.8
We are excited to announce the [0.8 release] of Pomerium, which adds support for some of our most requested features, including:
- [**Automatic Certificate Management**] — Pomerium can now be configured to automatically retrieve and renew certificates, adding HTTPS to all Pomerium managed routes. In addition, Pomerium will do [OCSP stapling](https://en.wikipedia.org/wiki/OCSP_stapling) for automatic and custom certificates alike.
- [**Advanced Route Matching**] — Operators can now write access policy that supports route matching based on [regex], [prefix], and [path] settings. Pomerium now has the flexibility to support multiple and layered authorization policies across a single managed route.
- And finally, this release adds [**Github**](https://github.com/) as a supported identity provider.
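To make the new matchers concrete, here is a rough policy sketch (hostnames and upstreams are placeholders, and access rules are omitted) showing `path`, `prefix`, and `regex` entries on routes sharing a single host:

```yaml
policy:
  # exact path match
  - from: https://httpbin.corp.example.com
    to: http://httpbin
    path: /headers
  # prefix match
  - from: https://httpbin.corp.example.com
    to: http://httpbin
    prefix: /admin/
  # regex match
  - from: https://httpbin.corp.example.com
    to: http://httpbin
    regex: ^/(admin|api)/.*$
```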
Pomerium had 95 commits from 8 authors across 5 organizations in this release. This release also includes additional new features, general improvements, and bug fixes, a complete list of which can be found in the [changelog].
As always, we recommend upgrading and testing this release in an isolated environment. If you experience any issues, please report them on the Pomerium GitHub [issue tracker].
<SimpleNewsletter/>
[**advanced route matching**]: ../configuration/readme.md#policy
[**automatic certificate management**]: ../docs/reference/certificates.md#per-route-automatic-certificates
[0.8 release]: https://github.com/pomerium/pomerium/releases/tag/v0.8.0
[changelog]: ../docs/CHANGELOG.md
[**github**]: ../docs/identity-providers/github.md
[issue tracker]: https://github.com/pomerium/pomerium/issues
[let's encrypt]: https://letsencrypt.org/
[path]: ../configuration/readme.md#path
[prefix]: ../configuration/readme.md#prefix
[regex]: ../configuration/readme.md#regex

View file

@ -1,7 +1,7 @@
version: "3"
services:
  pomerium:
    image: pomerium/pomerium:v0.7.0
    image: pomerium/pomerium:v0.8.0
    environment:
      # Generate new secret keys. e.g. `head -c32 /dev/urandom | base64`
      - COOKIE_SECRET=V2JBZk0zWGtsL29UcFUvWjVDWWQ2UHExNXJ0b2VhcDI=

View file

@ -1,7 +1,7 @@
version: "3"
services:
  pomerium:
    image: pomerium/pomerium:v0.7.0
    image: pomerium/pomerium:v0.8.0
    environment:
      # Generate new secret keys. e.g. `head -c32 /dev/urandom | base64`
      - COOKIE_SECRET=V2JBZk0zWGtsL29UcFUvWjVDWWQ2UHExNXJ0b2VhcDI=

View file

@ -12,7 +12,7 @@ services:
      - /var/run/docker.sock:/tmp/docker.sock:ro
  pomerium-authenticate:
    image: pomerium/pomerium:v0.7.0 # or `build: .` to build from source
    image: pomerium/pomerium:v0.8.0 # or `build: .` to build from source
    restart: always
    environment:
      - SERVICES=authenticate
@ -39,7 +39,7 @@ services:
      - 443
  pomerium-proxy:
    image: pomerium/pomerium:v0.7.0 # or `build: .` to build from source
    image: pomerium/pomerium:v0.8.0 # or `build: .` to build from source
    restart: always
    environment:
      - SERVICES=proxy
@ -61,7 +61,7 @@ services:
      - 443
  pomerium-authorize:
    image: pomerium/pomerium:v0.7.0 # or `build: .` to build from source
    image: pomerium/pomerium:v0.8.0 # or `build: .` to build from source
    restart: always
    environment:
      - SERVICES=authorize
@ -77,7 +77,7 @@ services:
      - 443
  pomerium-cache:
    image: pomerium/pomerium:v0.7.0 # or `build: .` to build from source
    image: pomerium/pomerium:v0.8.0 # or `build: .` to build from source
    restart: always
    environment:
      - SERVICES=cache

View file

@ -0,0 +1,28 @@
authenticate:
  idp:
    provider: "google"
    clientID: YOUR_CLIENT_ID
    clientSecret: YOUR_SECRET
  service:
    annotations:
      cloud.google.com/app-protocols: '{"https":"HTTPS"}'
proxy:
  service:
    annotations:
      cloud.google.com/app-protocols: '{"https":"HTTPS"}'
service:
  type: NodePort
config:
  rootDomain: corp.beyondperimeter.com
  policy:
    - from: https://hello.corp.beyondperimeter.com
      to: http://nginx.default.svc.cluster.local:80
      allowed_domains:
        - gmail.com
ingress:
  annotations:
    kubernetes.io/ingress.allow-http: false

View file

@ -157,7 +157,8 @@ Autocert requires that port `443` be accessible from the internet in order to co
- Type: `string` pointing to the path of the directory
- Required if using Autocert setting
- Default:
- `/data/autocert` in published Pomerium docker images
- `/data/autocert` in published Pomerium docker images
- [$XDG_DATA_HOME](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html)
- `$HOME/.local/share/pomerium`
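As a minimal sketch of how this fits together (the `autocert` and `autocert_dir` key names are assumed from the Autocert settings described above; adjust the path to your deployment):

```yaml
# config.yaml (sketch)
autocert: true                  # retrieve and renew certificates automatically
autocert_dir: /data/autocert    # where certificates are cached; the defaults listed above apply if unset
```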
@ -374,62 +375,62 @@ Expose a prometheus format HTTP endpoint on the specified port. Disabled by defa
**Metrics tracked**
| Name | Type | Description |
| --------------------------------------------- | --------- | ----------------------------------------------------------------------- |
| boltdb_free_alloc_size_bytes | Gauge | Bytes allocated in free pages |
| boltdb_free_page_n | Gauge | Number of free pages on the freelist |
| boltdb_freelist_inuse_size_bytes | Gauge | Bytes used by the freelist |
| boltdb_open_txn | Gauge | number of currently open read transactions |
| boltdb_pending_page_n | Gauge | Number of pending pages on the freelist |
| boltdb_txn | Gauge | total number of started read transactions |
| boltdb_txn_cursor_total | Counter | Total number of cursors created |
| boltdb_txn_node_deref_total | Counter | Total number of node dereferences |
| boltdb_txn_node_total | Counter | Total number of node allocations |
| boltdb_txn_page_alloc_size_bytes_total | Counter | Total bytes allocated |
| boltdb_txn_page_total | Counter | Total number of page allocations |
| boltdb_txn_rebalance_duration_ms_total | Counter | Total time spent rebalancing |
| boltdb_txn_rebalance_total | Counter | Total number of node rebalances |
| boltdb_txn_spill_duration_ms_total | Counter | Total time spent spilling |
| boltdb_txn_spill_total | Counter | Total number of nodes spilled |
| boltdb_txn_split_total | Counter | Total number of nodes split |
| boltdb_txn_write_duration_ms_total | Counter | Total time spent writing to disk |
| boltdb_txn_write_total | Counter | Total number of writes performed |
| groupcache_cache_hits_total | Counter | Total cache hits in local or cluster cache |
| groupcache_cache_hits_total | Counter | Total cache hits in local or cluster cache |
| groupcache_gets_total | Counter | Total get request, including from peers |
| groupcache_loads_deduped_total | Counter | gets without cache hits after duplicate suppression |
| groupcache_loads_total | Counter | Total gets without cache hits |
| groupcache_local_load_errs_total | Counter | Total local load errors |
| groupcache_local_loads_total | Counter | Total good local loads |
| groupcache_peer_errors_total | Counter | Total errors from peers |
| groupcache_peer_loads_total | Counter | Total remote loads or cache hits without error |
| groupcache_server_requests_total | Counter | Total gets from peers |
| grpc_client_request_duration_ms | Histogram | GRPC client request duration by service |
| grpc_client_request_size_bytes | Histogram | GRPC client request size by service |
| grpc_client_requests_total | Counter | Total GRPC client requests made by service |
| grpc_client_response_size_bytes | Histogram | GRPC client response size by service |
| grpc_server_request_duration_ms | Histogram | GRPC server request duration by service |
| grpc_server_request_size_bytes | Histogram | GRPC server request size by service |
| grpc_server_requests_total | Counter | Total GRPC server requests made by service |
| grpc_server_response_size_bytes | Histogram | GRPC server response size by service |
| http_client_request_duration_ms | Histogram | HTTP client request duration by service |
| http_client_request_size_bytes | Histogram | HTTP client request size by service |
| http_client_requests_total | Counter | Total HTTP client requests made by service |
| http_client_response_size_bytes | Histogram | HTTP client response size by service |
| http_server_request_duration_ms | Histogram | HTTP server request duration by service |
| http_server_request_size_bytes | Histogram | HTTP server request size by service |
| http_server_requests_total | Counter | Total HTTP server requests handled by service |
| http_server_response_size_bytes | Histogram | HTTP server response size by service |
| pomerium_build_info | Gauge | Pomerium build metadata by git revision, service, version and goversion |
| pomerium_config_checksum_int64 | Gauge | Currently loaded configuration checksum by service |
| pomerium_config_last_reload_success | Gauge | Whether the last configuration reload succeeded by service |
| pomerium_config_last_reload_success_timestamp | Gauge | The timestamp of the last successful configuration reload by service |
| redis_conns | Gauge | Number of total connections in the pool |
| redis_hits_total | Counter | Total number of times free connection was found in the pool |
| redis_idle_conns | Gauge | Number of idle connections in the pool |
| redis_misses_total | Counter | Total number of times free connection was NOT found in the pool |
| redis_stale_conns_total | Counter | Total number of stale connections removed from the pool |
| redis_timeouts_total | Counter | Total number of times a wait timeout occurred |
Name | Type | Description
--------------------------------------------- | --------- | -----------------------------------------------------------------------
boltdb_free_alloc_size_bytes | Gauge | Bytes allocated in free pages
boltdb_free_page_n | Gauge | Number of free pages on the freelist
boltdb_freelist_inuse_size_bytes | Gauge | Bytes used by the freelist
boltdb_open_txn | Gauge | number of currently open read transactions
boltdb_pending_page_n | Gauge | Number of pending pages on the freelist
boltdb_txn | Gauge | total number of started read transactions
boltdb_txn_cursor_total | Counter | Total number of cursors created
boltdb_txn_node_deref_total | Counter | Total number of node dereferences
boltdb_txn_node_total | Counter | Total number of node allocations
boltdb_txn_page_alloc_size_bytes_total | Counter | Total bytes allocated
boltdb_txn_page_total | Counter | Total number of page allocations
boltdb_txn_rebalance_duration_ms_total | Counter | Total time spent rebalancing
boltdb_txn_rebalance_total | Counter | Total number of node rebalances
boltdb_txn_spill_duration_ms_total | Counter | Total time spent spilling
boltdb_txn_spill_total | Counter | Total number of nodes spilled
boltdb_txn_split_total | Counter | Total number of nodes split
boltdb_txn_write_duration_ms_total | Counter | Total time spent writing to disk
boltdb_txn_write_total | Counter | Total number of writes performed
groupcache_cache_hits_total | Counter | Total cache hits in local or cluster cache
groupcache_cache_hits_total | Counter | Total cache hits in local or cluster cache
groupcache_gets_total | Counter | Total get request, including from peers
groupcache_loads_deduped_total | Counter | gets without cache hits after duplicate suppression
groupcache_loads_total | Counter | Total gets without cache hits
groupcache_local_load_errs_total | Counter | Total local load errors
groupcache_local_loads_total | Counter | Total good local loads
groupcache_peer_errors_total | Counter | Total errors from peers
groupcache_peer_loads_total | Counter | Total remote loads or cache hits without error
groupcache_server_requests_total | Counter | Total gets from peers
grpc_client_request_duration_ms | Histogram | GRPC client request duration by service
grpc_client_request_size_bytes | Histogram | GRPC client request size by service
grpc_client_requests_total | Counter | Total GRPC client requests made by service
grpc_client_response_size_bytes | Histogram | GRPC client response size by service
grpc_server_request_duration_ms | Histogram | GRPC server request duration by service
grpc_server_request_size_bytes | Histogram | GRPC server request size by service
grpc_server_requests_total | Counter | Total GRPC server requests made by service
grpc_server_response_size_bytes | Histogram | GRPC server response size by service
http_client_request_duration_ms | Histogram | HTTP client request duration by service
http_client_request_size_bytes | Histogram | HTTP client request size by service
http_client_requests_total | Counter | Total HTTP client requests made by service
http_client_response_size_bytes | Histogram | HTTP client response size by service
http_server_request_duration_ms | Histogram | HTTP server request duration by service
http_server_request_size_bytes | Histogram | HTTP server request size by service
http_server_requests_total | Counter | Total HTTP server requests handled by service
http_server_response_size_bytes | Histogram | HTTP server response size by service
pomerium_build_info | Gauge | Pomerium build metadata by git revision, service, version and goversion
pomerium_config_checksum_int64 | Gauge | Currently loaded configuration checksum by service
pomerium_config_last_reload_success | Gauge | Whether the last configuration reload succeeded by service
pomerium_config_last_reload_success_timestamp | Gauge | The timestamp of the last successful configuration reload by service
redis_conns | Gauge | Number of total connections in the pool
redis_hits_total | Counter | Total number of times free connection was found in the pool
redis_idle_conns | Gauge | Number of idle connections in the pool
redis_misses_total | Counter | Total number of times free connection was NOT found in the pool
redis_stale_conns_total | Counter | Total number of stale connections removed from the pool
redis_timeouts_total | Counter | Total number of times a wait timeout occurred
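For example, a minimal sketch of wiring the metrics endpoint described above into Prometheus (the `metrics_address` key, port, and target name are illustrative assumptions):

```yaml
# pomerium config.yaml (sketch)
metrics_address: ":9090"

# prometheus.yml (sketch): scrape the exposed endpoint
scrape_configs:
  - job_name: pomerium
    static_configs:
      - targets: ["pomerium:9090"]
```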
### Tracing
@ -439,10 +440,10 @@ Each unit of work is called a Span in a trace. Spans include metadata about the wor
#### Shared Tracing Settings
| Config Key | Description | Required |
| :--------------- | :---------------------------------------------------------------- | -------- |
| tracing_provider | The name of the tracing provider. (e.g. jaeger) | ✅ |
| tracing_debug | Will disable [sampling](https://opencensus.io/tracing/sampling/). | ❌ |
Config Key | Description | Required
:--------------- | :---------------------------------------------------------------- | --------
tracing_provider | The name of the tracing provider. (e.g. jaeger) | ✅
tracing_debug | Will disable [sampling](https://opencensus.io/tracing/sampling/). | ❌
#### Jaeger
@ -454,10 +455,10 @@ Each unit of work is called a Span in a trace. Spans include metadata about the wor
- Service dependency analysis
- Performance / latency optimization
| Config Key | Description | Required |
| :-------------------------------- | :------------------------------------------ | -------- |
| tracing_jaeger_collector_endpoint | Url to the Jaeger HTTP Thrift collector. | ✅ |
| tracing_jaeger_agent_endpoint | Send spans to jaeger-agent at this address. | ✅ |
Config Key | Description | Required
:-------------------------------- | :------------------------------------------ | --------
tracing_jaeger_collector_endpoint | Url to the Jaeger HTTP Thrift collector. | ✅
tracing_jaeger_agent_endpoint | Send spans to jaeger-agent at this address. | ✅
#### Example
@ -966,7 +967,7 @@ Note: This setting will replace (not append) the system's trust store for a give
- Type: [base64 encoded] `string` or relative file location
- Optional
Pomerium supports client certificates which can be used to enforce [mutually authenticated and encrypted TLS connections](https://en.wikipedia.org/wiki/Mutual_authentication) (mTLS). For more details, see our [mTLS example repository](https://github.com/pomerium/examples/tree/master/mutual-tls) and the [certificate docs](./certificates.md).
Pomerium supports client certificates which can be used to enforce [mutually authenticated and encrypted TLS connections](https://en.wikipedia.org/wiki/Mutual_authentication) (mTLS). For more details, see our [mTLS example repository](https://github.com/pomerium/examples/tree/master/mutual-tls) and the [certificate docs](../docs/reference/certificates.md).
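A minimal sketch of enforcing client certificates (the `client_ca_file` key name is an assumption here for illustration; consult the setting above for the exact name, which also accepts an inline base64-encoded value):

```yaml
# config.yaml (sketch): clients must present a certificate signed by this CA
client_ca_file: /path/to/client-root-ca.pem
```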
### Set Request Headers
@ -1008,16 +1009,17 @@ See [ProxyPreserveHost](http://httpd.apache.org/docs/2.0/mod/mod_proxy.html#prox
- Type: [base64 encoded] `string`
- Optional
Signing key is the base64 encoded key used to sign outbound requests. For more information see the [signed headers](./signed-headers.md) docs.
Signing key is the base64 encoded key used to sign outbound requests. For more information see the [signed headers] docs.
If no certificate is specified, one will be generated for you and the base64'd public key will be added to the logs.
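A sketch of the setting (the value is a placeholder; one way to produce a P-256 key is shown in the comment):

```yaml
# config.yaml (sketch): base64 of an EC (P-256) private key used to sign outbound requests
# e.g. openssl ecparam -genkey -name prime256v1 -noout -out ec_private.pem, then base64-encode it
signing_key: "YOUR_BASE64_ENCODED_EC_PRIVATE_KEY"
```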
[base64 encoded]: https://en.wikipedia.org/wiki/Base64
[environmental variables]: https://en.wikipedia.org/wiki/Environment_variable
[identity provider]: ./identity-providers.md
[identity provider]: ../docs/identity-providers.md
[json]: https://en.wikipedia.org/wiki/JSON
[letsencrypt]: https://letsencrypt.org/
[oidc rfc]: https://openid.net/specs/openid-connect-core-1_0.html#AuthRequest
[script]: https://github.com/pomerium/pomerium/blob/master/scripts/generate_wildcard_cert.sh
[signed headers]: ./signed-headers.md
[toml]: https://en.wikipedia.org/wiki/TOML
[yaml]: https://en.wikipedia.org/wiki/YAML

View file

@ -1,5 +1,36 @@
# Changelog
## v0.8.0
To see a complete list of changes [see the diff](https://github.com/pomerium/pomerium/compare/v0.7.0...v0.8.0).
### New
- cryptutil: add automatic certificate management @desimone [GH-644]
- implement path-based route matching @calebdoxsey [GH-615]
- internal/identity: implement github provider support @Lumexralph [GH-582]
- proxy: add configurable JWT claim headers @travisgroth [GH-596]
- proxy: remove extra session unmarshalling @desimone [GH-592]
### Changes
- ci: Switch integration tests from minikube to kind @travisgroth [GH-656]
- integration-tests: add CORS test @calebdoxsey [GH-662]
- integration-tests: add websocket enabled/disabled test @calebdoxsey [GH-661]
- integration-tests: set_request_headers and preserve_host_header options @calebdoxsey [GH-668]
- pre-commit: add pre-commit configuration @calebdoxsey [GH-666]
- proxy: improve JWT header behavior @travisgroth [GH-642]
### Fixed
- authorize: fix authorization check for allowed_domains to only match current route @calebdoxsey [GH-624]
- authorize: fix unexpected panic on reload @travisgroth [GH-652]
- site: fix site on mobile @desimone [GH-597]
### Documentation
- deploy: autocert documentation and defaults @travisgroth [GH-658]
## v0.7.5
### Fixed
@ -46,7 +77,7 @@ There were no changes in the v0.7.1 release, but we updated the build process sl
### New
- \*: remove import path comments @desimone [GH-545]
- *: remove import path comments @desimone [GH-545]
- authenticate: make callback path configurable @desimone [GH-493]
- authenticate: return 401 for some specific error codes @cuonglm [GH-561]
- authorization: log audience claim failure @desimone [GH-553]
@ -131,6 +162,7 @@ There were no changes in the v0.7.1 release, but we updated the build process sl
- config: Remove CookieRefresh [GH-428] @u5surf [GH-436]
- config: validate that `shared_key` does not contain whitespace @travisgroth [GH-427]
- httputil : wrap handlers for additional context @desimone [GH-413]
- forward-auth: validate using forwarded uri header @branchmispredictor [GH-600]
### Fixed

View file

@ -52,6 +52,6 @@ Browse to `external-httpbin.your.domain.example`. Connections between you and [h
[download]: https://github.com/pomerium/pomerium/releases
[environmental configuration variables]: https://12factor.net/config
[httpbin]: https://httpbin.org/
[identity provider]: ../docs/identity-providers/
[identity provider]: ../identity-providers/
[make]: https://en.wikipedia.org/wiki/Make_(software)
[tls certificates]: ../reference/certificates.md

View file

@ -19,16 +19,19 @@ This quick-start will show you how to deploy Pomerium with [Helm](https://helm.s
- Install [helm](https://helm.sh/docs/using_helm/)
- [TLS certificates]
Though there are [many ways](https://kubernetes.io/docs/setup/pick-right-solution/) to work with Kubernetes, for the purpose of this guide, we will be using Google's [Kubernetes Engine](https://cloud.google.com/kubernetes-engine/). That said, most of the following steps should be very similar using any other provider.
Though there are [many ways](https://unofficial-kubernetes.readthedocs.io/en/latest/setup/pick-right-solution/) to work with Kubernetes, for the purpose of this guide, we will be using Google's [Kubernetes Engine](https://cloud.google.com/kubernetes-engine/). That said, most of the following steps should be very similar using any other provider.
In addition to sharing many of the same features as the Kubernetes quickstart guide, the default helm deployment script also includes a bootstrapped certificate authority enabling mutually authenticated and encrypted communication between services that does not depend on the external LetsEncrypt certificates. Having the external domain certificate de-coupled makes it easier to renew external certificates.
## Configure
Download and modify the following [helm_gke.sh script][./scripts/helm_gke.sh] to match your [identity provider] and [TLS certificates] settings.
Download and modify the following [helm_gke.sh script][./scripts/helm_gke.sh] and [values file][/docs/configuration/examples/kubernetes/values.yaml] to match your [identity provider] and [TLS certificates] settings.
<<<@/docs/configuration/examples/kubernetes/values.yaml
<<<@/scripts/helm_gke.sh
## Run
Run [./scripts/helm_gke.sh] which will:

View file

@ -63,8 +63,8 @@ You can also navigate to the special pomerium endpoint `httpbin.your.domain.exam
![currently logged in user](./img/logged-in-as.png)
[./kubernetes_gke.sh]: ../reference/examples#google-kubernetes-engine
[example kubernetes files]: ../reference/examples#google-kubernetes-engine
[./kubernetes_gke.sh]: ../../configuration/examples.md#google-kubernetes-engine
[example kubernetes files]: ../../configuration/examples.md#google-kubernetes-engine
[identity provider]: ../identity-providers/readme.md
[letsencrypt]: https://letsencrypt.org/
[script]: https://github.com/pomerium/pomerium/blob/master/scripts/generate_wildcard_cert.sh

View file

@ -102,5 +102,5 @@ Certificates, TLS, and Public Key Cryptography is a vast subject we cannot adequ
[certificate_authority]: ../../configuration/readme.md#certificate-authority
[certificate_key]: ../../configuration/readme.md#certificates
[override_certificate_name]: ../../configuration/readme.md#override-certificate-name
[principles]: ../docs/#why
[zero-trust]: ../docs/#why
[principles]: ../#why
[zero-trust]: ../#why

View file

@ -1,7 +1,6 @@
---
title: Getting the user's identity
description: >-
This article describes how to get a user's identity with Pomerium.
description: This article describes how to get a user's identity with Pomerium.
---
# Getting the user's identity
@ -19,19 +18,19 @@ To secure your app with signed headers, you'll need the following:
A JWT attesting to the authorization of a given request is added to the downstream HTTP request header `x-pomerium-jwt-assertion`. You should verify that the JWT contains at least the following claims:
| [JWT] | description |
| :------: | ------------------------------------------------------------------------------------------------------ |
| `exp` | Expiration time in seconds since the UNIX epoch. Allow 1 minute for skew. |
| `iat` | Issued-at time in seconds since the UNIX epoch. Allow 1 minute for skew. |
| `aud` | The client's final domain e.g. `httpbin.corp.example.com`. |
| `iss` | Issuer must be the URL of your authentication domain e.g. `authenticate.corp.example`. |
| `sub` | Subject is the user's id. Can be used instead of the `x-pomerium-authenticated-user-id` header. |
| `email` | Email is the user's email. Can be used instead of the `x-pomerium-authenticated-user-email` header. |
| `groups` | Groups is the user's groups. Can be used instead of the `x-pomerium-authenticated-user-groups` header. |
[JWT] | description
:------: | ------------------------------------------------------------------------------------------------------
`exp` | Expiration time in seconds since the UNIX epoch. Allow 1 minute for skew.
`iat` | Issued-at time in seconds since the UNIX epoch. Allow 1 minute for skew.
`aud` | The client's final domain e.g. `httpbin.corp.example.com`.
`iss` | Issuer must be the URL of your authentication domain e.g. `authenticate.corp.example`.
`sub` | Subject is the user's id. Can be used instead of the `x-pomerium-authenticated-user-id` header.
`email` | Email is the user's email. Can be used instead of the `x-pomerium-authenticated-user-email` header.
`groups` | Groups is the user's groups. Can be used instead of the `x-pomerium-authenticated-user-groups` header.
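For orientation, a decoded assertion payload might look roughly like the following (values are illustrative placeholders, shown here as YAML):

```yaml
aud: httpbin.corp.example.com
iss: authenticate.corp.example
sub: 1234567890              # user id from your identity provider
email: user@corp.example.com
groups:
  - engineering@corp.example.com
iat: 1589314163              # issued-at, seconds since the UNIX epoch
exp: 1589317763              # expiration, seconds since the UNIX epoch
```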
### Manual verification
Though you will very likely be verifying signed-headers programmatically in your application's middleware, and using a third-party JWT library, if you are new to JWT it may be helpful to show what manual verification looks like. The following guide assumes you are using the provided [docker-compose.yml] as a base and [httpbin]. Httpbin gives us a convenient way of inspecting client headers.
Though you will very likely be verifying signed-headers programmatically in your application's middleware, and using a third-party JWT library, if you are new to JWT it may be helpful to show what manual verification looks like.
1. Provide pomerium with a base64 encoded Elliptic Curve ([NIST P-256] aka [secp256r1] aka prime256v1) Private Key. In production, you'd likely want to get these from your KMS.
@ -49,17 +48,17 @@ Copy the base64 encoded value of your private key to `pomerium-proxy`'s environm
SIGNING_KEY=ZxqyyIPPX0oWrrOwsxXgl0hHnTx3mBVhQ2kvW1YB4MM=
```
2. Reload `pomerium-proxy`. Navigate to httpbin (by default, `https://httpbin.corp.${YOUR-DOMAIN}.com`), and log in as usual. Click **request inspection**. Select `/headers`. Click **try it out** and then **execute**. You should see something like the following.
1. Reload `pomerium-proxy`. Navigate to httpbin (by default, `https://httpbin.corp.${YOUR-DOMAIN}.com`), and log in as usual. Click **request inspection**. Select `/headers`. Click **try it out** and then **execute**. You should see something like the following.
![httpbin displaying jwt headers](./img/inspect-headers.png)
3. `X-Pomerium-Jwt-Assertion` is the signature value. It's less scary than it looks and basically just a compressed JSON blob as described above. Navigate to [jwt.io], which provides a helpful GUI to manually verify JWT values.
1. `X-Pomerium-Jwt-Assertion` is the signature value. It's less scary than it looks and basically just a compressed JSON blob as described above. Navigate to [jwt.io], which provides a helpful GUI to manually verify JWT values.
4. Paste the value of `X-Pomerium-Jwt-Assertion` header token into the `Encoded` form. You should notice that the decoded values look much more familiar.
2. Paste the value of `X-Pomerium-Jwt-Assertion` header token into the `Encoded` form. You should notice that the decoded values look much more familiar.
![httpbin displaying decoded jwt](./img/verifying-headers-1.png)
5. Finally, we want to cryptographically verify the validity of the token. To do this, we will need the signer's public key. You can simply copy and paste the output of `cat ec_public.pem`.
1. Finally, we want to cryptographically verify the validity of the token. To do this, we will need the signer's public key. You can simply copy and paste the output of `cat ec_public.pem`.
![httpbin displaying verified jwt](./img/verifying-headers-2.png)

View file

@ -5,7 +5,7 @@ description: >-
for Pomerium. Please read it carefully.
---
# Since 0.8.0
# Since 0.7.0
## Breaking
@ -17,6 +17,7 @@ Although it's unlikely anyone ever used it, prior to 0.8.0 the policy configurat
policy:
- from: "https://example.com/some/path"
```
The proxy and authorization server would simply ignore the path and route/authorize based on the host name.
With the introduction of `prefix`, `path` and `regex` fields to the policy route configuration, we decided not to support using a path in the `from` url, since the behavior was somewhat ambiguous and better handled by the explicit fields.
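For example (a sketch; the `to` URL is a placeholder), a route that previously embedded the path in its `from` URL would now be written with an explicit `path` or `prefix` field:

```yaml
policy:
  # previously: - from: "https://example.com/some/path"
  - from: "https://example.com"
    to: "https://internal-app.local"
    path: "/some/path"
```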

View file

@ -218,7 +218,7 @@ Pomerium is an identity-aware access proxy that can be used to serve as an identity
### Configure
Before installing, we will configure Pomerium's configuration settings in `config.yaml`. Other than the typical configuration settings covered in the quick-start guides, we will add a few settings that will make working with Kubernetes Dashboard easier.
Before installing, we will configure Pomerium's configuration settings in `values.yaml`. Other than the typical configuration settings covered in the quick-start guides, we will add a few settings that will make working with Kubernetes Dashboard easier.
We can retrieve the token to add to our proxied policy's authorization header as follows.
@ -245,33 +245,43 @@ token: eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.......
The above token then needs to be assigned to our route configuration and policy.
```yaml
# config.yaml
forward_auth_url: https://forwardauth.domain.example
# values.yaml
authenticate:
  idp:
    provider: "google"
    clientID: YOUR_CLIENT_ID
    clientSecret: YOUR_SECRET
policy:
  # this route is directly proxied by pomerium & injects the authorization header
  - from: https://dashboard-proxied.domain.example
    to: https://helm-dashboard-kubernetes-dashboard
    allowed_users:
      - user@domain.example
    tls_skip_verify: true # dashboard uses self-signed certificates in its default configuration
    set_request_headers:
      Authorization: Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.....
forwardAuth:
  enabled: true
  # this route is indirectly checked for access using forward-auth
  - from: https://dashboard-forwardauth.domain.example
    to: https://helm-dashboard-kubernetes-dashboard
    allowed_users:
      - user@domain.example
config:
  sharedSecret: YOUR_SHARED_SECRET
  cookieSecret: YOUR_COOKIE_SECRET
  rootDomain: domain.example
  policy:
    # this route is directly proxied by pomerium & injects the authorization header
    - from: https://dashboard-proxied.domain.example
      to: https://helm-dashboard-kubernetes-dashboard
      allowed_users:
        - user@domain.example
      tls_skip_verify: true # dashboard uses self-signed certificates in its default configuration
      set_request_headers:
        Authorization: Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.....
    # this route is indirectly checked for access using forward-auth
    - from: https://dashboard-forwardauth.domain.example
      to: https://helm-dashboard-kubernetes-dashboard
      allowed_users:
        - user@domain.example
ingress:
  annotations:
    kubernetes.io/ingress.class: "nginx"
    cert-manager.io/issuer: "letsencrypt-prod" # see `le.issuer.yaml`
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
  secretName: pomerium-ingress-tls
```
We then add our configuration to Kubernetes as a [ConfigMap](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/).
```bash
# add our pomerium policy to kubernetes as a configmap
$ kubectl create configmap config --from-file="config.yaml"="config.yaml"
```
### Install
Finally, we get to install Pomerium! 🎉 Once again, we will use Helm to deploy Pomerium.
@ -279,24 +289,14 @@ Finally, we get to install Pomerium! 🎉 Once again, we will use Helm to deploy
```bash
helm install \
"helm-pomerium" \
stable/pomerium \
--set config.rootDomain="domain.example" \
--set config.existingConfig="config" \
--set authenticate.idp.provider="google" \
--set authenticate.idp.clientID="YOUR_CLIENT_ID" \
--set authenticate.idp.clientSecret="YOUR_SECRET"
pomerium/pomerium \
--values values.yaml
```
## Putting it all together
Now we just need to tell external traffic how to route everything by deploying the following ingresses.
```sh
$ kubectl apply -f docs/recipes/yml/pomerium.ingress.yaml
```
<<< @/docs/recipes/yml/pomerium.ingress.yaml
```sh
$ kubectl apply -f docs/recipes/yml/dashboard-forwardauth.ingress.yaml
```

View file

@ -1,33 +0,0 @@
# pomerium.ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: pomerium-authenticate
  annotations:
    kubernetes.io/ingress.class: "nginx"
    cert-manager.io/issuer: "letsencrypt-prod" # see `le.issuer.yaml`
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
spec:
  tls:
    - hosts:
        - authenticate.domain.example
      secretName: pomerium-authenticate-external-tls
    - hosts:
        - forwardauth.domain.example
      secretName: pomerium-forwardauth-external-tls
  rules:
    - host: authenticate.domain.example
      http:
        paths:
          - path: /
            backend:
              serviceName: helm-pomerium-authenticate
              servicePort: https
    - host: forwardauth.domain.example
      http:
        paths:
          - path: /
            backend:
              serviceName: helm-pomerium-proxy
              servicePort: https

View file

@ -7,7 +7,7 @@
# NOTE! If you are using gsuite, you should also set `authenticate.idp.serviceAccount`, see docs !
echo "=> [GCE] creating cluster"
gcloud container clusters create pomerium --region us-west2
gcloud container clusters create pomerium --region us-west2 --num-nodes 1
echo "=> [GCE] get cluster credentials so we can use kubctl locally"
gcloud container clusters get-credentials pomerium --region us-west2
@ -15,27 +15,26 @@ gcloud container clusters get-credentials pomerium --region us-west2
echo "=> add pomerium's helm repo"
helm repo add pomerium https://helm.pomerium.io
echo "=> add bitnami's helm repo"
helm repo add bitnami https://charts.bitnami.com/bitnami
echo "=> install nginx as a sample hello world app"
helm upgrade --install nginx bitnami/nginx --set service.type=ClusterIP
echo "=> update helm"
helm repo update
echo "=> install pomerium with helm"
echo "=> initiliaze a configmap setting from config.example.yaml"
kubectl create configmap config --from-file="config.yaml"="docs/configuration/examples/kubernetes/kubernetes-config.yaml"
helm install \
pomerium \
pomerium/pomerium \
--set service.type="NodePort" \
--set config.rootDomain="corp.beyondperimeter.com" \
--set config.existingConfig="config" \
--set config.sharedSecret=$(head -c32 /dev/urandom | base64) \
--set config.cookieSecret=$(head -c32 /dev/urandom | base64) \
--set ingress.secret.name="pomerium-tls" \
--set ingress.secret.cert=$(base64 -i "$HOME/.acme.sh/*.corp.beyondperimeter.com_ecc/fullchain.cer") \
--set ingress.secret.key=$(base64 -i "$HOME/.acme.sh/*.corp.beyondperimeter.com_ecc/*.corp.beyondperimeter.com.key") \
--set-string ingress.annotations."kubernetes\.io/ingress\.allow-http"=false \
--set authenticate.service.annotations."cloud\.google\.com/app-protocols"='\{"https":"HTTPS"\}' \
--set proxy.service.annotations."cloud\.google\.com/app-protocols"='\{"https":"HTTPS"\}'
--values docs/configuration/examples/kubernetes/values.yaml
# When done, clean up by deleting the cluster!
# helm del $(helm ls --all --short) --purge # deletes all your helm instances