docs: update changelog, documentation, and helm configurations. (#63)

- Update changelog.
- Update docs to cover authorization support.
- Update helm to support authorization and the policy file (see the policy sketch below).
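For reference, a minimal sketch of the policy file the chart can now consume. The file name policy.example.yaml comes from the scripts below; the route fields (from, to, allowed_domains) are assumptions and should be checked against the Pomerium docs:

# hypothetical policy.example.yaml contents; field names are assumptions
cat <<'EOF' > policy.example.yaml
- from: httpbin.corp.pomerium.io
  to: http://httpbin
  allowed_domains:
    - pomerium.io
EOF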
Bobby DeSimone 2019-03-19 10:55:41 -07:00 committed by GitHub
parent eb9dff0c48
commit 45e6a8dc57
41 changed files with 901 additions and 254 deletions


@@ -1,16 +1,10 @@
#!/bin/bash
# requires acme.sh, see : https://github.com/Neilpang/acme.sh
# uncomment below to install
# curl https://get.acme.sh | sh
# assumes cloudflare, but many DNS providers are supported
export CF_Key="x"
export CF_Email="x@x.com"
echo "=> manually issue a wildcard certificate, renewal is up to you!"
$HOME/.acme.sh/acme.sh \
--issue \
-k ec-256 \
-d '*.corp.beyondperimeter.com' \
--dns dns_cf
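To confirm the wildcard certificate was actually issued, a hedged check; --list is an acme.sh subcommand that prints managed domains, but the _ecc output path below is an assumption about acme.sh's default layout:

# list the domains acme.sh currently manages
$HOME/.acme.sh/acme.sh --list
# inspect the issued cert; the _ecc directory naming is an assumption
openssl x509 -noout -subject -dates \
-in "$HOME/.acme.sh/*.corp.beyondperimeter.com_ecc/*.corp.beyondperimeter.com.cer"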

scripts/helm_gke.sh Executable file, 47 additions

@@ -0,0 +1,47 @@
#!/bin/bash
# PRE-REQ:
# 1) Install Helm: verify the contents of the install script below before piping it to bash.
# curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
echo "=> [GCE] creating cluster"
gcloud container clusters create pomerium
echo "=> [GCE] get cluster credentials so we can use kubctl locally"
gcloud container clusters get-credentials pomerium
echo "=> [GCE] ensure your user account has the cluster-admin role in your cluster"
kubectl create \
clusterrolebinding \
user-admin-binding \
--clusterrole=cluster-admin \
--user=$(gcloud config get-value account)
echo "=> Create a service account that Tiller, the server side of Helm, can use for deploying your charts."
kubectl create serviceaccount tiller --namespace kube-system
echo "=> Grant the Tiller service account the cluster-admin role in your cluster"
kubectl create clusterrolebinding tiller-admin-binding --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
echo "=> initialize Helm to install Tiller in your cluster"
helm init --service-account=tiller
helm repo update
echo "=> install pomerium with helm substituting configuration values as required; be sure to change these"
helm install helm/. \
--set config.sharedSecret="$(head -c32 /dev/urandom | base64)" \
--set config.cookieSecret="$(head -c32 /dev/urandom | base64)" \
--set config.cert="$(base64 -i cert.pem)" \
--set config.key="$(base64 -i privkey.pem)" \
--set config.policy="$(cat policy.example.yaml)" \
--set authenticate.idp.provider="google" \
--set authenticate.proxyRootDomains="pomerium.io" \
--set authenticate.redirectUrl="https://auth.corp.pomerium.io/oauth2/callback" \
--set authenticate.idp.clientID="REPLACE_ME" \
--set authenticate.idp.clientSecret="REPLACE_ME" \
--set proxy.authenticateServiceUrl="https://auth.corp.pomerium.io" \
--set proxy.authorizeServiceUrl="https://access.corp.pomerium.io"
# When done, clean up by deleting the cluster!
#
# helm del $(helm ls --all --short) --purge #!!! DELETES ALL YOUR HELM INSTANCES!
# gcloud container clusters delete pomerium
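A hedged smoke test after the install; the default namespace is an assumption about where the chart deploys:

# hedged sketch: confirm the release registered and the pods came up;
# the default namespace is an assumption about where the chart installs
helm ls
kubectl get pods,svc -o wide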


@@ -28,8 +28,8 @@ kubectl create configmap -n pomerium policy --from-literal=policy=$(cat policy.e
echo "=> settingidp-client-secret, you changed this right? :)"
exit 1 # comment out or delete this line once you change the following two settings
# kubectl create secret generic -n pomerium idp-client-secret --from-literal=idp-client-secret=REPLACEME
# kubectl create secret generic -n pomerium idp-service-account --from-literal=idp-service-account=$(base64 -i gsuite.service.account.json)
kubectl create secret generic -n pomerium idp-client-secret --from-literal=idp-client-secret=REPLACE_ME
kubectl create secret generic -n pomerium idp-service-account --from-literal=idp-service-account=$(base64 -i gsuite.service.account.json)
echo "=> apply the proxy, authorize, and authenticate deployment configs"
kubectl apply -f docs/docs/examples/kubernetes/authorize.deploy.yml
@@ -48,4 +48,4 @@ kubectl apply -f docs/docs/examples/kubernetes/ingress.yml
# kubectl apply -f docs/docs/examples/kubernetes/ingress.nginx.yml
# When done, clean up by deleting the cluster!
# gcloud container clusters delete pomerium
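Before applying the deployments it can help to verify the objects created above exist; a hedged check using the names from this script:

# hedged sketch: confirm the policy configmap and idp secrets were created
kubectl get configmap policy -n pomerium
kubectl get secret idp-client-secret idp-service-account -n pomerium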


@@ -1,47 +0,0 @@
#!/bin/bash
# Thank you @ https://medium.com/@benjamin.black/how-to-obtain-an-ecdsa-wildcard-certificate-from-lets-encrypt-be217c737cfe
# See also:
# https://cloud.google.com/iot/docs/how-tos/credentials/keys#generating_an_es256_key_with_a_self-signed_x509_certificate
# https://community.letsencrypt.org/t/ecc-certificates/46729
#
# Let's Encrypt currently generates RSA certificates, but not yet ECDSA certificates.
# Support for generating ECDSA certificates is on the horizon, but is not here yet.
# However, Let's Encrypt does support *signing* ECDSA certificates when presented with a
# Certificate Signing Request. So we can generate the appropriate CSR on the client,
# and send it to Let's Encrypt using the --csr option of the certbot client for Let's Encrypt to sign.
# The following generates a NIST P-256 (aka secp256r1 aka prime256v1) EC Key Pair
openssl ecparam \
-genkey \
-name prime256v1 \
-noout \
-out ec_private.pem
openssl req -x509 -new \
-key ec_private.pem \
-days 365 \
-out ec_public.pem \
-subj "/CN=unused"
openssl req -new \
-sha512 \
-key ec_private.pem \
-nodes \
-subj "/CN=beyondperimeter.com" \
-reqexts SAN \
-extensions SAN \
-config <(cat /etc/ssl/openssl.cnf <(printf '[SAN]\nsubjectAltName=DNS:*.corp.beyondperimeter.com')) \
-out csr.pem \
-outform pem
openssl req -in csr.pem -noout -text
certbot certonly \
--preferred-challenges dns-01 \
--work-dir le/work \
--config-dir le/config \
--logs-dir le/logs \
--agree-tos \
--email bobbydesimone@gmail.com \
-d '*.corp.beyondperimeter.com' \
--csr csr.pem
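To tie the signed certificate back to the helm values above, a hedged follow-up; 0001_chain.pem is an assumption about how certbot names its output when run with --csr:

# hedged sketch: base64-encode the signed chain and the EC private key so they
# can be supplied as config.cert / config.key to the helm install above;
# 0001_chain.pem is an assumed certbot --csr output file name
base64 -i 0001_chain.pem > cert.b64
base64 -i ec_private.pem > key.b64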