envoy: Initial changes

This commit is contained in:
Travis Groth 2020-05-18 16:34:31 -04:00
parent 8f78497e99
commit 99e788a9b4
107 changed files with 2542 additions and 3322 deletions

View file

@ -1,6 +1,7 @@
#!/bin/bash
set -euxo pipefail
# Absolute directory of this script, so sibling helpers (embed-envoy.bash)
# resolve no matter what directory the script is invoked from.
_script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
# Scratch directory that receives the linux build and the docker build context.
_dir=/tmp/pomerium-dev-docker
mkdir -p "$_dir"
@ -14,6 +15,15 @@ env GOOS=linux \
-o "$_dir/pomerium" \
./cmd/pomerium
# embed envoy: append an envoy binary to the freshly built pomerium binary
(
  cd "$_script_dir"
  env GOOS=linux GOARCH=amd64 ./embed-envoy.bash "$_dir/pomerium"
)
# build docker image
(
@ -30,5 +40,11 @@ ENTRYPOINT [ "/bin/pomerium" ]
CMD ["-config","/pomerium/config.yaml"]
EOF
docker build --tag=pomerium/pomerium:dev .
# Load into kind only when kind is installed; previously this ran
# unconditionally, so under `set -e` a missing kind binary aborted the
# script before the minikube branch below could ever run.
if command -v kind >/dev/null 2>&1; then
  kind load docker-image pomerium/pomerium:dev
fi
# build for minikube
if command -v minikube >/dev/null 2>&1; then
  # point the docker CLI at minikube's docker daemon and rebuild there
  eval "$(minikube docker-env --shell=bash)"
  docker build --tag=pomerium/pomerium:dev .
fi
)

50
scripts/embed-envoy.bash Executable file
View file

@ -0,0 +1,50 @@
#!/bin/bash
# embed-envoy.bash: downloads a getenvoy build for the current platform and
# appends it to the pomerium binary passed as $1 as a zip payload.
set -euo pipefail
# First positional argument is mandatory: path of the pomerium binary to modify.
_pomerium_binary_path="${1?"pomerium binary path is required"}"
# Target platform is taken from the Go toolchain's environment.
_go_os="$(go env GOOS)"
_go_arch="$(go env GOARCH)"
# Succeeds when the local C library is musl (e.g. Alpine) rather than glibc.
is_musl() {
  ldd /bin/ls | grep -q musl
}
# URLs from: https://tetrate.bintray.com/getenvoy/manifest.json
_envoy_version="1.14.1"
_envoy_build=""
# Map GOOS/GOARCH onto a getenvoy build flavor. musl-based linux (Alpine)
# has no matching build, so it falls through with _envoy_build left empty.
case "${_go_os}-${_go_arch}" in
  linux-amd64)
    is_musl || _envoy_build="LINUX_GLIBC"
    ;;
  darwin-amd64)
    _envoy_build="DARWIN"
    ;;
esac
if [ -z "$_envoy_build" ]; then
  echo "this platform is not supported for embedded envoy"
  exit 1
fi
# Resolve the download URL for this version/build from the getenvoy manifest.
# --fail makes curl exit non-zero on an HTTP error (caught by pipefail)
# instead of piping an error page into jq.
_envoy_url="$(
  curl --silent --fail "https://tetrate.bintray.com/getenvoy/manifest.json" |
    jq -r '.flavors.standard.versions["'"$_envoy_version"'"].builds["'"$_envoy_build"'"].downloadLocationUrl'
)"
# jq -r prints the literal string "null" when the version or build key is
# missing; without this check the script would try to download the URL "null".
if [ -z "$_envoy_url" ] || [ "$_envoy_url" == "null" ]; then
  echo "no envoy download found for version $_envoy_version build $_envoy_build" >&2
  exit 1
fi
_abs_pomerium_binary_path="$(realpath "$_pomerium_binary_path")"
# Cache directory so repeated builds reuse the downloaded tarball.
_wd="/tmp/pomerium-embedded-files"
mkdir -p "$_wd"
(
  cd "$_wd"
  if [ ! -f "envoy-$_envoy_version.tar.xz" ]; then
    echo "downloading $_envoy_url"
    # --fail: an HTTP error must not be saved as a tarball. Download to a
    # temp name and rename so an interrupted transfer is never mistaken
    # for a complete cached archive on the next run.
    curl --silent --fail --location \
      --output "envoy-$_envoy_version.tar.xz.part" "$_envoy_url"
    mv "envoy-$_envoy_version.tar.xz.part" "envoy-$_envoy_version.tar.xz"
  fi
  echo "extracting"
  # --strip-components=3 drops the archive's leading directories, leaving
  # the bare ./envoy binary in the working directory.
  tar --extract --xz --strip-components=3 --file "envoy-$_envoy_version.tar.xz"
  echo "appending to $_abs_pomerium_binary_path"
  # if this binary already has a zip file appended to it
  # (unzip -z prints the archive comment; any output on a non-zip is an
  # error message, so empty output means a zip is already present)
  if [ -z "$(unzip -z -qq "$_abs_pomerium_binary_path" 2>&1)" ]; then
    # NOTE(review): zip -A <archive> <file> updates the envoy entry in the
    # already-appended archive while adjusting offsets — confirm intent.
    zip -A "$_abs_pomerium_binary_path" envoy
  else
    # first embed: build a fresh zip and concatenate it onto the binary
    zip envoy.zip envoy
    cat envoy.zip >>"$_abs_pomerium_binary_path"
  fi
  # fix up internal zip offsets so the appended archive is valid at its
  # new location inside the binary
  zip -A "$_abs_pomerium_binary_path"
)

View file

@ -0,0 +1,24 @@
#!/bin/bash
# acme.sh : https://github.com/Neilpang/acme.sh
# curl https://get.acme.sh | sh
# NOTA BENE:
# if you use a DNS service that supports API access, you may be able to automate
# this process. See https://github.com/Neilpang/acme.sh/wiki/dnsapi
#
# NOTE(review): no `set -e` here on purpose — acme.sh's manual DNS mode is
# expected to exit non-zero after printing the TXT record to publish; verify
# before tightening error handling.
echo "=> first generate a certificate signing request!"
# First pass: prints the DNS TXT challenge record that must be added manually.
"$HOME/.acme.sh/acme.sh" \
  --issue \
  -k ec-256 \
  -d '*.corp.example.com' \
  --dns \
  --yes-I-know-dns-manual-mode-enough-go-ahead-please
# Wait for the operator to publish the TXT record shown above.
# -r keeps read from mangling any backslashes typed at the prompt.
read -r -p "press any key once you've updated your TXT entries"
# Second pass: acme.sh validates the TXT record and issues the certificate.
"$HOME/.acme.sh/acme.sh" \
  --renew \
  --ecc \
  -k ec-256 \
  -d '*.corp.example.com' \
  --dns \
  --yes-I-know-dns-manual-mode-enough-go-ahead-please

64
scripts/helm_aws.sh Normal file
View file

@ -0,0 +1,64 @@
#!/bin/bash
# PRE-REQ:
# 1) Install Helm : You should verify the content of this script before running.
# curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
# 2) Install https://eksctl.io/
# For more information see:
# - https://eksworkshop.com/helm_root/helm_intro/install/
echo "=> [AWS] creating cluster"
eksctl create cluster --name=pomerium --nodes=1 --region=us-west-2
echo "=> [AWS] get cluster credentials so we can use kubectl locally"
eksctl utils write-kubeconfig --name=pomerium
echo "=> [AWS] configure Helm access with RBAC"
# Apply the RBAC manifest straight from stdin instead of writing and deleting
# a temp file in the working directory.
# NOTE(review): rbac.authorization.k8s.io/v1beta1 is deprecated on modern
# clusters — confirm the target kubernetes version still serves it.
kubectl apply -f - <<EOF
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: tiller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: tiller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: tiller
    namespace: kube-system
EOF
echo "=> initialize Helm to install Tiller in your cluster"
helm init --service-account=tiller
helm repo update
echo "=> install pomerium with helm substituting configuration values as required; be sure to change these"
# Every command substitution is quoted so generated values can never be
# word-split by the shell.
helm install "$HOME/charts/stable/pomerium/" \
  --name pomerium \
  --set config.sharedSecret="$(head -c32 /dev/urandom | base64)" \
  --set config.cookieSecret="$(head -c32 /dev/urandom | base64)" \
  --set config.cert="$(base64 -i cert.pem)" \
  --set config.key="$(base64 -i privkey.pem)" \
  --set config.policy="$(base64 <policy.example.yaml)" \
  --set authenticate.idp.provider="google" \
  --set authenticate.proxyRootDomains="pomerium.io" \
  --set authenticate.redirectUrl="https://auth.corp.pomerium.io/oauth2/callback" \
  --set authenticate.idp.clientID="REPLACE_ME" \
  --set authenticate.idp.clientSecret="REPLACE_ME" \
  --set proxy.authenticateServiceUrl="https://auth.corp.pomerium.io" \
  --set proxy.authorizeServiceUrl="https://access.corp.pomerium.io"
# When done, clean up by deleting the cluster!
#
# helm del $(helm ls --all --short) --purge #!!! DELETES ALL YOUR HELM INSTANCES!

42
scripts/helm_gke.sh Executable file
View file

@ -0,0 +1,42 @@
#!/bin/bash
# PRE-REQ: Install Helm : You should verify the content of this script before running.
# curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
# NOTE! This will create real resources on Google's cloud. Make sure you clean up any unused
# resources to avoid being billed. For reference, this tutorial cost me <10 cents for a couple of hours.
# NOTE! You must change the identity provider client secret setting, and service account setting!
# NOTE! If you are using gsuite, you should also set `authenticate.idp.serviceAccount`, see docs !
echo "=> [GCE] creating cluster"
gcloud container clusters create pomerium --region us-west2
echo "=> [GCE] get cluster credentials so we can use kubectl locally"
gcloud container clusters get-credentials pomerium --region us-west2
echo "=> add pomerium's helm repo"
helm repo add pomerium https://helm.pomerium.io
echo "=> update helm"
helm repo update
echo "=> install pomerium with helm"
echo "=> initialize a configmap setting from config.example.yaml"
kubectl create configmap config --from-file="config.yaml"="docs/configuration/examples/kubernetes/kubernetes-config.yaml"
# Every command substitution is quoted so generated secrets / base64 output
# can never be word-split. The literal '*' in the .acme.sh paths is a real
# directory name created by acme.sh for the wildcard certificate.
helm install \
  pomerium \
  pomerium/pomerium \
  --set service.type="NodePort" \
  --set config.rootDomain="corp.beyondperimeter.com" \
  --set config.existingConfig="config" \
  --set config.sharedSecret="$(head -c32 /dev/urandom | base64)" \
  --set config.cookieSecret="$(head -c32 /dev/urandom | base64)" \
  --set ingress.secret.name="pomerium-tls" \
  --set ingress.secret.cert="$(base64 -i "$HOME/.acme.sh/*.corp.beyondperimeter.com_ecc/fullchain.cer")" \
  --set ingress.secret.key="$(base64 -i "$HOME/.acme.sh/*.corp.beyondperimeter.com_ecc/*.corp.beyondperimeter.com.key")" \
  --set-string ingress.annotations."kubernetes\.io/ingress\.allow-http"=false \
  --set authenticate.service.annotations."cloud\.google\.com/app-protocols"='\{"https":"HTTPS"\}' \
  --set proxy.service.annotations."cloud\.google\.com/app-protocols"='\{"https":"HTTPS"\}'
# When done, clean up by deleting the cluster!
# helm del $(helm ls --all --short) --purge # deletes all your helm instances
# gcloud container clusters delete pomerium # deletes your cluster