I referenced the router deployment guide for Kubernetes and deployed the Ziti router, deviating from the guide only by rendering the chart with helm template and applying it with kubectl apply, so I believe the invalid token error you encountered is legitimate.
Here's how I deployed the router. In this example, *.192.168.49.2.sslip.io is a DNS wildcard record resolving to 192.168.49.2, the external load balancer address of my test cluster.
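# create the router and save its one-time enrollment token (JWT)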
ziti edge create edge-router "router1" \
--tunneler-enabled \
--jwt-output-file ./router1.jwt
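# render the chart to a local manifest instead of installing a Helm release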
helm template \
"ziti-router1" \
openziti/ziti-router \
--set-file enrollmentJwt=./router1.jwt \
--set ctrl.endpoint=miniziti-controller.192.168.49.2.sslip.io:443 \
--set edge.advertisedHost=router1.192.168.49.2.sslip.io \
| tee ./ziti-router1.yml
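# apply the rendered manifest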
kubectl apply -f ./ziti-router1.yml
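Before enrolling, it's worth sanity-checking that both advertised hostnames resolve to the load balancer. A quick sketch in Python, using the hostnames from my example:

import socket

# both names are covered by the *.192.168.49.2.sslip.io wildcard record
for host in ("miniziti-controller.192.168.49.2.sslip.io",
             "router1.192.168.49.2.sslip.io"):
    addr = socket.gethostbyname(host)
    print(host, "->", addr)
    assert addr == "192.168.49.2", f"unexpected address for {host}: {addr}"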
Let's confirm your router's enrollment token is a valid JWT, is an "edge router one-time token" (erott), and is not expired.
You can paste the token into jwt.io for inspection. Here's mine:
{
"header": {
"alg": "RS256",
"kid": "6523a8808fed727dbcfb43e6d6b1ca47f9ff9fae",
"typ": "JWT"
},
"payload": {
"iss": "https://miniziti-controller.192.168.49.2.sslip.io:443",
"sub": "D6V8UnrJ9",
"aud": [
""
],
"exp": 1733771599,
"jti": "4d90d498-ae62-46d9-af97-f1963aeb562f",
"em": "erott",
"ctrls": null
},
"analysis": {
"signature_valid": true,
"enrollment_method": "one-time token for a router",
"expiration": "valid until 2024-12-09T14:13:19 (2 hours)"
}
}
I used a short Python script to check the token signature.
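Something along these lines works as a sketch (not the exact script; controller.pem is an assumed filename standing in for the certificate whose key signed the token, which you'd need to extract from the controller yourself):

import jwt  # pip install pyjwt cryptography
from cryptography.x509 import load_pem_x509_certificate

with open("router1.jwt") as f:
    token = f.read().strip()

# load the signing certificate matching the token's "kid" header
with open("controller.pem", "rb") as f:
    public_key = load_pem_x509_certificate(f.read()).public_key()

# decode() verifies the RS256 signature and rejects an expired "exp" claim;
# the token's empty "aud" claim would fail audience validation, so skip it
claims = jwt.decode(
    token,
    key=public_key,
    algorithms=["RS256"],
    options={"verify_aud": False},
)
print("signature valid; em =", claims["em"], "jti =", claims["jti"])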
EDIT: Here's the entire manifest, ziti-router1.yml.
---
# Source: ziti-router/templates/configmap.yaml
# Chart name: ziti-router
apiVersion: v1
kind: ConfigMap
metadata:
name: ziti-router1-config
labels:
helm.sh/chart: ziti-router-1.1.3
app.kubernetes.io/name: ziti-router
app.kubernetes.io/instance: ziti-router1
app.kubernetes.io/version: "1.1.15"
app.kubernetes.io/managed-by: Helm
data:
ziti-router.yaml: |2-
v: 3
identity:
# expected filename defined in SetZitiRouterIdentityCert()
cert: /etc/ziti/config/ziti-router1.cert
# expected filename defined in SetZitiRouterIdentityServerCert()
server_cert: /etc/ziti/config/ziti-router1.server.chain.cert
# expected filename defined in SetZitiRouterIdentityKey()
key: /etc/ziti/config/ziti-router1.key
# expected filename defined in SetZitiRouterIdentityCA()
ca: /etc/ziti/config/ziti-router1.cas
ha:
enabled: false
ctrl:
# router control plane API (:6262)
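      # (port 443 here because the ctrl plane is published through the cluster's TLS load balancer)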
endpoint: tls:miniziti-controller.192.168.49.2.sslip.io:443
link:
dialers:
- binding: transport
# When 'transport' is disabled this means we are a 'private' router, i.e.,
# not providing incoming links to other routers. Private routers still
# join the mesh, but only form outgoing links.
listeners:
- binding: transport
bind: tls:0.0.0.0:3022
advertise: tls:router1.192.168.49.2.sslip.io:443
options: {}
listeners:
# bindings of edge and tunnel requires an "edge" section below
- binding: edge
address: tls:0.0.0.0:3022
options:
advertise: router1.192.168.49.2.sslip.io:443
edge:
csr:
sans:
dns:
- localhost
- router1.192.168.49.2.sslip.io
- router1.192.168.49.2.sslip.io # end if .Values.csr.sans.noDefaults
ip:
- 127.0.0.1 # end if .Values.csr.sans.noDefaults
email:
uri:
forwarder:
latencyProbeInterval: 10
linkDialQueueLength: 1000
linkDialWorkerCount: 32
rateLimitedQueueLength: 5000
rateLimitedWorkerCount: 64
xgressDialQueueLength: 1000
xgressDialWorkerCount: 128
---
# Source: ziti-router/templates/pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: ziti-router1
namespace: "miniziti"
labels:
helm.sh/chart: ziti-router-1.1.3
app.kubernetes.io/name: ziti-router
app.kubernetes.io/instance: ziti-router1
app.kubernetes.io/version: "1.1.15"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "ziti-router"
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "50Mi"
---
# Source: ziti-router/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: ziti-router1-edge
labels:
helm.sh/chart: ziti-router-1.1.3
app.kubernetes.io/name: ziti-router
app.kubernetes.io/instance: ziti-router1
app.kubernetes.io/version: "1.1.15"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 443
targetPort: 3022
protocol: TCP
name: edge
selector:
app.kubernetes.io/name: ziti-router
app.kubernetes.io/instance: ziti-router1
app.kubernetes.io/component: "ziti-router"
---
# Source: ziti-router/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: ziti-router1-transport
labels:
helm.sh/chart: ziti-router-1.1.3
app.kubernetes.io/name: ziti-router
app.kubernetes.io/instance: ziti-router1
app.kubernetes.io/version: "1.1.15"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 443
targetPort: 3022
protocol: TCP
name: transport
selector:
app.kubernetes.io/name: ziti-router
app.kubernetes.io/instance: ziti-router1
app.kubernetes.io/component: "ziti-router"
---
# Source: ziti-router/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: ziti-router1
labels:
helm.sh/chart: ziti-router-1.1.3
app.kubernetes.io/name: ziti-router
app.kubernetes.io/instance: ziti-router1
app.kubernetes.io/version: "1.1.15"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "ziti-router"
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/name: ziti-router
app.kubernetes.io/instance: ziti-router1
app.kubernetes.io/component: "ziti-router"
template:
metadata:
annotations:
configmap-checksum: f1487bb4d9b3630e5adbf3d78cef00658fede863696dd87962808820ce6c4fca
labels:
app.kubernetes.io/name: ziti-router
app.kubernetes.io/instance: ziti-router1
app.kubernetes.io/component: "ziti-router"
spec:
securityContext:
fsGroup: 2171
hostNetwork: false
dnsPolicy: ClusterFirstWithHostNet
dnsConfig: {}
containers:
- name: ziti-router
securityContext: null
image: docker.io/openziti/ziti-router:1.1.15
imagePullPolicy: Always
ports: null
resources: {}
command:
- "/entrypoint.bash"
args:
- run
- '/etc/ziti/config/ziti-router.yaml'
env:
- name: ZITI_ENROLL_TOKEN
value: "eyJhbGciOiJSUzI1NiIsImtpZCI6IjY1MjNhODgwOGZlZDcyN2RiY2ZiNDNlNmQ2YjFjYTQ3ZjlmZjlmYWUiLCJ0eXAiOiJKV1QifQ.eyJpc3MiOiJodHRwczovL21pbml6aXRpLWNvbnRyb2xsZXIuMTkyLjE2OC40OS4yLnNzbGlwLmlvOjQ0MyIsInN1YiI6IkQ2VjhVbnJKOSIsImF1ZCI6WyIiXSwiZXhwIjoxNzMzNzcxNTk5LCJqdGkiOiI0ZDkwZDQ5OC1hZTYyLTQ2ZDktYWY5Ny1mMTk2M2FlYjU2MmYiLCJlbSI6ImVyb3R0IiwiY3RybHMiOm51bGx9.yrpTf3yp9wLTUloaqw2h-ioKRc79pevtxqVh0jViwjDMyKriFtrGRJGfo88Zwj8kQ6HItgqtoByWhw_LLy5uNo5VV81HjXaqePeIOQ3Ls0E2ju0Ry6aIHzIJWk-BDn9ni0_H2EYmInSZE5hK28Kfg8RQ3MyKeO8OPJGmfGu87khFWvGcgetOtUKSbK52AbGeYLXciiE4oLTpv9gkVZAtdnPHkYvpZBITWeNh5RF2cs7TgQWZECi7JmVrNNnOeV2hJhRTOvcYN9yweKi6n9GjE2h9Y2fXXorVrBArW8dxYgScCWYuwnIDXUpIjAj5CXtlphBHw0hHB6UBzozVxnhXGgIh_mlAyqfwInNhilc6fC1x6FZMH1sjdQnyX4v8onY6RAsAHwwa54-qmfeHcp_IHVWm7c_ldwD3b_ZycNPX_GH5lXtQ0C2DDifVByKO_Jg572RrqYNehrmmyGA9kOdXiXihe7roNfd_IRtbtxYRXGpxmwbgrdvCgYSd-s0vLR_D1RvWt7FwGqq48S0Kuwo9KqU2ebVlfMdpDewe3arxmQHED6TVGdd4w4ABzarGuK5hffQezynaR5BpEU0SMt5YwLyB-H8X3LMXXEts6ciSfp11QM-ecRFNDwwOHKCuwUKNl3F-ge0mmv1TJgC0tICKIA83YMCNR2EXd1lOMSg2cvA"
# must be true or enroll() will not be called
- name: ZITI_BOOTSTRAP
value: "true"
# -- enroll with controller if "true," overwrite if "force"; requires ZITI_BOOTSTRAP=true
- name: ZITI_BOOTSTRAP_ENROLLMENT
value: "true"
# suppress generating a config.yml because in K8s we mount a configMap rendered by Helm
- name: ZITI_BOOTSTRAP_CONFIG
value: "false"
# entrypoint will append --extend to run command if "true"
- name: ZITI_AUTO_RENEW_CERTS
value: "true"
# used by entrypoint's enroll() function to predict the path to the enrolled identity's cert
- name: ZITI_HOME
value: "/etc/ziti/config"
- name: ZITI_ROUTER_NAME
value: "ziti-router1"
volumeMounts:
- mountPath: /etc/ziti/config
name: config-data
readOnly: false
- mountPath: /etc/ziti/config/ziti-router.yaml
name: ziti-router-config
subPath: ziti-router.yaml # project the read-only config into the writeable volume to allow router to write ./endpoints state file in same dir as config
# deployment condition ready and receive traffic when this passes
readinessProbe:
exec:
command:
- /bin/sh
- -c
- ziti agent stats
initialDelaySeconds: 10
periodSeconds: 10
# delete pod if this fails
livenessProbe:
exec:
command:
- /bin/sh
- -c
- ziti agent stats
initialDelaySeconds: 10
periodSeconds: 10
volumes:
- name: ziti-router-config
configMap:
name: ziti-router1-config
defaultMode: 0444
- name: config-data
persistentVolumeClaim:
claimName: ziti-router1
---
---
# Source: ziti-router/templates/pre-upgrade-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: ziti-router1-hook-serviceaccount
namespace: miniziti
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-weight": "-8"
"helm.sh/hook-delete-policy": before-hook-creation, hook-succeeded
---
# Source: ziti-router/templates/pre-upgrade-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: ziti-router1-pre-upgrade-hook
labels:
helm.sh/chart: ziti-router-1.1.3
app.kubernetes.io/name: ziti-router
app.kubernetes.io/instance: ziti-router1
app.kubernetes.io/version: "1.1.15"
app.kubernetes.io/managed-by: Helm
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-weight": "-8"
"helm.sh/hook-delete-policy": before-hook-creation, hook-succeeded
data:
migrate-identity.bash: |-
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
set -o xtrace
# - copy the private key from the hook-managed secret to the persistent volume
# - rename router identity files to match the ziti config generator's conventions
# - remove the hook-managed secret containing the private key, signaling the migration is complete
trap 'echo "ERROR: ${BASH_SOURCE[0]}:${LINENO} exited with code $?" >&2;' ERR
function noClobber() {
local src=$1
local dst=$2
if [[ -s "${src}" ]]
then
if [[ -s "${dst}" ]]
then
echo "ERROR: ${dst} already exists, refusing to overwrite"
return 1
else
echo "INFO: renaming ${src}"
mv "${src}" "${dst}"
fi
else
echo "INFO: ${src} is empty or does not exist, skipping"
fi
}
if kubectl -n miniziti get secret \
ziti-router1-identity &>/dev/null
then
# prior versions of the chart stored certs in a Secret resource, so this copies those certs to the persistent
# volume unless a file already exists in the persistent volume
typeset -a KEYS=(
$(
kubectl -n miniziti get secret \
ziti-router1-identity \
--output go-template='{{range $k,$v := .data}}{{if $v}}{{printf "%s " $k}}{{end}}{{end}}'
)
)
echo "DEBUG: found identity secret dict keys: ${KEYS[*]}"
for KEY in ${KEYS[@]}; do
if [[ ${KEY} =~ ^tls\.key$ ]]
then
kubectl -n miniziti get secret ziti-router1-identity \
--output go-template='{{index .data "'${KEY}'" | base64decode }}' \
> "/etc/ziti/config/ziti-router1.key"
fi
done
declare -A ID_FILES=(
[client.crt]=ziti-router1.cert
[tls.crt]=ziti-router1.server.chain.cert
[ca.crt]=ziti-router1.cas
)
for KEY in ${!ID_FILES[@]}; do
noClobber "/etc/ziti/config/${KEY}" "/etc/ziti/config/${ID_FILES[${KEY}]}"
done
kubectl -n miniziti delete secret \
ziti-router1-identity
else
echo "INFO: identity secret does not exist"
fi
---
# Source: ziti-router/templates/pre-upgrade-serviceaccount.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: ziti-router1-hook-role
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-weight": "-7"
"helm.sh/hook-delete-policy": before-hook-creation, hook-succeeded
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "delete"]
---
# Source: ziti-router/templates/pre-upgrade-serviceaccount.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: ziti-router1-hook-rolebinding
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-weight": "-6"
"helm.sh/hook-delete-policy": before-hook-creation, hook-succeeded
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ziti-router1-hook-role
subjects:
- kind: ServiceAccount
name: ziti-router1-hook-serviceaccount
namespace: miniziti
---
# Source: ziti-router/templates/pre-upgrade-job.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: ziti-router1-pre-upgrade-job
labels:
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "ziti-router1"
app.kubernetes.io/version: 1.1.15
helm.sh/chart: "ziti-router-1.1.3"
annotations:
# This is what defines this resource as a hook. Without this line, the
# job is considered part of the release.
"helm.sh/hook": pre-upgrade
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": before-hook-creation
spec:
backoffLimit: 0
completions: 1
ttlSecondsAfterFinished: 600
template:
metadata:
name: ziti-router1
labels:
        app.kubernetes.io/name: ziti-router
        app.kubernetes.io/instance: ziti-router1
        app.kubernetes.io/version: "1.1.15"
        app.kubernetes.io/managed-by: Helm
        helm.sh/chart: ziti-router-1.1.3
spec:
restartPolicy: Never
serviceAccountName: ziti-router1-hook-serviceaccount
containers:
- name: pre-upgrade-job
image: docker.io/openziti/ziti-router:1.1.15
imagePullPolicy: Always
volumeMounts:
- mountPath: /usr/local/bin/migrate-identity.bash
name: migrate-script
subPath: migrate-identity.bash
- mountPath: /etc/ziti/config
name: config-data
readOnly: false
command: ["migrate-identity.bash"]
# command: ["sh", "-c", "while true; do sleep 86400; done"]
volumes:
- name: migrate-script
configMap:
name: ziti-router1-pre-upgrade-hook
items:
- key: migrate-identity.bash
path: migrate-identity.bash
mode: 0555
- name: config-data
persistentVolumeClaim:
claimName: ziti-router1