---
# Source: concourse/templates/worker-statefulset.yaml
# Helm-rendered (Tiller heritage) Concourse worker StatefulSet fixture.
# NOTE(review): apps/v1beta1 is a deprecated API group; kept as-is because this
# is an expected-output test fixture and must match the chart's rendering.
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: concourse-worker
  labels:
    app: concourse-worker
    chart: "concourse-3.7.2"
    release: "concourse"
    heritage: "Tiller"

spec:
  serviceName: concourse-worker
  replicas: 2
  template:
    metadata:
      labels:
        app: concourse-worker
        release: "concourse"
      # Rendered empty by the chart (no pod annotations configured).
      annotations:
    spec:
      serviceAccountName: concourse-worker
      tolerations:
        []

      terminationGracePeriodSeconds: 60
      containers:
        - name: concourse-worker
          image: "concourse/concourse:4.2.2"
          imagePullPolicy: "IfNotPresent"
          command:
            - /bin/sh
          args:
            - -c
            # Pre-start: clear stale work dir and retire any previous worker
            # registration before launching; output tee'd for the liveness probe.
            - |-
              cp /dev/null /tmp/.liveness_probe
              rm -rf ${CONCOURSE_WORK_DIR:-/concourse-work-dir}/*
              while ! concourse retire-worker --name=${HOSTNAME} | grep -q worker-not-found; do
                touch /tmp/.pre_start_cleanup
                sleep 5
              done
              rm -f /tmp/.pre_start_cleanup
              concourse worker --name=${HOSTNAME} | tee -a /tmp/.liveness_probe
          livenessProbe:
            exec:
              command:
                - /bin/sh
                - -c
                # Fails the probe if any configured fatal error string appears
                # in the worker log, or if pre-start cleanup is still looping.
                - |-
                  FATAL_ERRORS=$( echo "${LIVENESS_PROBE_FATAL_ERRORS}" | grep -q '\S' && \
                      grep -F "${LIVENESS_PROBE_FATAL_ERRORS}" /tmp/.liveness_probe )
                  cp /dev/null /tmp/.liveness_probe
                  if [ ! -z "${FATAL_ERRORS}" ]; then
                    >&2 echo "Fatal error detected: ${FATAL_ERRORS}"
                    exit 1
                  fi
                  if [ -f /tmp/.pre_start_cleanup ]; then
                    >&2 echo "Still trying to clean up before starting concourse. 'fly prune-worker -w ${HOSTNAME}' might need to be called to force cleanup."
                    exit 1
                  fi
            failureThreshold: 1
            initialDelaySeconds: 10
            periodSeconds: 10
          lifecycle:
            preStop:
              exec:
                command:
                  - /bin/sh
                  - -c
                  # Graceful shutdown: retire the worker from the TSA before
                  # the container is killed.
                  - |-
                    while ! concourse retire-worker --name=${HOSTNAME} | grep -q worker-not-found; do
                      sleep 5
                    done
          env:
            - name: CONCOURSE_WORK_DIR
              value: "/concourse-work-dir"

            - name: CONCOURSE_TSA_HOST
              value: "concourse-web:2222"
            - name: CONCOURSE_TSA_PUBLIC_KEY
              value: "/concourse-keys/host_key.pub"
            - name: CONCOURSE_TSA_WORKER_PRIVATE_KEY
              value: "/concourse-keys/worker_key"
            - name: CONCOURSE_GARDEN_BIND_PORT
              value: "7777"
            - name: CONCOURSE_BAGGAGECLAIM_DRIVER
              value: "naive"
            - name: LIVENESS_PROBE_FATAL_ERRORS
              # Double quotes required: \n must expand to real newlines so the
              # probe's grep -F matches each pattern on its own line.
              value: "guardian.api.garden-server.create.failed\nbaggageclaim.api.volume-server.create-volume-async.failed-to-create"
          resources:
            requests:
              cpu: 100m
              memory: 512Mi

          # Privileged is required for Garden container management on the worker.
          securityContext:
            privileged: true
          volumeMounts:
            - name: concourse-keys
              mountPath: "/concourse-keys"
              readOnly: true
            - name: concourse-work-dir
              mountPath: "/concourse-work-dir"
      # Prefer spreading workers across nodes (soft anti-affinity).
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                topologyKey: kubernetes.io/hostname
                labelSelector:
                  matchLabels:
                    app: concourse-worker
                    release: "concourse"
      volumes:
        - name: concourse-keys
          secret:
            secretName: concourse-concourse
            # Keys are secrets: owner-read-only.
            defaultMode: 0400
            items:
              - key: host-key-pub
                path: host_key.pub
              - key: worker-key
                path: worker_key
              - key: worker-key-pub
                path: worker_key.pub
  volumeClaimTemplates:
    - metadata:
        name: concourse-work-dir
      spec:
        accessModes:
          - "ReadWriteOnce"
        resources:
          requests:
            storage: "20Gi"
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: Parallel