replicaCount: 1
strategy:
  type: RollingUpdate
  rollingUpdate:
    maxUnavailable: 0
image:
  repository: chartmuseum/chartmuseum
  tag: v0.8.0
  pullPolicy: IfNotPresent
env:
  open:
    # storage backend, can be one of: local, alibaba, amazon, google, microsoft
    STORAGE: local
    # oss bucket to store charts for alibaba storage backend
    STORAGE_ALIBABA_BUCKET:
    # prefix to store charts for alibaba storage backend
    STORAGE_ALIBABA_PREFIX:
    # oss endpoint to store charts for alibaba storage backend
    STORAGE_ALIBABA_ENDPOINT:
    # server side encryption algorithm for alibaba storage backend, can be one
    # of: AES256 or KMS
    STORAGE_ALIBABA_SSE:
    # s3 bucket to store charts for amazon storage backend
    STORAGE_AMAZON_BUCKET:
    # prefix to store charts for amazon storage backend
    STORAGE_AMAZON_PREFIX:
    # region of s3 bucket to store charts
    STORAGE_AMAZON_REGION:
    # alternative s3 endpoint
    STORAGE_AMAZON_ENDPOINT:
    # server side encryption algorithm
    STORAGE_AMAZON_SSE:
    # gcs bucket to store charts for google storage backend
    STORAGE_GOOGLE_BUCKET:
    # prefix to store charts for google storage backend
    STORAGE_GOOGLE_PREFIX:
    # container to store charts for microsoft storage backend
    STORAGE_MICROSOFT_CONTAINER:
    # prefix to store charts for microsoft storage backend
    STORAGE_MICROSOFT_PREFIX:
    # container to store charts for openstack storage backend
    STORAGE_OPENSTACK_CONTAINER:
    # prefix to store charts for openstack storage backend
    STORAGE_OPENSTACK_PREFIX:
    # region of openstack container
    STORAGE_OPENSTACK_REGION:
    # path to a CA cert bundle for your openstack endpoint
    STORAGE_OPENSTACK_CACERT:
    # form field which will be queried for the chart file content
    CHART_POST_FORM_FIELD_NAME: chart
    # form field which will be queried for the provenance file content
    PROV_POST_FORM_FIELD_NAME: prov
    # levels of nested repos for multitenancy. The default depth is 0 (singletenant server)
    DEPTH: 0
    # show debug messages
    DEBUG: false
    # output structured logs as json
    LOG_JSON: true
    # disable use of index-cache.yaml
    DISABLE_STATEFILES: false
    # disable Prometheus metrics
    DISABLE_METRICS: true
    # disable all routes prefixed with /api
    DISABLE_API: true
    # allow chart versions to be re-uploaded
    ALLOW_OVERWRITE: false
    # absolute url for .tgzs in index.yaml
    CHART_URL:
    # allow anonymous GET operations when auth is used
    AUTH_ANONYMOUS_GET: false
    # sets the base context path
    CONTEXT_PATH:
    # parallel scan limit for the repo indexer
    INDEX_LIMIT: 0
    # cache store, can be one of: redis (leave blank for inmemory cache)
    CACHE:
    # address of Redis service (host:port)
    CACHE_REDIS_ADDR:
    # Redis database to be selected after connect
    CACHE_REDIS_DB: 0
  field:
  # POD_IP: status.podIP
  secret:
    # username for basic http authentication
    BASIC_AUTH_USER:
    # password for basic http authentication
    BASIC_AUTH_PASS:
    # GCP service account json file
    GOOGLE_CREDENTIALS_JSON:
    # Redis requirepass server configuration
    CACHE_REDIS_PASSWORD:
deployment:
  ## Chartmuseum Deployment annotations
  annotations: {}
  #   name: value
  labels: {}
  #   name: value
  # NOTE(review): key name "matchlabes" looks like a typo for "matchLabels",
  # but it is kept as-is because chart templates may reference it — confirm
  # against the templates before renaming.
  matchlabes: {}
  #   name: value
replica:
  ## Chartmuseum Replicas annotations
  annotations: {}
  ## Read more about kube2iam to provide access to s3 https://github.com/jtblin/kube2iam
  #   iam.amazonaws.com/role: role-arn
service:
  servicename:
  type: ClusterIP
  # clusterIP: None
  externalPort: 8080
  nodePort:
  annotations: {}
  labels: {}

resources: {}
#  limits:
#    cpu: 100m
#    memory: 128Mi
#  requests:
#    cpu: 80m
#    memory: 64Mi

probes:
  liveness:
    initialDelaySeconds: 5
    periodSeconds: 10
    timeoutSeconds: 1
    successThreshold: 1
    failureThreshold: 3
  readiness:
    initialDelaySeconds: 5
    periodSeconds: 10
    timeoutSeconds: 1
    successThreshold: 1
    failureThreshold: 3

serviceAccount:
  create: false
  # name:

## UID/GID 1000 is the default user "chartmuseum" used in
## the container image starting in v0.8.0 and above. This
## is required for local persistent storage. If your cluster
## does not allow this, try setting securityContext: {}
securityContext:
  fsGroup: 1000

nodeSelector: {}

tolerations: []

affinity: {}

persistence:
  enabled: false
  accessMode: ReadWriteOnce
  size: 8Gi
  labels: {}
  #   name: value
  ## A manually managed Persistent Volume and Claim
  ## Requires persistence.enabled: true
  ## If defined, PVC must be created manually before volume will be bound
  # existingClaim:

  ## Chartmuseum data Persistent Volume Storage Class
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
  ##   GKE, AWS & OpenStack)
  ##
  # storageClass: "-"
  # volumeName:
  pv:
    enabled: false
    pvname:
    capacity:
      storage: 8Gi
    accessMode: ReadWriteOnce
    nfs:
      server:
      path:

## Ingress for load balancer
ingress:
  enabled: false
  ## Chartmuseum Ingress labels
  ##
  # labels:
  #   dns: "route53"

  ## Chartmuseum Ingress annotations
  ##
  # annotations:
  #   kubernetes.io/ingress.class: nginx
  #   kubernetes.io/tls-acme: "true"

  ## Chartmuseum Ingress hostnames
  ## Must be provided if Ingress is enabled
  ##
  # hosts:
  #   chartmuseum.domain.com:
  #     - /charts
  #     - /index.yaml

  ## Chartmuseum Ingress TLS configuration
  ## Secrets must be manually created in the namespace
  ##
  # tls:
  #   - secretName: chartmuseum-server-tls
  #     hosts:
  #       - chartmuseum.domain.com

# Adding secrets to tiller is not a great option, so If you want to use an existing
# secret that contains the json file, you can use the following entries
gcp:
  secret:
    enabled: false
    # Name of the secret that contains the encoded json
    name:
    # Secret key that holds the json value.
    key: credentials.json