github.com/replicatedcom/ship@v0.50.0/integration/unfork/elastic-stack/expected/.ship/state.json

     1  {
     2    "v1": {
     3      "config": {},
     4      "helmValues": "# Default values for elastic-stack.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\ningress:\n  enabled: false\n  # Used to create Ingress record (should used with service.type: ClusterIP).\n  hosts:\n  - chart-example.local\n  annotations:\n    # kubernetes.io/ingress.class: nginx\n    # kubernetes.io/tls-acme: \"true\"\n  tls:\n    # Secrets must be manually created in the namespace.\n    # - secretName: chart-example-tls\n    #   hosts:\n    #     - chart-example.local\n\n# elasticsearch subchart\nelasticsearch:\n  image:\n    repository: gcr.io/cos-containers/elasticsearch\n    tag: 5.4.2-xpack\n    pullPolicy: Always\n  env:\n    XPACK_MONITORING_ENABLED: \"true\"\n\n# kibana subchart\nkibana:\n  image:\n    repository: docker.elastic.co/kibana/kibana\n    tag: 5.4.2\n    pullPolicy: IfNotPresent\n  env:\n    XPACK_MONITORING_ENABLED: \"true\"\n",
     5      "releaseName": "elastic-stack",
     6      "helmValuesDefaults": "# Default values for elastic-stack.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\ningress:\n  enabled: false\n  # Used to create Ingress record (should used with service.type: ClusterIP).\n  hosts:\n  - chart-example.local\n  annotations:\n    # kubernetes.io/ingress.class: nginx\n    # kubernetes.io/tls-acme: \"true\"\n  tls:\n    # Secrets must be manually created in the namespace.\n    # - secretName: chart-example-tls\n    #   hosts:\n    #     - chart-example.local\n\n# elasticsearch subchart\nelasticsearch:\n  image:\n    repository: gcr.io/cos-containers/elasticsearch\n    tag: 5.4.2-xpack\n    pullPolicy: Always\n  env:\n    XPACK_MONITORING_ENABLED: \"true\"\n\n# kibana subchart\nkibana:\n  image:\n    repository: docker.elastic.co/kibana/kibana\n    tag: 5.4.2\n    pullPolicy: IfNotPresent\n  env:\n    XPACK_MONITORING_ENABLED: \"true\"\n",
     7      "kustomize": {
     8        "overlays": {
     9          "ship": {
    10            "excludedBases": [
    11              "/charts/elasticsearch/templates/client-serviceaccount.yaml",
    12              "/charts/elasticsearch/templates/data-serviceaccount.yaml",
    13              "/charts/elasticsearch/templates/master-serviceaccount.yaml",
    14              "/charts/elasticsearch/templates/master-svc.yaml",
    15              "/charts/kibana/templates/configmap.yaml",
    16              "/charts/logstash/templates/patterns-config.yaml",
    17              "/charts/logstash/templates/pipeline-config.yaml",
    18              "/charts/logstash/templates/poddisruptionbudget.yaml",
    19              "/charts/logstash/templates/service.yaml",
    20              "/charts/logstash/templates/statefulset.yaml"
    21            ],
    22            "patches": {
    23              "/client-deployment.yaml": "apiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n  labels:\n    app: elasticsearch\n    chart: elasticsearch-1.16.0\n    component: client\n    heritage: Tiller\n    release: elastic-stack\n  name: elastic-stack-elasticsearch-client\nspec:\n  template:\n    metadata:\n      annotations:\n        checksum/config: 4f07b9e19327171c37a9c353906c75a1f454cd31c3dfc600a8882d6e36713c49\n        checksum/secret: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n    spec:\n      $setElementOrder/containers:\n      - name: elasticsearch\n      $setElementOrder/initContainers:\n      - name: increase-memory-limits\n      containers:\n      - $setElementOrder/env:\n        - name: DISCOVERY_SERVICE\n        - name: NODE_DATA\n        - name: NODE_INGEST\n        - name: ES_HEAP_SIZE\n        - name: NODE_MASTER\n        - name: PROCESSORS\n        - name: ES_JAVA_OPTS\n        - name: MINIMUM_MASTER_NODES\n        $setElementOrder/volumeMounts:\n        - mountPath: /usr/share/elasticsearch/config/elasticsearch.yml\n        - mountPath: /usr/share/elasticsearch/config/log4j2.properties\n        env:\n        - name: DISCOVERY_SERVICE\n          value: elastic-stack-elasticsearch-master.default.svc.cluster.local\n        - name: NODE_INGEST\n          value: \"false\"\n        - name: ES_HEAP_SIZE\n          value: 512m\n        - name: ES_JAVA_OPTS\n          value: -Djava.net.preferIPv4Stack=true\n        image: gcr.io/cos-containers/elasticsearch:5.4.2-xpack\n        imagePullPolicy: Always\n        livenessProbe:\n          exec:\n            command:\n            - sh\n            - -c\n            - curl --request GET --silent --output /dev/null http://127.0.0.1:9200/_cluster/health?wait_for_status=yellow\n          httpGet: null\n        name: elasticsearch\n        readinessProbe:\n          exec:\n            command:\n            - sh\n            - -c\n            - curl --request GET --silent --output /dev/null http://127.0.0.1:9200/_cluster/health?wait_for_status=yellow\n          httpGet: null\n        volumeMounts:\n        - mountPath: /usr/share/elasticsearch/config/elasticsearch.yml\n          readOnly: true\n        - mountPath: /usr/share/elasticsearch/config/log4j2.properties\n          name: config\n          readOnly: true\n          subPath: log4j2.properties\n      initContainers:\n      - command:\n        - sh\n        - -c\n        - |-\n          # see https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html\n          # and https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#mlockall\n          # and https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-cli-run-prod-mode\n          sysctl -w vm.max_map_count=262144\n          # To increase the ulimit\n          # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#_notes_for_production_use_and_defaults\n          ulimit -l unlimited\n        image: busybox\n        name: increase-memory-limits\n        securityContext:\n          privileged: true\n      - $patch: delete\n        name: sysctl\n      serviceAccountName: elastic-stack-elasticsearch\n",
    24              "/client-svc.yaml": "apiVersion: v1\nkind: Service\nmetadata:\n  labels:\n    app: elasticsearch\n    chart: elasticsearch-1.16.0\n    component: client\n    heritage: Tiller\n    release: elastic-stack\n  name: elastic-stack-elasticsearch-client\nspec:\n  $setElementOrder/ports:\n  - port: 9200\n  ports:\n  - name: null\n    port: 9200\n",
    25              "/configmap.yaml": "apiVersion: v1\ndata:\n  elasticsearch.yml: |-\n    cluster.name: elasticsearch\n\n    node.data: ${NODE_DATA:true}\n    node.master: ${NODE_MASTER:true}\n    node.ingest: ${NODE_INGEST:true}\n    node.name: ${HOSTNAME}\n\n    network.host: 0.0.0.0\n    # see https://github.com/kubernetes/kubernetes/issues/3595\n    bootstrap.memory_lock: ${BOOTSTRAP_MEMORY_LOCK:false}\n\n    discovery:\n      zen:\n        ping.unicast.hosts: ${DISCOVERY_SERVICE:}\n        minimum_master_nodes: ${MINIMUM_MASTER_NODES:2}\n\n    # see https://github.com/elastic/elasticsearch-definitive-guide/pull/679\n    processors: ${PROCESSORS:}\n\n    # avoid split-brain w/ a minimum consensus of two masters plus a data node\n    gateway.expected_master_nodes: ${EXPECTED_MASTER_NODES:2}\n    gateway.expected_data_nodes: ${EXPECTED_DATA_NODES:1}\n    gateway.recover_after_time: ${RECOVER_AFTER_TIME:5m}\n    gateway.recover_after_master_nodes: ${RECOVER_AFTER_MASTER_NODES:2}\n    gateway.recover_after_data_nodes: ${RECOVER_AFTER_DATA_NODES:1}\n\n    # Extra Configuration\n\n    # X-Pack\n\n    # Search Guard\n  log4j2.properties: |-\n    status = error\n\n    appender.console.type = Console\n    appender.console.name = console\n    appender.console.layout.type = PatternLayout\n    appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n\n\n    rootLogger.level = info\n    rootLogger.appenderRef.console.ref = console\n  post-start-hook.sh: null\n  pre-stop-hook.sh: \"#!/bin/bash\\nset -e\\n\\nSERVICE_ACCOUNT_PATH=/var/run/secrets/kubernetes.io/serviceaccount\\nKUBE_TOKEN=$(\u003c${SERVICE_ACCOUNT_PATH}/token)\\nKUBE_NAMESPACE=$(\u003c${SERVICE_ACCOUNT_PATH}/namespace)\\n\\nSTATEFULSET_NAME=$(echo\n    \\\"${HOSTNAME}\\\" | sed 's/-[0-9]*$//g')\\nINSTANCE_ID=$(echo \\\"${HOSTNAME}\\\" | grep\n    -o '[0-9]*$')\\n\\necho \\\"Prepare stopping of Pet ${KUBE_NAMESPACE}/${HOSTNAME}\n    of StatefulSet ${KUBE_NAMESPACE}/${STATEFULSET_NAME} instance_id ${INSTANCE_ID}\\\"\\n\\nexport\n    STATEFULSET_STATUS=$(\\n  curl -s \\\\\\n    --cacert ${SERVICE_ACCOUNT_PATH}/ca.crt\n    \\\\\\n    -H \\\"Authorization: Bearer $KUBE_TOKEN\\\" \\\\\\n    \\\"https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_PORT_443_TCP_PORT}/apis/apps/v1beta1/namespaces/${KUBE_NAMESPACE}/statefulsets/${STATEFULSET_NAME}/status\\\"\\n)\\nINSTANCES_DESIRED=$(\\n\\tpython\n    - \u003c\u003c-EOF\\n\\t\\timport json\\n\\t\\timport os\\n\\n\\t\\tobj = json.loads(os.environ.get('STATEFULSET_STATUS'))\\n\\t\\tprint(obj['spec']['replicas'])\\n\\tEOF\\n)\\n\\necho\n    \\\"Desired instance count is ${INSTANCES_DESIRED}\\\"\\n\\nif [ \\\"${INSTANCE_ID}\\\"\n    -lt \\\"${INSTANCES_DESIRED}\\\" ]; then\\n  echo \\\"No data migration needed\\\"\\n  exit\n    0\\nfi\\n\\necho \\\"Prepare to migrate data of the node\\\"\\n\\nexport NODE_STATS=$(\\n\n    \\ curl -X GET -s \\\\\\n    http://127.0.0.1:9200/_nodes/stats\\n)\\nNODE_IP=$(\\n\\tpython\n    - \u003c\u003c-EOF\\n\\t\\timport json\\n\\t\\timport os\\n\\n\\t\\tobj = json.loads(os.environ.get('NODE_STATS'))\\n\\t\\tkey\n    = list(filter(lambda datum: obj['nodes'][datum]['name'] == os.environ.get('HOSTNAME'),\n    obj['nodes'].keys()))[0]\\n\\t\\tnode = obj['nodes'][key]\\n\\t\\tprint(node['host'])\\n\\tEOF\\n)\\n\\necho\n    \\\"Move all data from node ${NODE_IP}\\\"\\n\\ncurl -X PUT -H \\\"Content-Type: application/json\\\"\n    -s \\\\\\n  http://127.0.0.1:9200/_cluster/settings \\\\\\n  --data \\\"{\\n      \\\\\\\"transient\\\\\\\"\n    :{\\n          
\\\\\\\"cluster.routing.allocation.exclude._ip\\\\\\\" : \\\\\\\"${NODE_IP}\\\\\\\"\\n\n    \\     }\\n    }\\\"\\necho\\n\\necho \\\"Wait for node documents to become empty\\\"\\nDOC_COUNT=$(\\n\\tpython\n    - \u003c\u003c-EOF\\n\\t\\timport json\\n\\t\\timport os\\n\\n\\t\\tobj = json.loads(os.environ.get('NODE_STATS'))\\n\\t\\tkey\n    = list(filter(lambda datum: obj['nodes'][datum]['name'] == os.environ.get('HOSTNAME'),\n    obj['nodes'].keys()))[0]\\n\\t\\tnode = obj['nodes'][key]\\n\\t\\tprint(node['indices']['docs']['count'])\\n\\tEOF\\n)\\n\\nwhile\n    [ \\\"${DOC_COUNT}\\\" -gt 0 ]; do\\n  export NODE_STATS=$(\\n    curl -X GET -s \\\\\\n\n    \\     http://127.0.0.1:9200/_nodes/stats\\n  )\\n  DOC_COUNT=$(\\n\\t\\tpython - \u003c\u003c-EOF\\n\\t\\t\\timport\n    json\\n\\t\\t\\timport os\\n\\n\\t\\t\\tobj = json.loads(os.environ.get('NODE_STATS'))\\n\\t\\t\\tkey\n    = list(filter(lambda datum: obj['nodes'][datum]['name'] == os.environ.get('HOSTNAME'),\n    obj['nodes'].keys()))[0]\\n\\t\\t\\tnode = obj['nodes'][key]\\n\\t\\t\\tcount = node['indices']['docs']['count']\\n\\t\\t\\tprint(count)\\n\\t\\tEOF\\n\n    \\ )\\n  echo \\\"Node contains ${DOC_COUNT} documents\\\"\\n  sleep 1\\ndone\\n\\necho\n    \\\"Wait for node shards to become empty\\\"\\nexport SHARD_STATS=$(\\n  curl -X GET\n    -s \\\\\\n    http://127.0.0.1:9200/_cat/shards?format=json\\n)\\nSHARD_COUNT=$(\\n\\tpython\n    - \u003c\u003c-EOF\\n\\t\\timport json\\n\\t\\timport os\\n\\n\\t\\tobj = json.loads(os.environ.get('SHARD_STATS'))\\n\\t\\tcount\n    = len(filter(lambda datum: datum['node'] == os.environ.get('HOSTNAME'), obj))\\n\\t\\tprint(count)\\n\\tEOF\\n)\\nwhile\n    [ \\\"${SHARD_COUNT}\\\" -gt 0 ]; do\\n  export SHARD_STATS=$(\\n    curl -X GET -s\n    \\\\\\n      http://127.0.0.1:9200/_cat/shards?format=json\\n  )\\n  SHARD_COUNT=$(\\n\\t\\tpython\n    - \u003c\u003c-EOF\\n\\t\\t\\timport json\\n\\t\\t\\timport os\\n\\n\\t\\t\\tobj = json.loads(os.environ.get('SHARD_STATS'))\\n\\t\\t\\tcount\n    = len(filter(lambda datum: datum['node'] == os.environ.get('HOSTNAME'), obj))\\n\\t\\t\\tprint(count)\\n\\t\\tEOF\\n\n    \\ )\\n  echo \\\"Node contains ${SHARD_COUNT} shards\\\"\\n  sleep 1\\ndone\\n\\necho \\\"Node\n    clear to shutdown\\\"\"\nkind: ConfigMap\nmetadata:\n  labels:\n    app: elastic-stack-elasticsearch\n    chart: elasticsearch-1.16.0\n    heritage: Tiller\n    release: elastic-stack\n  name: elastic-stack-elasticsearch\n",
    26              "/data-statefulset.yaml": "apiVersion: apps/v1beta1\nkind: StatefulSet\nmetadata:\n  labels:\n    app: elasticsearch\n    chart: elasticsearch-1.16.0\n    component: data\n    heritage: Tiller\n    release: elastic-stack\n  name: elastic-stack-elasticsearch-data\nspec:\n  template:\n    metadata:\n      annotations:\n        checksum/config: 4f07b9e19327171c37a9c353906c75a1f454cd31c3dfc600a8882d6e36713c49\n        checksum/secret: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n      labels:\n        role: null\n    spec:\n      $setElementOrder/containers:\n      - name: elasticsearch\n      $setElementOrder/initContainers:\n      - name: increase-memory-limits\n      containers:\n      - $setElementOrder/env:\n        - name: DISCOVERY_SERVICE\n        - name: NODE_MASTER\n        - name: PROCESSORS\n        - name: ES_HEAP_SIZE\n        - name: ES_JAVA_OPTS\n        - name: MINIMUM_MASTER_NODES\n        $setElementOrder/volumeMounts:\n        - mountPath: /usr/share/elasticsearch/data\n        - mountPath: /usr/share/elasticsearch/config/elasticsearch.yml\n        - mountPath: /usr/share/elasticsearch/config/log4j2.properties\n        - mountPath: /pre-stop-hook.sh\n        env:\n        - name: DISCOVERY_SERVICE\n          value: elastic-stack-elasticsearch-master.default.svc.cluster.local\n        - name: ES_HEAP_SIZE\n          value: 1536m\n        - name: ES_JAVA_OPTS\n          value: -Djava.net.preferIPv4Stack=true\n        image: gcr.io/cos-containers/elasticsearch:5.4.2-xpack\n        imagePullPolicy: Always\n        lifecycle:\n          postStart: null\n        name: elasticsearch\n        readinessProbe:\n          exec:\n            command:\n            - sh\n            - -c\n            - curl --request GET --silent --output /dev/null http://127.0.0.1:9200/_cluster/health?local=true\n          httpGet: null\n        volumeMounts:\n        - mountPath: /usr/share/elasticsearch/config/elasticsearch.yml\n          readOnly: true\n        - mountPath: /usr/share/elasticsearch/config/log4j2.properties\n          name: config\n          readOnly: true\n          subPath: log4j2.properties\n        - $patch: delete\n          mountPath: /post-start-hook.sh\n      initContainers:\n      - command:\n        - sh\n        - -c\n        - |-\n          # see https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html\n          # and https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#mlockall\n          # and https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-cli-run-prod-mode\n          sysctl -w vm.max_map_count=262144\n          # To increase the ulimit\n          # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#_notes_for_production_use_and_defaults\n          ulimit -l unlimited\n        image: busybox\n        name: increase-memory-limits\n        securityContext:\n          privileged: true\n      - $patch: delete\n        name: chown\n      - $patch: delete\n        name: sysctl\n      serviceAccountName: elastic-stack-elasticsearch\n  updateStrategy: null\n  volumeClaimTemplates:\n  - metadata:\n      name: data\n    spec:\n      accessModes:\n      - ReadWriteOnce\n      resources:\n        requests:\n          storage: 4Gi\n",
    27              "/deployment.yaml": "apiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n  labels:\n    app: kibana\n    chart: kibana-1.1.2\n    heritage: Tiller\n    release: elastic-stack\n  name: elastic-stack-kibana\nspec:\n  revisionHistoryLimit: null\n  template:\n    metadata:\n      annotations: null\n    spec:\n      $setElementOrder/containers:\n      - name: kibana\n      containers:\n      - $setElementOrder/env:\n        - name: XPACK_MONITORING_ENABLED\n        - name: ELASTICSEARCH_URL\n        $setElementOrder/ports:\n        - containerPort: 5601\n        env:\n        - name: XPACK_MONITORING_ENABLED\n          value: \"true\"\n        - name: ELASTICSEARCH_URL\n          value: http://elastic-stack-elasticsearch:9200\n        image: docker.elastic.co/kibana/kibana:5.4.2\n        livenessProbe:\n          httpGet:\n            path: /\n            port: 5601\n          initialDelaySeconds: 180\n        name: kibana\n        ports:\n        - containerPort: 5601\n          name: http\n          protocol: null\n        readinessProbe:\n          httpGet:\n            path: /status\n            port: 5601\n          initialDelaySeconds: 180\n          periodSeconds: 10\n        resources: null\n        securityContext:\n          runAsNonRoot: true\n          runAsUser: 1000\n        volumeMounts: null\n      serviceAccountName: null\n      tolerations: null\n      volumes: null\n",
    28              "/master-statefulset.yaml": "apiVersion: apps/v1beta1\nkind: StatefulSet\nmetadata:\n  labels:\n    app: elasticsearch\n    chart: elasticsearch-1.16.0\n    component: master\n    heritage: Tiller\n    release: elastic-stack\n  name: elastic-stack-elasticsearch-master\nspec:\n  replicas: 2\n  template:\n    metadata:\n      annotations:\n        checksum/config: 4f07b9e19327171c37a9c353906c75a1f454cd31c3dfc600a8882d6e36713c49\n        checksum/secret: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n      labels:\n        role: null\n    spec:\n      $setElementOrder/containers:\n      - name: elasticsearch\n      $setElementOrder/initContainers:\n      - name: increase-memory-limits\n      containers:\n      - $setElementOrder/env:\n        - name: DISCOVERY_SERVICE\n        - name: NODE_DATA\n        - name: NODE_INGEST\n        - name: ES_HEAP_SIZE\n        - name: PROCESSORS\n        - name: ES_JAVA_OPTS\n        - name: MINIMUM_MASTER_NODES\n        $setElementOrder/volumeMounts:\n        - mountPath: /usr/share/elasticsearch/data\n        - mountPath: /usr/share/elasticsearch/config/elasticsearch.yml\n        - mountPath: /usr/share/elasticsearch/config/log4j2.properties\n        env:\n        - name: DISCOVERY_SERVICE\n          value: elastic-stack-elasticsearch-master.default.svc.cluster.local\n        - name: NODE_INGEST\n          value: \"false\"\n        - name: ES_HEAP_SIZE\n          value: 512m\n        - name: ES_JAVA_OPTS\n          value: -Djava.net.preferIPv4Stack=true\n        image: gcr.io/cos-containers/elasticsearch:5.4.2-xpack\n        imagePullPolicy: Always\n        name: elasticsearch\n        readinessProbe:\n          exec:\n            command:\n            - sh\n            - -c\n            - curl --request GET --silent --output /dev/null http://127.0.0.1:9200/_cluster/health?local=true\n          httpGet: null\n        volumeMounts:\n        - mountPath: /usr/share/elasticsearch/config/elasticsearch.yml\n          readOnly: true\n        - mountPath: /usr/share/elasticsearch/config/log4j2.properties\n          name: config\n          readOnly: true\n          subPath: log4j2.properties\n      initContainers:\n      - command:\n        - sh\n        - -c\n        - |-\n          # see https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html\n          # and https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#mlockall\n          # and https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-cli-run-prod-mode\n          sysctl -w vm.max_map_count=262144\n          # To increase the ulimit\n          # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#_notes_for_production_use_and_defaults\n          ulimit -l unlimited\n        image: busybox\n        name: increase-memory-limits\n        securityContext:\n          privileged: true\n      - $patch: delete\n        name: chown\n      - $patch: delete\n        name: sysctl\n      serviceAccountName: elastic-stack-elasticsearch\n  updateStrategy: null\n",
    29              "/service.yaml": "apiVersion: v1\nkind: Service\nmetadata:\n  labels:\n    app: kibana\n    chart: kibana-1.1.2\n    heritage: Tiller\n    release: elastic-stack\n  name: elastic-stack-kibana\nspec:\n  $setElementOrder/ports:\n  - port: 80\n  ports:\n  - name: kibana\n    port: 80\n    protocol: TCP\n    targetPort: 5601\n  - $patch: delete\n    port: 443\n"
    30            },
    31            "resources": {
    32              "/client-pdb.yaml": "---\n# Source: elastic-stack/charts/elasticsearch/templates/client-pdb.yaml\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n  name: \"elastic-stack-elasticsearch-client\"\n  labels:\n    app: elasticsearch\n    chart: elasticsearch-0.5.1\n    component: \"client\"\n    heritage: Tiller\n    release: elastic-stack\nspec:\n  selector:\n    matchLabels:\n      app: elasticsearch\n      component: \"client\"\n      release: elastic-stack\n  maxUnavailable: 1",
    33              "/data-pdb.yaml": "---\n# Source: elastic-stack/charts/elasticsearch/templates/data-pdb.yaml\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n  name: \"elastic-stack-elasticsearch-data\"\n  labels:\n    app: elasticsearch\n    chart: elasticsearch-0.5.1\n    component: \"data\"\n    heritage: Tiller\n    release: elastic-stack\nspec:\n  selector:\n    matchLabels:\n      app: elasticsearch\n      component: \"data\"\n      release: elastic-stack\n  maxUnavailable: 1",
    34              "/master-pdb.yaml": "---\n# Source: elastic-stack/charts/elasticsearch/templates/master-pdb.yaml\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n  name: \"elastic-stack-elasticsearch-master\"\n  labels:\n    app: elasticsearch\n    chart: elasticsearch-0.5.1\n    component: \"master\"\n    heritage: Tiller\n    release: elastic-stack\nspec:\n  selector:\n    matchLabels:\n      app: elasticsearch\n      component: \"master\"\n      release: elastic-stack\n  maxUnavailable: 1",
    35              "/master-svc.yaml": "---\n# Source: elastic-stack/charts/elasticsearch/templates/master-svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n  labels:\n    app: elasticsearch\n    chart: elasticsearch-0.5.1\n    component: \"master\"\n    heritage: Tiller\n    release: elastic-stack\n  name: elastic-stack-elasticsearch-master\nspec:\n  clusterIP: None\n  ports:\n    - port: 9300\n      targetPort: 9300\n  selector:\n    app: elasticsearch\n    component: \"master\"\n    release: elastic-stack\n",
    36              "/service-account.yaml": "---\n# Source: elastic-stack/charts/elasticsearch/templates/service-account.yaml\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  labels:\n    app: elasticsearch\n    chart: elasticsearch-0.5.1\n    heritage: Tiller\n    release: elastic-stack\n  name: elastic-stack-elasticsearch\n"
    37            }
    38          }
    39        }
    40      },
    41      "upstream": "https://github.com/replicatedhq/test-charts/tree/2f4b5abb35405c61bbe44f00748eeb2d8799a0d2/elastic-stack-upstream-snapshot/elastic-stack",
    42      "metadata": {
    43        "applicationType": "helm",
    44        "icon": "https://www.elastic.co/assets/bltb35193323e8f1770/logo-elastic-stack-lt.svg",
    45        "name": "elastic-stack",
    46        "releaseNotes": "Add upstream dependencies for elastic-stack (#26)",
    47        "license": {
    48          "assignee": "",
    49          "createdAt": "0001-01-01T00:00:00Z",
    50          "expiresAt": "0001-01-01T00:00:00Z",
    51          "id": "",
    52          "type": ""
    53        },
    54        "sequence": 0,
    55        "version": "1.1.0"
    56      },
    57      "contentSHA": "7e1bdd6404a096f3bd1024580f4449c381fe0df165c323e1590e9375977e2b50"
    58    }
    59  }
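
For orientation when reading this fixture: the unfork integration test compares ship's generated .ship/state.json against the file above, so the interesting surface is the v1 block (helmValues, releaseName, the kustomize.overlays.ship overlay with its excludedBases, patches, and resources, plus upstream, metadata, and contentSHA). The Go sketch below is one way such a file could be loaded and summarized for inspection; the struct names and the relative path are illustrative assumptions, and only the JSON tags mirror the keys above, not ship's actual state types.

// Illustrative sketch only: decode the state fixture and print a summary.
// Struct and field names are hypothetical; only the JSON tags come from the
// keys visible in the fixture above. The file path is an assumed example.
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
)

type overlay struct {
	ExcludedBases []string          `json:"excludedBases"`
	Patches       map[string]string `json:"patches"`
	Resources     map[string]string `json:"resources"`
}

type stateFile struct {
	V1 struct {
		ReleaseName string `json:"releaseName"`
		HelmValues  string `json:"helmValues"`
		Kustomize   struct {
			Overlays map[string]overlay `json:"overlays"`
		} `json:"kustomize"`
		Upstream   string `json:"upstream"`
		ContentSHA string `json:"contentSHA"`
	} `json:"v1"`
}

func main() {
	raw, err := os.ReadFile(".ship/state.json")
	if err != nil {
		log.Fatal(err)
	}
	var s stateFile
	if err := json.Unmarshal(raw, &s); err != nil {
		log.Fatal(err)
	}
	// The fixture keeps its kustomize data under the "ship" overlay.
	ship := s.V1.Kustomize.Overlays["ship"]
	fmt.Printf("release %q: %d excluded bases, %d patches, %d resources, contentSHA %s\n",
		s.V1.ReleaseName, len(ship.ExcludedBases), len(ship.Patches), len(ship.Resources), s.V1.ContentSHA)
}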