#!/bin/bash
#
# Copyright contributors to the Hyperledger Fabric Operator project
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# cluster "group" commands. Like "main" for the fabric-cli "cluster" sub-command.
#
# Dispatches the first argument (default: "init") to the matching handler.
# Relies on helpers defined elsewhere in the sample-network scripts
# (log, push_fn, pop_fn, print_help) and on env configuration
# (CLUSTER_RUNTIME, CONTAINER_CLI, CONTAINER_NAMESPACE, *_IMAGE vars,
# KUSTOMIZE_BUILD, COREDNS_DOMAIN_OVERRIDE, STAGE_DOCKER_IMAGES).
function cluster_command_group() {

  # Default COMMAND is 'init' if not specified. COMMAND is deliberately a
  # global: other scripts in this suite follow the same convention.
  if [ "$#" -eq 0 ]; then
    COMMAND="init"
  else
    COMMAND=$1
    shift
  fi

  case "${COMMAND}" in
    init)
      log "Initializing K8s cluster"
      cluster_init
      log "🏁 - Cluster is ready"
      ;;

    clean)
      log "Cleaning k8s cluster"
      cluster_clean
      log "🏁 - Cluster is cleaned"
      ;;

    load-images)
      log "Loading Docker images"
      pull_docker_images

      # Only KIND keeps a private image cache in the control plane node;
      # other runtimes read straight from the local container engine.
      if [ "${CLUSTER_RUNTIME}" == "kind" ]; then
        kind_load_images
      fi

      log "🏁 - Images are ready"
      ;;

    *)
      print_help
      exit 1
      ;;
  esac
}

# The full set of container images required by the sample network.
# Shared by pull_docker_images and kind_load_images so the two lists
# can never drift apart.
function fabric_image_list() {
  printf '%s\n' \
    "$FABRIC_OPERATOR_IMAGE" \
    "$FABRIC_CONSOLE_IMAGE" \
    "$FABRIC_DEPLOYER_IMAGE" \
    "$FABRIC_CA_IMAGE" \
    "$FABRIC_PEER_IMAGE" \
    "$FABRIC_ORDERER_IMAGE" \
    "$INIT_IMAGE" \
    "$COUCHDB_IMAGE" \
    "$GRPCWEB_IMAGE"
}

# Pull every Fabric image into the local container engine's cache.
function pull_docker_images() {
  push_fn "Pulling docker images for Fabric ${FABRIC_VERSION}"

  local image
  while IFS= read -r image; do
    # CONTAINER_NAMESPACE is intentionally unquoted: it is either empty or
    # an extra CLI argument (e.g. a podman namespace flag) and must word-split.
    # shellcheck disable=SC2086
    $CONTAINER_CLI pull ${CONTAINER_NAMESPACE} "$image"
  done < <(fabric_image_list)

  pop_fn
}

# Side-load the locally cached images into the KIND control plane node,
# so in-cluster pods do not need registry access.
function kind_load_images() {
  push_fn "Loading docker images to KIND control plane"

  local image
  while IFS= read -r image; do
    kind load docker-image "$image"
  done < <(fabric_image_list)

  pop_fn
}

# Bring a fresh cluster to the state the sample network expects:
# Fabric CRDs installed, an ingress controller running, and (optionally)
# CoreDNS overridden and images pre-staged.
function cluster_init() {
  apply_fabric_crds
  apply_nginx_ingress

  wait_for_nginx_ingress

  if [ "${COREDNS_DOMAIN_OVERRIDE}" == true ]; then
    apply_coredns_domain_override
  fi

  if [ "${STAGE_DOCKER_IMAGES}" == true ]; then
    pull_docker_images
    kind_load_images
  fi
}

function apply_fabric_crds() {
  push_fn "Applying Fabric CRDs"

  # KUSTOMIZE_BUILD is a command plus arguments ("kustomize build" or
  # "kubectl kustomize") and must stay unquoted so it word-splits.
  $KUSTOMIZE_BUILD ../config/crd | kubectl apply -f -

  pop_fn
}

function delete_fabric_crds() {
  push_fn "Deleting Fabric CRDs"

  $KUSTOMIZE_BUILD ../config/crd | kubectl delete -f -

  pop_fn
}

function apply_nginx_ingress() {
  push_fn "Applying ingress controller"

  $KUSTOMIZE_BUILD ../config/ingress/${CLUSTER_RUNTIME} | kubectl apply -f -

  sleep 5

  pop_fn
}

function delete_nginx_ingress() {
  push_fn "Deleting ${CLUSTER_RUNTIME} ingress controller"

  $KUSTOMIZE_BUILD ../config/ingress/${CLUSTER_RUNTIME} | kubectl delete -f -

  pop_fn
}

# Block until the nginx ingress controller pod reports Ready.
function wait_for_nginx_ingress() {
  push_fn "Waiting for ingress controller"

  # Give the ingress controller a chance to get set up in the namespace
  # before asking kubectl to wait on the (not-yet-created) pod.
  sleep 5

  kubectl wait --namespace ingress-nginx \
    --for=condition=ready pod \
    --selector=app.kubernetes.io/component=controller \
    --timeout=2m

  pop_fn
}

# Allow pods running in kubernetes to access services at the ingress domain *.localho.st.
#
# This function identifies the CLUSTER-IP for the ingress controller and overrides the coredns
# with a wildcard domain match to the IP. Clients using public DNS will always resolve
# *.localho.st as 127.0.0.1, routing to the ingress on the host loopback interface. Clients
# resolving *.localho.st on the kube DNS (e.g., pods running in the cluster) will resolve the
# dummy DNS wildcard entry, routing to the kube internal IP address for the ingress controller.
#
# NOTE(review): the log line references $INGRESS_DOMAIN but the rewrite rule
# below hard-codes localho.st — the override only works for that domain.
function apply_coredns_domain_override() {

  CLUSTER_IP=$(kubectl -n ingress-nginx get svc ingress-nginx-controller -o json | jq -r .spec.clusterIP)
  push_fn "Applying CoreDNS overrides for ingress domain $INGRESS_DOMAIN at CLUSTER-IP $CLUSTER_IP"

  cat <<EOF | kubectl apply -f -
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        rewrite name regex (.*)\.localho\.st host.ingress.internal
        hosts {
          ${CLUSTER_IP} host.ingress.internal
          fallthrough
        }
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
        }
        prometheus :9153
        forward . /etc/resolv.conf {
            max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
EOF

  # The CoreDNS deployment only reloads the Corefile on restart.
  kubectl -n kube-system rollout restart deployment/coredns

  pop_fn
}

# Tear down everything cluster_init created.
function cluster_clean() {
  delete_fabric_crds
  delete_nginx_ingress
}