k8s.io/kubernetes@v1.29.3/test/images/volume/rbd/bootstrap.sh

#!/usr/bin/env bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#
# Bootstraps a Ceph server.
# It creates two OSDs on the local machine, creates an RBD pool there,
# and imports the 'block' device into it.
#
# We must create fresh OSDs and a filesystem here, because shipping them
# in the container would increase the image by ~300 MB.
#


# Create /etc/ceph/ceph.conf
sh ./ceph.conf.sh "$(hostname -i)"

# Configure and start ceph-mon
sh ./mon.sh "$(hostname -i)"
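
# Illustrative check (added sketch, not part of the original script): once mon.sh
# returns, the monitor should answer status queries with the admin keyring the
# helper scripts set up. A failure here is non-fatal (the script has no 'set -e').
ceph mon stat || echo "WARNING: ceph-mon is not responding yet"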

# Configure and start 2x ceph-osd
mkdir -p /var/lib/ceph/osd/ceph-0 /var/lib/ceph/osd/ceph-1
sh ./osd.sh 0
sh ./osd.sh 1
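
# Illustrative check (added sketch): both OSDs should register with the monitor
# shortly after osd.sh returns; 'ceph osd tree' lists them with their up/down state.
ceph osd tree || echo "WARNING: could not query the OSD tree"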

# Configure and start cephfs metadata server
sh ./mds.sh
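
# Illustrative check (added sketch): 'ceph mds stat' reports the metadata server
# state; it typically shows the daemon as standby until 'ceph fs new' runs below.
ceph mds stat || echo "WARNING: could not query the MDS state"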

# Prepare an RBD volume "foo" (with only the layering feature; the other features
# may require newer clients).
# NOTE: we need Ceph kernel modules on the host that runs the client!
# The default pool `rbd` might not be created on arm64 platforms, so create it
# there if it does not exist.
arch=$(uname -m)
if [[ ${arch} = 'aarch64' || ${arch} = 'arm64' ]]; then
    if [[ $(ceph osd lspools) = "" ]]; then
        ceph osd pool create rbd 8
        rbd pool init rbd
    fi
fi
rbd import --image-feature layering block foo
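
# Illustrative check and client-side usage (added sketch, not part of the original
# script): confirm the image was imported with only the layering feature enabled.
rbd info foo || echo "WARNING: could not query image foo"
# A client host with the rbd kernel module and matching /etc/ceph config could then
# map and mount it roughly like this (assuming 'block' already carries a filesystem):
#   rbd map rbd/foo        # creates a /dev/rbdX device on the client
#   mount /dev/rbdX /mnt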

# Prepare a cephfs volume
ceph osd pool create cephfs_data 4
ceph osd pool create cephfs_metadata 4
ceph fs new cephfs cephfs_metadata cephfs_data
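
# Illustrative check (added sketch): the new filesystem should now be listed
# together with its metadata and data pools.
ceph fs ls || echo "WARNING: could not list cephfs filesystems"
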
# Put index.html into the volume.
# It takes a while until the volume created above is mountable;
# 1 second is usually enough, but retry indefinitely.
sleep 1
while ! ceph-fuse -m "$(hostname -i):6789" /mnt; do
    echo "Waiting for cephfs to be up"
    sleep 1
done
echo "Hello Ceph!" > /mnt/index.html
chmod 644 /mnt/index.html
umount /mnt

echo "Ceph is ready"

# Wait forever
while true; do
    sleep 10
done