github.com/containerd/nerdctl/v2@v2.0.0-beta.5.0.20240520001846-b5758f54fa28/examples/nerdctl-ipfs-registry-kubernetes/ipfs-cluster/nerdctl-ipfs-registry.yaml

# Example YAML for IPFS-based node-to-node image sharing with ipfs-cluster

apiVersion: v1
kind: ConfigMap
metadata:
  name: ipfs-cluster-conf
data:
  # Used for both `replication_factor_min` and `replication_factor_max`
  # https://cluster.ipfs.io/documentation/reference/configuration/
  cluster-replication-factor: "2"

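# NOTE (illustrative, not part of this manifest): the objects below reference a
# Secret named `secret-config` (keys: ipfs-swarm-key, cluster-secret,
# cluster-bootstrap-peer-priv-key) and a ConfigMap named `env-config`
# (key: cluster-bootstrap-peer-id) that must be created beforehand. A minimal
# sketch of one way to generate them, assuming ipfs-cluster-service and jq are
# available on the machine running kubectl:
#
#   # private-network swarm key in the /key/swarm/psk/1.0.0/ format expected by kubo
#   printf '/key/swarm/psk/1.0.0/\n/base16/\n%s\n' "$(od -vN 32 -An -tx1 /dev/urandom | tr -d ' \n')" > swarm.key
#
#   # shared ipfs-cluster secret
#   CLUSTER_SECRET=$(od -vN 32 -An -tx1 /dev/urandom | tr -d ' \n')
#
#   # `ipfs-cluster-service init` writes ~/.ipfs-cluster/identity.json containing the
#   # bootstrap peer's "id" and "private_key"
#   ipfs-cluster-service init
#
#   kubectl create secret generic secret-config \
#     --from-file=ipfs-swarm-key=swarm.key \
#     --from-literal=cluster-secret="${CLUSTER_SECRET}" \
#     --from-literal=cluster-bootstrap-peer-priv-key="$(jq -r .private_key ~/.ipfs-cluster/identity.json)"
#   kubectl create configmap env-config \
#     --from-literal=cluster-bootstrap-peer-id="$(jq -r .id ~/.ipfs-cluster/identity.json)"
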
---

# Bootstrap node: a single kubo daemon, an ID server that publishes the kubo
# PeerID over HTTP, and the ipfs-cluster bootstrap peer that the DaemonSet
# peers below join.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ipfs-bootstrap
spec:
  selector:
    matchLabels:
      app: ipfs-bootstrap
  template:
    metadata:
      labels:
        app: ipfs-bootstrap
    spec:
      initContainers:
        - name: configure-ipfs
          image: "ghcr.io/stargz-containers/ipfs/kubo:v0.16.0"
          command: ["sh", "/custom/configure-ipfs.sh"]
          env:
            - name: LIBP2P_FORCE_PNET
              value: "1"
            - name: IPFS_SWARM_KEY
              valueFrom:
                secretKeyRef:
                  name: secret-config
                  key: ipfs-swarm-key
          volumeMounts:
            - name: ipfs-storage
              mountPath: /data/ipfs
            - name: configure-script
              mountPath: /custom
      containers:
        - name: id
          image: "ghcr.io/stargz-containers/ipfs/kubo:v0.16.0"
          command: ["sh", "/custom/id-server.sh"]
          ports:
            - name: id
              protocol: TCP
              containerPort: 8000
          volumeMounts:
            - name: ipfs-storage
              mountPath: /data/ipfs
            - name: configure-script
              mountPath: /custom
        - name: ipfs
          image: "ghcr.io/stargz-containers/ipfs/kubo:v0.16.0"
          command: ["ipfs", "daemon"]
          env:
            - name: LIBP2P_FORCE_PNET
              value: "1"
          ports:
            - name: swarm
              protocol: TCP
              containerPort: 4001
          volumeMounts:
            - name: ipfs-storage
              mountPath: /data/ipfs
            - name: configure-script
              mountPath: /custom
          livenessProbe:
            tcpSocket:
              port: swarm
            initialDelaySeconds: 30
            timeoutSeconds: 5
            periodSeconds: 15
        - name: ipfs-cluster
          image: "ghcr.io/stargz-containers/ipfs/ipfs-cluster:1.0.4"
          command: ["sh", "/custom/cluster-entrypoint.sh"]
          env:
            - name: CLUSTER_REPLICATIONFACTORMIN
              valueFrom:
                configMapKeyRef:
                  name: ipfs-cluster-conf
                  key: cluster-replication-factor
            - name: CLUSTER_REPLICATIONFACTORMAX
              valueFrom:
                configMapKeyRef:
                  name: ipfs-cluster-conf
                  key: cluster-replication-factor
            - name: CLUSTER_BOOTSTRAP_PEER_ID
              valueFrom:
                configMapKeyRef:
                  name: env-config
                  key: cluster-bootstrap-peer-id
            - name: CLUSTER_BOOTSTRAP_PEER_PRIV_KEY
              valueFrom:
                secretKeyRef:
                  name: secret-config
                  key: cluster-bootstrap-peer-priv-key
            - name: CLUSTER_SECRET
              valueFrom:
                secretKeyRef:
                  name: secret-config
                  key: cluster-secret
          ports:
            - name: api-http
              containerPort: 9094
              protocol: TCP
            - name: proxy-http
              containerPort: 9095
              protocol: TCP
            - name: cluster-swarm
              containerPort: 9096
              protocol: TCP
          volumeMounts:
            - name: cluster-storage
              mountPath: /data/ipfs-cluster
            - name: configure-script
              mountPath: /custom
          livenessProbe:
            tcpSocket:
              port: cluster-swarm
            initialDelaySeconds: 5
            timeoutSeconds: 5
            periodSeconds: 10
      volumes:
        - name: configure-script
          configMap:
            name: ipfs-bootstrap-conf
        - name: ipfs-storage
          emptyDir: {}
        - name: cluster-storage
          emptyDir: {}

---

apiVersion: v1
kind: Service
metadata:
  name: ipfs-bootstrap
  labels:
    app: ipfs-bootstrap
spec:
  type: ClusterIP
  ports:
    - name: id
      targetPort: id
      port: 8000
    - name: swarm
      targetPort: swarm
      port: 4001
    - name: cluster-swarm
      targetPort: cluster-swarm
      port: 9096
  selector:
    app: ipfs-bootstrap

---

# Per-node peer: each node runs its own kubo daemon, an ipfs-cluster peer that
# joins the bootstrap node above, and nerdctl-ipfs-registry exposed on
# hostPort 5050 as a node-local registry.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: ipfs
spec:
  selector:
    matchLabels:
      app: ipfs
  template:
    metadata:
      labels:
        app: ipfs
    spec:
      initContainers:
        - name: configure-ipfs
          image: "ghcr.io/stargz-containers/ipfs/kubo:v0.16.0"
          command: ["sh", "/custom/configure-ipfs.sh"]
          env:
            - name: BOOTSTRAP_SVC_NAME
              value: "ipfs-bootstrap"
            - name: LIBP2P_FORCE_PNET
              value: "1"
            - name: IPFS_SWARM_KEY
              valueFrom:
                secretKeyRef:
                  name: secret-config
                  key: ipfs-swarm-key
          volumeMounts:
            - name: ipfs-storage
              mountPath: /data/ipfs
            - name: configure-script
              mountPath: /custom
      containers:
        - name: ipfs
          image: "ghcr.io/stargz-containers/ipfs/kubo:v0.16.0"
          command: ["ipfs", "daemon"]
          env:
            - name: LIBP2P_FORCE_PNET
              value: "1"
          ports:
            - name: swarm
              protocol: TCP
              containerPort: 4001
            - name: api
              protocol: TCP
              containerPort: 5001
              hostPort: 5001
          volumeMounts:
            - name: ipfs-storage
              mountPath: /data/ipfs
            - name: configure-script
              mountPath: /custom
          livenessProbe:
            tcpSocket:
              port: swarm
            initialDelaySeconds: 30
            timeoutSeconds: 5
            periodSeconds: 15
        - name: ipfs-cluster
          image: "ghcr.io/stargz-containers/ipfs/ipfs-cluster:1.0.4"
          command: ["sh", "/custom/cluster-entrypoint.sh"]
          env:
            - name: BOOTSTRAP_SVC_NAME
              value: "ipfs-bootstrap"
            - name: CLUSTER_REPLICATIONFACTORMIN
              valueFrom:
                configMapKeyRef:
                  name: ipfs-cluster-conf
                  key: cluster-replication-factor
            - name: CLUSTER_REPLICATIONFACTORMAX
              valueFrom:
                configMapKeyRef:
                  name: ipfs-cluster-conf
                  key: cluster-replication-factor
            - name: CLUSTER_BOOTSTRAP_PEER_ID
              valueFrom:
                configMapKeyRef:
                  name: env-config
                  key: cluster-bootstrap-peer-id
            - name: CLUSTER_SECRET
              valueFrom:
                secretKeyRef:
                  name: secret-config
                  key: cluster-secret
          ports:
            - name: api-http
              containerPort: 9094
              protocol: TCP
            - name: proxy-http
              containerPort: 9095
              protocol: TCP
              hostPort: 9095
            - name: cluster-swarm
              containerPort: 9096
              protocol: TCP
          volumeMounts:
            - name: cluster-storage
              mountPath: /data/ipfs-cluster
            - name: configure-script
              mountPath: /custom
          livenessProbe:
            tcpSocket:
              port: cluster-swarm
            initialDelaySeconds: 5
            timeoutSeconds: 5
            periodSeconds: 10
        - name: nerdctl-ipfs-registry
          image: "ghcr.io/stargz-containers/nerdctl-ipfs-registry:v0.23.0"
          command: ["sh", "/custom/nerdctl-ipfs-registry-entrypoint.sh"]
          env:
            - name: IPFS_PATH
              value: "/data/ipfs"
          ports:
            - containerPort: 5050
              hostPort: 5050
          volumeMounts:
            - name: ipfs-storage
              mountPath: /data/ipfs
            - name: configure-script
              mountPath: /custom
      volumes:
        - name: configure-script
          configMap:
            name: ipfs-peer-conf
        - name: ipfs-storage
          hostPath:
            path: /var/ipfs/
        - name: cluster-storage
          hostPath:
            path: /var/ipfs-cluster/

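# Usage sketch (illustrative; the image reference below is a placeholder, not
# something defined by this manifest): after an image has been pushed to IPFS
# from one of the nodes (e.g. `nerdctl push ipfs://<image>`, which prints a CID),
# pods can pull it through the node-local registry published on hostPort 5050:
#
#   apiVersion: v1
#   kind: Pod
#   metadata:
#     name: example
#   spec:
#     containers:
#       - name: example
#         image: localhost:5050/ipfs/<CID>
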
---

# Scripts mounted into the per-node (DaemonSet) peers.
apiVersion: v1
kind: ConfigMap
metadata:
  name: ipfs-peer-conf
data:
  nerdctl-ipfs-registry-entrypoint.sh: |
    #!/bin/sh
    set -eu

    if ! command -v curl >/dev/null 2>&1 ; then
        echo "curl not found. installing..."
        apt-get update -y && apt-get install -y curl
    fi

    # wait for the ipfs API (served by the ipfs-cluster proxy on localhost:9095)
    ok=false
    for i in $(seq 100) ; do
        if curl localhost:9095/api/v0/id >/dev/null 2>&1 ; then
            ok=true
            break
        fi
        echo "Fail(${i}). Retrying..."
        sleep 3
    done
    if [ "$ok" != "true" ] ; then
      echo "failed to detect ipfs api"
      exit 1
    fi

    exec /usr/local/bin/nerdctl ipfs registry serve --listen-registry 0.0.0.0:5050 --ipfs-address /ip4/127.0.0.1/tcp/9095 --read-retry-num 3 --read-timeout 1s

  cluster-entrypoint.sh: |
    #!/bin/sh
    set -eu -o pipefail

    # wait for the bootstrap node to come up
    ok=false
    for i in $(seq 100) ; do
        if nc -z ${BOOTSTRAP_SVC_NAME} 9096 ; then
            ok=true
            break
        fi
        echo "Fail(${i}). Retrying..."
        sleep 3
    done
    if [ "$ok" != "true" ] ; then
      echo "failed to detect bootstrap node"
      exit 1
    fi

    mkdir -p /data/ipfs-cluster
    if ! [ -z "$(ls -A /data/ipfs-cluster)" ]; then
      echo "IPFS cluster already configured on this node; destroying the current repo and refreshing..."
      rm -rf /data/ipfs-cluster/*
    fi
    ipfs-cluster-service init
    # make the cluster's IPFS proxy listen on all interfaces (it is published via hostPort 9095)
    cat /data/ipfs-cluster/service.json | sed 's|/ip4/127.0.0.1/tcp/9095|/ip4/0.0.0.0/tcp/9095|' > /tmp/tmp.json
    mv /tmp/tmp.json /data/ipfs-cluster/service.json

    # join the cluster through the bootstrap peer; --leave removes this peer from the cluster on shutdown
    BOOTSTRAP_ADDR=/dns4/${BOOTSTRAP_SVC_NAME}/tcp/9096/ipfs/${CLUSTER_BOOTSTRAP_PEER_ID}
    exec ipfs-cluster-service daemon --upgrade --bootstrap $BOOTSTRAP_ADDR --leave

  configure-ipfs.sh: |
    #!/bin/sh
    set -eu -o pipefail

    # wait for the bootstrap node to come up
    ok=false
    for i in $(seq 100) ; do
        if nc -z ${BOOTSTRAP_SVC_NAME} 4001 ; then
            ok=true
            break
        fi
        echo "Fail(${i}). Retrying..."
        sleep 3
    done
    if [ "$ok" != "true" ] ; then
      echo "failed to detect bootstrap node"
      exit 1
    fi

    # the bootstrap Deployment publishes its kubo PeerID over HTTP on port 8000
    BOOTSTRAP_ID=$(wget -O - ${BOOTSTRAP_SVC_NAME}:8000/id)
    if [ "${BOOTSTRAP_ID}" = "" ] ; then
      echo "failed to get bootstrap peer id"
      exit 1
    fi
    if [ "${IPFS_SWARM_KEY}" = "" ] || [ "${LIBP2P_FORCE_PNET}" != "1" ] ; then
      echo "must be forced to private ipfs network (got LIBP2P_FORCE_PNET=${LIBP2P_FORCE_PNET})"
      exit 1
    fi

    mkdir -p /data/ipfs
    if ! [ -z "$(ls -A /data/ipfs)" ]; then
      echo "IPFS already configured on this node; destroying the current repo and refreshing..."
      rm -rf /data/ipfs/*
    fi

    ipfs init --profile=server
    ipfs bootstrap rm --all
    ipfs bootstrap add /dns4/${BOOTSTRAP_SVC_NAME}/tcp/4001/ipfs/${BOOTSTRAP_ID}
    ipfs config Addresses.API /ip4/0.0.0.0/tcp/5001
    ipfs config Addresses.Gateway /ip4/0.0.0.0/tcp/8080
    ipfs config Datastore.StorageMax 100GB
    # the server profile filters private address ranges; clear the filters so peers can dial each other inside the cluster network
    ipfs config Addresses.NoAnnounce --json '[]'
    ipfs config Swarm.AddrFilters --json '[]'
    echo -n "${IPFS_SWARM_KEY}" > /data/ipfs/swarm.key

---

# Scripts mounted into the bootstrap node (Deployment).
apiVersion: v1
kind: ConfigMap
metadata:
  name: ipfs-bootstrap-conf
data:
  id-server.sh: |
    #!/bin/sh
    set -eu -o pipefail

    # serve this node's kubo PeerID over HTTP (busybox httpd) so that peers can discover it
    if [ ! -f /doc/id ]; then
      mkdir -p /doc
      ipfs config show | grep "PeerID" | sed -E 's/.*"PeerID": "([a-zA-Z0-9]*)".*/\1/' > /doc/id
    fi
    exec httpd -f -p 8000 -h /doc

  cluster-entrypoint.sh: |
    #!/bin/sh
    set -eu -o pipefail

    mkdir -p /data/ipfs-cluster
    if ! [ -z "$(ls -A /data/ipfs-cluster)" ]; then
      echo "IPFS cluster already configured on this node; destroying the current repo and refreshing..."
      rm -rf /data/ipfs-cluster/*
    fi
    ipfs-cluster-service init

    # run the bootstrap peer with the fixed identity passed in via the Secret
    CLUSTER_ID=${CLUSTER_BOOTSTRAP_PEER_ID} \
    CLUSTER_PRIVATEKEY=${CLUSTER_BOOTSTRAP_PEER_PRIV_KEY} \
    exec ipfs-cluster-service daemon --upgrade

  configure-ipfs.sh: |
    #!/bin/sh
    set -eu -o pipefail

    if [ "${IPFS_SWARM_KEY}" = "" ] || [ "${LIBP2P_FORCE_PNET}" != "1" ] ; then
      echo "must be forced to private ipfs network (got LIBP2P_FORCE_PNET=${LIBP2P_FORCE_PNET})"
      exit 1
    fi

    mkdir -p /data/ipfs
    if ! [ -z "$(ls -A /data/ipfs)" ]; then
      echo "IPFS already configured on this node; destroying the current repo and refreshing..."
      rm -rf /data/ipfs/*
    fi

    ipfs init --profile=server
    ipfs bootstrap rm --all
    ipfs config Addresses.API /ip4/0.0.0.0/tcp/5001
    ipfs config Addresses.Gateway /ip4/0.0.0.0/tcp/8080
    # the server profile filters private address ranges; clear the filters so peers can dial each other inside the cluster network
    ipfs config Addresses.NoAnnounce --json '[]'
    ipfs config Swarm.AddrFilters --json '[]'
    ipfs config Datastore.StorageMax 1GB
    echo -n "${IPFS_SWARM_KEY}" > /data/ipfs/swarm.key
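
# Verification sketch (illustrative; substitute an actual pod name): to check
# that a node's peer has joined the private IPFS network and the cluster,
# something like the following can be run against one of the DaemonSet pods:
#
#   kubectl exec <ipfs daemonset pod> -c ipfs -- ipfs swarm peers
#   kubectl exec <ipfs daemonset pod> -c ipfs-cluster -- ipfs-cluster-ctl peers ls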