github.com/projectcalico/api@v0.0.0-20231218190037-9183ab93f33e/lib.Makefile

     1  # Disable built-in rules
     2  .SUFFIXES:
     3  
     4  # Shortcut targets
     5  default: build
     6  
     7  ## Build binary for current platform
     8  all: build
     9  
    10  ## Run the tests for the current platform/architecture
    11  test: ut fv st
    12  
    13  ###############################################################################
    14  # Both native and cross architecture builds are supported.
    15  # The target architecture is selected by setting the ARCH variable.
    16  # When ARCH is undefined it is set to the detected host architecture.
    17  # When ARCH differs from the host architecture a crossbuild will be performed.
    18  # ARCHES is only computed from the Dockerfiles if it is not already set.
    19  ARCHES ?= $(patsubst docker-image/Dockerfile.%,%,$(wildcard docker-image/Dockerfile.*))
    20  
    21  # Some repositories keep their Dockerfile(s) in the root directory instead of in
    22  # the 'docker-image' subdir. Make sure ARCHES gets filled in either way.
    23  ifeq ($(ARCHES),)
    24  	ARCHES=$(patsubst Dockerfile.%,%,$(wildcard Dockerfile.*))
    25  endif
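        # Illustration (hypothetical repo layout, not part of this Makefile): if the files
        # docker-image/Dockerfile.amd64 and docker-image/Dockerfile.arm64 exist, the patsubst
        # above yields ARCHES = amd64 arm64.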
    26  
    27  # list of arches *not* to build when doing *-all
    28  EXCLUDEARCH?=
    29  VALIDARCHES = $(filter-out $(EXCLUDEARCH),$(ARCHES))
    30  
    31  # BUILDARCH is the host architecture
    32  # ARCH is the target architecture
    33  # we need to keep track of them separately
    34  # Note: OS is always set on Windows
    35  ifeq ($(OS),Windows_NT)
    36  BUILDARCH = x86_64
    37  BUILDOS = windows
    38  else
    39  BUILDARCH ?= $(shell uname -m)
    40  BUILDOS ?= $(shell uname -s | tr A-Z a-z)
    41  endif
    42  
    43  # canonicalized names for host architecture
    44  ifeq ($(BUILDARCH),aarch64)
    45  	BUILDARCH=arm64
    46  endif
    47  ifeq ($(BUILDARCH),x86_64)
    48  	BUILDARCH=amd64
    49  endif
    50  
    51  # unless otherwise set, I am building for my own architecture, i.e. not cross-compiling
    52  ARCH ?= $(BUILDARCH)
    53  
    54  # canonicalized names for target architecture
    55  ifeq ($(ARCH),aarch64)
    56  	override ARCH=arm64
    57  endif
    58  ifeq ($(ARCH),x86_64)
    59  	override ARCH=amd64
    60  endif
    61  
    62  # detect the local outbound ip address
    63  LOCAL_IP_ENV?=$(shell ip route get 8.8.8.8 | head -1 | awk '{print $$7}')
    64  
    65  LATEST_IMAGE_TAG?=latest
    66  
    67  # These macros let us insert literal commas and double quotes into make function calls and commands.
    68  comma := ,
    69  double_quote := $(shell echo '"')
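        # Sketch of why these helpers exist (the expansion below is illustrative, not used by any target here):
        # a literal comma would be parsed as an argument separator inside a make function call, so we
        # substitute $(comma) instead, e.g. $(subst $(comma), ,a,b,c) expands to "a b c".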
    70  
    71  ## Targets used when cross building.
    72  .PHONY: native register
    73  native:
    74  ifneq ($(BUILDARCH),$(ARCH))
    75  	@echo "Target $(MAKECMDGOALS) is not supported when cross building!" && false
    76  endif
    77  
    78  # Enable binfmt adding support for miscellaneous binary formats.
    79  # This is only needed when running non-native binaries.
    80  register:
    81  ifneq ($(BUILDARCH),$(ARCH))
    82  	docker run --rm --privileged multiarch/qemu-user-static:register || true
    83  endif
    84  
    85  # If this is a release, also tag and push additional images.
    86  ifeq ($(RELEASE),true)
    87  PUSH_IMAGES+=$(RELEASE_IMAGES)
    88  endif
    89  
    90  DOCKERHUB_REGISTRY ?=registry.hub.docker.com
    91  # filter-registry filters out registries we don't want to include when tagging / pushing docker images. For instance,
    92  # we don't include the registry name when pushing to docker hub because that registry is the default.
    93  filter-registry ?= $(if $(filter-out $(1),$(DOCKERHUB_REGISTRY)),$(1)/)
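        # Behaviour sketch (registry names are examples only):
        #   $(call filter-registry,quay.io)                 -> quay.io/
        #   $(call filter-registry,registry.hub.docker.com) -> empty, since docker hub is the default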
    94  
    95  # Convenience function to get the first dev image repo in the list.
    96  DEV_REGISTRY ?= $(firstword $(DEV_REGISTRIES))
    97  
    98  # Remove any registries that do not support multi-arch manifests from the list used for manifest pushes.
    99  MANIFEST_REGISTRIES         ?= $(DEV_REGISTRIES)
   100  
   101  PUSH_MANIFEST_IMAGES := $(foreach registry,$(MANIFEST_REGISTRIES),$(foreach image,$(BUILD_IMAGES),$(call filter-registry,$(registry))$(image)))
   102  
   103  # location of docker credentials to push manifests
   104  DOCKER_CONFIG ?= $(HOME)/.docker/config.json
   105  
   106  # If a repository still relies on vendoring, it must set GOMOD_VENDOR to "true".
   107  # If that's not the case and we're running in CI, set -mod=readonly to prevent builds
   108  # from being flagged as dirty due to updates in go.mod or go.sum, _except_ for:
   109  # - local builds, which _require_ a change to go.mod, and
   110  # - the targets 'commit-pin-updates' and 'golangci-lint', which require
   111  #   updating go.mod and/or go.sum.
   112  SKIP_GOMOD_READONLY_FLAG =
   113  ifeq ($(MAKECMDGOALS),commit-pin-updates)
   114  	SKIP_GOMOD_READONLY_FLAG = yes
   115  endif
   116  ifeq ($(MAKECMDGOALS),golangci-lint)
   117  	SKIP_GOMOD_READONLY_FLAG = yes
   118  endif
   119  
   120  ifeq ($(GOMOD_VENDOR),true)
   121  	GOFLAGS?="-mod=vendor"
   122  else
   123  ifeq ($(CI),true)
   124  ifndef SKIP_GOMOD_READONLY_FLAG
   125  	GOFLAGS?="-mod=readonly"
   126  endif
   127  endif
   128  endif
   129  
   130  # For building, we use the go-build image for the *host* architecture, even if the target is different:
   131  # the image for the host contains all the necessary cross-compilation tools.
   132  # We do not need an arch-specific tag, since go-build v0.15+ is a multi-arch manifest.
   133  GO_BUILD_IMAGE ?= calico/go-build
   134  CALICO_BUILD    = $(GO_BUILD_IMAGE):$(GO_BUILD_VER)
   135  
   136  
   137  # We use BoringCrypto as FIPS validated cryptography in order to allow users to run in FIPS Mode (amd64 only).
   138  ifeq ($(ARCH), $(filter $(ARCH),amd64))
   139  GOEXPERIMENT?=boringcrypto
   140  TAGS?=boringcrypto,osusergo,netgo
   141  CGO_ENABLED?=1
   142  else
   143  CGO_ENABLED?=0
   144  endif
   145  
   146  # Build a binary with boring crypto support.
   147  # This function expects you to pass in two arguments:
   148  #   1st arg: path/to/input/package(s)
   149  #   2nd arg: path/to/output/binary
   150  # Only when ARCH is amd64 will it use BoringCrypto to build the binary.
   151  # Uses LDFLAGS, CGO_LDFLAGS, CGO_CFLAGS when set.
   152  # Tests that the resulting binary contains boringcrypto symbols.
   153  define build_cgo_boring_binary
   154  	$(DOCKER_RUN) \
   155  		-e CGO_ENABLED=1 \
   156  		-e CGO_CFLAGS=$(CGO_CFLAGS) \
   157  		-e CGO_LDFLAGS=$(CGO_LDFLAGS) \
   158  		$(CALICO_BUILD) \
   159  		sh -c '$(GIT_CONFIG_SSH) GOEXPERIMENT=boringcrypto go build -o $(2) -tags fipsstrict -v -buildvcs=false -ldflags "$(LDFLAGS)" $(1) \
   160  			&& go tool nm $(2) | grep '_Cfunc__goboringcrypto_' 1> /dev/null'
   161  endef
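        # A hypothetical call site, assuming a ./cmd/my-binary package and a bin/ output path
        # (neither exists in this file):
        #   bin/my-binary-amd64:
        #   	$(call build_cgo_boring_binary,./cmd/my-binary,bin/my-binary-amd64)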
   162  
   163  # Use this when building binaries that need cgo, but have no crypto and therefore would not contain any boring symbols.
   164  define build_cgo_binary
   165  	$(DOCKER_RUN) \
   166  		-e CGO_ENABLED=1 \
   167  		-e CGO_CFLAGS=$(CGO_CFLAGS) \
   168  		-e CGO_LDFLAGS=$(CGO_LDFLAGS) \
   169  		$(CALICO_BUILD) \
   170  		sh -c '$(GIT_CONFIG_SSH) go build -o $(2) -v -buildvcs=false -ldflags "$(LDFLAGS)" $(1)'
   171  endef
   172  
   173  # For binaries that do not require boring crypto.
   174  define build_binary
   175  	$(DOCKER_RUN) \
   176  		-e CGO_ENABLED=0 \
   177  		$(CALICO_BUILD) \
   178  		sh -c '$(GIT_CONFIG_SSH) go build -o $(2) -v -buildvcs=false -ldflags "$(LDFLAGS)" $(1)'
   179  endef
   180  
   181  # For windows builds that do not require cgo.
   182  define build_windows_binary
   183  	$(DOCKER_RUN) \
   184  		-e CGO_ENABLED=0 \
   185  		-e GOARCH=amd64 \
   186  		-e GOOS=windows \
   187  		$(CALICO_BUILD) \
   188  		sh -c '$(GIT_CONFIG_SSH) go build -o $(2) -v -buildvcs=false -ldflags "$(LDFLAGS)" $(1)'
   189  endef
   190  
   191  # Images used in build / test across multiple directories.
   192  PROTOC_CONTAINER=calico/protoc:$(PROTOC_VER)-$(BUILDARCH)
   193  ETCD_IMAGE ?= quay.io/coreos/etcd:$(ETCD_VERSION)-$(ARCH)
   194  ifeq ($(BUILDARCH),amd64)
   195  	# *-amd64 tagged images for etcd are not available until v3.5.0
   196  	ETCD_IMAGE = quay.io/coreos/etcd:$(ETCD_VERSION)
   197  endif
   198  UBI_IMAGE ?= registry.access.redhat.com/ubi8/ubi-minimal:$(UBI_VERSION)
   199  
   200  ifeq ($(GIT_USE_SSH),true)
   201  	GIT_CONFIG_SSH ?= git config --global url."ssh://git@github.com/".insteadOf "https://github.com/";
   202  endif
   203  
   204  # Get version from git.
   205  GIT_VERSION:=$(shell git describe --tags --dirty --always --abbrev=12)
   206  
   207  # Figure out version information.  To support builds from release tarballs, we default to
   208  # <unknown> if this isn't a git checkout.
   209  GIT_COMMIT:=$(shell git rev-parse HEAD || echo '<unknown>')
   210  BUILD_ID:=$(shell git rev-parse HEAD || uuidgen | sed 's/-//g')
   211  
   212  # Lazily set the git version we embed into the binaries we build. We want the
   213  # git tag at the time we build the binary.
   214  # Variables elsewhere that depend on this (such as LDFLAGS) must also be lazy.
   215  GIT_DESCRIPTION=$(shell git describe --tags --dirty --always --abbrev=12 || echo '<unknown>')
   216  
   217  # Calculate a timestamp for any build artifacts.
   218  ifneq ($(OS),Windows_NT)
   219  DATE:=$(shell date -u +'%FT%T%z')
   220  endif
   221  
   222  # Figure out the user's UID/GID.  These are needed to run docker containers
   223  # as the current user and ensure that files built inside containers are
   224  # owned by the current user.
   225  ifneq ($(OS),Windows_NT)
   226  LOCAL_USER_ID:=$(shell id -u)
   227  LOCAL_GROUP_ID:=$(shell id -g)
   228  endif
   229  
   230  ifeq ("$(LOCAL_USER_ID)", "0")
   231  # The build needs to run as root.
   232  EXTRA_DOCKER_ARGS+=-e RUN_AS_ROOT='true'
   233  endif
   234  
   235  # Allow the ssh auth sock to be mapped into the build container.
   236  ifdef SSH_AUTH_SOCK
   237  	EXTRA_DOCKER_ARGS += -v $(SSH_AUTH_SOCK):/ssh-agent --env SSH_AUTH_SOCK=/ssh-agent
   238  endif
   239  
   240  # Volume-mount GOPATH into the build container to cache Go module packages. If the environment is using multiple
   241  # colon-separated directories for GOPATH, use the first one, as that is the default one used by go modules.
   242  ifneq ($(GOPATH),)
   243  	# If GOPATH contains multiple colon-separated directories, use the first one, as that
   244  	# is the default one used by go modules.
   245  	GOMOD_CACHE = $(shell echo $(GOPATH) | cut -d':' -f1)/pkg/mod
   246  else
   247  	# If gopath is empty, default to $(HOME)/go.
   248  	GOMOD_CACHE = $(HOME)/go/pkg/mod
   249  endif
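        # For illustration (hypothetical GOPATH value): with GOPATH=/home/user/go:/extra/go the cut
        # above selects /home/user/go, so GOMOD_CACHE becomes /home/user/go/pkg/mod.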
   250  
   251  EXTRA_DOCKER_ARGS += -v $(GOMOD_CACHE):/go/pkg/mod:rw
   252  
   253  # Define go architecture flags to support arm variants
   254  GOARCH_FLAGS :=-e GOARCH=$(ARCH)
   255  
   256  # Location of certificates used in UTs.
   257  REPO_ROOT := $(shell git rev-parse --show-toplevel)
   258  CERTS_PATH := $(REPO_ROOT)/hack/test/certs
   259  
   260  # Set the platform correctly for building docker images so that
   261  # cross-builds get the correct architecture set in the produced images.
   262  ifeq ($(ARCH),arm64)
   263  TARGET_PLATFORM=--platform=linux/arm64/v8
   264  endif
   265  ifeq ($(ARCH),ppc64le)
   266  TARGET_PLATFORM=--platform=linux/ppc64le
   267  endif
   268  ifeq ($(ARCH),s390x)
   269  TARGET_PLATFORM=--platform=linux/s390x
   270  endif
   271  
   272  # DOCKER_BUILD is the base build command used for building all images.
   273  DOCKER_BUILD=docker buildx build --pull \
   274  	     --build-arg QEMU_IMAGE=$(CALICO_BUILD) \
   275  	     --build-arg UBI_IMAGE=$(UBI_IMAGE) \
   276  	     --build-arg GIT_VERSION=$(GIT_VERSION) $(TARGET_PLATFORM)
   277  
   278  DOCKER_RUN := mkdir -p ../.go-pkg-cache bin $(GOMOD_CACHE) && \
   279  	docker run --rm \
   280  		--net=host \
   281  		--init \
   282  		$(EXTRA_DOCKER_ARGS) \
   283  		-e LOCAL_USER_ID=$(LOCAL_USER_ID) \
   284  		-e GOCACHE=/go-cache \
   285  		$(GOARCH_FLAGS) \
   286  		-e GOPATH=/go \
   287  		-e OS=$(BUILDOS) \
   288  		-e GOOS=$(BUILDOS) \
   289  		-e GOFLAGS=$(GOFLAGS) \
   290  		-v $(REPO_ROOT):/go/src/github.com/projectcalico/calico:rw \
   291  		-v $(REPO_ROOT)/.go-pkg-cache:/go-cache:rw \
   292  		-w /go/src/$(PACKAGE_NAME)
   293  
   294  DOCKER_RUN_RO := mkdir -p .go-pkg-cache bin $(GOMOD_CACHE) && \
   295  	docker run --rm \
   296  		--net=host \
   297  		--init \
   298  		$(EXTRA_DOCKER_ARGS) \
   299  		-e LOCAL_USER_ID=$(LOCAL_USER_ID) \
   300  		-e GOCACHE=/go-cache \
   301  		$(GOARCH_FLAGS) \
   302  		-e GOPATH=/go \
   303  		-e OS=$(BUILDOS) \
   304  		-e GOOS=$(BUILDOS) \
   305  		-e GOFLAGS=$(GOFLAGS) \
   306  		-v $(REPO_ROOT):/go/src/github.com/projectcalico/calico:ro \
   307  		-v $(REPO_ROOT)/.go-pkg-cache:/go-cache:rw \
   308  		-w /go/src/$(PACKAGE_NAME)
   309  
   310  DOCKER_GO_BUILD := $(DOCKER_RUN) $(CALICO_BUILD)
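        # Typical usage sketch (the target name is hypothetical; the pattern matches the real targets
        # further down, e.g. golangci-lint and mod-tidy):
        #   some-target:
        #   	$(DOCKER_RUN) $(CALICO_BUILD) sh -c '$(GIT_CONFIG_SSH) go test ./...'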
   311  
   312  # A target that does nothing but is always stale; used to force a rebuild on certain targets based on some non-file criteria.
   313  .PHONY: force-rebuild
   314  force-rebuild:
   315  
   316  ###############################################################################
   317  # Updating pins
   318  #   the repo importing this Makefile _must_ define the update-pins target
   319  #   for example:
   320  #     update-pins: update-libcalico-pin update-typha-pin
   321  ###############################################################################
   322  PIN_BRANCH?=$(shell git rev-parse --abbrev-ref HEAD)
   323  
   324  # The docker entrypoint script might echo output that could be included in the output of the following command, so this
   325  # prefixes the commit tag with "commit-tag:" so we can reliably get the commit tag from the output.
   326  define get_remote_version
   327  	$(shell $(DOCKER_RUN) $(CALICO_BUILD) sh -c '$(GIT_CONFIG_SSH) echo "commit-tag:$$(git ls-remote https://$(1) $(2) | cut -f1)"' | awk -F "commit-tag:" '{print $$2}')
   328  endef
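        # For illustration (repo and branch are examples): $(call get_remote_version,github.com/projectcalico/calico/felix,master)
        # expands, via git ls-remote in the build container, to the commit hash at the tip of that branch.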
   329  
   330  # update_pin updates the given package's version to the latest available in the specified repo and branch.
   331  # $(1) should be the name of the package, $(2) and $(3) the repository and branch from which to update it.
   332  # If $(4) is specified, it's treated as the module version and used in the go get -d command.
   333  define update_pin
   334  	$(eval new_ver := $(call get_remote_version,$(2),$(3)))
   335  	$(eval repo := $(if $(4),$(1)/$(4),$(1)))
   336  
   337  	$(DOCKER_RUN) -i $(CALICO_BUILD) sh -c '\
   338  		if [ ! -z "$(new_ver)" ]; then \
   339  			$(GIT_CONFIG_SSH) \
   340  			go get -d $(repo)@$(new_ver); \
   341  			go mod tidy; \
   342  		fi'
   343  endef
   344  
   345  # update_replace_pin updates the given package's version to the latest available in the specified repo and branch.
   346  # This routine can only be used for packages being replaced in go.mod, such as private versions of open-source packages.
   347  # $(1) should be the name of the package, $(2) and $(3) the repository and branch from which to update it. If $(4) is
   348  # specified, it's treated as the module version and used in the go mod edit -replace command.
   349  define update_replace_pin
   350  	$(eval new_ver := $(call get_remote_version,$(2),$(3)))
   351  	$(eval original_repo := $(if $(4),$(1)/$(4),$(1)))
   352  	$(eval replace_repo := $(if $(4),$(2)/$(4),$(2)))
   353  
   354  	$(DOCKER_RUN) -i $(CALICO_BUILD) sh -c '\
   355  		if [ ! -z "$(new_ver)" ]; then \
   356  			$(GIT_CONFIG_SSH) \
   357  			go mod edit -replace $(original_repo)=$(replace_repo)@$(new_ver); \
   358  			go mod tidy; \
   359  		fi'
   360  endef
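        # Sketch of the effect (the version placeholder is illustrative): the update-confd-pin target below
        # effectively runs
        #   go mod edit -replace github.com/kelseyhightower/confd=github.com/projectcalico/calico/confd@<new_ver>
        # leaving a replace directive in go.mod that points at the replacement repository.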
   361  
   362  GIT_REMOTE?=origin
   363  API_BRANCH?=$(PIN_BRANCH)
   364  API_REPO?=github.com/projectcalico/calico/api
   365  BASE_API_REPO?=github.com/projectcalico/calico/api
   366  APISERVER_BRANCH?=$(PIN_BRANCH)
   367  APISERVER_REPO?=github.com/projectcalico/calico/apiserver
   368  TYPHA_BRANCH?=$(PIN_BRANCH)
   369  TYPHA_REPO?=github.com/projectcalico/calico/typha
   370  LIBCALICO_BRANCH?=$(PIN_BRANCH)
   371  LIBCALICO_REPO?=github.com/projectcalico/calico/libcalico-go
   372  CONFD_BRANCH?=$(PIN_BRANCH)
   373  CONFD_REPO?=github.com/projectcalico/calico/confd
   374  FELIX_BRANCH?=$(PIN_BRANCH)
   375  FELIX_REPO?=github.com/projectcalico/calico/felix
   376  CNI_BRANCH?=$(PIN_BRANCH)
   377  CNI_REPO?=github.com/projectcalico/calico/cni-plugin
   378  
   379  update-api-pin:
   380  	$(call update_pin,$(API_REPO),$(API_REPO),$(API_BRANCH))
   381  
   382  replace-api-pin:
   383  	$(call update_replace_pin,$(BASE_API_REPO),$(API_REPO),$(API_BRANCH))
   384  
   385  update-apiserver-pin:
   386  	$(call update_pin,github.com/projectcalico/calico/apiserver,$(APISERVER_REPO),$(APISERVER_BRANCH))
   387  
   388  replace-apiserver-pin:
   389  	$(call update_replace_pin,github.com/projectcalico/calico/apiserver,$(APISERVER_REPO),$(APISERVER_BRANCH))
   390  
   391  update-typha-pin:
   392  	$(call update_pin,github.com/projectcalico/calico/typha,$(TYPHA_REPO),$(TYPHA_BRANCH))
   393  
   394  replace-typha-pin:
   395  	$(call update_replace_pin,github.com/projectcalico/calico/typha,$(TYPHA_REPO),$(TYPHA_BRANCH))
   396  
   397  update-libcalico-pin:
   398  	$(call update_pin,github.com/projectcalico/calico/libcalico-go,$(LIBCALICO_REPO),$(LIBCALICO_BRANCH))
   399  
   400  replace-libcalico-pin:
   401  	$(call update_replace_pin,github.com/projectcalico/calico/libcalico-go,$(LIBCALICO_REPO),$(LIBCALICO_BRANCH))
   402  
   403  update-confd-pin:
   404  	$(call update_replace_pin,github.com/kelseyhightower/confd,$(CONFD_REPO),$(CONFD_BRANCH))
   405  
   406  update-felix-pin:
   407  	$(call update_pin,github.com/projectcalico/calico/felix,$(FELIX_REPO),$(FELIX_BRANCH))
   408  
   409  replace-felix-pin:
   410  	$(call update_replace_pin,github.com/projectcalico/calico/felix,$(FELIX_REPO),$(FELIX_BRANCH))
   411  
   412  update-cni-plugin-pin:
   413  	$(call update_pin,github.com/projectcalico/calico/cni-plugin,$(CNI_REPO),$(CNI_BRANCH))
   414  
   415  replace-cni-pin:
   416  	$(call update_replace_pin,github.com/projectcalico/calico/cni-plugin,$(CNI_REPO),$(CNI_BRANCH))
   417  
   418  git-status:
   419  	git status --porcelain
   420  
   421  git-config:
   422  ifdef CONFIRM
   423  	git config --global user.name "marvin-tigera"
   424  	git config --global user.email "marvin@projectcalico.io"
   425  endif
   426  
   427  git-commit:
   428  	git diff --quiet HEAD || git commit -m "Semaphore Automatic Update" go.mod go.sum $(EXTRA_FILES_TO_COMMIT)
   429  
   430  ###############################################################################
   431  # External resource affecting macros
   432  # The following macros affect resources outside of the local environment that
   433  # they're run in, e.g. pushing to docker or github. If CONFIRM is not defined,
   434  # then the commands are just printed, instead of run.
   435  #
   436  # The <command>-cmd macro should never be run directly, it's used to define
   437  # the command the macro runs but depending on whether CONFIRM is defined the
   438  # command may be printed or run.
   439  #
   440  # You can redefine <command>-cmd to have the targets in this makefile use a
   441  # different implementation.
   442  ###############################################################################
   443  
   444  ifdef LOCAL_CRANE
   445  CRANE_CMD         = bash -c $(double_quote)crane
   446  else
   447  CRANE_CMD         = docker run -t --entrypoint /bin/sh -v $(DOCKER_CONFIG):/root/.docker/config.json $(CALICO_BUILD) -c \
   448                      $(double_quote)crane
   449  endif
   450  
   451  GIT_CMD           = git
   452  DOCKER_CMD        = docker
   453  
   454  ifdef CONFIRM
   455  CRANE         = $(CRANE_CMD)
   456  GIT           = $(GIT_CMD)
   457  DOCKER        = $(DOCKER_CMD)
   458  else
   459  CRANE         = echo [DRY RUN] $(CRANE_CMD)
   460  GIT           = echo [DRY RUN] $(GIT_CMD)
   461  DOCKER        = echo [DRY RUN] $(DOCKER_CMD)
   462  endif
   463  
   464  commit-and-push-pr:
   465  	$(GIT) add $(GIT_COMMIT_FILES)
   466  	$(GIT) commit -m $(GIT_COMMIT_MESSAGE)
   467  	$(GIT) push $(GIT_REMOTE) $(GIT_PR_BRANCH_HEAD)
   468  
   469  ###############################################################################
   470  # GitHub API helpers
   471  #   Helper macros and targets to help with communicating with the github API
   472  ###############################################################################
   473  GIT_COMMIT_MESSAGE?="Automatic Pin Updates"
   474  GIT_PR_BRANCH_BASE?=$(SEMAPHORE_GIT_BRANCH)
   475  PIN_UPDATE_BRANCH?=semaphore-auto-pin-updates-$(GIT_PR_BRANCH_BASE)
   476  GIT_PR_BRANCH_HEAD?=$(PIN_UPDATE_BRANCH)
   477  GIT_REPO_SLUG?=$(SEMAPHORE_GIT_REPO_SLUG)
   478  GIT_PIN_UPDATE_COMMIT_FILES?=go.mod go.sum
   479  GIT_PIN_UPDATE_COMMIT_EXTRA_FILES?=$(GIT_COMMIT_EXTRA_FILES)
   480  GIT_COMMIT_FILES?=$(GIT_PIN_UPDATE_COMMIT_FILES) $(GIT_PIN_UPDATE_COMMIT_EXTRA_FILES)
   481  
   482  # Call the github API. $(1) is the http method type for the https request, $(2) is the repo slug, and $(3) is for json
   483  # data (if omitted then no data is set for the request). If GITHUB_API_EXIT_ON_FAILURE is set then the macro exits with 1
   484  # on failure. On success, the ENV variable GITHUB_API_RESPONSE will contain the response from github
   485  define github_call_api
   486  	$(eval CMD := curl -f -X$(1) \
   487  		-H "Content-Type: application/json"\
   488  		-H "Authorization: token ${GITHUB_TOKEN}"\
   489  		https://api.github.com/repos/$(2) $(if $(3),--data '$(3)',))
   490  	$(eval GITHUB_API_RESPONSE := $(shell $(CMD) | sed -e 's/#/\\\#/g'))
   491  	$(if $(GITHUB_API_EXIT_ON_FAILURE), $(if $(GITHUB_API_RESPONSE),,exit 1),)
   492  endef
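        # Hypothetical invocation (repo slug and body are examples only):
        #   $(call github_call_api,POST,my-org/my-repo/issues/1/comments,{"body":"hello"})
        # issues the request and stores the response in GITHUB_API_RESPONSE.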
   493  
   494  # Create the pull request. $(1) is the repo slug, $(2) is the title, $(3) is the head branch and $(4) is the base branch.
   495  # If the call was successful then the ENV variable PR_NUMBER will contain the pull request number of the created pull request.
   496  define github_pr_create
   497  	$(eval JSON := {"title": "$(2)", "head": "$(3)", "base": "$(4)"})
   498  	$(call github_call_api,POST,$(1)/pulls,$(JSON))
   499  	$(eval PR_NUMBER := $(filter-out null,$(shell echo '$(GITHUB_API_RESPONSE)' | jq '.number')))
   500  endef
   501  
   502  # Create a comment on a pull request. $(1) is the repo slug, $(2) is the pull request number, and $(3) is the comment
   503  # body.
   504  define github_pr_add_comment
   505  	$(eval JSON := {"body":"$(3)"})
   506  	$(call github_call_api,POST,$(1)/issues/$(2)/comments,$(JSON))
   507  endef
   508  
   509  # List open pull requests for a head and base. $(1) is the repo slug, $(2) is the branch head, $(3) is the branch base,
   510  # and $(4) is the state.
   511  define github_pr_list
   512  	$(eval QUERY := $(if $(2),head=$(2),)$(if $(3),\&base=$(3))$(if $(4),\&state=$(4),))
   513  	$(call github_call_api,GET,$(1)/pulls?$(QUERY),)
   514  endef
   515  
   516  # Check if there is a pull request with head GIT_PR_BRANCH_HEAD and base GIT_PR_BRANCH_BASE for the repo with slug
   517  # GIT_REPO_SLUG. If such a PR exists, PR_EXISTS will be set to 0; otherwise it is set to 1.
   518  check-if-pin-update-pr-exists:
   519  ifndef ORGANIZATION
   520  	@echo "ORGANIZATION must be set for the project."
   521  	exit 1
   522  endif
   523  	$(call github_pr_list,$(GIT_REPO_SLUG),$(ORGANIZATION):$(GIT_PR_BRANCH_HEAD),$(GIT_PR_BRANCH_BASE),open)
   524  	$(eval PR_EXISTS := $(if $(filter-out 0,$(shell echo '$(GITHUB_API_RESPONSE)' | jq '. | length')),0,1))
   525  
   526  ###############################################################################
   527  # Auto pin update targets
   528  #   Targets updating the pins
   529  ###############################################################################
   530  GITHUB_API_EXIT_ON_FAILURE?=1
   531  
   532  ## Update dependency pins to their latest changeset, committing and pushing it.
   533  ## DEPRECATED This will be removed along with associated helper functions in future releases. Use the trigger-auto-pin-update-process
   534  ## target to create a PR with the pin updates.
   535  .PHONY: commit-pin-updates
   536  commit-pin-updates: update-pins git-status git-config git-commit ci git-push
   537  
   538  # Creates and checks out the branch defined by GIT_PR_BRANCH_HEAD. It attempts to delete the branch from the local and
   539  # remote repositories. Requires CONFIRM or DRYRUN to be set, otherwise it fails with an error.
   540  create-pin-update-head: var-require-one-of-CONFIRM-DRYRUN
   541  ifeq ($(shell git rev-parse --abbrev-ref HEAD),$(GIT_PR_BRANCH_HEAD))
   542  	@echo "Current branch is pull request head, cannot set it up."
   543  	exit 1
   544  endif
   545  	-git branch -D $(GIT_PR_BRANCH_HEAD)
   546  	-$(GIT) push $(GIT_REMOTE) --delete $(GIT_PR_BRANCH_HEAD)
   547  	git checkout -b $(GIT_PR_BRANCH_HEAD)
   548  
   549  create-pin-update-pr:
   550  	$(call github_pr_create,$(GIT_REPO_SLUG),[$(GIT_PR_BRANCH_BASE)] Semaphore Auto Pin Update,$(GIT_PR_BRANCH_HEAD),$(GIT_PR_BRANCH_BASE))
   551  	echo 'Created pin update pull request $(PR_NUMBER)'
   552  
   553  # Add the "/merge-when-ready" comment to enable the "merge when ready" functionality, i.e. merge the pull request once it
   554  # is passing the tests and has been approved. The PR_NUMBER is set by the dependent target.
   555  set-merge-when-ready-on-pin-update-pr:
   556  	$(call github_pr_add_comment,$(GIT_REPO_SLUG),$(PR_NUMBER),/merge-when-ready delete-branch)
   557  	echo "Added '/merge-when-ready' comment command to pull request $(PR_NUMBER)"
   558  
   559  # Call the update-pins target with the GIT_PR_BRANCH_BASE as the PIN_BRANCH
   560  trigger-pin-updates:
   561  	PIN_BRANCH=$(GIT_PR_BRANCH_BASE) $(MAKE) update-pins
   562  
   563  # POST_PIN_UPDATE_TARGETS is used to specify targets that should be run after the pins have been updated, e.g. targets
   564  # that modify files that are tied to the dependencies. An example would be generated files that change based on
   565  # a dependency update. This would likely need to be used in tandem with GIT_PIN_UPDATE_COMMIT_EXTRA_FILES so the
   566  # updated files are committed with the pin update.
   567  POST_PIN_UPDATE_TARGETS ?=
   568  
   569  # Trigger the auto pin update process. This involves updating the pins, committing and pushing them to github, creating
   570  # a pull request, and adding the "/merge-when-ready" comment command. If there is already a pin update PR for the base
   571  # branch, the pin update is not done and the target exits.
   572  trigger-auto-pin-update-process: check-if-pin-update-pr-exists
   573  	$(if $(filter $(PR_EXISTS),0),echo "A pull request for head '$(GIT_PR_BRANCH_HEAD)' and base '$(GIT_PR_BRANCH_BASE)' already exists.",\
   574  		$(MAKE) trigger-auto-pin-update-process-wrapped)
   575  
   576  trigger-auto-pin-update-process-wrapped: create-pin-update-head trigger-pin-updates $(POST_PIN_UPDATE_TARGETS)
   577  	$(if $(shell git diff --quiet HEAD $(GIT_COMMIT_FILES) || echo "true"),\
   578  		$(MAKE) commit-and-push-pr create-pin-update-pr set-merge-when-ready-on-pin-update-pr,echo "Pins are up to date")
   579  
   580  ###############################################################################
   581  # Static checks
   582  #   repos can specify additional checks by setting LOCAL_CHECKS
   583  ###############################################################################
   584  .PHONY: static-checks
   585  ## Run static source code checks (lint, formatting, ...)
   586  static-checks: $(LOCAL_CHECKS)
   587  	$(MAKE) check-fmt golangci-lint
   588  
   589  LINT_ARGS ?= --max-issues-per-linter 0 --max-same-issues 0 --timeout 8m
   590  
   591  .PHONY: golangci-lint
   592  golangci-lint: $(GENERATED_FILES)
   593  	$(DOCKER_RUN) $(CALICO_BUILD) sh -c '$(GIT_CONFIG_SSH) golangci-lint run $(LINT_ARGS)'
   594  
   595  .PHONY: go-fmt goimports fix
   596  fix go-fmt goimports:
   597  	$(DOCKER_RUN) $(CALICO_BUILD) sh -c 'find . -iname "*.go" ! -wholename "./vendor/*" | xargs goimports -w -local github.com/projectcalico/calico/'
   598  
   599  check-fmt:
   600  	@echo "Checking code formatting.  Any listed files don't match goimports:"
   601  	$(DOCKER_RUN) $(CALICO_BUILD) bash -c 'exec 5>&1; ! [[ `find . -iname "*.go" ! -wholename "./vendor/*" | xargs goimports -l -local github.com/projectcalico/calico/ | tee >(cat >&5)` ]]'
   602  
   603  .PHONY: pre-commit
   604  pre-commit:
   605  	$(DOCKER_RUN) $(CALICO_BUILD) git-hooks/pre-commit-in-container
   606  
   607  .PHONY: install-git-hooks
   608  install-git-hooks:
   609  	./install-git-hooks
   610  
   611  .PHONY: check-module-path-tigera-api
   612  check-module-path-tigera-api:
   613  	@echo "Checking the repo importing tigera/api and not importing projectcalico/api"
   614  	@IMPORT_TIGERA_API=$$($(DOCKER_GO_BUILD) sh -c 'go list -m github.com/tigera/api > /dev/null 2>&1 && echo yes || echo no'); \
   615  	echo Is tigera/api imported? $$IMPORT_TIGERA_API; \
   616  	if [ "$$IMPORT_TIGERA_API" != "yes" ]; then \
   617  	     echo "Error: This repo should import tigera/api module."; \
   618  	     false; \
   619  	fi
   620  	@IMPORT_PROJECTCALICO_API=$$($(DOCKER_GO_BUILD) sh -c 'go list -m github.com/projectcalico/calico/api > /dev/null 2>&1 && echo yes || echo no'); \
   621  	echo Is projectcalico/api imported? $$IMPORT_PROJECTCALICO_API; \
   622  	if [ "$$IMPORT_PROJECTCALICO_API" != "no" ]; then \
   623  	     echo "Error: This repo should NOT import projectcalico/api module."; \
   624  	     false; \
   625  	fi
   626  
   627  .PHONY: check-module-path-projectcalico-api
   628  check-module-path-projectcalico-api:
   629  	@echo "Checking the repo importing projectcalico/api and not importing tigera/api"
   630  	@IMPORT_PROJECTCALICO_API=$$($(DOCKER_GO_BUILD) sh -c 'go list -m github.com/projectcalico/calico/api > /dev/null 2>&1 && echo yes || echo no'); \
   631  	echo Is projectcalico/api imported? $$IMPORT_PROJECTCALICO_API; \
   632  	if [ "$$IMPORT_PROJECTCALICO_API" != "yes" ]; then \
   633  	     echo "Error: This repo should import projectcalico/api module."; \
   634  	     false; \
   635  	fi
   636  	@IMPORT_TIGERA_API=$$($(DOCKER_GO_BUILD) sh -c 'go list -m github.com/tigera/api > /dev/null 2>&1 && echo yes || echo no'); \
   637  	echo Is tigera/api imported? $$IMPORT_TIGERA_API; \
   638  	if [ "$$IMPORT_TIGERA_API" != "no" ]; then \
   639  	     echo "Error: This repo should NOT import tigera/api module."; \
   640  	     false; \
   641  	fi
   642  
   643  ###############################################################################
   644  # go mod helpers
   645  ###############################################################################
   646  mod-download:
   647  	-$(DOCKER_RUN) $(CALICO_BUILD) sh -c '$(GIT_CONFIG_SSH) go mod download'
   648  
   649  mod-tidy:
   650  	-$(DOCKER_RUN) $(CALICO_BUILD) sh -c '$(GIT_CONFIG_SSH) go mod tidy'
   651  
   652  ###############################################################################
   653  # Semaphore helpers
   654  ###############################################################################
   655  
   656  # These semaphore project IDs are defined here because you cannot easily look them up in the semaphore API. This gives
   657  # us a single place to define these values, then projects can reference the readable ENV variable when they need a semaphore
   658  # project ID.
   659  SEMAPHORE_API_PROJECT_ID=9625623e-bfc5-435f-9c22-74f9cd8622fc
   660  SEMAPHORE_API_TIGERA_PROJECT_ID=48d23719-405f-4827-b58a-7de0598a6bf5
   661  SEMAPHORE_ANOMALY_DETECTION_JOBS_PROJECT_ID=e506a098-3e89-4802-8165-c59b2a95f8ae
   662  SEMAPHORE_API_SERVER_PROJECT_ID=6e4eb5b2-0150-4624-968d-f96a1cd9c37d
   663  SEMAPHORE_API_SERVER_OSS_PROJECT_ID=10f6c7c1-7eaa-4e75-a9d1-83e5426158b1
   664  SEMAPHORE_APP_POLICY_PRIVATE_PROJECT_ID=fa098f05-b2d2-4cf6-ac83-aa1e38e95670
   665  SEMAPHORE_APP_POLICY_PROJECT_ID=bc654d5c-bb68-4b00-9d02-289291762b1d
   666  SEMAPHORE_BIRD_PROJECT_ID=c1cc5eaf-873b-4113-a85e-a555361413e6
   667  SEMAPHORE_CC_PORTAL=2b3f9721-a851-4a97-981f-0cb81f93ddd0
   668  SEMAPHORE_CALICO_PRIVATE_PROJECT_ID=8a309869-f767-49dc-924f-fa927edbf657
   669  SEMAPHORE_CALICO_PROJECT_ID=828e6de6-ed4b-49c7-9cb5-ac1246d454de
   670  SEMAPHORE_CALICO_USAGE_PROJECT_ID=29f53c2b-8266-4873-879d-19b65960b3fd
   671  SEMAPHORE_CALICOCTL_PRIVATE_PROJECT_ID=8d885379-6a1b-4fc8-aa45-dc0cfb87894a
   672  SEMAPHORE_CALICOCTL_PROJECT_ID=193ce75a-7a47-4c9f-b966-f25c83e62213
   673  SEMAPHORE_CALICOQ_PROJECT_ID=dc79e0e9-a7b3-40f5-8dc2-2818210ee0a9
   674  SEMAPHORE_CLOUD_CONTROLLERS_PRIVATE_PROJECT_ID=f70e6c08-887b-481d-9591-68e243b32b32
   675  SEMAPHORE_CNI_PLUGIN_PRIVATE_PROJECT_ID=f2c02a84-5fcd-49ed-b4cb-a6273409f0de
   676  SEMAPHORE_CNI_PLUGIN_PROJECT_ID=741ec781-5dbb-4494-ba90-ec6831a9b176
   677  SEMAPHORE_COMPLIANCE_PROJECT_ID=958a9147-ec94-4e99-b4c8-de7857653bb9
   678  SEMAPHORE_CONFD_PROJECT_ID=4c6b815f-d42c-4436-aafa-651fbaf5859e
   679  SEMAPHORE_CONFD_PRIVATE_PROJECT_ID=d3a7649a-3a39-45bf-95e9-fd6df3d0a7b1
   680  SEMAPHORE_CURATOR_PROJECT_ID=c391dcff-6933-40e7-a6d1-1dcf7e6e231d
   681  SEMAPHORE_DEEP_PACKET_INSPECTION_PROJECT_ID=81c0981e-979c-4741-8143-22166384afa1
   682  SEMAPHORE_DEXIDP_DOCKER_PROJECT_ID=ee618372-35c8-4f83-bd05-d3a96ac2b276
   683  SEMAPHORE_EGRESS_GATEWAY_PROJECT_ID=f01056ec-75f9-46a0-9ae2-6fc5e391136c
   684  SEMAPHORE_ELASTICSEARCH_DOCKER_PROJECT_ID=0a3a5bf6-19e4-4210-a3fa-15fc857596ac
   685  SEMAPHORE_ELASTICSEARCH_METRICS_PROJECT_ID=306b29c0-aa86-4b76-9c3e-c78a327e7d83
   686  SEMAPHORE_ENVOY_DOCKER_PROJECT_ID=b8db000b-c2c4-44cd-a22d-51df73dfdcba
   687  SEMAPHORE_ES_PROXY_IMAGE_PROJECT_ID=bc7ee48d-0051-4ceb-961d-03659463ada4
   688  SEMAPHORE_ES_GATEWAY_PROJECT_ID=3c01c819-532b-4ccc-8305-5dd45c10bf93
   689  SEMAPHORE_FELIX_PRIVATE_PROJECT_ID=e439cca4-156c-4d23-b611-002601440ad0
   690  SEMAPHORE_FELIX_PROJECT_ID=48267e65-4acc-4f27-a88f-c3df0e8e2c3b
   691  SEMAPHORE_FIREWALL_INTEGRATION_PROJECT_ID=d4307a31-1e46-4622-82e2-886165b77008
   692  SEMAPHORE_FLUENTD_DOCKER_PROJECT_ID=50383fb9-d234-461a-ae00-23e18b7cd5b8
   693  SEMAPHORE_HONEYPOD_CONTROLLER_PROJECT_ID=c010a63a-ac85-48b4-9077-06188408eaee
   694  SEMAPHORE_HONEYPOD_RECOMMENDATION_PROJECT_ID=f07f5fd4-b15a-4ded-ae1e-04801ae4d99a
   695  SEMAPHORE_INGRESS_COLLECTOR_PROJECT_ID=cf7947e4-a886-404d-ac6a-c3f3ac1a7b93
   696  SEMAPHORE_INTRUSION_DETECTION_PROJECT_ID=2beffe81-b05a-41e0-90ce-e0d847dee2ee
   697  SEMAPHORE_KEY_CERT_PROVISIONER_PROJECT_ID=9efb25f3-8c5d-4f22-aab5-4a1f5519bc7c
   698  SEMAPHORE_KUBE_CONTROLLERS_PRIVATE_PROJECT_ID=0b8651d0-6c5d-4076-ab1d-25b120d0f670
   699  SEMAPHORE_KUBE_CONTROLLERS_PROJECT_ID=d688e2ce-8c4a-4402-ba54-3aaa0eb53e5e
   700  SEMAPHORE_KUBECTL_CALICO_PROJECT_ID=37d7cb2b-62b0-4178-9424-de766f2de59b
   701  SEMAPHORE_KIBANA_DOCKER_PROJECT_ID=eaafdbad-4546-4582-b8fa-cea05a80a04d
   702  SEMAPHORE_LIBCALICO_GO_PRIVATE_PROJECT_ID=72fa12b5-5ad5-43ae-b0ac-17f9f7c71030
   703  SEMAPHORE_LIBCALICO_GO_PROJECT_ID=ce3e6bed-1fb6-4501-80e5-2121a266a386
   704  SEMAPHORE_LICENSE_AGENT_PROJECT_ID=beb13609-8ee0-461a-a08b-dab86af1c128
   705  SEMAPHORE_LICENSING_PROJECT_ID=344f1cf0-0c3f-4fa3-b89b-3c35127b3054
   706  SEMAPHORE_L7_COLLECTOR_PROJECT_ID=b02e7bbf-39ee-4c0c-a6f6-793cdf89daa7
   707  SEMAPHORE_LMA_PROJECT_ID=5130e1d3-d9cd-4270-9e62-57f98d34495e
   708  SEMAPHORE_MANAGER_PROJECT_ID=325ca49d-5111-4b07-a54f-dc0c7ec538bb
   709  SEMAPHORE_NETWORKING_CALICO_PROJECT_ID=0a7883cb-b727-4113-948d-b95cb00df6b6
   710  SEMAPHORE_NODE_PRIVATE_PROJECT_ID=edd8246c-7116-473a-81c8-7a3bbbc07228
   711  SEMAPHORE_NODE_PROJECT_ID=980a06a4-9d43-43f8-aedd-a3bfad258de6
   712  SEMAPHORE_OPERATOR_PROJECT_ID=8343e619-cc44-4be4-a9d7-21963ebc1c8f
   713  SEMAPHORE_PACKETCAPTURE_API_PROJECT_ID=f505b00c-57c3-4859-8b97-ff4095b5ab25
   714  SEMAPHORE_PERFORMANCE_HOTSPOTS_PROJECT_ID=6a343a02-0acf-4c52-9cc7-24ee51377e32
   715  SEMAPHORE_POD2DAEMON_PROJECT_ID=eb2eea4f-c185-408e-9837-da0d231428fb
   716  SEMAPHORE_PROMETHEUS_SERVICE_PROJECT_ID=d5b7ed99-8966-46cc-90f2-9027c428db48
   717  SEMAPHORE_SKIMBLE_PROJECT_ID=35171baf-8daf-4725-882f-c301851a6e1d
   718  SEMAPHORE_TS_QUERYSERVER_PROJECT_ID=5dbe4688-0c21-40fb-89f7-a2d64c17401b
   719  SEMAPHORE_TYPHA_PROJECT_ID=c2ea3f0a-58a0-427a-9ed5-6eff8d6543b3
   720  SEMAPHORE_TYPHA_PRIVATE_PROJECT_ID=51e84cb9-0f38-408a-a113-0f5ca71844d7
   721  SEMAPHORE_VOLTRON_PROJECT_ID=9d239362-9594-4c84-8983-868ee19ebd41
   722  
   723  SEMAPHORE_WORKFLOW_BRANCH?=master
   724  
   725  # Sends a request to the semaphore API to run the requested workflow. It requires setting the SEMAPHORE_API_TOKEN, SEMAPHORE_PROJECT_ID,
   726  # SEMAPHORE_WORKFLOW_BRANCH, and SEMAPHORE_WORKFLOW_FILE ENV variables.
   727  semaphore-run-workflow:
   728  	$(eval CMD := curl -f -X POST \
   729  		-H "Authorization: Token $(SEMAPHORE_API_TOKEN)" \
   730  		-d "project_id=$(SEMAPHORE_PROJECT_ID)&reference=$(SEMAPHORE_WORKFLOW_BRANCH)&commit_sha=$(SEMAPHORE_COMMIT_SHA)&pipeline_file=.semaphore/$(SEMAPHORE_WORKFLOW_FILE)" \
   731  		"https://tigera.semaphoreci.com/api/v1alpha/plumber-workflows")
   732  	$(eval SEMAPHORE_API_RESPONSE := $(shell $(CMD) | jq -R '.' | sed -e 's/#/\\\#/g'))
   733  	$(if $(SEMAPHORE_API_RESPONSE),,exit 1)
   734  	$(eval WORKFLOW_ID := $(shell echo $(SEMAPHORE_API_RESPONSE) | jq -r '.workflow_id'))
   735  	@echo Semaphore workflow successfully created here https://tigera.semaphoreci.com/workflows/$(WORKFLOW_ID)
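        # Hypothetical invocation (project ID and workflow file are examples only; SEMAPHORE_API_TOKEN must also be set):
        #   SEMAPHORE_PROJECT_ID=$(SEMAPHORE_CALICO_PROJECT_ID) SEMAPHORE_WORKFLOW_BRANCH=master \
        #   SEMAPHORE_WORKFLOW_FILE=update_pins.yml make semaphore-run-workflow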
   736  
   737  # This is a helpful wrapper of the semaphore-run-workflow target to run the update_pins workflow file for a project.
   738  semaphore-run-auto-pin-update-workflow:
   739  	SEMAPHORE_WORKFLOW_FILE=update_pins.yml $(MAKE) semaphore-run-workflow
   740  	@echo Successfully triggered the semaphore pin update workflow
   741  
   742  # This target triggers the 'semaphore-run-auto-pin-update-workflow' target for every SEMAPHORE_PROJECT_ID in the list of
   743  # SEMAPHORE_AUTO_PIN_UPDATE_PROJECT_IDS.
   744  semaphore-run-auto-pin-update-workflows:
   745  	for ID in $(SEMAPHORE_AUTO_PIN_UPDATE_PROJECT_IDS); do\
   746  		SEMAPHORE_WORKFLOW_BRANCH=$(SEMAPHORE_GIT_BRANCH) SEMAPHORE_PROJECT_ID=$$ID $(MAKE) semaphore-run-auto-pin-update-workflow; \
   747  	done
   748  
   749  ###############################################################################
   750  # Mock helpers
   751  ###############################################################################
   752  # Helper targets for testify mock generation
   753  
   754  # Generate testify mocks in the build container.
   755  gen-mocks:
   756  	$(DOCKER_RUN) $(CALICO_BUILD) sh -c '$(MAKE) mockery-run'
   757  
   758  # Run mockery for each path in MOCKERY_FILE_PATHS. The generated mocks are created in-package, alongside the
   759  # source files. See https://github.com/vektra/mockery for more information.
   760  mockery-run:
   761  	for FILE_PATH in $(MOCKERY_FILE_PATHS); do\
   762  		DIR=$$(dirname $$FILE_PATH); \
   763  		INTERFACE_NAME=$$(basename $$FILE_PATH); \
   764  		mockery --dir $$DIR --name $$INTERFACE_NAME --inpackage; \
   765  	done
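        # For illustration (the interface path is hypothetical): setting MOCKERY_FILE_PATHS=pkg/client/Client
        # runs mockery with --dir pkg/client --name Client --inpackage, generating an in-package mock.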
   766  
   767  ###############################################################################
   768  # Docker helpers
   769  ###############################################################################
   770  # Helper targets working with docker images.
   771  
   772  # docker-compress takes the docker image specified by IMAGE_NAME and compresses all the layers into a single one. This is
   773  # done by exporting the given image and then re-importing it with the given IMAGE_NAME.
   774  #
   775  # When a docker image is exported, all of the instructions are lost (i.e. ENTRYPOINT, ENV, ...), so before the image is
   776  # compressed the target inspects the image and pulls out the instructions. Each instruction that is pulled out is converted
   777  # into a change directive, or change directives, of the format "--change 'INSTRUCTION <instruction>'". These directives
   778  # are given to the docker import command so the instructions can be re-added to the compressed image.
   779  #
   780  # NOTE: This target does not attempt to copy every instruction from the original image to the compressed one. Any user of
   781  # this target should verify that all of the instructions they require are copied over to the compressed image.
   782  docker-compress:
   783  	$(eval JSONOBJ := "$(shell docker inspect $(IMAGE_NAME) | jq '.[0].Config' | jq -R '.' | sed -e 's/#/\\\#/g' ) ")
   784  #	Re add the entry point.
   785  	$(eval CHANGE := $(CHANGE)$(shell echo $(JSONOBJ) | jq -r \
   786  		"if has(\"Entrypoint\") and .Entrypoint != \"\" then \" --change 'ENTRYPOINT \(.Entrypoint)'\" else \"\" end"\
   787  	))
   788  #	Re add the command.
   789  	$(eval CHANGE := $(CHANGE)$(shell echo $(JSONOBJ) | jq -r \
   790  		"if has(\"Cmd\") and .Cmd != \"\" then \" --change 'CMD \(.Cmd)'\" else \"\" end"\
   791  	))
   792  #	Re add the working directory.
   793  	$(eval CHANGE := $(CHANGE)$(shell echo $(JSONOBJ) | jq -r \
   794  		"if has(\"WorkingDir\") and .WorkingDir != \"\" then \" --change 'WORKDIR \(.WorkingDir)'\" else \"\" end"\
   795  	))
   796  #	Re add the user.
   797  	$(eval CHANGE := $(CHANGE)$(shell echo $(JSONOBJ) | jq -r \
   798  		"if has(\"User\") and .User != \"\" then \" --change 'USER \(.User)'\" else \"\" end"\
   799  	))
   800  #	Re add the environment variables. .Env is an array of strings so add a "--change 'ENV <value>'" for each value in
   801  #	the array.
   802  	$(eval CHANGE := $(CHANGE)$(shell echo $(JSONOBJ) | jq -r \
   803  		"if has(\"Env\") and (.Env | length) > 0 then .Env | map(\" --change 'ENV \(.)'\") | join(\"\") else \"\" end"\
   804  	))
   805  #	Re add the labels. .Labels is a map of label names to label values, so add a "--change 'LABEL <key> <value>'" for
   806  #	each map entry.
   807  	$(eval CHANGE := $(CHANGE)$(shell echo $(JSONOBJ) | jq -r \
   808  		"if has(\"Labels\") and (.Labels | length) > 0 then .Labels | to_entries | map(\" --change 'LABEL \(.key) \(.value)'\") | join(\"\") else \"\" end"\
   809  	))
   810  #	Re add the exposed ports. .ExposedPorts is a map, but we're only interested in the keys of the map so for each key
   811  #	add "--change EXPOSE <key>".
   812  	$(eval CHANGE := $(CHANGE)$(shell echo $(JSONOBJ) | jq -r \
   813  		"if has(\"ExposedPorts\") and (.ExposedPorts | length) > 0 then .ExposedPorts | keys | map(\" --change 'EXPOSE \(.)'\") | join(\"\") else \"\" end"\
   814  	))
   815  	$(eval CONTAINER_ID := $(shell docker run -d -it --entrypoint /bin/true $(IMAGE_NAME) /bin/true))
   816  	docker export $(CONTAINER_ID) | docker import $(CHANGE) - $(IMAGE_NAME)
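        # Hypothetical invocation (the image name is an example only):
        #   make docker-compress IMAGE_NAME=calico/example:latest-amd64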
   817  
   818  ###############################################################################
   819  # Image building and pushing
   820  ###############################################################################
   821  
   822  ###############################################################################
   823  # We want to be able to run the same recipe on multiple targets keyed on the image name.
   824  # To do that, we would use the entire image name, e.g. calico/node:abcdefg, as the stem, or '%', in the target.
   825  # However, make does **not** allow the usage of invalid filename characters - like / and : - in a stem, and thus errors out.
   826  # To get around that, we "escape" those characters by converting all : to --- and all / to ___ , so that we can use them
   827  # in the target; we then unescape them back.
   828  escapefs = $(subst :,---,$(subst /,___,$(1)))
   829  unescapefs = $(subst ---,:,$(subst ___,/,$(1)))
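        # For example (the image name is illustrative), a name round-trips like this:
        #   $(call escapefs,calico/node:v3.8.0)        -> calico___node---v3.8.0
        #   $(call unescapefs,calico___node---v3.8.0)  -> calico/node:v3.8.0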
   830  
   831  # retag-build-images-with-registries retags the build / arch images specified by BUILD_IMAGES and VALIDARCHES with
   832  # the registries specified by DEV_REGISTRIES. The end tagged images are of the format
   833  # $(REGISTRY)/$(BUILD_IMAGES):<tag>-$(ARCH).
   834  retag-build-images-with-registries: $(addprefix retag-build-images-with-registry-,$(call escapefs,$(DEV_REGISTRIES)))
   835  
   836  # retag-build-images-with-registry-% retags the build / arch images specified by BUILD_IMAGES and VALIDARCHES with
   837  # the registry specified by $*.
   838  retag-build-images-with-registry-%:
   839  	$(MAKE) $(addprefix retag-build-image-with-registry-,$(call escapefs,$(BUILD_IMAGES))) REGISTRY=$(call unescapefs,$*)
   840  
   841  # retag-build-image-with-registry-% retags the build arch images specified by $* and VALIDARCHES with the
   842  # registry specified by REGISTRY.
   843  retag-build-image-with-registry-%: var-require-all-REGISTRY-BUILD_IMAGES
   844  	$(MAKE) -j12 $(addprefix retag-build-image-arch-with-registry-,$(VALIDARCHES)) BUILD_IMAGE=$(call unescapefs,$*)
   845  
   846  # retag-build-image-arch-with-registry-% retags the build / arch image specified by $* and BUILD_IMAGE with the
   847  # registry specified by REGISTRY.
   848  retag-build-image-arch-with-registry-%: var-require-all-REGISTRY-BUILD_IMAGE-IMAGETAG
   849  	docker tag $(BUILD_IMAGE):$(LATEST_IMAGE_TAG)-$* $(call filter-registry,$(REGISTRY))$(BUILD_IMAGE):$(IMAGETAG)-$*
   850  	$(if $(filter $*,amd64),\
   851  		docker tag $(BUILD_IMAGE):$(LATEST_IMAGE_TAG)-$(ARCH) $(REGISTRY)/$(BUILD_IMAGE):$(IMAGETAG),\
   852  		$(NOECHO) $(NOOP)\
   853  	)
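        # As an illustration (registry, image, and tag values are examples only), running this rule with
        # REGISTRY=quay.io BUILD_IMAGE=calico/example IMAGETAG=mybranch for the amd64 arch produces
        # quay.io/calico/example:mybranch-amd64 and quay.io/calico/example:mybranch.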
   854  
   855  # push-images-to-registries pushes the build / arch images specified by BUILD_IMAGES and VALIDARCHES to the registries
   856  # specified by DEV_REGISTRIES.
   857  push-images-to-registries: $(addprefix push-images-to-registry-,$(call escapefs,$(DEV_REGISTRIES)))
   858  
   859  # push-images-to-registry-% pushes the build / arch images specified by BUILD_IMAGES and VALIDARCHES to the registry
   860  # specified by $*.
   861  push-images-to-registry-%:
   862  	$(MAKE) $(addprefix push-image-to-registry-,$(call escapefs,$(BUILD_IMAGES))) REGISTRY=$(call unescapefs,$*)
   863  
   864  # push-image-to-registry-% pushes the build / arch images specified by $* and VALIDARCHES to the registry
   865  # specified by REGISTRY.
   866  push-image-to-registry-%:
   867  	$(MAKE) -j6 $(addprefix push-image-arch-to-registry-,$(VALIDARCHES)) BUILD_IMAGE=$(call unescapefs,$*)
   868  
   869  # push-image-arch-to-registry-% pushes the build / arch image specified by $* and BUILD_IMAGE to the registry
   870  # specified by REGISTRY.
   871  push-image-arch-to-registry-%:
   872  # If the registry we want to push to doesn't support manifests, don't push the ARCH image.
   873  	$(DOCKER) push --quiet $(call filter-registry,$(REGISTRY))$(BUILD_IMAGE):$(IMAGETAG)-$*
   874  	$(if $(filter $*,amd64),\
   875  		$(DOCKER) push $(REGISTRY)/$(BUILD_IMAGE):$(IMAGETAG),\
   876  		$(NOECHO) $(NOOP)\
   877  	)
   878  
   879  # push multi-arch manifest where supported.
   880  push-manifests: var-require-all-IMAGETAG  $(addprefix sub-manifest-,$(call escapefs,$(PUSH_MANIFEST_IMAGES)))
   881  sub-manifest-%:
   882  	$(DOCKER) manifest create $(call unescapefs,$*):$(IMAGETAG) $(addprefix --amend ,$(addprefix $(call unescapefs,$*):$(IMAGETAG)-,$(VALIDARCHES)))
   883  	$(DOCKER) manifest push --purge $(call unescapefs,$*):$(IMAGETAG)
   884  
   885  # cd-common tags and pushes images with the branch name and git version. This target uses PUSH_IMAGES, BUILD_IMAGE,
   886  # and BRANCH_NAME env variables to figure out what to tag and where to push it to.
   887  cd-common: var-require-one-of-CONFIRM-DRYRUN var-require-all-BRANCH_NAME
   888  	$(MAKE) retag-build-images-with-registries push-images-to-registries push-manifests IMAGETAG=$(if $(IMAGETAG_PREFIX),$(IMAGETAG_PREFIX)-)$(BRANCH_NAME) EXCLUDEARCH="$(EXCLUDEARCH)"
   889  	$(MAKE) retag-build-images-with-registries push-images-to-registries push-manifests IMAGETAG=$(if $(IMAGETAG_PREFIX),$(IMAGETAG_PREFIX)-)$(shell git describe --tags --dirty --long --always --abbrev=12) EXCLUDEARCH="$(EXCLUDEARCH)"
   890  
   891  ###############################################################################
   892  # Release targets and helpers
   893  #
   894  # The following targets and macros are used to help start and cut releases.
   895  # At high level, this involves:
   896  # - Creating release branches
   897  # - Adding empty commits to start next release, and updating the 'dev' tag
   898  # - Adding 'release' tag to the commit that will be released
   899  # - Creating an empty commit for the next potential patch release, and updating
   900  #   the dev tag on that commit
   901  # - Copying images for the released commit over to the release registries, and
   902  #   re tagging those images with the release tag
   903  #
   904  # The following definitions will be helpful in understanding this process:
   905  # - 'dev' tag: A git tag of the form of `v3.8.0-calient-0.dev-36-g3a618e61c2d3`
   906  #   that every commit has. The start of the dev tag, i.e. v3.8.0, is the
   907  #   release that this commit will go into.
   908  # - 'release' tag: A git tag of the form of `v3.8.0`. The commit that a release
   909  #   is cut from will have this tag, i.e. you can find the commit that release
   910  #   3.8 uses by finding the commit with the tag v3.8.0.
   911  # - 'dev' image: The image that is created for every commit that is merged to
   912  #   master or a release branch. This image is tagged with the dev tag, i.e.
   913  #   if commit 3a618e61c2d3 is on master or a release branch, there will be
   914  #   an image for that commit in the dev registry with the tag
   915  #   `v3.8.0-calient-0.dev-36-g3a618e61c2d3`.
   916  # - 'release' image: The public image the customers will use to install our
   917  #   product. Producing this is the goal of cutting the release. This image
   918  #   will be in the release registries, and will be tagged with the release tag,
   919  #   i.e. the release image for release 3.8 will have the v3.8.0 tag, or if it's
   920  #   a patch release it will be v3.8.<patch version>
   921  ###############################################################################
   922  fetch-all:
   923  	git fetch --all -q
   924  
   925  # git-dev-tag retrieves the dev tag for the current commit (the one the dev images are tagged with).
   926  git-dev-tag = $(shell git describe --tags --long --always --abbrev=12 --match "*dev*")
   927  # git-release-tag-from-dev-tag gets the release version from the current commit's dev tag.
   928  git-release-tag-from-dev-tag = $(shell echo $(call git-dev-tag) | grep -P -o "^v\d*.\d*.\d*")
   929  # git-release-tag-for-current-commit gets the release tag for the current commit if there is one.
   930  git-release-tag-for-current-commit = $(shell git describe --tags --exact-match --exclude "*dev*")
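        # For illustration (tag value borrowed from the dev tag example above): a dev tag such as
        # v3.8.0-calient-0.dev-36-g3a618e61c2d3 yields v3.8.0 from git-release-tag-from-dev-tag.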
   931  
   932  # release-branch-for-tag finds the latest branch that corresponds to the given tag.
   933  release-branch-for-tag = $(firstword $(shell git --no-pager branch --format='%(refname:short)' --contains $1 | grep -P "^release"))
   934  # commit-for-tag finds the latest commit that corresponds to the given tag.
   935  commit-for-tag = $(shell git rev-list -n 1 $1)
   936  git-commit-for-remote-tag = $(shell git ls-remote -q --tags $(GIT_REMOTE) $1 | awk '{print $$1}')
   937  # current-branch gets the name of the branch for the current commit.
   938  current-branch = $(shell git rev-parse --abbrev-ref HEAD)
   939  
   940  # RELEASE_BRANCH_BASE is used when creating a release branch to confirm the correct base is being used. It's
   941  # configurable so that a dry run can be done from a PR branch.
   942  RELEASE_BRANCH_BASE ?=master
   943  
   944  # var-set-% checks if there is a non-empty variable for the value described by %. If FAIL_NOT_SET is set, then var-set-%
   945  # fails with an error message. If FAIL_NOT_SET is not set, then var-set-% appends a 1 to VARSET if the variable isn't
   946  # set.
   947  var-set-%:
   948  	$(if $($*),$(eval VARSET+=1),$(if $(FAIL_NOT_SET),$(error $* is required but not set),))
   949  
   950  # var-require is used to check if one or all of the variables are set in REQUIRED_VARS, and fails if not. The variables
   951  # in REQUIRED_VARS are hyphen separated.
   952  #
   953  # If FAIL_NOT_SET is set, then all variables described in REQUIRED_VARS must be set for var-require to not fail,
   954  # otherwise only one variable needs to be set for var-require to not fail.
   955  var-require: $(addprefix var-set-,$(subst -, ,$(REQUIRED_VARS)))
   956  	$(if $(VARSET),,$(error one of $(subst -, ,$(REQUIRED_VARS)) is not set or empty, but at least one is required))
   957  
   958  # var-require-all-% checks if there are non-empty variables set for the hyphen separated values in %, and fails if
   959  # there isn't a non-empty variable for each given value. For instance, to require that both FOO and BAR are set you would
   960  # call var-require-all-FOO-BAR.
   961  var-require-all-%:
   962  	$(MAKE) var-require REQUIRED_VARS=$* FAIL_NOT_SET=true
   963  
   964  # var-require-one-of-% checks if there are non-empty variables set for the hyphen separated values in %, and fails if
   965  # there isn't a non-empty variable for at least one of the given values. For instance, to require that either FOO or BAR
   966  # is set you would call var-require-one-of-FOO-BAR.
   967  var-require-one-of-%:
   968  	$(MAKE) var-require REQUIRED_VARS=$*
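        # Usage sketch (FOO and BAR are placeholder variable names, my-target is hypothetical):
        #   my-target: var-require-all-FOO-BAR
        #   	@echo FOO and BAR are both set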
   969  
   970  # sem-cut-release triggers the cut-release pipeline (or test-cut-release if CONFIRM is not specified) in semaphore to
   971  # cut the release. The pipeline is triggered for the current commit, and the branch it's triggered on is calculated
   972  # from the RELEASE_VERSION, CNX, and OS variables given.
   973  #
   974  # Before the pipeline is triggered, this target validates that the expected release will be cut using the
   975  # RELEASE_TAG (optional and defaults to the current tag) and RELEASE_VERSION (required) variables. The RELEASE_TAG
   976  # should be the dev tag that the release is cut from, and RELEASE_VERSION should be the version expected to be released.
   977  # This target verifies that the current commit is tagged with the RELEASE_TAG and that cutting this commit will result
   978  # in RELEASE_VERSION being cut.
   979  sem-cut-release: var-require-one-of-CONFIRM-DRYRUN var-require-all-RELEASE_VERSION var-require-one-of-CNX-OS
   980  ifndef RELEASE_TAG
   981  	$(eval RELEASE_TAG = $(call git-dev-tag))
   982  else
   983  	$(eval RELEASE_TAG_COMMIT = $(call commit-for-tag,$(RELEASE_TAG)))
   984  	$(if $(filter-out $(RELEASE_TAG_COMMIT),$(GIT_COMMIT)),\
   985  		echo Current commit is not tagged with $(RELEASE_TAG) && exit 1)
   986  endif
   987  	$(eval CURRENT_RELEASE_VERSION = $(call git-release-tag-from-dev-tag))
   988  	$(if $(filter-out $(CURRENT_RELEASE_VERSION),$(RELEASE_VERSION)),\
   989  		echo Given release version $(RELEASE_VERSION) does not match current commit release version $(CURRENT_RELEASE_VERSION). && exit 1)
   990  
   991  	$(eval RELEASE_BRANCH = release-$(if $(CNX),calient-,)$(shell echo "$(RELEASE_VERSION)" | awk -F  "." '{print $$1"."$$2}'))
   992  	$(eval WORKFLOW_FILE = $(if $(CONFIRM),cut-release.yml,test-cut-release.yml))
   993  
   994  	@echo Cutting release for $(RELEASE_VERSION) from dev tag $(RELEASE_TAG) \(commit $(GIT_COMMIT)\)
   995  	SEMAPHORE_WORKFLOW_BRANCH=$(RELEASE_BRANCH) SEMAPHORE_COMMIT_SHA=$(GIT_COMMIT) SEMAPHORE_WORKFLOW_FILE=$(WORKFLOW_FILE) $(MAKE) semaphore-run-workflow
   996  
   997  # cut-release uses the dev tags on the current commit to cut the release, more specifically cut-release does the
   998  # following:
   999  # - Calculates the release tag from the dev tag on the commit
  1000  # - tags the current commit with the release tag then pushes that tag to github
  1001  # - retags the build images (specified by BUILD_IMAGES) in the dev registries (specified DEV_REGISTRIES) with the
  1002  #	release tag
  1003  # - copies the build images (specified by BUILD_IMAGES) from the first dev registry to the release registries (specified
  1004  #	by RELEASE_REGISTRIES) and retags those images with the release tag
  1005  # - tags an empty commit at the head of the release branch with the next patch release dev tag and pushes that to github
  1006  cut-release: var-require-one-of-CONFIRM-DRYRUN
  1007  	$(MAKE) cut-release-wrapped RELEASE=true
  1008  
  1009  cut-release-wrapped: var-require-one-of-CONFIRM-DRYRUN
  1010  	$(eval DEV_TAG = $(call git-dev-tag))
  1011  	$(eval RELEASE_TAG = $(call git-release-tag-from-dev-tag))
  1012  	$(eval RELEASE_BRANCH = $(call release-branch-for-tag,$(DEV_TAG)))
  1013  ifdef EXPECTED_RELEASE_TAG
  1014  	$(if $(filter-out $(RELEASE_TAG),$(EXPECTED_RELEASE_TAG)),\
  1015  		@echo "Failed to verify release tag$(comma) expected release version is $(EXPECTED_RELEASE_TAG)$(comma) actual is $(RELEASE_TAG)."\
  1016  		&& exit 1)
  1017  endif
  1018  	$(eval NEXT_RELEASE_VERSION = $(shell echo "$(call git-release-tag-from-dev-tag)" | awk -F  "." '{print $$1"."$$2"."$$3+1}'))
  1019  ifndef IMAGE_ONLY
  1020  	$(MAKE) maybe-tag-release maybe-push-release-tag\
  1021  		RELEASE_TAG=$(RELEASE_TAG) BRANCH=$(RELEASE_BRANCH) DEV_TAG=$(DEV_TAG)
  1022  endif
  1023  ifdef BUILD_IMAGES
  1024  	$(eval IMAGE_DEV_TAG = $(if $(IMAGETAG_PREFIX),$(IMAGETAG_PREFIX)-)$(DEV_TAG))
  1025  	$(eval IMAGE_RELEASE_TAG = $(if $(IMAGETAG_PREFIX),$(IMAGETAG_PREFIX)-)$(RELEASE_TAG))
  1026  	$(MAKE) release-dev-images\
  1027  		RELEASE_TAG=$(IMAGE_RELEASE_TAG) BRANCH=$(RELEASE_BRANCH) DEV_TAG=$(IMAGE_DEV_TAG)
  1028  endif
  1029  ifndef IMAGE_ONLY
  1030  	$(MAKE) maybe-dev-tag-next-release maybe-push-next-release-dev-tag\
  1031  		NEXT_RELEASE_VERSION=$(NEXT_RELEASE_VERSION) BRANCH=$(RELEASE_BRANCH) DEV_TAG=$(DEV_TAG)
  1032  endif
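
        # Illustrative invocations (each requires either CONFIRM or DRYRUN to be set):
        #
        #   make cut-release DRYRUN=true                   # exercise the release flow without confirming
        #   make cut-release CONFIRM=true                  # cut the release for real
        #   make cut-release CONFIRM=true IMAGE_ONLY=true  # only retag/copy images, skip the git tagging steps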
  1033  
  1034  # maybe-tag-release calls the tag-release target only if the current commit is not tagged with the tag in RELEASE_TAG.
  1035  # If the current commit is already tagged with the value in RELEASE_TAG then this is a NOOP.
  1036  maybe-tag-release: var-require-all-RELEASE_TAG
  1037  	$(if $(filter-out $(call git-release-tag-for-current-commit),$(RELEASE_TAG)),\
  1038  		$(MAKE) tag-release,\
  1039  		@echo "Current commit already tagged with $(RELEASE_TAG)")
  1040  
  1041  # tag-release tags the current commit with an annotated tag with the value in RELEASE_TAG. This target throws an error
  1042  # if the current branch is $(RELEASE_BRANCH_BASE) (e.g. master).
  1043  tag-release: var-require-one-of-CONFIRM-DRYRUN var-require-all-DEV_TAG_SUFFIX-RELEASE_TAG
  1044  	$(if $(filter-out $(RELEASE_BRANCH_BASE),$(call current-branch)),,$(error tag-release cannot be called on $(RELEASE_BRANCH_BASE)))
  1045  	git tag -a $(RELEASE_TAG) -m "Release $(RELEASE_TAG)"
  1046  
  1047  # maybe-push-release-tag calls the push-release-tag target only if the tag in RELEASE_TAG is not already pushed to
  1048  # github. If the tag is pushed to github then this is a NOOP.
  1049  # TODO: should we check that the commit tagged in the remote is the current commit? Probably yes; that would catch some annoying problems that would otherwise be hard to track down.
  1050  maybe-push-release-tag: var-require-all-RELEASE_TAG
  1051  	$(if $(shell git ls-remote -q --tags $(GIT_REMOTE) $(RELEASE_TAG)),\
  1052  		@echo Release $(RELEASE_TAG) already in github,\
  1053  		$(MAKE) push-release-tag)
  1054  
  1055  # push-release-tag pushes the tag in RELEASE_TAG to github. If the current commit is not tagged with this tag then this
  1056  # target fails.
  1057  push-release-tag: var-require-one-of-CONFIRM-DRYRUN var-require-all-DEV_TAG_SUFFIX-RELEASE_TAG
  1058  	$(if $(call git-release-tag-for-current-commit),,$(error Commit does not have a release tag))
  1059  	$(GIT) push $(GIT_REMOTE) $(RELEASE_TAG)
  1060  
  1061  # maybe-dev-tag-next-release calls the dev-tag-next-release target only if the tag NEXT_RELEASE_VERSION-DEV_TAG_SUFFIX
  1062  # doesn't exist locally. If the tag does exist then this is a NOOP.
  1063  maybe-dev-tag-next-release: var-require-all-NEXT_RELEASE_VERSION-DEV_TAG_SUFFIX
  1064  	$(if $(shell git rev-parse --verify -q "$(NEXT_RELEASE_VERSION)-$(DEV_TAG_SUFFIX)"),\
  1065  		echo "Tag for next release $(NEXT_RELEASE_VERSION) already exists$(comma) not creating.",\
  1066  		$(MAKE) dev-tag-next-release)
  1067  
  1068  # dev-tag-next-release creates a new empty commit at the head of BRANCH and tags it with
  1069  # NEXT_RELEASE_VERSION-DEV_TAG_SUFFIX.
  1070  dev-tag-next-release: var-require-one-of-CONFIRM-DRYRUN var-require-all-NEXT_RELEASE_VERSION-DEV_TAG_SUFFIX-BRANCH
  1071  	git checkout $(BRANCH)
  1072  	$(GIT) pull $(GIT_REMOTE) $(BRANCH)
  1073  	git commit --allow-empty -m "Begin development on $(NEXT_RELEASE_VERSION)"
  1074  	git tag $(NEXT_RELEASE_VERSION)-$(DEV_TAG_SUFFIX)
  1075  
  1076  # maybe-push-next-release-dev-tag calls the push-next-release-dev-tag target if the tag
  1077  # NEXT_RELEASE_VERSION-DEV_TAG_SUFFIX doesn't exist remotely. If the tag exists remotely then this is a NOOP.
  1078  maybe-push-next-release-dev-tag: var-require-one-of-CONFIRM-DRYRUN var-require-all-NEXT_RELEASE_VERSION-DEV_TAG_SUFFIX
  1079  	$(if $(shell git ls-remote --tags $(GIT_REMOTE) $(NEXT_RELEASE_VERSION)-$(DEV_TAG_SUFFIX)),\
  1080  		echo "Dev tag for next release $(NEXT_RELEASE_VERSION) already pushed to github.",\
  1081  		$(MAKE) push-next-release-dev-tag)
  1082  
  1083  # push-next-release-dev-tag pushes the tag NEXT_RELEASE_VERSION-DEV_TAG_SUFFIX and the current branch to github. If
  1084  # the repository is in a detached HEAD state (i.e. not on a branch) then this target fails.
  1085  push-next-release-dev-tag: var-require-one-of-CONFIRM-DRYRUN var-require-all-NEXT_RELEASE_VERSION-DEV_TAG_SUFFIX
  1086  	# The next release commit should always be at the head of a release branch.
  1087  	$(if $(filter-out HEAD,$(call current-branch)),,\
  1088  		$(error "Refusing to push commit for next release while in a detached state."))
  1089  	$(GIT) push $(GIT_REMOTE) $(call current-branch)
  1090  	$(GIT) push $(GIT_REMOTE) $(NEXT_RELEASE_VERSION)-$(DEV_TAG_SUFFIX)
  1091  
  1092  # release-dev-images releases the dev images by calling release-retag-dev-images-in-registry-% for each dev registry
  1093  # and release-dev-images-to-registry-% for each release registry. This results in retagging all the dev images with
  1094  # the release tag and copying the dev images over to the release registries.
  1095  ifndef SKIP_DEV_IMAGE_RETAG
  1096  RELEASE_DEV_IMAGES_RETAG_TARGETS ?= $(addprefix release-retag-dev-images-in-registry-,$(call escapefs, $(DEV_REGISTRIES)))
  1097  endif
  1098  
  1099  RELEASE_DEV_IMAGES_TARGETS ?= $(addprefix release-dev-images-to-registry-,$(call escapefs, $(RELEASE_REGISTRIES)))
  1100  release-dev-images: var-require-one-of-CONFIRM-DRYRUN var-require-all-BUILD_IMAGES $(RELEASE_DEV_IMAGES_RETAG_TARGETS) $(RELEASE_DEV_IMAGES_TARGETS)
  1101  
  1102  # release-retag-dev-images-in-registry-% retags all the build / arch images specified by BUILD_IMAGES and VALIDARCHES in
  1103  # the registry specified by $* with the release tag specified by RELEASE_TAG.
  1104  release-retag-dev-images-in-registry-%:
  1105  	$(MAKE) $(addprefix release-retag-dev-image-in-registry-,$(call escapefs, $(BUILD_IMAGES))) DEV_REGISTRY=$(call unescapefs,$*)
  1106  
  1107  # release-retag-dev-image-in-registry-% retags the build image specified by $* in the dev registry specified by
  1108  # DEV_REGISTRY with the release tag specified by RELEASE_TAG. If DEV_REGISTRY is in the list of registries specified by
  1109  # RELEASE_REGISTRIES then the retag is skipped.
  1110  release-retag-dev-image-in-registry-%:
  1111  	$(if $(filter-out $(RELEASE_REGISTRIES),$(DEV_REGISTRY)),\
  1112  		$(CRANE) cp $(DEV_REGISTRY)/$(call unescapefs,$*):$(DEV_TAG) $(DEV_REGISTRY)/$(call unescapefs,$*):$(RELEASE_TAG))$(double_quote)
  1113  
  1114  # release-dev-images-to-registry-% copies and retags all the build / arch images specified by BUILD_IMAGES and
  1115  # VALIDARCHES from the registry specified by DEV_REGISTRY to the registry specified by RELEASE_REGISTRY using the tag
  1116  # specified by DEV_TAG and RELEASE_TAG.
  1117  release-dev-images-to-registry-%:
  1118  	$(MAKE) $(addprefix release-dev-image-to-registry-,$(call escapefs, $(BUILD_IMAGES))) RELEASE_REGISTRY=$(call unescapefs,$*)
  1119  
  1120  # release-dev-image-to-registry-% copies the build image and build arch images specified by $* and VALIDARCHES from
  1121  # the dev registry (DEV_REGISTRY, tag DEV_TAG) to the release registry (RELEASE_REGISTRY, tag RELEASE_TAG).
  1122  release-dev-image-to-registry-%:
  1123  	$(if $(SKIP_MANIFEST_RELEASE),,\
  1124  		$(CRANE) cp $(DEV_REGISTRY)/$(call unescapefs,$*):$(DEV_TAG) $(RELEASE_REGISTRY)/$(call unescapefs,$*):$(RELEASE_TAG))$(double_quote)
  1125  	$(if $(SKIP_ARCH_RELEASE),,\
  1126  		$(MAKE) $(addprefix release-dev-image-arch-to-registry-,$(VALIDARCHES)) BUILD_IMAGE=$(call unescapefs,$*))
  1127  
  1128  # release-dev-image-arch-to-registry-% copies the build arch image specified by BUILD_IMAGE and $* (the arch) from
  1129  # the dev registry (DEV_REGISTRY, tag DEV_TAG-$*) to the release registry (RELEASE_REGISTRY, tag RELEASE_TAG-$*).
  1130  release-dev-image-arch-to-registry-%:
  1131  	$(CRANE) cp $(DEV_REGISTRY)/$(BUILD_IMAGE):$(DEV_TAG)-$* $(RELEASE_REGISTRY)/$(BUILD_IMAGE):$(RELEASE_TAG)-$*$(double_quote)
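
        # For illustration (registry, image, and tag values are hypothetical), release-dev-image-arch-to-registry-amd64
        # with BUILD_IMAGE=calico/typha, DEV_REGISTRY=quay.io, RELEASE_REGISTRY=docker.io, DEV_TAG=v3.26.0-0.dev and
        # RELEASE_TAG=v3.26.0 amounts to running:
        #
        #   crane cp quay.io/calico/typha:v3.26.0-0.dev-amd64 docker.io/calico/typha:v3.26.0-amd64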
  1132  
  1133  # release-prereqs checks that the environment is configured properly to create a release.
  1134  .PHONY: release-prereqs
  1135  release-prereqs:
  1136  ifndef VERSION
  1137  	$(error VERSION is undefined - run using make release VERSION=vX.Y.Z)
  1138  endif
  1139  
  1140  # Check if the codebase is dirty or not.
  1141  check-dirty:
  1142  	@if [ "$$(git --no-pager diff --stat)" != "" ]; then \
  1143  	echo "The following files are dirty"; git --no-pager diff --stat; exit 1; fi
  1144  
  1145  bin/yq:
  1146  	mkdir -p bin
  1147  	$(eval TMP := $(shell mktemp -d))
  1148  	curl -sSf -L --retry 5 -o $(TMP)/yq4.tar.gz https://github.com/mikefarah/yq/releases/download/v4.34.2/yq_linux_$(BUILDARCH).tar.gz
  1149  	tar -zxvf $(TMP)/yq4.tar.gz -C $(TMP)
  1150  	mv $(TMP)/yq_linux_$(BUILDARCH) bin/yq
  1151  
  1152  ###############################################################################
  1153  # Common functions for launching a local Kubernetes control plane.
  1154  ###############################################################################
  1155  ## Kubernetes apiserver used for tests
  1156  APISERVER_NAME := calico-local-apiserver
  1157  run-k8s-apiserver: stop-k8s-apiserver run-etcd
  1158  	docker run --detach --net=host \
  1159  		--name $(APISERVER_NAME) \
  1160  		-v $(REPO_ROOT):/go/src/github.com/projectcalico/calico \
  1161  		-v $(CERTS_PATH):/home/user/certs \
  1162  		-e KUBECONFIG=/home/user/certs/kubeconfig \
  1163  		$(CALICO_BUILD) kube-apiserver \
  1164  		--etcd-servers=http://$(LOCAL_IP_ENV):2379 \
  1165  		--service-cluster-ip-range=10.101.0.0/16,fd00:96::/112 \
  1166  		--authorization-mode=RBAC \
  1167  		--service-account-key-file=/home/user/certs/service-account.pem \
  1168  		--service-account-signing-key-file=/home/user/certs/service-account-key.pem \
  1169  		--service-account-issuer=https://localhost:443 \
  1170  		--api-audiences=kubernetes.default \
  1171  		--client-ca-file=/home/user/certs/ca.pem \
  1172  		--tls-cert-file=/home/user/certs/kubernetes.pem \
  1173  		--tls-private-key-file=/home/user/certs/kubernetes-key.pem \
  1174  		--enable-priority-and-fairness=false \
  1175  		--max-mutating-requests-inflight=0 \
  1176  		--max-requests-inflight=0
  1177  
  1178  	# Wait until the apiserver is accepting requests.
  1179  	while ! docker exec $(APISERVER_NAME) kubectl get nodes; do echo "Waiting for apiserver to come up..."; sleep 2; done
  1180  
  1181  	# Wait until we can configure a cluster role binding which allows anonymous auth.
  1182  	while ! docker exec $(APISERVER_NAME) kubectl create \
  1183  		clusterrolebinding anonymous-admin \
  1184  		--clusterrole=cluster-admin \
  1185  		--user=system:anonymous 2>/dev/null ; \
  1186  		do echo "Waiting for $(APISERVER_NAME) to come up"; \
  1187  		sleep 1; \
  1188  		done
  1189  
  1190  	# Create CustomResourceDefinition (CRD) for Calico resources
  1191  	while ! docker exec $(APISERVER_NAME) kubectl \
  1192  		apply -f /go/src/github.com/projectcalico/calico/libcalico-go/config/crd/; \
  1193  		do echo "Trying to create CRDs"; \
  1194  		sleep 1; \
  1195  		done
  1196  
  1197  # Stop Kubernetes apiserver
  1198  stop-k8s-apiserver:
  1199  	@-docker rm -f $(APISERVER_NAME)
  1200  
  1201  # Run a local Kubernetes controller-manager in a docker container, useful for tests.
  1202  CONTROLLER_MANAGER_NAME := calico-local-controller-manager
  1203  run-k8s-controller-manager: stop-k8s-controller-manager run-k8s-apiserver
  1204  	docker run --detach --net=host \
  1205  		--name $(CONTROLLER_MANAGER_NAME) \
  1206  		-v $(CERTS_PATH):/home/user/certs \
  1207  		$(CALICO_BUILD) kube-controller-manager \
  1208  		--master=https://127.0.0.1:6443 \
  1209  		--kubeconfig=/home/user/certs/kube-controller-manager.kubeconfig \
  1210  		--min-resync-period=3m \
  1211  		--allocate-node-cidrs=true \
  1212  		--cluster-cidr=192.168.0.0/16 \
  1213  		--v=5 \
  1214  		--service-account-private-key-file=/home/user/certs/service-account-key.pem \
  1215  		--root-ca-file=/home/user/certs/ca.pem
  1216  
  1217  ## Stop Kubernetes controller manager
  1218  stop-k8s-controller-manager:
  1219  	@-docker rm -f $(CONTROLLER_MANAGER_NAME)
  1220  
  1221  ###############################################################################
  1222  # Common functions for creating a local kind cluster.
  1223  ###############################################################################
  1224  KIND_DIR := $(REPO_ROOT)/hack/test/kind
  1225  KIND ?= $(KIND_DIR)/kind
  1226  KUBECTL ?= $(KIND_DIR)/kubectl
  1227  
  1228  # Different tests may require different kind configurations.
  1229  KIND_CONFIG ?= $(KIND_DIR)/kind.config
  1230  KIND_NAME = $(basename $(notdir $(KIND_CONFIG)))
  1231  KIND_KUBECONFIG?=$(KIND_DIR)/$(KIND_NAME)-kubeconfig.yaml
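
        # A hypothetical override (the config file name is made up): pointing KIND_CONFIG at another config file changes
        # the derived cluster name and kubeconfig path automatically, e.g.
        #
        #   make kind-cluster-create KIND_CONFIG=$(KIND_DIR)/dual-stack.config
        #
        # would create a cluster named 'dual-stack' with its kubeconfig written to $(KIND_DIR)/dual-stack-kubeconfig.yaml.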
  1232  
  1233  kind-cluster-create: $(REPO_ROOT)/.$(KIND_NAME).created
  1234  $(REPO_ROOT)/.$(KIND_NAME).created: $(KUBECTL) $(KIND)
  1235  	# First make sure any previous cluster is deleted
  1236  	$(MAKE) kind-cluster-destroy
  1237  
  1238  	# Create a kind cluster.
  1239  	$(KIND) create cluster \
  1240  		--config $(KIND_CONFIG) \
  1241  		--kubeconfig $(KIND_KUBECONFIG) \
  1242  		--name $(KIND_NAME) \
  1243  		--image kindest/node:$(KINDEST_NODE_VERSION)
  1244  
  1245  	# Wait for controller manager to be running and healthy, then create Calico CRDs.
  1246  	while ! KUBECONFIG=$(KIND_KUBECONFIG) $(KUBECTL) get serviceaccount default; do echo "Waiting for default serviceaccount to be created..."; sleep 2; done
  1247  	while ! KUBECONFIG=$(KIND_KUBECONFIG) $(KUBECTL) create -f $(REPO_ROOT)/libcalico-go/config/crd; do echo "Waiting for CRDs to be created"; sleep 2; done
  1248  	touch $@
  1249  
  1250  kind-cluster-destroy: $(KIND) $(KUBECTL)
  1251  	-$(KUBECTL) --kubeconfig=$(KIND_KUBECONFIG) drain kind-control-plane kind-worker kind-worker2 kind-worker3 --ignore-daemonsets --force
  1252  	-$(KIND) delete cluster --name $(KIND_NAME)
  1253  	rm -f $(KIND_KUBECONFIG)
  1254  	rm -f $(REPO_ROOT)/.$(KIND_NAME).created
  1255  
  1256  kind $(KIND):
  1257  	mkdir -p $(KIND_DIR)
  1258  	$(DOCKER_GO_BUILD) sh -c "GOBIN=/go/src/github.com/projectcalico/calico/hack/test/kind go install sigs.k8s.io/kind@v0.14.0"
  1259  
  1260  kubectl $(KUBECTL):
  1261  	mkdir -p $(KIND_DIR)
  1262  	curl -L https://storage.googleapis.com/kubernetes-release/release/$(K8S_VERSION)/bin/linux/$(ARCH)/kubectl -o $@
  1263  	chmod +x $@
  1264  
  1265  bin/helm:
  1266  	mkdir -p bin
  1267  	$(eval TMP := $(shell mktemp -d))
  1268  	curl -sSf -L --retry 5 -o $(TMP)/helm3.tar.gz https://get.helm.sh/helm-v3.11.0-linux-$(ARCH).tar.gz
  1269  	tar -zxvf $(TMP)/helm3.tar.gz -C $(TMP)
  1270  	mv $(TMP)/linux-$(ARCH)/helm bin/helm
  1271  
  1272  ###############################################################################
  1273  # Common functions for launching a local etcd instance.
  1274  ###############################################################################
  1275  ## Run etcd as a container (calico-etcd)
  1276  # TODO: We shouldn't need to tear this down every time it is called.
  1277  # TODO: We shouldn't need to enable the v2 API, but some of our test code still relies on it.
  1278  .PHONY: run-etcd stop-etcd
  1279  run-etcd: stop-etcd
  1280  	docker run --detach \
  1281  		--net=host \
  1282  		--entrypoint=/usr/local/bin/etcd \
  1283  		--name calico-etcd $(ETCD_IMAGE) \
  1284  		--enable-v2 \
  1285  		--advertise-client-urls "http://$(LOCAL_IP_ENV):2379,http://127.0.0.1:2379,http://$(LOCAL_IP_ENV):4001,http://127.0.0.1:4001" \
  1286  		--listen-client-urls "http://0.0.0.0:2379,http://0.0.0.0:4001"
  1287  
  1288  stop-etcd:
  1289  	@-docker rm -f calico-etcd
  1290  
  1291  ###############################################################################
  1292  # Helpers
  1293  ###############################################################################
  1294  ## Help
  1295  .PHONY: help
  1296  help:
  1297  	$(info Available targets)
  1298  	@echo
  1299  	@awk '/^[a-zA-Z\-\_\%0-9\/]+:/ {                                  \
  1300  	   nb = sub( /^## /, "", helpMsg );                               \
  1301  	   if(nb == 0) {                                                  \
  1302  	      helpMsg = $$0;                                              \
  1303  	      nb = sub( /^[^:]*:.* ## /, "", helpMsg );                   \
  1304  	   }                                                              \
  1305  	   if (nb)                                                        \
  1306  	      printf "\033[1;31m%-" width "s\033[0m %s\n", $$1, helpMsg;  \
  1307  	}                                                                 \
  1308  	{ helpMsg = $$0 }'                                                \
  1309  	width=30                                                          \
  1310  	$(MAKEFILE_LIST)
  1311  	@echo
  1312  	@echo "-----------------------------------------------------------"
  1313  	@echo "Building for $(BUILDOS)-$(ARCH) INSTALL_FLAG=$(INSTALL_FLAG)"
  1314  	@echo
  1315  	@echo "ARCH (target):		$(ARCH)"
  1316  	@echo "OS (target):		$(BUILDOS)"
  1317  	@echo "BUILDARCH (host):	$(BUILDARCH)"
  1318  	@echo "CALICO_BUILD:		$(CALICO_BUILD)"
  1319  	@echo "-----------------------------------------------------------"
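
        # The list printed above is generated from comments in $(MAKEFILE_LIST): a target appears in 'make help' when it
        # is preceded by a '## ' comment line or carries an inline ' ## ' comment. A hypothetical example:
        #
        #   ## Build the widget binary
        #   widget:
        #   	go build ./cmd/widget
        #
        #   lint: deps ## Run the linters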
  1320  
  1321  ###############################################################################
  1322  # Common functions for building windows images.
  1323  ###############################################################################
  1324  
  1325  # When running on semaphore, just copy the docker config, otherwise run
  1326  # 'docker-credential-gcr configure-docker' as well.
  1327  ifdef SEMAPHORE
  1328  DOCKER_CREDENTIAL_CMD = cp /root/.docker/config.json_host /root/.docker/config.json
  1329  else
  1330  DOCKER_CREDENTIAL_CMD = cp /root/.docker/config.json_host /root/.docker/config.json && \
  1331  						docker-credential-gcr configure-docker
  1332  endif
  1333  
  1334  # This needs the $(WINDOWS_DIST)/bin/docker-credential-gcr binary in $PATH and
  1335  # also the local ~/.config/gcloud dir to be able to push to gcr.io.  It mounts
  1336  # $(DOCKER_CONFIG) and copies it so that it can be written to inside the container
  1337  # without affecting the host config.
  1338  CRANE_BINDMOUNT_CMD := \
  1339  	docker run --rm \
  1340  		--net=host \
  1341  		--init \
  1342  		--entrypoint /bin/sh \
  1343  		-e LOCAL_USER_ID=$(LOCAL_USER_ID) \
  1344  		-v $(CURDIR):/go/src/$(PACKAGE_NAME):rw \
  1345  		-v $(DOCKER_CONFIG):/root/.docker/config.json_host:ro \
  1346  		-e PATH=$${PATH}:/go/src/$(PACKAGE_NAME)/$(WINDOWS_DIST)/bin \
  1347  		-v $(HOME)/.config/gcloud:/root/.config/gcloud \
  1348  		-w /go/src/$(PACKAGE_NAME) \
  1349  		$(CALICO_BUILD) -c $(double_quote)$(DOCKER_CREDENTIAL_CMD) && crane
  1350  
  1351  DOCKER_MANIFEST_CMD := docker manifest
  1352  
  1353  ifdef CONFIRM
  1354  CRANE_BINDMOUNT = $(CRANE_BINDMOUNT_CMD)
  1355  DOCKER_MANIFEST = $(DOCKER_MANIFEST_CMD)
  1356  else
  1357  CRANE_BINDMOUNT = echo [DRY RUN] $(CRANE_BINDMOUNT_CMD)
  1358  DOCKER_MANIFEST = echo [DRY RUN] $(DOCKER_MANIFEST_CMD)
  1359  endif
  1360  
  1361  # Clean up the docker builder used to create Windows image tarballs.
  1362  .PHONY: clean-windows-builder
  1363  clean-windows-builder:
  1364  	-docker buildx rm calico-windows-builder
  1365  
  1366  # Set up the docker builder used to create Windows image tarballs.
  1367  .PHONY: setup-windows-builder
  1368  setup-windows-builder: clean-windows-builder
  1369  	docker buildx create --name=calico-windows-builder --use --platform windows/amd64
  1370  
  1371  # FIXME: Use WINDOWS_HPC_VERSION and image instead of nanoserver and WINDOWS_VERSIONS when containerd v1.6 is EOL'd
  1372  # .PHONY: image-windows release-windows
  1373  # NOTE: WINDOWS_IMAGE_REQS must be defined with the requirements to build the windows
  1374  # image. These must be added as reqs to 'image-windows' (originally defined in
  1375  # lib.Makefile) on the specific package Makefile otherwise they are not correctly
  1376  # recognized.
  1377  # # Build Windows image with tag and possibly push it to $DEV_REGISTRIES
  1378  # image-windows-with-tag: var-require-all-WINDOWS_IMAGE-WINDOWS_DIST-WINDOWS_IMAGE_REQS-IMAGETAG
  1379  # 	push="$${PUSH:-false}"; \
  1380  # 	for registry in $(DEV_REGISTRIES); do \
  1381  # 		echo Building and pushing Windows image to $${registry}; \
  1382  # 		image="$${registry}/$(WINDOWS_IMAGE):$(IMAGETAG)"; \
  1383  # 		docker buildx build \
  1384  # 			--platform windows/amd64 \
  1385  # 			--output=type=image,push=$${push} \
  1386  # 			-t $${image} \
  1387  # 			--pull \
  1388  # 			--no-cache \
  1389  # 			--build-arg GIT_VERSION=$(GIT_VERSION) \
  1390  # 			--build-arg WINDOWS_HPC_VERSION=$(WINDOWS_HPC_VERSION) \
  1391  # 			-f Dockerfile-windows .; \
  1392  # 	done ;
  1393  
  1394  # image-windows: var-require-all-BRANCH_NAME
  1395  # 	$(MAKE) image-windows-with-tag PUSH=$(PUSH) IMAGETAG=$(if $(IMAGETAG_PREFIX),$(IMAGETAG_PREFIX)-)$(BRANCH_NAME)
  1396  # 	$(MAKE) image-windows-with-tag PUSH=$(PUSH) IMAGETAG=$(if $(IMAGETAG_PREFIX),$(IMAGETAG_PREFIX)-)$(shell git describe --tags --dirty --long --always --abbrev=12)
  1397  
  1398  # # Build and push Windows image
  1399  # release-windows: var-require-one-of-CONFIRM-DRYRUN release-prereqs clean-windows
  1400  # 	$(MAKE) image-windows PUSH=true
  1401  
  1402  # Windows image pushing is different because we do not build docker images directly.
  1403  # Since the build machine is linux, we output the images to a tarball. (We can
  1404  # build the images, but they cannot be loaded into the local docker daemon
  1405  # because docker images built for Windows cannot be loaded on linux.)
  1406  #
  1407  # The resulting image tarball is then pushed to registries during cd/release.
  1408  # The image tarballs are located in WINDOWS_DIST and have file names
  1409  # with the format 'node-windows-v3.21.0-2-abcdef-20H2.tar'.
  1410  #
  1411  # In addition to pushing the individual images, we also create the manifest
  1412  # directly using 'docker manifest'. This is possible because Semaphore is using
  1413  # a recent enough docker CLI version (20.10.0).
  1414  #
  1415  # - Create the manifest with 'docker manifest create' using the list of all images.
  1416  # - For each windows version, 'docker manifest annotate' its image with "os.version: <windows_version>".
  1417  #   <windows_version> is the version string, e.g. 10.0.19041.1288.
  1418  #   Setting os.version in the manifest is required for Windows hosts to load the
  1419  #   correct image from the manifest.
  1420  # - Finally we push the manifest, "purging" the local manifest.
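
        # As a rough sketch (image name, tags, versions, and registry are illustrative), the push for one registry boils
        # down to:
        #
        #   docker manifest create --amend quay.io/node-windows:v3.27.0 \
        #       quay.io/node-windows:v3.27.0-windows-1809 quay.io/node-windows:v3.27.0-windows-ltsc2022
        #   docker manifest annotate --os windows --arch amd64 --os-version 10.0.17763.5122 \
        #       quay.io/node-windows:v3.27.0 quay.io/node-windows:v3.27.0-windows-1809
        #   docker manifest push --purge quay.io/node-windows:v3.27.0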
  1421  
  1422  $(WINDOWS_DIST)/$(WINDOWS_IMAGE)-$(GIT_VERSION)-%.tar: windows-sub-image-%
  1423  
  1424  DOCKER_CREDENTIAL_VERSION="2.1.18"
  1425  DOCKER_CREDENTIAL_OS="linux"
  1426  DOCKER_CREDENTIAL_ARCH="amd64"
  1427  $(WINDOWS_DIST)/bin/docker-credential-gcr:
  1428  	-mkdir -p $(WINDOWS_DIST)/bin
  1429  	curl -fsSL "https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v$(DOCKER_CREDENTIAL_VERSION)/docker-credential-gcr_$(DOCKER_CREDENTIAL_OS)_$(DOCKER_CREDENTIAL_ARCH)-$(DOCKER_CREDENTIAL_VERSION).tar.gz" \
  1430  	| tar xz --to-stdout docker-credential-gcr \
  1431  	| tee $(WINDOWS_DIST)/bin/docker-credential-gcr > /dev/null && chmod +x $(WINDOWS_DIST)/bin/docker-credential-gcr
  1432  
  1433  .PHONY: docker-credential-gcr-binary
  1434  docker-credential-gcr-binary: var-require-all-WINDOWS_DIST-DOCKER_CREDENTIAL_VERSION-DOCKER_CREDENTIAL_OS-DOCKER_CREDENTIAL_ARCH $(WINDOWS_DIST)/bin/docker-credential-gcr
  1435  
  1436  # NOTE: WINDOWS_IMAGE_REQS must be defined with the requirements to build the windows
  1437  # image. These must be added as prerequisites to 'image-windows' (originally defined
  1438  # in lib.Makefile) in the specific package Makefile, otherwise they are not correctly
  1439  # recognized.
  1440  windows-sub-image-%: var-require-all-GIT_VERSION-WINDOWS_IMAGE-WINDOWS_DIST-WINDOWS_IMAGE_REQS
  1441  	# ensure dir for windows image tars exists
  1442  	-mkdir -p $(WINDOWS_DIST)
  1443  	docker buildx build \
  1444  		--platform windows/amd64 \
  1445  		--output=type=docker,dest=$(CURDIR)/$(WINDOWS_DIST)/$(WINDOWS_IMAGE)-$(GIT_VERSION)-$*.tar \
  1446  		--pull \
  1447  		-t $(WINDOWS_IMAGE):latest \
  1448  		--build-arg GIT_VERSION=$(GIT_VERSION) \
  1449  		--build-arg=WINDOWS_VERSION=$* \
  1450  		-f Dockerfile-windows .
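
        # For example (the Windows version is illustrative), 'make windows-sub-image-ltsc2022' builds the Windows image
        # for that version and writes it to $(WINDOWS_DIST)/$(WINDOWS_IMAGE)-$(GIT_VERSION)-ltsc2022.tar.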
  1451  
  1452  .PHONY: image-windows release-windows release-windows-with-tag
  1453  image-windows: setup-windows-builder var-require-all-WINDOWS_VERSIONS
  1454  	for version in $(WINDOWS_VERSIONS); do \
  1455  		$(MAKE) windows-sub-image-$${version}; \
  1456  	done;
  1457  
  1458  release-windows-with-tag: var-require-one-of-CONFIRM-DRYRUN var-require-all-IMAGETAG-DEV_REGISTRIES image-windows docker-credential-gcr-binary
  1459  	for registry in $(DEV_REGISTRIES); do \
  1460  		echo Pushing Windows images to $${registry}; \
  1461  		all_images=""; \
  1462  		manifest_image="$${registry}/$(WINDOWS_IMAGE):$(IMAGETAG)"; \
  1463  		for win_ver in $(WINDOWS_VERSIONS); do \
  1464  			image_tar="$(WINDOWS_DIST)/$(WINDOWS_IMAGE)-$(GIT_VERSION)-$${win_ver}.tar"; \
  1465  			image="$${registry}/$(WINDOWS_IMAGE):$(IMAGETAG)-windows-$${win_ver}"; \
  1466  			echo Pushing image $${image} ...; \
  1467  			$(CRANE_BINDMOUNT) push $${image_tar} $${image}$(double_quote) & \
  1468  			all_images="$${all_images} $${image}"; \
  1469  		done; \
  1470  		wait; \
  1471  		$(DOCKER_MANIFEST) create --amend $${manifest_image} $${all_images}; \
  1472  		for win_ver in $(WINDOWS_VERSIONS); do \
  1473  			version=$$(docker manifest inspect mcr.microsoft.com/windows/nanoserver:$${win_ver} | grep "os.version" | head -n 1 | awk -F\" '{print $$4}'); \
  1474  			image="$${registry}/$(WINDOWS_IMAGE):$(IMAGETAG)-windows-$${win_ver}"; \
  1475  			$(DOCKER_MANIFEST) annotate --os windows --arch amd64 --os-version $${version} $${manifest_image} $${image}; \
  1476  		done; \
  1477  		$(DOCKER_MANIFEST) push --purge $${manifest_image}; \
  1478  	done;
  1479  
  1480  release-windows: var-require-one-of-CONFIRM-DRYRUN var-require-all-DEV_REGISTRIES-WINDOWS_IMAGE var-require-one-of-VERSION-BRANCH_NAME
  1481  	describe_tag=$(if $(IMAGETAG_PREFIX),$(IMAGETAG_PREFIX)-)$(shell git describe --tags --dirty --long --always --abbrev=12); \
  1482  	release_tag=$(if $(VERSION),$(VERSION),$(if $(IMAGETAG_PREFIX),$(IMAGETAG_PREFIX)-)$(BRANCH_NAME)); \
  1483  	$(MAKE) release-windows-with-tag IMAGETAG=$${describe_tag}; \
  1484  	for registry in $(DEV_REGISTRIES); do \
  1485  		$(CRANE_BINDMOUNT) cp $${registry}/$(WINDOWS_IMAGE):$${describe_tag} $${registry}/$(WINDOWS_IMAGE):$${release_tag}$(double_quote); \
  1486  	done;