github.com/tigera/api@v0.0.0-20240320170621-278e89a8c5fb/lib.Makefile

     1  # Find path to the repo root dir (i.e. this file's dir).  Must be first in the file, before including anything.
     2  REPO_DIR:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
     3  
     4  # Always install the git hooks to prevent publishing closed source code to a non-private repo.
     5  install_hooks:=$(shell $(REPO_DIR)/hack/install-git-hooks)
     6  
     7  # Disable built-in rules
     8  .SUFFIXES:
     9  
    10  # Shortcut targets
    11  default: build
    12  
    13  ## Build binary for current platform
    14  all: build
    15  
    16  ## Run the tests for the current platform/architecture
    17  test: ut fv st
    18  
    19  ###############################################################################
    20  # Both native and cross architecture builds are supported.
    21  # The target architecture is selected by setting the ARCH variable.
    22  # When ARCH is undefined it is set to the detected host architecture.
    23  # When ARCH differs from the host architecture a crossbuild will be performed.
    24  # ARCHES is only derived from the Dockerfiles below if it has not already been set.
    25  ARCHES ?= $(patsubst docker-image/Dockerfile.%,%,$(wildcard docker-image/Dockerfile.*))
    26  
    27  # Some repositories keep their Dockerfile(s) in the sub-directories of the 'docker-image'
    28  # directory (e.g., voltron). Make sure ARCHES gets filled from all unique Dockerfiles.
    29  ifeq ($(ARCHES),)
    30  	dockerfiles_in_subdir=$(wildcard docker-image/**/Dockerfile.*)
    31  	ifneq ($(dockerfiles_in_subdir),)
    32  		ARCHES=$(patsubst Dockerfile.%,%,$(shell basename -a $(dockerfiles_in_subdir) | sort | uniq))
    33  	endif
    34  endif
    35  
    36  # Some repositories keep their Dockerfile(s) in the root directory instead of in
    37  # the 'docker-image' subdir. Make sure ARCHES gets filled in either way.
    38  ifeq ($(ARCHES),)
    39  	ARCHES=$(patsubst Dockerfile.%,%,$(wildcard Dockerfile.*))
    40  endif
    41  
    42  # If the architectures cannot be inferred from Dockerfiles, fall back to the default supported architectures.
    43  ifeq ($(ARCHES),)
    44  	ARCHES=amd64 arm64
    45  endif
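        # For example (illustrative): a repo with docker-image/Dockerfile.amd64 and docker-image/Dockerfile.arm64
        # resolves ARCHES to "amd64 arm64"; a repo with no Dockerfiles at all falls back to the default above.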
    46  
    47  # list of arches *not* to build when doing *-all
    48  EXCLUDEARCH?=ppc64le s390x
    49  VALIDARCHES = $(filter-out $(EXCLUDEARCH),$(ARCHES))
    50  
    51  # BUILDARCH is the host architecture
    52  # ARCH is the target architecture
    53  # we need to keep track of them separately
    54  # Note: OS is always set on Windows
    55  ifeq ($(OS),Windows_NT)
    56  BUILDARCH = x86_64
    57  BUILDOS = x86_64
    58  else
    59  BUILDARCH ?= $(shell uname -m)
    60  BUILDOS ?= $(shell uname -s | tr A-Z a-z)
    61  endif
    62  
    63  # canonicalized names for host architecture
    64  ifeq ($(BUILDARCH),aarch64)
    65  	BUILDARCH=arm64
    66  endif
    67  ifeq ($(BUILDARCH),x86_64)
    68  	BUILDARCH=amd64
    69  endif
    70  
    71  # unless otherwise set, I am building for my own architecture, i.e. not cross-compiling
    72  ARCH ?= $(BUILDARCH)
    73  
    74  # canonicalized names for target architecture
    75  ifeq ($(ARCH),aarch64)
    76  	override ARCH=arm64
    77  endif
    78  ifeq ($(ARCH),x86_64)
    79  	override ARCH=amd64
    80  endif
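        # For example (illustrative): invoking make with ARCH=arm64 on an amd64 host leaves BUILDARCH=amd64 and
        # sets ARCH=arm64, so cross-build helpers such as "register" below know to set up qemu emulation.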
    81  
    82  # detect the local outbound ip address
    83  LOCAL_IP_ENV?=$(shell ip route get 8.8.8.8 | head -1 | awk '{print $$7}')
    84  
    85  LATEST_IMAGE_TAG?=latest
    86  
    87  # Helper definitions for characters that are awkward to write literally inside make function calls.
    88  comma := ,
    89  double_quote := $(shell echo '"')
    90  
    91  ## Targets used when cross building.
    92  .PHONY: native register
    93  native:
    94  ifneq ($(BUILDARCH),$(ARCH))
    95  	@echo "Target $(MAKECMDGOALS) is not supported when cross building!" && false
    96  endif
    97  
    98  # Enable binfmt adding support for miscellaneous binary formats.
    99  # This is only needed when running non-native binaries.
   100  register:
   101  ifneq ($(BUILDARCH),$(ARCH))
   102  	docker run --rm --privileged multiarch/qemu-user-static:register || true
   103  endif
   104  
   105  # If this is a release, also tag and push additional images.
   106  ifeq ($(RELEASE),true)
   107  PUSH_IMAGES+=$(RELEASE_IMAGES)
   108  endif
   109  
   110  DOCKERHUB_REGISTRY ?=registry.hub.docker.com
   111  # filter-registry filters out registries we don't want to include when tagging / pushing docker images. For instance,
   112  # we don't include the registry name when pushing to docker hub because that registry is the default.
   113  filter-registry ?= $(if $(filter-out $(1),$(DOCKERHUB_REGISTRY)),$(1)/)
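        # For example (illustrative): $(call filter-registry,quay.io) expands to "quay.io/", while
        # $(call filter-registry,$(DOCKERHUB_REGISTRY)) expands to nothing, so docker hub images keep their short names.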
   114  
   115  # Convenience function to get the first dev image repo in the list.
   116  DEV_REGISTRY ?= $(firstword $(DEV_REGISTRIES))
   117  
   118  # Remove any registries that do not support multi-arch manifests from the list of registries to push manifests to.
   119  MANIFEST_REGISTRIES         ?= $(DEV_REGISTRIES)
   120  
   121  PUSH_MANIFEST_IMAGES := $(foreach registry,$(MANIFEST_REGISTRIES),$(foreach image,$(BUILD_IMAGES),$(call filter-registry,$(registry))$(image)))
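        # For example (illustrative): with MANIFEST_REGISTRIES="quay.io registry.hub.docker.com" and
        # BUILD_IMAGES="calico/node", PUSH_MANIFEST_IMAGES expands to "quay.io/calico/node calico/node".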
   122  
   123  # location of docker credentials to push manifests
   124  DOCKER_CONFIG ?= $(HOME)/.docker/config.json
   125  
   126  # If a repository still relies on vendoring, it must set GOMOD_VENDOR to "true".
   127  # Otherwise, when running in CI, set -mod=readonly to prevent builds from being
   128  # flagged as dirty due to updates in go.mod or go.sum, _except_ for:
   129  # - local builds, which _require_ changes to go.mod;
   130  # - the targets 'commit-pin-updates' and 'golangci-lint', which require
   131  #   updating go.mod and/or go.sum.
   132  SKIP_GOMOD_READONLY_FLAG =
   133  ifeq ($(MAKECMDGOALS),commit-pin-updates)
   134  	SKIP_GOMOD_READONLY_FLAG = yes
   135  endif
   136  ifeq ($(MAKECMDGOALS),golangci-lint)
   137  	SKIP_GOMOD_READONLY_FLAG = yes
   138  endif
   139  
   140  ifeq ($(GOMOD_VENDOR),true)
   141  	GOFLAGS?="-mod=vendor"
   142  else
   143  ifeq ($(CI),true)
   144  ifndef SKIP_GOMOD_READONLY_FLAG
   145  	GOFLAGS?="-mod=readonly"
   146  endif
   147  endif
   148  endif
   149  
   150  # For building, we use the go-build image for the *host* architecture, even if the target is different;
   151  # the image for the host should contain all the necessary cross-compilation tools.
   152  # We do not need to include the arch since go-build:v0.15 is now a multi-arch manifest.
   153  GO_BUILD_IMAGE ?= calico/go-build
   154  CALICO_BUILD    = $(GO_BUILD_IMAGE):$(GO_BUILD_VER)-$(BUILDARCH)
   155  
   156  # Build a binary with boring crypto support.
   157  # This function expects you to pass in two arguments:
   158  #   1st arg: path/to/input/package(s)
   159  #   2nd arg: path/to/output/binary
   160  # Only when arch = amd64 will it use boring crypto to build the binary.
   161  # Uses LDFLAGS, CGO_LDFLAGS, CGO_CFLAGS when set.
   162  # Tests that the resulting binary contains boringcrypto symbols.
   163  define build_cgo_boring_binary
   164  	$(DOCKER_RUN) \
   165  		-e CGO_ENABLED=1 \
   166  		-e CGO_CFLAGS=$(CGO_CFLAGS) \
   167  		-e CGO_LDFLAGS=$(CGO_LDFLAGS) \
   168  		$(CALICO_BUILD) \
   169  		sh -c '$(GIT_CONFIG_SSH) GOEXPERIMENT=boringcrypto go build -o $(2) -tags fipsstrict$(if $(BUILD_TAGS),$(comma)$(BUILD_TAGS)) -v -buildvcs=false -ldflags "$(LDFLAGS) -s -w" $(1) \
   170  			&& strings $(2) | grep '_Cfunc__goboringcrypto_' 1> /dev/null'
   171  endef
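        # Example usage (a sketch; the target name, prerequisites and package path are hypothetical):
        #   bin/example-$(ARCH): $(SRC_FILES)
        #   	$(call build_cgo_boring_binary,./cmd/example,bin/example-$(ARCH))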
   172  
   173  # Use this when building binaries that need cgo, but have no crypto and therefore would not contain any boring symbols.
   174  define build_cgo_binary
   175  	$(DOCKER_RUN) \
   176  		-e CGO_ENABLED=1 \
   177  		-e CGO_CFLAGS=$(CGO_CFLAGS) \
   178  		-e CGO_LDFLAGS=$(CGO_LDFLAGS) \
   179  		$(CALICO_BUILD) \
   180  		sh -c '$(GIT_CONFIG_SSH) go build -o $(2) $(if $(BUILD_TAGS),-tags $(BUILD_TAGS)) -v -buildvcs=false -ldflags "$(LDFLAGS) -s -w" $(1)'
   181  endef
   182  
   183  # For binaries that do not require boring crypto.
   184  define build_binary
   185  	$(DOCKER_RUN) \
   186  		-e CGO_ENABLED=0 \
   187  		$(CALICO_BUILD) \
   188  		sh -c '$(GIT_CONFIG_SSH) go build -o $(2) $(if $(BUILD_TAGS),-tags $(BUILD_TAGS)) -v -buildvcs=false -ldflags "$(LDFLAGS) -s -w" $(1)'
   189  endef
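        # Example usage (a sketch; names are hypothetical):
        #   bin/example-$(ARCH): $(SRC_FILES)
        #   	$(call build_binary,./cmd/example,bin/example-$(ARCH))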
   190  
   191  # For windows builds that require cgo.
   192  define build_cgo_windows_binary
   193  	$(DOCKER_RUN) \
   194  		-e CC=x86_64-w64-mingw32-gcc \
   195  		-e CGO_ENABLED=1 \
   196  		-e GOARCH=amd64 \
   197  		-e GOOS=windows \
   198  		$(CALICO_BUILD) \
   199  		sh -c '$(GIT_CONFIG_SSH) go build -o $(2) $(if $(BUILD_TAGS),-tags $(BUILD_TAGS)) -v -buildvcs=false -ldflags "$(LDFLAGS)" $(1)'
   200  endef
   201  
   202  # For windows builds that do not require cgo.
   203  define build_windows_binary
   204  	$(DOCKER_RUN) \
   205  		-e CGO_ENABLED=0 \
   206  		-e GOARCH=amd64 \
   207  		-e GOOS=windows \
   208  		$(CALICO_BUILD) \
   209  		sh -c '$(GIT_CONFIG_SSH) go build -o $(2) $(if $(BUILD_TAGS),-tags $(BUILD_TAGS)) -v -buildvcs=false -ldflags "$(LDFLAGS)" $(1)'
   210  endef
   211  
   212  # Images used in build / test across multiple directories.
   213  PROTOC_CONTAINER=calico/protoc:$(PROTOC_VER)-$(BUILDARCH)
   214  ETCD_IMAGE ?= quay.io/coreos/etcd:$(ETCD_VERSION)-$(ARCH)
   215  ifeq ($(BUILDARCH),amd64)
   216  	# *-amd64 tagged images for etcd are not available until v3.5.0
   217  	ETCD_IMAGE = quay.io/coreos/etcd:$(ETCD_VERSION)
   218  endif
   219  UBI8_IMAGE ?= registry.access.redhat.com/ubi8/ubi-minimal:$(UBI8_VERSION)
   220  UBI9_IMAGE ?= registry.access.redhat.com/ubi9/ubi-minimal:$(UBI9_VERSION)
   221  
   222  ifeq ($(GIT_USE_SSH),true)
   223  	GIT_CONFIG_SSH ?= git config --global url."ssh://git@github.com/".insteadOf "https://github.com/";
   224  endif
   225  
   226  # Get version from git.
   227  GIT_VERSION:=$(shell git describe --tags --dirty --always --abbrev=12)
   228  
   229  # Figure out version information.  To support builds from release tarballs, we default to
   230  # <unknown> if this isn't a git checkout.
   231  GIT_COMMIT:=$(shell git rev-parse HEAD || echo '<unknown>')
   232  BUILD_ID:=$(shell git rev-parse HEAD || uuidgen | sed 's/-//g')
   233  
   234  # Lazily set the git version we embed into the binaries we build. We want the
   235  # git tag at the time we build the binary.
   236  # Variables elsewhere that depend on this (such as LDFLAGS) must also be lazy.
   237  GIT_DESCRIPTION=$(shell git describe --tags --dirty --always --abbrev=12 || echo '<unknown>')
   238  
   239  # Calculate a timestamp for any build artifacts.
   240  ifneq ($(OS),Windows_NT)
   241  DATE:=$(shell date -u +'%FT%T%z')
   242  endif
   243  
   244  # Figure out the user's UID/GID.  These are needed to run docker containers
   245  # as the current user and ensure that files built inside containers are
   246  # owned by the current user.
   247  ifneq ($(OS),Windows_NT)
   248  LOCAL_USER_ID:=$(shell id -u)
   249  LOCAL_GROUP_ID:=$(shell id -g)
   250  endif
   251  
   252  ifeq ("$(LOCAL_USER_ID)", "0")
   253  # The build needs to run as root.
   254  EXTRA_DOCKER_ARGS+=-e RUN_AS_ROOT='true'
   255  endif
   256  
   257  # Allow the ssh auth sock to be mapped into the build container.
   258  ifdef SSH_AUTH_SOCK
   259  	EXTRA_DOCKER_ARGS += -v $(SSH_AUTH_SOCK):/ssh-agent --env SSH_AUTH_SOCK=/ssh-agent
   260  endif
   261  
   262  # Volume-mount GOPATH into the build container to cache Go module packages. If the environment is using multiple
   263  # colon-separated directories for GOPATH, use the first one, as that is the default one used by go modules.
   264  ifneq ($(GOPATH),)
   265  	# If the environment is using multiple colon-separated directories for GOPATH, use the first one, as that
   266  	# is the default one used by go modules.
   267  	GOMOD_CACHE = $(shell echo $(GOPATH) | cut -d':' -f1)/pkg/mod
   268  else
   269  	# If gopath is empty, default to $(HOME)/go.
   270  	GOMOD_CACHE = $(HOME)/go/pkg/mod
   271  endif
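        # For example (illustrative): GOPATH="/home/user/go:/extra/go" yields GOMOD_CACHE=/home/user/go/pkg/mod,
        # while an unset GOPATH yields GOMOD_CACHE=$(HOME)/go/pkg/mod.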
   272  
   273  EXTRA_DOCKER_ARGS += -v $(GOMOD_CACHE):/go/pkg/mod:rw
   274  
   275  # Define go architecture flags to support arm variants
   276  GOARCH_FLAGS :=-e GOARCH=$(ARCH)
   277  
   278  # Location of certificates used in UTs.
   279  REPO_ROOT := $(shell git rev-parse --show-toplevel)
   280  CERTS_PATH := $(REPO_ROOT)/hack/test/certs
   281  
   282  QEMU_IMAGE ?= calico/qemu-user-static:latest
   283  
   284  # DOCKER_BUILD is the base build command used for building all images.
   285  DOCKER_BUILD=docker buildx build --load --platform=linux/$(ARCH) --pull \
   286  	--build-arg QEMU_IMAGE=$(QEMU_IMAGE) \
   287  	--build-arg UBI8_IMAGE=$(UBI8_IMAGE) \
   288  	--build-arg UBI9_IMAGE=$(UBI9_IMAGE) \
   289  	--build-arg GIT_VERSION=$(GIT_VERSION)
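        # Example usage (a sketch; the image name and Dockerfile path are hypothetical):
        #   example-image: register
        #   	$(DOCKER_BUILD) -t example/image:latest-$(ARCH) -f docker-image/Dockerfile.$(ARCH) .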
   290  
   291  DOCKER_RUN := mkdir -p $(REPO_ROOT)/.go-pkg-cache bin $(GOMOD_CACHE) && \
   292  	docker run --rm \
   293  		--net=host \
   294  		--init \
   295  		$(EXTRA_DOCKER_ARGS) \
   296  		-e LOCAL_USER_ID=$(LOCAL_USER_ID) \
   297  		-e GOCACHE=/go-cache \
   298  		$(GOARCH_FLAGS) \
   299  		-e GOPATH=/go \
   300  		-e OS=$(BUILDOS) \
   301  		-e GOOS=$(BUILDOS) \
   302  		-e GOFLAGS=$(GOFLAGS) \
   303  		-e ACK_GINKGO_DEPRECATIONS=1.16.5 \
   304  		-e ACK_GINKGO_RC=true \
   305  		-v $(REPO_ROOT):/go/src/github.com/projectcalico/calico:rw \
   306  		-v $(REPO_ROOT)/.go-pkg-cache:/go-cache:rw \
   307  		-w /go/src/$(PACKAGE_NAME)
   308  
   309  DOCKER_GO_BUILD := $(DOCKER_RUN) $(CALICO_BUILD)
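        # Example usage (a sketch; the test target is hypothetical):
        #   ut:
        #   	$(DOCKER_GO_BUILD) sh -c '$(GIT_CONFIG_SSH) go test ./...'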
   310  
   311  # A target that does nothing but is always stale, used to force a rebuild of certain targets based on some non-file criteria.
   312  .PHONY: force-rebuild
   313  force-rebuild:
   314  
   315  ###############################################################################
   316  # Updating pins
   317  #   the repo importing this Makefile _must_ define the update-pins target
   318  #   for example:
   319  #     update-pins: update-libcalico-pin update-typha-pin
   320  ###############################################################################
   321  PIN_BRANCH?=$(shell git rev-parse --abbrev-ref HEAD)
   322  
   323  # The docker entrypoint script might echo output that could be included in the output of the following command, so this
   324  # prefixes the commit tag with "commit-tag:" so we can reliably extract the commit tag from the output.
   325  define get_remote_version
   326  	$(shell $(DOCKER_RUN) $(CALICO_BUILD) sh -c '$(GIT_CONFIG_SSH) echo "commit-tag:$$(git ls-remote https://$(1) $(2) | cut -f1)"' | awk -F "commit-tag:" '{print $$2}')
   327  endef
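        # For example (illustrative): $(call get_remote_version,github.com/projectcalico/calico/typha,master)
        # evaluates to the commit hash that the master branch of that repository currently points at.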
   328  
   329  # update_pin updates the given package's version to the latest available in the specified repo and branch.
   330  # $(1) should be the name of the package, $(2) and $(3) the repository and branch from which to update it.
   331  # If $(4) is specified, it's treated as the module version and used in the go get -d command.
   332  define update_pin
   333  	$(eval new_ver := $(call get_remote_version,$(2),$(3)))
   334  	$(eval repo := $(if $(4),$(1)/$(4),$(1)))
   335  
   336  	$(DOCKER_RUN) -i $(CALICO_BUILD) sh -c '\
   337  		if [ ! -z "$(new_ver)" ]; then \
   338  			$(GIT_CONFIG_SSH) \
   339  			go get -d $(repo)@$(new_ver); \
   340  			go mod tidy; \
   341  		fi'
   342  endef
   343  
   344  # update_replace_pin updates the given package's version to the latest available in the specified repo and branch.
   345  # This routine can only be used for packages being replaced in go.mod, such as private versions of open-source packages.
   346  # $(1) should be the name of the package, $(2) and $(3) the repository and branch from which to update it. If $(4) is
   347  # specified, it's treated as the module version and used in the go mod edit -replace command.
   348  define update_replace_pin
   349  	$(eval new_ver := $(call get_remote_version,$(2),$(3)))
   350  	$(eval original_repo := $(if $(4),$(1)/$(4),$(1)))
   351  	$(eval replace_repo := $(if $(4),$(2)/$(4),$(2)))
   352  
   353  	$(DOCKER_RUN) -i $(CALICO_BUILD) sh -c '\
   354  		if [ ! -z "$(new_ver)" ]; then \
   355  			$(GIT_CONFIG_SSH) \
   356  			go mod edit -replace $(original_repo)=$(replace_repo)@$(new_ver); \
   357  			go mod tidy; \
   358  		fi'
   359  endef
   360  
   361  GIT_REMOTE?=origin
   362  API_BRANCH?=$(PIN_BRANCH)
   363  API_REPO?=github.com/projectcalico/calico/api
   364  BASE_API_REPO?=github.com/projectcalico/calico/api
   365  APISERVER_BRANCH?=$(PIN_BRANCH)
   366  APISERVER_REPO?=github.com/projectcalico/calico/apiserver
   367  TYPHA_BRANCH?=$(PIN_BRANCH)
   368  TYPHA_REPO?=github.com/projectcalico/calico/typha
   369  LIBCALICO_BRANCH?=$(PIN_BRANCH)
   370  LIBCALICO_REPO?=github.com/projectcalico/calico/libcalico-go
   371  CONFD_BRANCH?=$(PIN_BRANCH)
   372  CONFD_REPO?=github.com/projectcalico/calico/confd
   373  FELIX_BRANCH?=$(PIN_BRANCH)
   374  FELIX_REPO?=github.com/projectcalico/calico/felix
   375  CNI_BRANCH?=$(PIN_BRANCH)
   376  CNI_REPO?=github.com/projectcalico/calico/cni-plugin
   377  
   378  update-api-pin:
   379  	$(call update_pin,$(API_REPO),$(API_REPO),$(API_BRANCH))
   380  
   381  replace-api-pin:
   382  	$(call update_replace_pin,$(BASE_API_REPO),$(API_REPO),$(API_BRANCH))
   383  
   384  update-apiserver-pin:
   385  	$(call update_pin,github.com/projectcalico/calico/apiserver,$(APISERVER_REPO),$(APISERVER_BRANCH))
   386  
   387  replace-apiserver-pin:
   388  	$(call update_replace_pin,github.com/projectcalico/calico/apiserver,$(APISERVER_REPO),$(APISERVER_BRANCH))
   389  
   390  update-typha-pin:
   391  	$(call update_pin,github.com/projectcalico/calico/typha,$(TYPHA_REPO),$(TYPHA_BRANCH))
   392  
   393  replace-typha-pin:
   394  	$(call update_replace_pin,github.com/projectcalico/calico/typha,$(TYPHA_REPO),$(TYPHA_BRANCH))
   395  
   396  update-libcalico-pin:
   397  	$(call update_pin,github.com/projectcalico/calico/libcalico-go,$(LIBCALICO_REPO),$(LIBCALICO_BRANCH))
   398  
   399  replace-libcalico-pin:
   400  	$(call update_replace_pin,github.com/projectcalico/calico/libcalico-go,$(LIBCALICO_REPO),$(LIBCALICO_BRANCH))
   401  
   402  update-confd-pin:
   403  	$(call update_replace_pin,github.com/kelseyhightower/confd,$(CONFD_REPO),$(CONFD_BRANCH))
   404  
   405  update-felix-pin:
   406  	$(call update_pin,github.com/projectcalico/calico/felix,$(FELIX_REPO),$(FELIX_BRANCH))
   407  
   408  replace-felix-pin:
   409  	$(call update_replace_pin,github.com/projectcalico/calico/felix,$(FELIX_REPO),$(FELIX_BRANCH))
   410  
   411  update-cni-plugin-pin:
   412  	$(call update_pin,github.com/projectcalico/calico/cni-plugin,$(CNI_REPO),$(CNI_BRANCH))
   413  
   414  replace-cni-pin:
   415  	$(call update_replace_pin,github.com/projectcalico/calico/cni-plugin,$(CNI_REPO),$(CNI_BRANCH))
   416  
   417  git-status:
   418  	git status --porcelain
   419  
   420  git-config:
   421  ifdef CONFIRM
   422  	git config --global user.name "marvin-tigera"
   423  	git config --global user.email "marvin@projectcalico.io"
   424  endif
   425  
   426  git-commit:
   427  	git diff --quiet HEAD || git commit -m "Semaphore Automatic Update" go.mod go.sum $(EXTRA_FILES_TO_COMMIT)
   428  
   429  ###############################################################################
   430  # External resource affecting macros
   431  # The following macros affect resources outside of the local environment that
   432  # they're run in, i.e. pushing to docker or github. If CONFIRM is not defined,
   433  # then the commands are just printed instead of run.
   434  #
   435  # The <command>-cmd macro should never be run directly; it's used to define
   436  # the command the macro runs, but depending on whether CONFIRM is defined the
   437  # command may be printed or run.
   438  #
   439  # You can redefine <command>-cmd to have the targets in this makefile use a
   440  # different implementation.
   441  ###############################################################################
   442  
   443  define yq_cmd
   444  	$(shell yq --version | grep v$1.* >/dev/null && which yq || echo docker run --rm --user="root" -i -v "$(shell pwd)":/workdir mikefarah/yq:$1 $(if $(shell [ $1 -lt 4 ] && echo "true"), yq,))
   445  endef
   446  YQ_V4 = $(call yq_cmd,4)
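        # Example usage (a sketch; the file and expression are hypothetical, and the file must live under the
        # current working directory when the dockerized yq is used):
        #   $(YQ_V4) eval '.metadata.name' manifests/example.yaml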
   447  
   448  ifdef LOCAL_CRANE
   449  CRANE_CMD         = bash -c $(double_quote)crane
   450  else
   451  CRANE_CMD         = docker run -t --entrypoint /bin/sh -v $(DOCKER_CONFIG):/root/.docker/config.json $(CALICO_BUILD) -c \
   452                      $(double_quote)crane
   453  endif
   454  
   455  GIT_CMD           = git
   456  DOCKER_CMD        = docker
   457  
   458  ifdef CONFIRM
   459  CRANE         = $(CRANE_CMD)
   460  GIT           = $(GIT_CMD)
   461  DOCKER        = $(DOCKER_CMD)
   462  else
   463  CRANE         = echo [DRY RUN] $(CRANE_CMD)
   464  GIT           = echo [DRY RUN] $(GIT_CMD)
   465  DOCKER        = echo [DRY RUN] $(DOCKER_CMD)
   466  endif
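        # For example (illustrative): without CONFIRM set, "make commit-and-push-pr" only echoes the git commands
        # prefixed with [DRY RUN]; with CONFIRM=true the same commands are actually executed.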
   467  
   468  commit-and-push-pr:
   469  	$(GIT) add $(GIT_COMMIT_FILES)
   470  	$(GIT) commit -m $(GIT_COMMIT_MESSAGE)
   471  	$(GIT) push $(GIT_REMOTE) $(GIT_PR_BRANCH_HEAD)
   472  
   473  ###############################################################################
   474  # GitHub API helpers
   475  #   Helper macros and targets to help with communicating with the github API
   476  ###############################################################################
   477  GIT_COMMIT_MESSAGE?="Automatic Pin Updates"
   478  GIT_PR_BRANCH_BASE?=$(SEMAPHORE_GIT_BRANCH)
   479  PIN_UPDATE_BRANCH?=semaphore-auto-pin-updates-$(GIT_PR_BRANCH_BASE)
   480  GIT_PR_BRANCH_HEAD?=$(PIN_UPDATE_BRANCH)
   481  GIT_REPO_SLUG?=$(SEMAPHORE_GIT_REPO_SLUG)
   482  GIT_PIN_UPDATE_COMMIT_FILES?=go.mod go.sum
   483  GIT_PIN_UPDATE_COMMIT_EXTRA_FILES?=$(GIT_COMMIT_EXTRA_FILES)
   484  GIT_COMMIT_FILES?=$(GIT_PIN_UPDATE_COMMIT_FILES) $(GIT_PIN_UPDATE_COMMIT_EXTRA_FILES)
   485  
   486  # Call the github API. $(1) is the http method type for the https request, $(2) is the repo slug, and $(3) is the json
   487  # data (if omitted then no data is set for the request). If GITHUB_API_EXIT_ON_FAILURE is set then the macro exits with 1
   488  # on failure. On success, the ENV variable GITHUB_API_RESPONSE will contain the response from github
   489  define github_call_api
   490  	$(eval CMD := curl -f -X$(1) \
   491  		-H "Content-Type: application/json"\
   492  		-H "Authorization: token ${GITHUB_TOKEN}"\
   493  		https://api.github.com/repos/$(2) $(if $(3),--data '$(3)',))
   494  	$(eval GITHUB_API_RESPONSE := $(shell $(CMD) | sed -e 's/#/\\\#/g'))
   495  	$(if $(GITHUB_API_EXIT_ON_FAILURE), $(if $(GITHUB_API_RESPONSE),,exit 1),)
   496  endef
   497  
   498  # Create the pull request. $(1) is the repo slug, $(2) is the title, $(3) is the head branch and $(4) is the base branch.
   499  # If the call was successful then the ENV variable PR_NUMBER will contain the pull request number of the created pull request.
   500  define github_pr_create
   501  	$(eval JSON := {"title": "$(2)", "head": "$(3)", "base": "$(4)"})
   502  	$(call github_call_api,POST,$(1)/pulls,$(JSON))
   503  	$(eval PR_NUMBER := $(filter-out null,$(shell echo '$(GITHUB_API_RESPONSE)' | jq '.number')))
   504  endef
   505  
   506  # Create a comment on a pull request. $(1) is the repo slug, $(2) is the pull request number, and $(3) is the comment
   507  # body.
   508  define github_pr_add_comment
   509  	$(eval JSON := {"body":"$(3)"})
   510  	$(call github_call_api,POST,$(1)/issues/$(2)/comments,$(JSON))
   511  endef
   512  
   513  # List open pull requests for a head and base. $(1) is the repo slug, $(2) is the branch head, $(3) is the branch base,
   514  # and $(4) is the state.
   515  define github_pr_list
   516  	$(eval QUERY := $(if $(2),head=$(2),)$(if $(3),\&base=$(3))$(if $(4),\&state=$(4),))
   517  	$(call github_call_api,GET,$(1)/pulls?$(QUERY),)
   518  endef
   519  
   520  # Check if there is a pull request with head GIT_PR_BRANCH_HEAD and base GIT_PR_BRANCH_BASE for the repo with slug
   521  # GIT_REPO_SLUG. If such a PR exists, PR_EXISTS will be set to 0; otherwise it is set to 1.
   522  check-if-pin-update-pr-exists:
   523  ifndef ORGANIZATION
   524  	@echo "ORGANIZATION must be set for the project."
   525  	exit 1
   526  endif
   527  	$(call github_pr_list,$(GIT_REPO_SLUG),$(ORGANIZATION):$(GIT_PR_BRANCH_HEAD),$(GIT_PR_BRANCH_BASE),open)
   528  	$(eval PR_EXISTS := $(if $(filter-out 0,$(shell echo '$(GITHUB_API_RESPONSE)' | jq '. | length')),0,1))
   529  
   530  ###############################################################################
   531  # Auto pin update targets
   532  #   Targets updating the pins
   533  ###############################################################################
   534  GITHUB_API_EXIT_ON_FAILURE?=1
   535  
   536  ## Update dependency pins to their latest changeset, committing and pushing it.
   537  ## DEPRECATED: This will be removed along with associated helper functions in future releases. Use the trigger-auto-pin-update-process
   538  ## target to create a PR with the pin updates.
   539  .PHONY: commit-pin-updates
   540  commit-pin-updates: update-pins git-status git-config git-commit ci git-push
   541  
   542  # Creates and checks out the branch defined by GIT_PR_BRANCH_HEAD. It attempts to delete the branch from the local and
   543  # remote repositories. Requires CONFIRM or DRYRUN to be set; otherwise it fails with an error.
   544  create-pin-update-head: var-require-one-of-CONFIRM-DRYRUN
   545  ifeq ($(shell git rev-parse --abbrev-ref HEAD),$(GIT_PR_BRANCH_HEAD))
   546  	@echo "Current branch is pull request head, cannot set it up."
   547  	exit 1
   548  endif
   549  	-git branch -D $(GIT_PR_BRANCH_HEAD)
   550  	-$(GIT) push $(GIT_REMOTE) --delete $(GIT_PR_BRANCH_HEAD)
   551  	git checkout -b $(GIT_PR_BRANCH_HEAD)
   552  
   553  create-pin-update-pr:
   554  	$(call github_pr_create,$(GIT_REPO_SLUG),[$(GIT_PR_BRANCH_BASE)] Semaphore Auto Pin Update,$(GIT_PR_BRANCH_HEAD),$(GIT_PR_BRANCH_BASE))
   555  	echo 'Created pin update pull request $(PR_NUMBER)'
   556  
   557  # Add the "/merge-when-ready" comment to enable the "merge when ready" functionality, i.e. when the pull request is passing
   558  # the tests and has been approved, merge it. PR_NUMBER is set by the dependent target.
   559  set-merge-when-ready-on-pin-update-pr:
   560  	$(call github_pr_add_comment,$(GIT_REPO_SLUG),$(PR_NUMBER),/merge-when-ready delete-branch)
   561  	echo "Added '/merge-when-ready' comment command to pull request $(PR_NUMBER)"
   562  
   563  # Call the update-pins target with the GIT_PR_BRANCH_BASE as the PIN_BRANCH
   564  trigger-pin-updates:
   565  	PIN_BRANCH=$(GIT_PR_BRANCH_BASE) $(MAKE) update-pins
   566  
   567  # POST_PIN_UPDATE_TARGETS specifies targets that should be run after the pins have been updated, e.g. targets
   568  # that modify files tied to the dependencies. An example would be generated files that change based on
   569  # a dependency update. This variable would likely need to be used in tandem with GIT_PIN_UPDATE_COMMIT_EXTRA_FILES so the
   570  # updated files are committed with the pin update.
   571  POST_PIN_UPDATE_TARGETS ?=
   572  
   573  # Trigger the auto pin update process. This involves updating the pins, committing and pushing them to github, creating
   574  # a pull request, and adding the "/merge-when-ready" comment command. If there is already a pin update PR for the base
   575  # branch, the pin update is not done and the target will exit.
   576  trigger-auto-pin-update-process: check-if-pin-update-pr-exists
   577  	$(if $(filter $(PR_EXISTS),0),echo "A pull request for head '$(GIT_PR_BRANCH_HEAD)' and base '$(GIT_PR_BRANCH_BASE)' already exists.",\
   578  		$(MAKE) trigger-auto-pin-update-process-wrapped)
   579  
   580  trigger-auto-pin-update-process-wrapped: create-pin-update-head trigger-pin-updates $(POST_PIN_UPDATE_TARGETS)
   581  	$(if $(shell git diff --quiet HEAD $(GIT_COMMIT_FILES) || echo "true"),\
   582  		$(MAKE) commit-and-push-pr create-pin-update-pr set-merge-when-ready-on-pin-update-pr,echo "Pins are up to date")
   583  
   584  ###############################################################################
   585  # Static checks
   586  #   repos can specify additional checks by setting LOCAL_CHECKS
   587  ###############################################################################
   588  .PHONY: static-checks
   589  ## Run static source code checks (lint, formatting, ...)
   590  static-checks: $(LOCAL_CHECKS)
   591  	$(MAKE) check-fmt golangci-lint
   592  
   593  LINT_ARGS ?= --max-issues-per-linter 0 --max-same-issues 0 --timeout 8m
   594  
   595  .PHONY: golangci-lint
   596  golangci-lint: $(GENERATED_FILES)
   597  	$(DOCKER_RUN) $(CALICO_BUILD) sh -c '$(GIT_CONFIG_SSH) golangci-lint run $(LINT_ARGS)'
   598  
   599  .PHONY: go-fmt goimports fix
   600  fix go-fmt goimports:
   601  	$(DOCKER_RUN) $(CALICO_BUILD) sh -c 'find . -iname "*.go" ! -wholename "./vendor/*" | xargs goimports -w -local github.com/projectcalico/calico/'
   602  
   603  check-fmt:
   604  	@echo "Checking code formatting.  Any listed files don't match goimports:"
   605  	$(DOCKER_RUN) $(CALICO_BUILD) bash -c 'exec 5>&1; ! [[ `find . -iname "*.go" ! -wholename "./vendor/*" | xargs goimports -l -local github.com/projectcalico/calico/ | tee >(cat >&5)` ]]'
   606  
   607  .PHONY: pre-commit
   608  pre-commit:
   609  	$(DOCKER_RUN) $(CALICO_BUILD) git-hooks/pre-commit-in-container
   610  
   611  .PHONY: install-git-hooks
   612  install-git-hooks:
   613  	$(REPO_DIR)/install-git-hooks
   614  
   615  .PHONY: check-module-path-tigera-api
   616  check-module-path-tigera-api:
   617  	@echo "Checking that the repo imports tigera/api and does not import projectcalico/api"
   618  	@IMPORT_TIGERA_API=$$($(DOCKER_GO_BUILD) sh -c 'go list -m github.com/tigera/api > /dev/null 2>&1 && echo yes || echo no'); \
   619  	echo Is tigera/api imported? $$IMPORT_TIGERA_API; \
   620  	if [ "$$IMPORT_TIGERA_API" != "yes" ]; then \
   621  	     echo "Error: This repo should import tigera/api module."; \
   622  	     false; \
   623  	fi
   624  	@IMPORT_PROJECTCALICO_API=$$($(DOCKER_GO_BUILD) sh -c 'go list -m github.com/projectcalico/calico/api > /dev/null 2>&1 && echo yes || echo no'); \
   625  	echo Is projectcalico/api imported? $$IMPORT_PROJECTCALICO_API; \
   626  	if [ "$$IMPORT_PROJECTCALICO_API" != "no" ]; then \
   627  	     echo "Error: This repo should NOT import projectcalico/api module."; \
   628  	     false; \
   629  	fi
   630  
   631  .PHONY: check-module-path-projectcalico-api
   632  check-module-path-projectcalico-api:
   633  	@echo "Checking that the repo imports projectcalico/api and does not import tigera/api"
   634  	@IMPORT_PROJECTCALICO_API=$$($(DOCKER_GO_BUILD) sh -c 'go list -m github.com/projectcalico/calico/api > /dev/null 2>&1 && echo yes || echo no'); \
   635  	echo Is projectcalico/api imported? $$IMPORT_PROJECTCALICO_API; \
   636  	if [ "$$IMPORT_PROJECTCALICO_API" != "yes" ]; then \
   637  	     echo "Error: This repo should import projectcalico/api module."; \
   638  	     false; \
   639  	fi
   640  	@IMPORT_TIGERA_API=$$($(DOCKER_GO_BUILD) sh -c 'go list -m github.com/tigera/api > /dev/null 2>&1 && echo yes || echo no'); \
   641  	echo Is tigera/api imported? $$IMPORT_TIGERA_API; \
   642  	if [ "$$IMPORT_TIGERA_API" != "no" ]; then \
   643  	     echo "Error: This repo should NOT import tigera/api module."; \
   644  	     false; \
   645  	fi
   646  
   647  ###############################################################################
   648  # go mod helpers
   649  ###############################################################################
   650  mod-download:
   651  	-$(DOCKER_RUN) $(CALICO_BUILD) sh -c '$(GIT_CONFIG_SSH) go mod download'
   652  
   653  mod-tidy:
   654  	-$(DOCKER_RUN) $(CALICO_BUILD) sh -c '$(GIT_CONFIG_SSH) go mod tidy'
   655  
   656  ###############################################################################
   657  # Semaphore helpers
   658  ###############################################################################
   659  
   660  # These semaphore project IDs are defined here because you cannot easily look them up in the semaphore API. This gives
   661  # us a single place to define these values; projects can then reference the readable ENV variable when they need a semaphore
   662  # project ID.
   663  SEMAPHORE_API_PROJECT_ID=9625623e-bfc5-435f-9c22-74f9cd8622fc
   664  SEMAPHORE_API_TIGERA_PROJECT_ID=48d23719-405f-4827-b58a-7de0598a6bf5
   665  SEMAPHORE_API_SERVER_PROJECT_ID=6e4eb5b2-0150-4624-968d-f96a1cd9c37d
   666  SEMAPHORE_API_SERVER_OSS_PROJECT_ID=10f6c7c1-7eaa-4e75-a9d1-83e5426158b1
   667  SEMAPHORE_APP_POLICY_PRIVATE_PROJECT_ID=fa098f05-b2d2-4cf6-ac83-aa1e38e95670
   668  SEMAPHORE_APP_POLICY_PROJECT_ID=bc654d5c-bb68-4b00-9d02-289291762b1d
   669  SEMAPHORE_BIRD_PROJECT_ID=c1cc5eaf-873b-4113-a85e-a555361413e6
   670  SEMAPHORE_CC_PORTAL=2b3f9721-a851-4a97-981f-0cb81f93ddd0
   671  SEMAPHORE_CALICO_PRIVATE_PROJECT_ID=8a309869-f767-49dc-924f-fa927edbf657
   672  SEMAPHORE_CALICO_PROJECT_ID=828e6de6-ed4b-49c7-9cb5-ac1246d454de
   673  SEMAPHORE_CALICO_USAGE_PROJECT_ID=29f53c2b-8266-4873-879d-19b65960b3fd
   674  SEMAPHORE_CALICOCTL_PRIVATE_PROJECT_ID=8d885379-6a1b-4fc8-aa45-dc0cfb87894a
   675  SEMAPHORE_CALICOCTL_PROJECT_ID=193ce75a-7a47-4c9f-b966-f25c83e62213
   676  SEMAPHORE_CALICOQ_PROJECT_ID=dc79e0e9-a7b3-40f5-8dc2-2818210ee0a9
   677  SEMAPHORE_CLOUD_CONTROLLERS_PRIVATE_PROJECT_ID=f70e6c08-887b-481d-9591-68e243b32b32
   678  SEMAPHORE_CNI_PLUGIN_PRIVATE_PROJECT_ID=f2c02a84-5fcd-49ed-b4cb-a6273409f0de
   679  SEMAPHORE_CNI_PLUGIN_PROJECT_ID=741ec781-5dbb-4494-ba90-ec6831a9b176
   680  SEMAPHORE_COMPLIANCE_PROJECT_ID=958a9147-ec94-4e99-b4c8-de7857653bb9
   681  SEMAPHORE_CONFD_PROJECT_ID=4c6b815f-d42c-4436-aafa-651fbaf5859e
   682  SEMAPHORE_CONFD_PRIVATE_PROJECT_ID=d3a7649a-3a39-45bf-95e9-fd6df3d0a7b1
   683  SEMAPHORE_CURATOR_PROJECT_ID=c391dcff-6933-40e7-a6d1-1dcf7e6e231d
   684  SEMAPHORE_DEEP_PACKET_INSPECTION_PROJECT_ID=81c0981e-979c-4741-8143-22166384afa1
   685  SEMAPHORE_DEXIDP_DOCKER_PROJECT_ID=ee618372-35c8-4f83-bd05-d3a96ac2b276
   686  SEMAPHORE_EGRESS_GATEWAY_PROJECT_ID=f01056ec-75f9-46a0-9ae2-6fc5e391136c
   687  SEMAPHORE_ELASTICSEARCH_DOCKER_PROJECT_ID=0a3a5bf6-19e4-4210-a3fa-15fc857596ac
   688  SEMAPHORE_ELASTICSEARCH_METRICS_PROJECT_ID=306b29c0-aa86-4b76-9c3e-c78a327e7d83
   689  SEMAPHORE_ENVOY_DOCKER_PROJECT_ID=b8db000b-c2c4-44cd-a22d-51df73dfdcba
   690  SEMAPHORE_ES_PROXY_IMAGE_PROJECT_ID=bc7ee48d-0051-4ceb-961d-03659463ada4
   691  SEMAPHORE_ES_GATEWAY_PROJECT_ID=3c01c819-532b-4ccc-8305-5dd45c10bf93
   692  SEMAPHORE_FELIX_PRIVATE_PROJECT_ID=e439cca4-156c-4d23-b611-002601440ad0
   693  SEMAPHORE_FELIX_PROJECT_ID=48267e65-4acc-4f27-a88f-c3df0e8e2c3b
   694  SEMAPHORE_FIREWALL_INTEGRATION_PROJECT_ID=d4307a31-1e46-4622-82e2-886165b77008
   695  SEMAPHORE_FLUENTD_DOCKER_PROJECT_ID=50383fb9-d234-461a-ae00-23e18b7cd5b8
   696  SEMAPHORE_HONEYPOD_CONTROLLER_PROJECT_ID=c010a63a-ac85-48b4-9077-06188408eaee
   697  SEMAPHORE_HONEYPOD_RECOMMENDATION_PROJECT_ID=f07f5fd4-b15a-4ded-ae1e-04801ae4d99a
   698  SEMAPHORE_INGRESS_COLLECTOR_PROJECT_ID=cf7947e4-a886-404d-ac6a-c3f3ac1a7b93
   699  SEMAPHORE_INTRUSION_DETECTION_PROJECT_ID=2beffe81-b05a-41e0-90ce-e0d847dee2ee
   700  SEMAPHORE_KEY_CERT_PROVISIONER_PROJECT_ID=9efb25f3-8c5d-4f22-aab5-4a1f5519bc7c
   701  SEMAPHORE_KUBE_CONTROLLERS_PRIVATE_PROJECT_ID=0b8651d0-6c5d-4076-ab1d-25b120d0f670
   702  SEMAPHORE_KUBE_CONTROLLERS_PROJECT_ID=d688e2ce-8c4a-4402-ba54-3aaa0eb53e5e
   703  SEMAPHORE_KUBECTL_CALICO_PROJECT_ID=37d7cb2b-62b0-4178-9424-de766f2de59b
   704  SEMAPHORE_KIBANA_DOCKER_PROJECT_ID=eaafdbad-4546-4582-b8fa-cea05a80a04d
   705  SEMAPHORE_LIBCALICO_GO_PRIVATE_PROJECT_ID=72fa12b5-5ad5-43ae-b0ac-17f9f7c71030
   706  SEMAPHORE_LIBCALICO_GO_PROJECT_ID=ce3e6bed-1fb6-4501-80e5-2121a266a386
   707  SEMAPHORE_LICENSE_AGENT_PROJECT_ID=beb13609-8ee0-461a-a08b-dab86af1c128
   708  SEMAPHORE_LICENSING_PROJECT_ID=344f1cf0-0c3f-4fa3-b89b-3c35127b3054
   709  SEMAPHORE_L7_COLLECTOR_PROJECT_ID=b02e7bbf-39ee-4c0c-a6f6-793cdf89daa7
   710  SEMAPHORE_LMA_PROJECT_ID=5130e1d3-d9cd-4270-9e62-57f98d34495e
   711  SEMAPHORE_MANAGER_PROJECT_ID=325ca49d-5111-4b07-a54f-dc0c7ec538bb
   712  SEMAPHORE_NETWORKING_CALICO_PROJECT_ID=0a7883cb-b727-4113-948d-b95cb00df6b6
   713  SEMAPHORE_NODE_PRIVATE_PROJECT_ID=edd8246c-7116-473a-81c8-7a3bbbc07228
   714  SEMAPHORE_NODE_PROJECT_ID=980a06a4-9d43-43f8-aedd-a3bfad258de6
   715  SEMAPHORE_OPERATOR_PROJECT_ID=8343e619-cc44-4be4-a9d7-21963ebc1c8f
   716  SEMAPHORE_PACKETCAPTURE_API_PROJECT_ID=f505b00c-57c3-4859-8b97-ff4095b5ab25
   717  SEMAPHORE_PERFORMANCE_HOTSPOTS_PROJECT_ID=6a343a02-0acf-4c52-9cc7-24ee51377e32
   718  SEMAPHORE_POD2DAEMON_PROJECT_ID=eb2eea4f-c185-408e-9837-da0d231428fb
   719  SEMAPHORE_PROMETHEUS_SERVICE_PROJECT_ID=d5b7ed99-8966-46cc-90f2-9027c428db48
   720  SEMAPHORE_SKIMBLE_PROJECT_ID=35171baf-8daf-4725-882f-c301851a6e1d
   721  SEMAPHORE_TS_QUERYSERVER_PROJECT_ID=5dbe4688-0c21-40fb-89f7-a2d64c17401b
   722  SEMAPHORE_TYPHA_PROJECT_ID=c2ea3f0a-58a0-427a-9ed5-6eff8d6543b3
   723  SEMAPHORE_TYPHA_PRIVATE_PROJECT_ID=51e84cb9-0f38-408a-a113-0f5ca71844d7
   724  SEMAPHORE_VOLTRON_PROJECT_ID=9d239362-9594-4c84-8983-868ee19ebd41
   725  
   726  SEMAPHORE_WORKFLOW_BRANCH?=master
   727  
   728  # Sends a request to the semaphore API to run the requested workflow. It requires setting the SEMAPHORE_API_TOKEN, SEMAPHORE_PROJECT_ID,
   729  # SEMAPHORE_WORKFLOW_BRANCH, and SEMAPHORE_WORKFLOW_FILE ENV variables.
   730  semaphore-run-workflow:
   731  	$(eval CMD := curl -f -X POST \
   732  		-H "Authorization: Token $(SEMAPHORE_API_TOKEN)" \
   733  		-d "project_id=$(SEMAPHORE_PROJECT_ID)&reference=$(SEMAPHORE_WORKFLOW_BRANCH)&commit_sha=$(SEMAPHORE_COMMIT_SHA)&pipeline_file=.semaphore/$(SEMAPHORE_WORKFLOW_FILE)" \
   734  		"https://tigera.semaphoreci.com/api/v1alpha/plumber-workflows")
   735  	$(eval SEMAPHORE_API_RESPONSE := $(shell $(CMD) | jq -R '.' | sed -e 's/#/\\\#/g'))
   736  	$(if $(SEMAPHORE_API_RESPONSE),,exit 1)
   737  	$(eval WORKFLOW_ID := $(shell echo $(SEMAPHORE_API_RESPONSE) | jq -r '.workflow_id'))
   738  	@echo Semaphore workflow successfully created here https://tigera.semaphoreci.com/workflows/$(WORKFLOW_ID)
   739  
   740  # This is a helpful wrapper of the semaphore-run-workflow target to run the update_pins workflow file for a project.
   741  semaphore-run-auto-pin-update-workflow:
   742  	SEMAPHORE_WORKFLOW_FILE=update_pins.yml $(MAKE) semaphore-run-workflow
   743  	@echo Successfully triggered the semaphore pin update workflow
   744  
   745  # This target triggers the 'semaphore-run-auto-pin-update-workflow' target for every SEMAPHORE_PROJECT_ID in the list of
   746  # SEMAPHORE_AUTO_PIN_UPDATE_PROJECT_IDS.
   747  semaphore-run-auto-pin-update-workflows:
   748  	for ID in $(SEMAPHORE_AUTO_PIN_UPDATE_PROJECT_IDS); do\
   749  		SEMAPHORE_WORKFLOW_BRANCH=$(SEMAPHORE_GIT_BRANCH) SEMAPHORE_PROJECT_ID=$$ID $(MAKE) semaphore-run-auto-pin-update-workflow; \
   750  	done
   751  
   752  ###############################################################################
   753  # Mock helpers
   754  ###############################################################################
   755  # Helper targets for testify mock generation
   756  
   757  # Generate testify mocks in the build container.
   758  gen-mocks:
   759  	$(DOCKER_RUN) $(CALICO_BUILD) sh -c '$(MAKE) mockery-run'
   760  	# The generated files need import reordering to pass static-checks
   761  	$(MAKE) fix
   762  
   763  # Run mockery for each path in MOCKERY_FILE_PATHS. The generated mocks are
   764  # created in-package and in test files. For more information, see https://github.com/vektra/mockery.
   765  mockery-run:
   766  	for FILE_PATH in $(MOCKERY_FILE_PATHS); do\
   767  		DIR=$$(dirname $$FILE_PATH); \
   768  		INTERFACE_NAME=$$(basename $$FILE_PATH); \
   769  		mockery --dir $$DIR --name $$INTERFACE_NAME --inpackage; \
   770  	done
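        # For example (illustrative, with hypothetical paths): MOCKERY_FILE_PATHS="pkg/client/Client pkg/cache/Cache"
        # runs "mockery --dir pkg/client --name Client --inpackage" and the equivalent for Cache.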
   771  
   772  ###############################################################################
   773  # Docker helpers
   774  ###############################################################################
   775  # Helper targets working with docker images.
   776  
   777  # docker-compress takes the docker image specified by IMAGE_NAME and compresses all the layers into a single one. This is
   778  # done by exporting the given image and then re-importing it with the given IMAGE_NAME.
   779  #
   780  # When a docker image is exported all of the instructions are lost (i.e. ENTRYPOINT, ENV, ...), so before the image is
   781  # compressed the target inspects the image and pulls out the instructions. Each instruction that is pulled out is converted
   782  # into a change directive, or change directives, of the format "--change 'INSTRUCTION <instruction>'". These directives
   783  # are given to the docker import command so the instructions can be re-added to the compressed image.
   784  #
   785  # NOTE: This target does not attempt to copy every instruction from the original image to the compressed one. Any user of
   786  # this target should verify that the instructions they require are among those this target copies over.
   787  docker-compress:
   788  	$(eval JSONOBJ := "$(shell docker inspect $(IMAGE_NAME) | jq '.[0].Config' | jq -R '.' | sed -e 's/#/\\\#/g' ) ")
   789  #	Re add the entry point.
   790  	$(eval CHANGE := $(CHANGE)$(shell echo $(JSONOBJ) | jq -r \
   791  		"if has(\"Entrypoint\") and .Entrypoint != \"\" then \" --change 'ENTRYPOINT \(.Entrypoint)'\" else \"\" end"\
   792  	))
   793  #	Re add the command.
   794  	$(eval CHANGE := $(CHANGE)$(shell echo $(JSONOBJ) | jq -r \
   795  		"if has(\"Cmd\") and .Cmd != \"\" then \" --change 'CMD \(.Cmd)'\" else \"\" end"\
   796  	))
   797  #	Re add the working directory.
   798  	$(eval CHANGE := $(CHANGE)$(shell echo $(JSONOBJ) | jq -r \
   799  		"if has(\"WorkingDir\") and .WorkingDir != \"\" then \" --change 'WORKDIR \(.WorkingDir)'\" else \"\" end"\
   800  	))
   801  #	Re add the user.
   802  	$(eval CHANGE := $(CHANGE)$(shell echo $(JSONOBJ) | jq -r \
   803  		"if has(\"User\") and .User != \"\" then \" --change 'USER \(.User)'\" else \"\" end"\
   804  	))
   805  #	Re add the environment variables. .Env is an array of strings so add a "--change 'ENV <value>'" for each value in
   806  #	the array.
   807  	$(eval CHANGE := $(CHANGE)$(shell echo $(JSONOBJ) | jq -r \
   808  		"if has(\"Env\") and (.Env | length) > 0 then .Env | map(\" --change 'ENV \(.)'\") | join(\"\") else \"\" end"\
   809  	))
   810  #	Re add the labels. .Labels is a map of label names to label values, so add a "--change 'LABEL <key> <value>'" for
   811  #	each map entry.
   812  	$(eval CHANGE := $(CHANGE)$(shell echo $(JSONOBJ) | jq -r \
   813  		"if has(\"Labels\") and (.Labels | length) > 0 then .Labels | to_entries | map(\" --change 'LABEL \(.key) \(.value)'\") | join(\"\") else \"\" end"\
   814  	))
   815  #	Re add the exposed ports. .ExposedPorts is a map, but we're only interested in the keys of the map so for each key
   816  #	add "--change EXPOSE <key>".
   817  	$(eval CHANGE := $(CHANGE)$(shell echo $(JSONOBJ) | jq -r \
   818  		"if has(\"ExposedPorts\") and (.ExposedPorts | length) > 0 then .ExposedPorts | keys | map(\" --change 'EXPOSE \(.)'\") | join(\"\") else \"\" end"\
   819  	))
   820  	$(eval CONTAINER_ID := $(shell docker run -d -it --entrypoint /bin/true $(IMAGE_NAME) /bin/true))
   821  	docker export $(CONTAINER_ID) | docker import $(CHANGE) - $(IMAGE_NAME)
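        # Example usage (a sketch; the image name is hypothetical):
        #   make docker-compress IMAGE_NAME=example/image:latest-amd64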
   822  
   823  ###############################################################################
   824  # Image building and pushing
   825  ###############################################################################
   826  
   827  ###############################################################################
   828  # we want to be able to run the same recipe on multiple targets keyed on the image name
   829  # to do that, we would use the entire image name, e.g. calico/node:abcdefg, as the stem, or '%', in the target
   830  # however, make does **not** allow the usage of invalid filename characters - like / and : - in a stem, and thus errors out.
   831  # To get around that, we "escape" those characters by converting all : to --- and all / to ___ , so that we can use them
   832  # in the target; we then unescape them back.
   833  escapefs = $(subst :,---,$(subst /,___,$(1)))
   834  unescapefs = $(subst ---,:,$(subst ___,/,$(1)))
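        # For example (illustrative): $(call escapefs,quay.io/calico/node:latest) gives
        # quay.io___calico___node---latest, and unescapefs reverses the transformation.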
   835  
   836  # retag-build-images-with-registries retags the build / arch images specified by BUILD_IMAGES and VALIDARCHES with
   837  # the registries specified by DEV_REGISTRIES. The end tagged images are of the format
   838  # $(REGISTRY)/$(BUILD_IMAGES):<tag>-$(ARCH).
   839  retag-build-images-with-registries: $(addprefix retag-build-images-with-registry-,$(call escapefs,$(DEV_REGISTRIES)))
   840  
   841  # retag-build-images-with-registry-% retags the build / arch images specified by BUILD_IMAGES and VALIDARCHES with
   842  # the registry specified by $*.
   843  retag-build-images-with-registry-%:
   844  	$(MAKE) $(addprefix retag-build-image-with-registry-,$(call escapefs,$(BUILD_IMAGES))) REGISTRY=$(call unescapefs,$*)
   845  
   846  # retag-build-image-with-registry-% retags the build arch images specified by $* and VALIDARCHES with the
   847  # registry specified by REGISTRY.
   848  retag-build-image-with-registry-%: var-require-all-REGISTRY-BUILD_IMAGES
   849  	$(MAKE) $(addprefix retag-build-image-arch-with-registry-,$(VALIDARCHES)) BUILD_IMAGE=$(call unescapefs,$*)
   850  
   851  # retag-build-image-arch-with-registry-% retags the build / arch image specified by $* and BUILD_IMAGE with the
   852  # registry specified by REGISTRY.
   853  retag-build-image-arch-with-registry-%: var-require-all-REGISTRY-BUILD_IMAGE-IMAGETAG
   854  	docker tag $(BUILD_IMAGE):$(LATEST_IMAGE_TAG)-$* $(call filter-registry,$(REGISTRY))$(BUILD_IMAGE):$(IMAGETAG)-$*
   855  	$(if $(filter $*,amd64),\
   856  		docker tag $(BUILD_IMAGE):$(LATEST_IMAGE_TAG)-$(ARCH) $(REGISTRY)/$(BUILD_IMAGE):$(IMAGETAG),\
   857  		$(NOECHO) $(NOOP)\
   858  	)
   859  
   860  # push-images-to-registries pushes the build / arch images specified by BUILD_IMAGES and VALIDARCHES to the registries
   861  # specified by DEV_REGISTRIES.
   862  push-images-to-registries: $(addprefix push-images-to-registry-,$(call escapefs,$(DEV_REGISTRIES)))
   863  
   864  # push-images-to-registry-% pushes the build / arch images specified by BUILD_IMAGES and VALIDARCHES to the registry
   865  # specified by $*.
   866  push-images-to-registry-%:
   867  	$(MAKE) $(addprefix push-image-to-registry-,$(call escapefs,$(BUILD_IMAGES))) REGISTRY=$(call unescapefs,$*)
   868  
   869  # push-image-to-registry-% pushes the build / arch images specified by $* and VALIDARCHES to the registry
   870  # specified by REGISTRY.
   871  push-image-to-registry-%:
   872  	$(MAKE) $(addprefix push-image-arch-to-registry-,$(VALIDARCHES)) BUILD_IMAGE=$(call unescapefs,$*)
   873  
   874  # push-image-arch-to-registry-% pushes the build / arch image specified by $* and BUILD_IMAGE to the registry
   875  # specified by REGISTRY.
   876  push-image-arch-to-registry-%:
   877  # If the registry we want to push to doesn't support manifests, don't push the ARCH image.
   878  	$(DOCKER) push --quiet $(call filter-registry,$(REGISTRY))$(BUILD_IMAGE):$(IMAGETAG)-$*
   879  	$(if $(filter $*,amd64),\
   880  		$(DOCKER) push $(REGISTRY)/$(BUILD_IMAGE):$(IMAGETAG),\
   881  		$(NOECHO) $(NOOP)\
   882  	)
   883  
   884  # push multi-arch manifest where supported.
   885  push-manifests: var-require-all-IMAGETAG  $(addprefix sub-manifest-,$(call escapefs,$(PUSH_MANIFEST_IMAGES)))
   886  sub-manifest-%:
   887  	$(DOCKER) manifest create $(call unescapefs,$*):$(IMAGETAG) $(addprefix --amend ,$(addprefix $(call unescapefs,$*):$(IMAGETAG)-,$(VALIDARCHES)))
   888  	$(DOCKER) manifest push --purge $(call unescapefs,$*):$(IMAGETAG)
   889  
   890  push-manifests-with-tag: var-require-one-of-CONFIRM-DRYRUN var-require-all-BRANCH_NAME
   891  	$(MAKE) push-manifests IMAGETAG=$(if $(IMAGETAG_PREFIX),$(IMAGETAG_PREFIX)-)$(BRANCH_NAME) EXCLUDEARCH="$(EXCLUDEARCH)"
   892  	$(MAKE) push-manifests IMAGETAG=$(if $(IMAGETAG_PREFIX),$(IMAGETAG_PREFIX)-)$(GIT_VERSION) EXCLUDEARCH="$(EXCLUDEARCH)"
   893  
   894  # cd-common tags and pushes images with the branch name and git version. This target uses PUSH_IMAGES, BUILD_IMAGE,
   895  # and BRANCH_NAME env variables to figure out what to tag and where to push it to.
   896  cd-common: var-require-one-of-CONFIRM-DRYRUN var-require-all-BRANCH_NAME
   897  	$(MAKE) retag-build-images-with-registries push-images-to-registries IMAGETAG=$(if $(IMAGETAG_PREFIX),$(IMAGETAG_PREFIX)-)$(BRANCH_NAME) EXCLUDEARCH="$(EXCLUDEARCH)"
   898  	$(MAKE) retag-build-images-with-registries push-images-to-registries IMAGETAG=$(if $(IMAGETAG_PREFIX),$(IMAGETAG_PREFIX)-)$(GIT_VERSION) EXCLUDEARCH="$(EXCLUDEARCH)"
   899  
   900  ###############################################################################
   901  # Release targets and helpers
   902  #
   903  # The following targets and macros are used to help start and cut releases.
   904  # At high level, this involves:
   905  # - Creating release branches
   906  # - Adding empty commits to start next release, and updating the 'dev' tag
   907  # - Adding the 'release' tag to the commit that will be released
   908  # - Creating an empty commit for the next potential patch release, and updating
   909  #   the dev tag on that commit
   910  # - Copying images for the released commit over to the release registries, and
   911  #   re-tagging those images with the release tag
   912  #
   913  # The following definitions will be helpful in understanding this process:
   914  # - 'dev' tag: A git tag of the form of `v3.8.0-calient-0.dev-36-g3a618e61c2d3`
   915  #   that every commit has. The start of the dev tag, i.e. v3.8.0, is the
   916  #   release that this commit will go into.
   917  # - 'release' tag: A git tag of the form of `v3.8.0`. The commit that a release
   918  #   is cut from will have this tag, i.e. you can find the commit that release
   919  #   3.8 uses by finding the commit with the tag v3.8.0.
   920  # - 'dev' image: The image that is created for every commit that is merged to
   921  #   master or a release branch. This image is tagged with the dev tag, i.e.
   922  #   if commit 3a618e61c2d3 is on master or a release branch, there will be
   923  #   an image for that commit in the dev registry with the tag
   924  #   `v3.8.0-calient-0.dev-36-g3a618e61c2d3`.
   925  # - 'release' image: The public image the customers will use to install our
   926  #   product. Producing this is the goal of cutting the release. This image
   927  #   will be in the release registries, and will be tagged with the release tag,
   928  #   i.e. the release image for release 3.8 will have the v3.8.0 tag, or if it's
   929  #   a patch release it will be v3.8.<patch version>
   930  ###############################################################################
   931  fetch-all:
   932  	git fetch --all -q
   933  
   934  # git-dev-tag retrieves the dev tag for the current commit (the one the dev images are tagged with).
   935  git-dev-tag = $(shell git describe --tags --long --always --abbrev=12 --match "*dev*")
   936  # git-release-tag-from-dev-tag gets the release version from the current commit's dev tag.
   937  git-release-tag-from-dev-tag = $(shell echo $(call git-dev-tag) | grep -P -o "^v\d*.\d*.\d*(-.*)?(?=-$(DEV_TAG_SUFFIX))")
   938  # git-release-tag-for-current-commit gets the release tag for the current commit if there is one.
   939  git-release-tag-for-current-commit = $(shell git describe --tags --exact-match --exclude "*dev*")
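        # For example (illustrative, assuming DEV_TAG_SUFFIX=calient-0.dev): a dev tag of
        # v3.8.0-calient-0.dev-36-g3a618e61c2d3 yields the release tag v3.8.0.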
   940  
   941  # release-branch-for-tag finds the latest branch that corresponds to the given tag.
   942  release-branch-for-tag = $(firstword $(shell git --no-pager branch --format='%(refname:short)' --contains $1 | grep -P "^release"))
   943  # commit-for-tag finds the latest commit that corresponds to the given tag.
   944  commit-for-tag = $(shell git rev-list -n 1 $1)
   945  git-commit-for-remote-tag = $(shell git ls-remote -q --tags $(GIT_REMOTE) $1 | awk '{print $$1}')
   946  # current-branch gets the name of the branch for the current commit.
   947  current-branch = $(shell git rev-parse --abbrev-ref HEAD)
   948  
   949  # RELEASE_BRANCH_BASE is used when creating a release branch to confirm the correct base is being used. It's
   950  # configurable so that a dry run can be done from a PR branch.
   951  RELEASE_BRANCH_BASE ?=master
   952  
   953  # var-set-% checks if there is a non-empty variable for the value described by %. If the variable is set, a 1 is
   954  # appended to VARSET. If it is not set and FAIL_NOT_SET is set, then var-set-% fails with an error message; otherwise
   955  # it does nothing.
   956  var-set-%:
   957  	$(if $($*),$(eval VARSET+=1),$(if $(FAIL_NOT_SET),$(error $* is required but not set),))
   958  
   959  # var-require is used to check if one or all of the variables are set in REQUIRED_VARS, and fails if not. The variables
   960  # in REQUIRED_VARS are hyphen-separated.
   961  #
   962  # If FAIL_NOT_SET is set, then all variables described in REQUIRED_VARS must be set for var-require to not fail,
   963  # otherwise only one variable needs to be set for var-require to not fail.
   964  var-require: $(addprefix var-set-,$(subst -, ,$(REQUIRED_VARS)))
   965  	$(if $(VARSET),,$(error one of $(subst -, ,$(REQUIRED_VARS)) is not set or empty, but at least one is required))
   966  
   967  # var-require-all-% checks if there are non-empty variables set for the hyphen-separated values in %, and fails if
   968  # there isn't a non-empty variable for each given value. For instance, to require that both FOO and BAR be set, you
   969  # would call var-require-all-FOO-BAR.
   970  var-require-all-%:
   971  	$(MAKE) var-require REQUIRED_VARS=$* FAIL_NOT_SET=true
   972  
   973  # var-require-one-of-% checks if there are non-empty variables set for the hyphen-separated values in %, and fails if
   974  # there isn't a non-empty variable for at least one of the given values. For instance, to require that either FOO or BAR
   975  # be set, you would call var-require-one-of-FOO-BAR.
   976  var-require-one-of-%:
   977  	$(MAKE) var-require REQUIRED_VARS=$*
   978  
   979  # sem-cut-release triggers the cut-release pipeline (or test-cut-release if CONFIRM is not specified) in semaphore to
   980  # cut the release. The pipeline is triggered for the current commit, and the branch it's triggered on is calculated
   981  # from the RELEASE_VERSION, CNX, and OS variables given.
   982  #
   983  # Before the pipeline is triggered, this target validates that the expected release will be cut using the
   984  # RELEASE_TAG (optional and defaults to the current tag) and RELEASE_VERSION (required) variables. The RELEASE_TAG
   985  # should be the dev tag that the release is cut from, and RELEASE_VERSION should be the version expected to be released.
   986  # This target verifies that the current commit is tagged with the RELEASE_TAG and that cutting this commit will result
   987  # in RELEASE_VERSION being cut.
   988  sem-cut-release: var-require-one-of-CONFIRM-DRYRUN var-require-all-RELEASE_VERSION var-require-one-of-CNX-OS
   989  ifndef RELEASE_TAG
   990  	$(eval RELEASE_TAG = $(call git-dev-tag))
   991  else
   992  	$(eval RELEASE_TAG_COMMIT = $(call commit-for-tag,$(RELEASE_TAG)))
   993  	$(if $(filter-out $(RELEASE_TAG_COMMIT),$(GIT_COMMIT)),\
   994  		echo Current commit is not tagged with $(RELEASE_TAG) && exit 1)
   995  endif
   996  	$(eval CURRENT_RELEASE_VERSION = $(call git-release-tag-from-dev-tag))
   997  	$(if $(filter-out $(CURRENT_RELEASE_VERSION),$(RELEASE_VERSION)),\
   998  		echo Given release version $(RELEASE_VERSION) does not match current commit release version $(CURRENT_RELEASE_VERSION). && exit 1)
   999  
  1000  	$(eval RELEASE_BRANCH = release-$(if $CNX,calient-,)$(shell echo "$(RELEASE_VERSION)" | awk -F  "." '{print $$1"."$$2}'))
  1001  	$(eval WORKFLOW_FILE = $(if $(CONFIRM),cut-release.yml,test-cut-release.yml))
  1002  
  1003  	@echo Cutting release for $(RELEASE_VERSION) from dev tag $(RELEASE_TAG) \(commit $(GIT_COMMIT)\)
  1004  	SEMAPHORE_WORKFLOW_BRANCH=$(RELEASE_BRANCH) SEMAPHORE_COMMIT_SHA=$(GIT_COMMIT) SEMAPHORE_WORKFLOW_FILE=$(WORKFLOW_FILE) $(MAKE) semaphore-run-workflow
  1005  
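# Illustrative invocations of sem-cut-release (the version and flag values are hypothetical):
#
#   make sem-cut-release RELEASE_VERSION=v3.18.1 CNX=true DRYRUN=true    # triggers test-cut-release.yml
#   make sem-cut-release RELEASE_VERSION=v3.18.1 CNX=true CONFIRM=true   # triggers cut-release.yml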
  1006  # cut-release uses the dev tags on the current commit to cut the release. More specifically, cut-release does the
  1007  # following:
  1008  # - Calculates the release tag from the dev tag on the commit
  1009  # - tags the current commit with the release tag then pushes that tag to github
  1010  # - retags the build images (specified by BUILD_IMAGES) in the dev registries (specified by DEV_REGISTRIES) with the
  1011  #	release tag
  1012  # - copies the build images (specified by BUILD_IMAGES) from the first dev registry to the release registries (specified
  1013  #	by RELEASE_REGISTRIES) and retags those images with the release tag
  1014  # - tags an empty commit at the head of the release branch with the next patch release dev tag and pushes that to github
  1015  cut-release: var-require-one-of-CONFIRM-DRYRUN
  1016  	$(MAKE) cut-release-wrapped RELEASE=true
  1017  
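# Illustrative invocations of cut-release (flag values are hypothetical):
#
#   make cut-release DRYRUN=true                    # rehearse the release without pushing tags or images
#   make cut-release CONFIRM=true                   # tag, push and retag/copy images for real
#   make cut-release CONFIRM=true IMAGE_ONLY=true   # only retag/copy images, skip the git tagging steps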
  1018  cut-release-wrapped: var-require-one-of-CONFIRM-DRYRUN
  1019  	$(eval DEV_TAG = $(call git-dev-tag))
  1020  	$(eval RELEASE_TAG = $(call git-release-tag-from-dev-tag))
  1021  	$(eval RELEASE_BRANCH = $(call release-branch-for-tag,$(DEV_TAG)))
  1022  ifdef EXPECTED_RELEASE_TAG
  1023  	$(if $(filter-out $(RELEASE_TAG),$(EXPECTED_RELEASE_TAG)),\
  1024  		@echo "Failed to verify release tag$(comma) expected release version is $(EXPECTED_RELEASE_TAG)$(comma) actual is $(RELEASE_TAG)."\
  1025  		&& exit 1)
  1026  endif
  1027  	$(eval NEXT_RELEASE_VERSION = $(shell echo "$(call git-release-tag-from-dev-tag)" | awk -F  "." '{print $$1"."$$2"."$$3+1}'))
  1028  ifndef IMAGE_ONLY
  1029  	$(MAKE) maybe-tag-release maybe-push-release-tag\
  1030  		RELEASE_TAG=$(RELEASE_TAG) BRANCH=$(RELEASE_BRANCH) DEV_TAG=$(DEV_TAG)
  1031  endif
  1032  ifdef BUILD_IMAGES
  1033  	$(eval IMAGE_DEV_TAG = $(if $(IMAGETAG_PREFIX),$(IMAGETAG_PREFIX)-)$(DEV_TAG))
  1034  	$(eval IMAGE_RELEASE_TAG = $(if $(IMAGETAG_PREFIX),$(IMAGETAG_PREFIX)-)$(RELEASE_TAG))
  1035  	$(MAKE) release-dev-images\
  1036  		RELEASE_TAG=$(IMAGE_RELEASE_TAG) BRANCH=$(RELEASE_BRANCH) DEV_TAG=$(IMAGE_DEV_TAG)
  1037  endif
  1038  ifndef IMAGE_ONLY
  1039  	$(MAKE) maybe-dev-tag-next-release maybe-push-next-release-dev-tag\
  1040  		NEXT_RELEASE_VERSION=$(NEXT_RELEASE_VERSION) BRANCH=$(RELEASE_BRANCH) DEV_TAG=$(DEV_TAG)
  1041  endif
  1042  
  1043  # maybe-tag-release calls the tag-release target only if the current commit is not tagged with the tag in RELEASE_TAG.
  1044  # If the current commit is already tagged with the value in RELEASE_TAG then this is a NOOP.
  1045  maybe-tag-release: var-require-all-RELEASE_TAG
  1046  	$(if $(filter-out $(call git-release-tag-for-current-commit),$(RELEASE_TAG)),\
  1047  		$(MAKE) tag-release,\
  1048  		@echo "Current commit already tagged with $(RELEASE_TAG)")
  1049  
  1050  # tag-release tags the current commit with an annotated tag whose value is RELEASE_TAG. This target throws an error
  1051  # if the current branch is RELEASE_BRANCH_BASE (master by default).
  1052  tag-release: var-require-one-of-CONFIRM-DRYRUN var-require-all-DEV_TAG_SUFFIX-RELEASE_TAG
  1053  	$(if $(filter-out $(RELEASE_BRANCH_BASE),$(call current-branch)),,$(error tag-release cannot be called on $(RELEASE_BRANCH_BASE)))
  1054  	git tag -a $(RELEASE_TAG) -m "Release $(RELEASE_TAG)"
  1055  
  1056  # maybe-push-release-tag calls the push-release-tag target only if the tag in RELEASE_TAG is not already pushed to
  1057  # github. If the tag is pushed to github then this is a NOOP.
  1058  # TODO should we check the commit tagged in remote is the current commit? Probably yes... that could catch some annoying problems that would be hard to find if they happened...
  1059  maybe-push-release-tag: var-require-all-RELEASE_TAG
  1060  	$(if $(shell git ls-remote -q --tags $(GIT_REMOTE) $(RELEASE_TAG)),\
  1061  		@echo Release $(RELEASE_TAG) already in github,\
  1062  		$(MAKE) push-release-tag)
  1063  
  1064  # push-release-tag pushes the tag in RELEASE_TAG to github. If the current commit is not tagged with this tag then this
  1065  # target fails.
  1066  push-release-tag: var-require-one-of-CONFIRM-DRYRUN var-require-all-DEV_TAG_SUFFIX-RELEASE_TAG
  1067  	$(if $(call git-release-tag-for-current-commit),,$(error Commit does not have a release tag))
  1068  	$(GIT) push $(GIT_REMOTE) $(RELEASE_TAG)
  1069  
  1070  # maybe-dev-tag-next-release calls the dev-tag-next-release-target only if the tag NEXT_RELEASE_VERSION-DEV_TAG_SUFFIX
  1071  # doesn't exist locally. If the tag does exist then this is a NOOP.
  1072  maybe-dev-tag-next-release: var-require-all-NEXT_RELEASE_VERSION-DEV_TAG_SUFFIX
  1073  	$(if $(shell git rev-parse --verify -q "$(NEXT_RELEASE_VERSION)-$(DEV_TAG_SUFFIX)"),\
  1074  		echo "Tag for next release $(NEXT_RELEASE_VERSION) already exists$(comma) not creating.",\
  1075  		$(MAKE) dev-tag-next-release)
  1076  
  1077  # dev-tag-next-release creates a new empty commit at the head of BRANCH and tags it with
  1078  # NEXT_RELEASE_VERSION-DEV_TAG_SUFFIX.
  1079  dev-tag-next-release: var-require-one-of-CONFIRM-DRYRUN var-require-all-NEXT_RELEASE_VERSION-DEV_TAG_SUFFIX-BRANCH
  1080  	git checkout $(BRANCH)
  1081  	$(GIT) pull $(GIT_REMOTE) $(BRANCH)
  1082  	git commit --allow-empty -m "Begin development on $(NEXT_RELEASE_VERSION)"
  1083  	git tag $(NEXT_RELEASE_VERSION)-$(DEV_TAG_SUFFIX)
  1084  
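# For example (values hypothetical): with NEXT_RELEASE_VERSION=v3.19.0 and DEV_TAG_SUFFIX=0.dev, dev-tag-next-release
# creates an empty "Begin development on v3.19.0" commit on BRANCH and tags it v3.19.0-0.dev.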
  1085  # maybe-push-next-release-dev-tag calls the push-next-release-dev-tag target if the tag
  1086  # NEXT_RELEASE_VERSION-DEV_TAG_SUFFIX doesn't exist remotely. If the tag exists remotely then this is a NOOP.
  1087  maybe-push-next-release-dev-tag: var-require-one-of-CONFIRM-DRYRUN var-require-all-NEXT_RELEASE_VERSION-DEV_TAG_SUFFIX
  1088  	$(if $(shell git ls-remote --tags $(GIT_REMOTE) $(NEXT_RELEASE_VERSION)-$(DEV_TAG_SUFFIX)),\
  1089  		echo "Dev tag for next release $(NEXT_RELEASE_VERSION) already pushed to github.",\
  1090  		$(MAKE) push-next-release-dev-tag)
  1091  
  1092  # push-next-release-dev-tag pushes the tag NEXT_RELEASE_VERSION-DEV_TAG_SUFFIX and the current branch to github. If
  1093  # HEAD is detached (i.e. not on a branch) then this target fails.
  1094  push-next-release-dev-tag: var-require-one-of-CONFIRM-DRYRUN var-require-all-NEXT_RELEASE_VERSION-DEV_TAG_SUFFIX
  1095  	# The next release commit should always be at the head of a release branch.
  1096  	$(if $(filter-out HEAD,$(call current-branch)),,\
  1097  		$(error "Refusing to push commit for next release while in a detached state."))
  1098  	$(GIT) push $(GIT_REMOTE) $(call current-branch)
  1099  	$(GIT) push $(GIT_REMOTE) $(NEXT_RELEASE_VERSION)-$(DEV_TAG_SUFFIX)
  1100  
  1101  # release-dev-images releases the dev images by calling release-retag-dev-images-in-registry-% for each dev registry
  1102  # and release-dev-images-to-registry-% for each release registry. This retags all the dev images with the release tag
  1103  # and copies the dev images over to the release registries.
  1104  ifndef SKIP_DEV_IMAGE_RETAG
  1105  RELEASE_DEV_IMAGES_RETAG_TARGETS ?= $(addprefix release-retag-dev-images-in-registry-,$(call escapefs, $(DEV_REGISTRIES)))
  1106  endif
  1107  
  1108  RELEASE_DEV_IMAGES_TARGETS ?= $(addprefix release-dev-images-to-registry-,$(call escapefs, $(RELEASE_REGISTRIES)))
  1109  release-dev-images: var-require-one-of-CONFIRM-DRYRUN var-require-all-BUILD_IMAGES $(RELEASE_DEV_IMAGES_RETAG_TARGETS) $(RELEASE_DEV_IMAGES_TARGETS)
  1110  
  1111  # release-retag-dev-images-in-registry-% retags all the build / arch images specified by BUILD_IMAGES and VALIDARCHES in
  1112  # the registry specified by $* with the release tag specified by RELEASE_TAG.
  1113  release-retag-dev-images-in-registry-%:
  1114  	$(MAKE) $(addprefix release-retag-dev-image-in-registry-,$(call escapefs, $(BUILD_IMAGES))) DEV_REGISTRY=$(call unescapefs,$*)
  1115  
  1116  # release-retag-dev-image-in-registry-% retags the build image specified by $* in the dev registry specified by
  1117  # DEV_REGISTRY with the release tag specified by RELEASE_TAG. If DEV_REGISTRY is in the list of registries specified by
  1118  # RELEASE_REGISTRIES then the retag is not done.
  1119  release-retag-dev-image-in-registry-%:
  1120  	$(if $(filter-out $(RELEASE_REGISTRIES),$(DEV_REGISTRY)),\
  1121  		$(CRANE) cp $(DEV_REGISTRY)/$(call unescapefs,$*):$(DEV_TAG) $(DEV_REGISTRY)/$(call unescapefs,$*):$(RELEASE_TAG))$(double_quote)
  1122  
  1123  # release-dev-images-to-registry-% copies and retags all the build / arch images specified by BUILD_IMAGES and
  1124  # VALIDARCHES from the registry specified by DEV_REGISTRY to the registry specified by RELEASE_REGISTRY using the tags
  1125  # specified by DEV_TAG and RELEASE_TAG.
  1126  release-dev-images-to-registry-%:
  1127  	$(MAKE) $(addprefix release-dev-image-to-registry-,$(call escapefs, $(BUILD_IMAGES))) RELEASE_REGISTRY=$(call unescapefs,$*)
  1128  
  1129  # release-dev-image-to-registry-% copies the build image and build arch images specified by $* and VALIDARCHES from
  1130  # the dev registry (DEV_REGISTRY, tagged with DEV_TAG) to the release registry (RELEASE_REGISTRY, tagged with RELEASE_TAG).
  1131  release-dev-image-to-registry-%:
  1132  	$(if $(SKIP_MANIFEST_RELEASE),,\
  1133  		$(CRANE) cp $(DEV_REGISTRY)/$(call unescapefs,$*):$(DEV_TAG) $(RELEASE_REGISTRY)/$(call unescapefs,$*):$(RELEASE_TAG))$(double_quote)
  1134  	$(if $(SKIP_ARCH_RELEASE),,\
  1135  		$(MAKE) $(addprefix release-dev-image-arch-to-registry-,$(VALIDARCHES)) BUILD_IMAGE=$(call unescapefs,$*))
  1136  
  1137  # release-dev-image-arch-to-registry-% copies the build arch image specified by BUILD_IMAGE and the arch stem $* from
  1138  # the dev registry (DEV_REGISTRY, tagged with DEV_TAG-$*) to the release registry (RELEASE_REGISTRY, tagged with RELEASE_TAG-$*).
  1139  release-dev-image-arch-to-registry-%:
  1140  	$(CRANE) cp $(DEV_REGISTRY)/$(BUILD_IMAGE):$(DEV_TAG)-$* $(RELEASE_REGISTRY)/$(BUILD_IMAGE):$(RELEASE_TAG)-$*$(double_quote)
  1141  
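# For example (all values hypothetical): with DEV_REGISTRY=gcr.io/example-dev, RELEASE_REGISTRY=quay.io/example,
# BUILD_IMAGE=cnx-node, DEV_TAG=v3.19.0-0.dev, RELEASE_TAG=v3.19.0 and stem amd64, the recipe above expands to:
#
#   crane cp gcr.io/example-dev/cnx-node:v3.19.0-0.dev-amd64 quay.io/example/cnx-node:v3.19.0-amd64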
  1142  # create-release-branch creates a release branch based on the dev tag for the current commit on master. After the
  1143  # release branch is created and pushed, dev-tag-next-release and push-next-release-dev-tag are called to create a new
  1144  # empty commit and tag it with the minor-incremented version of the previous dev tag, ready for the next release.
  1145  create-release-branch: var-require-one-of-CONFIRM-DRYRUN var-require-all-DEV_TAG_SUFFIX-RELEASE_BRANCH_PREFIX fetch-all
  1146  	$(if $(filter-out $(RELEASE_BRANCH_BASE),$(call current-branch)),$(error create-release-branch must be called on $(RELEASE_BRANCH_BASE)),)
  1147  	$(eval NEXT_RELEASE_VERSION := $(shell echo "$(call git-release-tag-from-dev-tag)" | awk -F  "." '{print $$1"."$$2+1"."0}'))
  1148  	$(eval RELEASE_BRANCH_VERSION := $(shell echo "$(call git-release-tag-from-dev-tag)" | awk -F  "." '{print $$1"."$$2}'))
  1149  	git checkout -B $(RELEASE_BRANCH_PREFIX)-$(RELEASE_BRANCH_VERSION) $(GIT_REMOTE)/$(RELEASE_BRANCH_BASE)
  1150  	$(GIT) push $(GIT_REMOTE) $(RELEASE_BRANCH_PREFIX)-$(RELEASE_BRANCH_VERSION)
  1151  	$(MAKE) dev-tag-next-release push-next-release-dev-tag\
  1152   		BRANCH=$(call current-branch) NEXT_RELEASE_VERSION=$(NEXT_RELEASE_VERSION) DEV_TAG_SUFFIX=$(DEV_TAG_SUFFIX)
  1153  
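# Illustrative run (values hypothetical): if the dev tag on the current master commit maps to release v3.18.0, then
#
#   make create-release-branch CONFIRM=true DEV_TAG_SUFFIX=0.dev RELEASE_BRANCH_PREFIX=release
#
# creates and pushes the branch release-v3.18 and tags the next-release commit v3.19.0-0.dev.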
  1154  # release-prereqs checks that the environment is configured properly to create a release.
  1155  .PHONY: release-prereqs
  1156  release-prereqs:
  1157  ifndef VERSION
  1158  	$(error VERSION is undefined - run using make release VERSION=vX.Y.Z)
  1159  endif
  1160  
  1161  # Check if the codebase is dirty or not.
  1162  check-dirty:
  1163  	@if [ "$$(git --no-pager diff --stat)" != "" ]; then \
  1164  	echo "The following files are dirty"; git --no-pager diff --stat; exit 1; fi
  1165  
  1166  ###############################################################################
  1167  # Common functions for launching a local Kubernetes control plane.
  1168  ###############################################################################
  1169  ## Kubernetes apiserver used for tests
  1170  APISERVER_NAME := calico-local-apiserver
  1171  run-k8s-apiserver: stop-k8s-apiserver run-etcd
  1172  	docker run --detach --net=host \
  1173  		--name $(APISERVER_NAME) \
  1174  		-v $(REPO_ROOT):/go/src/github.com/projectcalico/calico \
  1175  		-v $(CERTS_PATH):/home/user/certs \
  1176  		-e KUBECONFIG=/home/user/certs/kubeconfig \
  1177  		$(CALICO_BUILD) kube-apiserver \
  1178  		--etcd-servers=http://$(LOCAL_IP_ENV):2379 \
  1179  		--service-cluster-ip-range=10.101.0.0/16,fd00:96::/112 \
  1180  		--authorization-mode=RBAC \
  1181  		--service-account-key-file=/home/user/certs/service-account.pem \
  1182  		--service-account-signing-key-file=/home/user/certs/service-account-key.pem \
  1183  		--service-account-issuer=https://localhost:443 \
  1184  		--api-audiences=kubernetes.default \
  1185  		--client-ca-file=/home/user/certs/ca.pem \
  1186  		--tls-cert-file=/home/user/certs/kubernetes.pem \
  1187  		--tls-private-key-file=/home/user/certs/kubernetes-key.pem \
  1188  		--enable-priority-and-fairness=false \
  1189  		--max-mutating-requests-inflight=0 \
  1190  		--max-requests-inflight=0
  1191  
  1192  	# Wait until the apiserver is accepting requests.
  1193  	while ! docker exec $(APISERVER_NAME) kubectl get namespace default; do echo "Waiting for apiserver to come up..."; sleep 2; done
  1194  
  1195  	# Wait until we can configure a cluster role binding which allows anonymous auth.
  1196  	while ! docker exec $(APISERVER_NAME) kubectl create \
  1197  		clusterrolebinding anonymous-admin \
  1198  		--clusterrole=cluster-admin \
  1199  		--user=system:anonymous 2>/dev/null ; \
  1200  		do echo "Waiting for $(APISERVER_NAME) to come up"; \
  1201  		sleep 1; \
  1202  		done
  1203  
  1204  	# Create CustomResourceDefinition (CRD) for Calico resources
  1205  	while ! docker exec $(APISERVER_NAME) kubectl \
  1206  		apply -f /go/src/github.com/projectcalico/calico/libcalico-go/config/crd/; \
  1207  		do echo "Trying to create CRDs"; \
  1208  		sleep 1; \
  1209  		done
  1210  
  1211  # Stop Kubernetes apiserver
  1212  stop-k8s-apiserver:
  1213  	@-docker rm -f $(APISERVER_NAME)
  1214  
  1215  # Run a local Kubernetes controller-manager in a docker container, useful for tests.
  1216  CONTROLLER_MANAGER_NAME := calico-local-controller-manager
  1217  run-k8s-controller-manager: stop-k8s-controller-manager run-k8s-apiserver
  1218  	docker run --detach --net=host \
  1219  		--name $(CONTROLLER_MANAGER_NAME) \
  1220  		-v $(CERTS_PATH):/home/user/certs \
  1221  		$(CALICO_BUILD) kube-controller-manager \
  1222  		--master=https://127.0.0.1:6443 \
  1223  		--kubeconfig=/home/user/certs/kube-controller-manager.kubeconfig \
  1224  		--min-resync-period=3m \
  1225  		--allocate-node-cidrs=true \
  1226  		--cluster-cidr=192.168.0.0/16 \
  1227  		--v=5 \
  1228  		--service-account-private-key-file=/home/user/certs/service-account-key.pem \
  1229  		--root-ca-file=/home/user/certs/ca.pem
  1230  
  1231  ## Stop Kubernetes controller manager
  1232  stop-k8s-controller-manager:
  1233  	@-docker rm -f $(CONTROLLER_MANAGER_NAME)
  1234  
  1235  ###############################################################################
  1236  # Common functions for creating a local kind cluster.
  1237  ###############################################################################
  1238  KIND_DIR := $(REPO_ROOT)/hack/test/kind
  1239  KIND ?= $(KIND_DIR)/kind
  1240  KUBECTL ?= $(KIND_DIR)/kubectl
  1241  
  1242  # Different tests may require different kind configurations.
  1243  KIND_CONFIG ?= $(KIND_DIR)/kind.config
  1244  KIND_NAME = $(basename $(notdir $(KIND_CONFIG)))
  1245  KIND_KUBECONFIG?=$(KIND_DIR)/$(KIND_NAME)-kubeconfig.yaml
  1246  
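# For example (config name hypothetical): KIND_CONFIG=$(KIND_DIR)/dual-stack.config gives KIND_NAME=dual-stack,
# a kubeconfig at $(KIND_DIR)/dual-stack-kubeconfig.yaml and a cluster marker file $(REPO_ROOT)/.dual-stack.created.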
  1247  kind-cluster-create: $(REPO_ROOT)/.$(KIND_NAME).created
  1248  $(REPO_ROOT)/.$(KIND_NAME).created: $(KUBECTL) $(KIND)
  1249  	# First make sure any previous cluster is deleted
  1250  	$(MAKE) kind-cluster-destroy
  1251  
  1252  	# Create a kind cluster.
  1253  	$(KIND) create cluster \
  1254  		--config $(KIND_CONFIG) \
  1255  		--kubeconfig $(KIND_KUBECONFIG) \
  1256  		--name $(KIND_NAME) \
  1257  		--image kindest/node:$(KINDEST_NODE_VERSION)
  1258  
  1259  	# Wait for controller manager to be running and healthy, then create Calico CRDs.
  1260  	while ! KUBECONFIG=$(KIND_KUBECONFIG) $(KUBECTL) get serviceaccount default; do echo "Waiting for default serviceaccount to be created..."; sleep 2; done
  1261  	while ! KUBECONFIG=$(KIND_KUBECONFIG) $(KUBECTL) create -f $(REPO_ROOT)/libcalico-go/config/crd; do echo "Waiting for CRDs to be created"; sleep 2; done
  1262  	touch $@
  1263  
  1264  kind-cluster-destroy: $(KIND) $(KUBECTL)
  1265  	-$(KUBECTL) --kubeconfig=$(KIND_KUBECONFIG) drain kind-control-plane kind-worker kind-worker2 kind-worker3 --ignore-daemonsets --force
  1266  	-$(KIND) delete cluster --name $(KIND_NAME)
  1267  	rm -f $(KIND_KUBECONFIG)
  1268  	rm -f $(REPO_ROOT)/.$(KIND_NAME).created
  1269  
  1270  $(KIND)-$(KIND_VERSION):
  1271  	mkdir -p $(KIND_DIR)/$(KIND_VERSION)
  1272  	$(DOCKER_GO_BUILD) sh -c "GOBIN=/go/src/github.com/projectcalico/calico/hack/test/kind/$(KIND_VERSION) go install sigs.k8s.io/kind@$(KIND_VERSION)"
  1273  	mv $(KIND_DIR)/$(KIND_VERSION)/kind $(KIND_DIR)/kind-$(KIND_VERSION)
  1274  	rm -r $(KIND_DIR)/$(KIND_VERSION)
  1275  
  1276  $(KIND_DIR)/.kind-updated-$(KIND_VERSION): $(KIND)-$(KIND_VERSION)
  1277  	rm -f $(KIND_DIR)/.kind-updated-*
  1278  	cd $(KIND_DIR) && ln -fs kind-$(KIND_VERSION) kind
  1279  	touch $@
  1280  
  1281  .PHONY: kind
  1282  kind: $(KIND)
  1283  	@echo "kind: $(KIND)"
  1284  $(KIND): $(KIND_DIR)/.kind-updated-$(KIND_VERSION)
  1285  
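# Note: the pattern above (a versioned kind-$(KIND_VERSION) binary, a .kind-updated-$(KIND_VERSION) marker file and a
# "kind" symlink) re-points the symlink whenever KIND_VERSION changes, e.g. after switching branches; the same pattern
# is used for kubectl and helm below.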
  1286  $(KUBECTL)-$(K8S_VERSION):
  1287  	mkdir -p $(KIND_DIR)
  1288  	curl -L https://storage.googleapis.com/kubernetes-release/release/$(K8S_VERSION)/bin/linux/$(ARCH)/kubectl -o $@
  1289  	chmod +x $@
  1290  
  1291  $(KIND_DIR)/.kubectl-updated-$(K8S_VERSION): $(KUBECTL)-$(K8S_VERSION)
  1292  	rm -f $(KIND_DIR)/.kubectl-updated-*
  1293  	cd $(KIND_DIR) && ln -fs kubectl-$(K8S_VERSION) kubectl
  1294  	touch $@
  1295  
  1296  .PHONY: kubectl
  1297  kubectl: $(KUBECTL)
  1298  	@echo "kubectl: $(KUBECTL)"
  1299  $(KUBECTL): $(KIND_DIR)/.kubectl-updated-$(K8S_VERSION)
  1300  
  1301  bin/helm-$(HELM_VERSION):
  1302  	mkdir -p bin
  1303  	$(eval TMP := $(shell mktemp -d))
  1304  	curl -sSf -L --retry 5 -o $(TMP)/helm3.tar.gz https://get.helm.sh/helm-$(HELM_VERSION)-linux-$(ARCH).tar.gz
  1305  	tar -zxvf $(TMP)/helm3.tar.gz -C $(TMP)
  1306  	mv $(TMP)/linux-$(ARCH)/helm bin/helm-$(HELM_VERSION)
  1307  
  1308  bin/.helm-updated-$(HELM_VERSION): bin/helm-$(HELM_VERSION)
  1309  	# Remove old marker files so that bin/helm will be stale if we switch
  1310  	# branches and the helm version changes.
  1311  	rm -f bin/.helm-updated-*
  1312  	cd bin && ln -fs helm-$(HELM_VERSION) helm
  1313  	touch $@
  1314  
  1315  .PHONY: helm
  1316  helm: bin/helm
  1317  	@echo "helm: $^"
  1318  bin/helm: bin/.helm-updated-$(HELM_VERSION)
  1319  
  1320  helm-install-gcs-plugin:
  1321  	bin/helm plugin install https://github.com/viglesiasce/helm-gcs.git
  1322  
  1323  # Upload to Google tigera-helm-charts storage bucket.
  1324  publish-charts:
  1325  	bin/helm repo add tigera gs://tigera-helm-charts
  1326  	for chart in ./bin/*.tgz; do \
  1327  		bin/helm gcs push $$chart gs://tigera-helm-charts; \
  1328  	done
  1329  
  1330  bin/yq:
  1331  	mkdir -p bin
  1332  	$(eval TMP := $(shell mktemp -d))
  1333  	curl -sSf -L --retry 5 -o $(TMP)/yq4.tar.gz https://github.com/mikefarah/yq/releases/download/v4.27.3/yq_linux_$(BUILDARCH).tar.gz
  1334  	tar -zxvf $(TMP)/yq4.tar.gz -C $(TMP)
  1335  	mv $(TMP)/yq_linux_$(BUILDARCH) bin/yq
  1336  
  1337  ###############################################################################
  1338  # Common functions for launching a local etcd instance.
  1339  ###############################################################################
  1340  ## Run etcd as a container (calico-etcd)
  1341  # TODO: We shouldn't need to tear this down every time it is called.
  1342  # TODO: We shouldn't need to enable the v2 API, but some of our test code still relies on it.
  1343  .PHONY: run-etcd stop-etcd
  1344  run-etcd: stop-etcd
  1345  	docker run --detach \
  1346  		--net=host \
  1347  		--entrypoint=/usr/local/bin/etcd \
  1348  		--name calico-etcd $(ETCD_IMAGE) \
  1349  		--enable-v2 \
  1350  		--advertise-client-urls "http://$(LOCAL_IP_ENV):2379,http://127.0.0.1:2379,http://$(LOCAL_IP_ENV):4001,http://127.0.0.1:4001" \
  1351  		--listen-client-urls "http://0.0.0.0:2379,http://0.0.0.0:4001"
  1352  
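# Once the container is up, the local instance can be spot-checked with, for example:
#
#   curl -s http://127.0.0.1:2379/version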
  1353  stop-etcd:
  1354  	@-docker rm -f calico-etcd
  1355  
  1356  ###############################################################################
  1357  # Helpers
  1358  ###############################################################################
  1359  ## Help
  1360  .PHONY: help
  1361  help:
  1362  	$(info Available targets)
  1363  	@echo
  1364  	@awk '/^[a-zA-Z\-\_\%0-9\/]+:/ {                                  \
  1365  	   nb = sub( /^## /, "", helpMsg );                               \
  1366  	   if(nb == 0) {                                                  \
  1367  	      helpMsg = $$0;                                              \
  1368  	      nb = sub( /^[^:]*:.* ## /, "", helpMsg );                   \
  1369  	   }                                                              \
  1370  	   if (nb)                                                        \
  1371  	      printf "\033[1;31m%-" width "s\033[0m %s\n", $$1, helpMsg;  \
  1372  	}                                                                 \
  1373  	{ helpMsg = $$0 }'                                                \
  1374  	width=30                                                          \
  1375  	$(MAKEFILE_LIST)
  1376  	@echo
  1377  	@echo "-----------------------------------------------------------"
  1378  	@echo "Building for $(BUILDOS)-$(ARCH) INSTALL_FLAG=$(INSTALL_FLAG)"
  1379  	@echo
  1380  	@echo "ARCH (target):		$(ARCH)"
  1381  	@echo "OS (target):		$(BUILDOS)"
  1382  	@echo "BUILDARCH (host):	$(BUILDARCH)"
  1383  	@echo "CALICO_BUILD:		$(CALICO_BUILD)"
  1384  	@echo "-----------------------------------------------------------"
  1385  
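# The help target above lists any target documented either by a "## comment" line directly above it or by a trailing
# "## comment" on the target line itself, e.g. (illustrative targets, not defined here):
#
#   ## Run the unit tests
#   ut: build
#
#   fv: image ## Run the functional tests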
  1386  ###############################################################################
  1387  # Common functions for launching a local Elastic instance.
  1388  ###############################################################################
  1389  ELASTIC_IMAGE   ?= docker.elastic.co/elasticsearch/elasticsearch:$(ELASTIC_VERSION)
  1390  
  1391  ## Run elasticsearch as a container (tigera-elastic)
  1392  .PHONY: run-elastic
  1393  run-elastic: $(REPO_ROOT)/.elasticsearch.created
  1394  $(REPO_ROOT)/.elasticsearch.created:
  1395  	# Run ES on Docker.
  1396  	docker run --detach \
  1397  	-m 2GB \
  1398  	--net=host \
  1399  	--name=tigera-elastic \
  1400  	-e "discovery.type=single-node" \
  1401  	$(ELASTIC_IMAGE)
  1402  
  1403  	# Wait until ES is accepting requests.
  1404  	@while ! docker exec tigera-elastic curl localhost:9200 2> /dev/null; do echo "Waiting for Elasticsearch to come up..."; sleep 2; done
  1405  	touch $@
  1406  
  1407  	# Configure elastic to ignore high watermark errors, since this is just for tests.
  1408  	curl -XPUT -H "Content-Type: application/json" http://localhost:9200/_cluster/settings -d '{"transient": {"cluster.routing.allocation.disk.threshold_enabled": false }}'
  1409  	curl -XPUT -H "Content-Type: application/json" http://localhost:9200/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}'
  1410  
  1411  ## Stop elasticsearch with name tigera-elastic
  1412  .PHONY: stop-elastic
  1413  stop-elastic:
  1414  	-docker rm -f tigera-elastic
  1415  	rm -rf $(REPO_ROOT)/.elasticsearch.created
  1416  
  1417  ###############################################################################
  1418  # Common functions for building windows images.
  1419  ###############################################################################
  1420  
  1421  # When running on semaphore, just copy the docker config, otherwise run
  1422  # 'docker-credential-gcr configure-docker' as well.
  1423  ifdef SEMAPHORE
  1424  DOCKER_CREDENTIAL_CMD = cp /root/.docker/config.json_host /root/.docker/config.json
  1425  else
  1426  DOCKER_CREDENTIAL_CMD = cp /root/.docker/config.json_host /root/.docker/config.json && \
  1427  						docker-credential-gcr configure-docker
  1428  endif
  1429  
  1430  # This needs the $(WINDOWS_DIST)/bin/docker-credential-gcr binary in $PATH and
  1431  # also the local ~/.config/gcloud dir to be able to push to gcr.io.  It mounts
  1432  # $(DOCKER_CONFIG) and copies it so that it can be written to inside the container
  1433  # without affecting the host config.
  1434  CRANE_BINDMOUNT_CMD := \
  1435  	docker run --rm \
  1436  		--net=host \
  1437  		--init \
  1438  		--entrypoint /bin/sh \
  1439  		-e LOCAL_USER_ID=$(LOCAL_USER_ID) \
  1440  		-v $(CURDIR):/go/src/$(PACKAGE_NAME):rw \
  1441  		-v $(DOCKER_CONFIG):/root/.docker/config.json_host:ro \
  1442  		-e PATH=$${PATH}:/go/src/$(PACKAGE_NAME)/$(WINDOWS_DIST)/bin \
  1443  		-v $(HOME)/.config/gcloud:/root/.config/gcloud \
  1444  		-w /go/src/$(PACKAGE_NAME) \
  1445  		$(CALICO_BUILD) -c $(double_quote)$(DOCKER_CREDENTIAL_CMD) && crane
  1446  
  1447  DOCKER_MANIFEST_CMD := docker manifest
  1448  
  1449  ifdef CONFIRM
  1450  CRANE_BINDMOUNT = $(CRANE_BINDMOUNT_CMD)
  1451  DOCKER_MANIFEST = $(DOCKER_MANIFEST_CMD)
  1452  else
  1453  CRANE_BINDMOUNT = echo [DRY RUN] $(CRANE_BINDMOUNT_CMD)
  1454  DOCKER_MANIFEST = echo [DRY RUN] $(DOCKER_MANIFEST_CMD)
  1455  endif
  1456  
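# With CONFIRM unset, the crane pushes and docker manifest commands below are only echoed with a "[DRY RUN]" prefix,
# so a Windows release can be rehearsed without touching any registry.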
  1457  # Clean up the docker builder used to create Windows image tarballs.
  1458  .PHONY: clean-windows-builder
  1459  clean-windows-builder:
  1460  	-docker buildx rm calico-windows-builder
  1461  
  1462  # Set up the docker builder used to create Windows image tarballs.
  1463  .PHONY: setup-windows-builder
  1464  setup-windows-builder: clean-windows-builder
  1465  	docker buildx create --name=calico-windows-builder --use --platform windows/amd64
  1466  
  1467  # FIXME: Use WINDOWS_HPC_VERSION and image instead of nanoserver and WINDOWS_VERSIONS when containerd v1.6 is EOL'd
  1468  # .PHONY: image-windows release-windows
  1469  # NOTE: WINDOWS_IMAGE_REQS must be defined with the requirements to build the windows
  1470  # image. These must be added as reqs to 'image-windows' (originally defined in
  1471  # lib.Makefile) in the specific package Makefile, otherwise they are not correctly
  1472  # recognized.
  1473  # # Build Windows image with tag and possibly push it to $DEV_REGISTRIES
  1474  # image-windows-with-tag: var-require-all-WINDOWS_IMAGE-WINDOWS_DIST-WINDOWS_IMAGE_REQS-IMAGETAG
  1475  # 	push="$${PUSH:-false}"; \
  1476  # 	for registry in $(DEV_REGISTRIES); do \
  1477  # 		echo Building and pushing Windows image to $${registry}; \
  1478  # 		image="$${registry}/$(WINDOWS_IMAGE):$(IMAGETAG)"; \
  1479  # 		docker buildx build \
  1480  # 			--platform windows/amd64 \
  1481  # 			--output=type=image,push=$${push} \
  1482  # 			-t $${image} \
  1483  # 			--pull \
  1484  # 			--no-cache \
  1485  # 			--build-arg GIT_VERSION=$(GIT_VERSION) \
  1486  # 			--build-arg WINDOWS_HPC_VERSION=$(WINDOWS_HPC_VERSION) \
  1487  # 			-f Dockerfile-windows .; \
  1488  # 	done ;
  1489  
  1490  # image-windows: var-require-all-BRANCH_NAME
  1491  # 	$(MAKE) image-windows-with-tag PUSH=$(PUSH) IMAGETAG=$(if $(IMAGETAG_PREFIX),$(IMAGETAG_PREFIX)-)$(BRANCH_NAME)
  1492  # 	$(MAKE) image-windows-with-tag PUSH=$(PUSH) IMAGETAG=$(if $(IMAGETAG_PREFIX),$(IMAGETAG_PREFIX)-)$(GIT_VERSION)
  1493  
  1494  # # Build and push Windows image
  1495  # release-windows: var-require-one-of-CONFIRM-DRYRUN release-prereqs clean-windows
  1496  # 	$(MAKE) image-windows PUSH=true
  1497  
  1498  # Windows image pushing is different because we do not build docker images directly.
  1499  # Since the build machine is linux, we output the images to a tarball. (We can
  1500  # produce the images, but docker images built for Windows cannot be loaded on a
  1501  # linux host, so there is nothing to load into the local docker daemon.)
  1502  #
  1503  # The resulting image tarball is then pushed to registries during cd/release.
  1504  # The image tarballs are located in WINDOWS_DIST and have file names
  1505  # with the format 'node-windows-v3.21.0-2-abcdef-20H2.tar'.
  1506  #
  1507  # In addition to pushing the individual images, we also create the manifest
  1508  # directly using 'docker manifest'. This is possible because Semaphore uses
  1509  # a recent enough docker CLI version (20.10.0); an illustrative command sequence follows the step list below.
  1510  #
  1511  # - Create the manifest with 'docker manifest create' using the list of all images.
  1512  # - For each windows version, 'docker manifest annotate' its image with "os.image: <windows_version>".
  1513  #   <windows_version> is the version string that looks like, e.g. 10.0.19041.1288.
  1514  #   Setting os.image in the manifest is required for Windows hosts to load the
  1515  #   correct image in manifest.
  1516  # - Finally we push the manifest, "purging" the local manifest.
  1517  
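# Illustrative sequence for a single Windows version (registry, image and version values are hypothetical):
#
#   docker manifest create --amend gcr.io/example/node-windows:v3.21.0 gcr.io/example/node-windows:v3.21.0-windows-20H2
#   docker manifest annotate --os windows --arch amd64 --os-version 10.0.19042.1288 \
#       gcr.io/example/node-windows:v3.21.0 gcr.io/example/node-windows:v3.21.0-windows-20H2
#   docker manifest push --purge gcr.io/example/node-windows:v3.21.0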
  1518  $(WINDOWS_DIST)/$(WINDOWS_IMAGE)-$(GIT_VERSION)-%.tar: windows-sub-image-%
  1519  
  1520  DOCKER_CREDENTIAL_VERSION="2.1.18"
  1521  DOCKER_CREDENTIAL_OS="linux"
  1522  DOCKER_CREDENTIAL_ARCH="amd64"
  1523  $(WINDOWS_DIST)/bin/docker-credential-gcr:
  1524  	-mkdir -p $(WINDOWS_DIST)/bin
  1525  	curl -fsSL "https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v$(DOCKER_CREDENTIAL_VERSION)/docker-credential-gcr_$(DOCKER_CREDENTIAL_OS)_$(DOCKER_CREDENTIAL_ARCH)-$(DOCKER_CREDENTIAL_VERSION).tar.gz" \
  1526  	| tar xz --to-stdout docker-credential-gcr \
  1527  	| tee $(WINDOWS_DIST)/bin/docker-credential-gcr > /dev/null && chmod +x $(WINDOWS_DIST)/bin/docker-credential-gcr
  1528  
  1529  .PHONY: docker-credential-gcr-binary
  1530  docker-credential-gcr-binary: var-require-all-WINDOWS_DIST-DOCKER_CREDENTIAL_VERSION-DOCKER_CREDENTIAL_OS-DOCKER_CREDENTIAL_ARCH $(WINDOWS_DIST)/bin/docker-credential-gcr
  1531  
  1532  # NOTE: WINDOWS_IMAGE_REQS must be defined with the requirements to build the windows
  1533  # image. These must be added as reqs to 'image-windows' (originally defined in
  1534  # lib.Makefile) in the specific package Makefile, otherwise they are not correctly
  1535  # recognized.
  1536  windows-sub-image-%: var-require-all-GIT_VERSION-WINDOWS_IMAGE-WINDOWS_DIST-WINDOWS_IMAGE_REQS
  1537  	# ensure dir for windows image tars exists
  1538  	-mkdir -p $(WINDOWS_DIST)
  1539  	docker buildx build \
  1540  		--platform windows/amd64 \
  1541  		--output=type=docker,dest=$(CURDIR)/$(WINDOWS_DIST)/$(WINDOWS_IMAGE)-$(GIT_VERSION)-$*.tar \
  1542  		--pull \
  1543  		-t $(WINDOWS_IMAGE):latest \
  1544  		--build-arg GIT_VERSION=$(GIT_VERSION) \
  1545  		--build-arg=WINDOWS_VERSION=$* \
  1546  		-f Dockerfile-windows .
  1547  
  1548  .PHONY: image-windows release-windows release-windows-with-tag
  1549  image-windows: setup-windows-builder var-require-all-WINDOWS_VERSIONS
  1550  	for version in $(WINDOWS_VERSIONS); do \
  1551  		$(MAKE) windows-sub-image-$${version}; \
  1552  	done;
  1553  
  1554  release-windows-with-tag: var-require-one-of-CONFIRM-DRYRUN var-require-all-IMAGETAG-DEV_REGISTRIES image-windows docker-credential-gcr-binary
  1555  	for registry in $(DEV_REGISTRIES); do \
  1556  		echo Pushing Windows images to $${registry}; \
  1557  		all_images=""; \
  1558  		manifest_image="$${registry}/$(WINDOWS_IMAGE):$(IMAGETAG)"; \
  1559  		for win_ver in $(WINDOWS_VERSIONS); do \
  1560  			image_tar="$(WINDOWS_DIST)/$(WINDOWS_IMAGE)-$(GIT_VERSION)-$${win_ver}.tar"; \
  1561  			image="$${registry}/$(WINDOWS_IMAGE):$(IMAGETAG)-windows-$${win_ver}"; \
  1562  			echo Pushing image $${image} ...; \
  1563  			$(CRANE_BINDMOUNT) push $${image_tar} $${image}$(double_quote) & \
  1564  			all_images="$${all_images} $${image}"; \
  1565  		done; \
  1566  		wait; \
  1567  		$(DOCKER_MANIFEST) create --amend $${manifest_image} $${all_images}; \
  1568  		for win_ver in $(WINDOWS_VERSIONS); do \
  1569  			version=$$(docker manifest inspect mcr.microsoft.com/windows/nanoserver:$${win_ver} | jq -r '.manifests[0].platform."os.version"'); \
  1570  			image="$${registry}/$(WINDOWS_IMAGE):$(IMAGETAG)-windows-$${win_ver}"; \
  1571  			$(DOCKER_MANIFEST) annotate --os windows --arch amd64 --os-version $${version} $${manifest_image} $${image}; \
  1572  		done; \
  1573  		$(DOCKER_MANIFEST) push --purge $${manifest_image}; \
  1574  	done;
  1575  
  1576  release-windows: var-require-one-of-CONFIRM-DRYRUN var-require-all-DEV_REGISTRIES-WINDOWS_IMAGE var-require-one-of-VERSION-BRANCH_NAME
  1577  	describe_tag=$(if $(IMAGETAG_PREFIX),$(IMAGETAG_PREFIX)-)$(GIT_VERSION); \
  1578  	release_tag=$(if $(VERSION),$(VERSION),$(if $(IMAGETAG_PREFIX),$(IMAGETAG_PREFIX)-)$(BRANCH_NAME)); \
  1579  	$(MAKE) release-windows-with-tag IMAGETAG=$${describe_tag}; \
  1580  	for registry in $(DEV_REGISTRIES); do \
  1581  		$(CRANE_BINDMOUNT) cp $${registry}/$(WINDOWS_IMAGE):$${describe_tag} $${registry}/$(WINDOWS_IMAGE):$${release_tag}$(double_quote); \
  1582  	done;
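# Illustrative invocation of release-windows (all values hypothetical; WINDOWS_IMAGE and WINDOWS_VERSIONS are normally
# set by the including package Makefile):
#
#   make release-windows CONFIRM=true VERSION=v3.21.0 DEV_REGISTRIES=gcr.io/example WINDOWS_VERSIONS="1809 20H2"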