gopkg.in/docker/docker.v20@v20.10.27/integration-cli/docker_api_swarm_test.go

//go:build !windows
// +build !windows

package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/cloudflare/cfssl/csr"
	"github.com/cloudflare/cfssl/helpers"
	"github.com/cloudflare/cfssl/initca"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
	"github.com/docker/docker/integration-cli/checker"
	"github.com/docker/docker/integration-cli/daemon"
	testdaemon "github.com/docker/docker/testutil/daemon"
	"github.com/docker/docker/testutil/request"
	"github.com/docker/swarmkit/ca"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
	"gotest.tools/v3/poll"
)

var defaultReconciliationTimeout = 30 * time.Second

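// TestAPISwarmInit verifies that init, leave, and re-join transition a node
// between the expected local states, and that swarm state survives a daemon
// restart.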
func (s *DockerSwarmSuite) TestAPISwarmInit(c *testing.T) {
	// TODO: find a better way to verify that components are running than /info
	d1 := s.AddDaemon(c, true, true)
	info := d1.SwarmInfo(c)
	assert.Equal(c, info.ControlAvailable, true)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
	assert.Equal(c, info.Cluster.RootRotationInProgress, false)

	d2 := s.AddDaemon(c, true, false)
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.ControlAvailable, false)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)

	// Leaving cluster
	assert.NilError(c, d2.SwarmLeave(c, false))

	info = d2.SwarmInfo(c)
	assert.Equal(c, info.ControlAvailable, false)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)

	d2.SwarmJoin(c, swarm.JoinRequest{
		ListenAddr:  d1.SwarmListenAddr(),
		JoinToken:   d1.JoinTokens(c).Worker,
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})

	info = d2.SwarmInfo(c)
	assert.Equal(c, info.ControlAvailable, false)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)

	// The current state should be restored after restarts
	d1.Stop(c)
	d2.Stop(c)

	d1.StartNode(c)
	d2.StartNode(c)

	info = d1.SwarmInfo(c)
	assert.Equal(c, info.ControlAvailable, true)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)

	info = d2.SwarmInfo(c)
	assert.Equal(c, info.ControlAvailable, false)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
}

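// TestAPISwarmJoinToken verifies that joining requires a valid join token,
// and that rotating the tokens invalidates previously issued ones.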
func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *testing.T) {
	d1 := s.AddDaemon(c, false, false)
	d1.SwarmInit(c, swarm.InitRequest{})

	// TODO: the error message differs depending on whether some components of the token are valid

	d2 := s.AddDaemon(c, false, false)
	c2 := d2.NewClientT(c)
	err := c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	assert.ErrorContains(c, err, "join token is necessary")
	info := d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)

	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		JoinToken:   "foobaz",
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	assert.ErrorContains(c, err, "invalid join token")
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)

	workerToken := d1.JoinTokens(c).Worker

	d2.SwarmJoin(c, swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		JoinToken:   workerToken,
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
	assert.NilError(c, d2.SwarmLeave(c, false))
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)

	// rotate the tokens
	d1.RotateTokens(c)

	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		JoinToken:   workerToken,
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	assert.ErrorContains(c, err, "join token is necessary")
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)

	workerToken = d1.JoinTokens(c).Worker

	d2.SwarmJoin(c, swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.SwarmListenAddr()}})
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
	assert.NilError(c, d2.SwarmLeave(c, false))
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)

	// change the spec without changing the tokens
	d1.UpdateSwarm(c, func(s *swarm.Spec) {})

	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	assert.ErrorContains(c, err, "join token is necessary")
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)

	d2.SwarmJoin(c, swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.SwarmListenAddr()}})
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
	assert.NilError(c, d2.SwarmLeave(c, false))
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
}

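// TestUpdateSwarmAddExternalCA verifies that external CAs added via a swarm
// spec update are reflected in the cluster info.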
func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *testing.T) {
	d1 := s.AddDaemon(c, false, false)
	d1.SwarmInit(c, swarm.InitRequest{})
	d1.UpdateSwarm(c, func(s *swarm.Spec) {
		s.CAConfig.ExternalCAs = []*swarm.ExternalCA{
			{
				Protocol: swarm.ExternalCAProtocolCFSSL,
				URL:      "https://thishasnoca.org",
			},
			{
				Protocol: swarm.ExternalCAProtocolCFSSL,
				URL:      "https://thishasacacert.org",
				CACert:   "cacert",
			},
		}
	})
	info := d1.SwarmInfo(c)
	assert.Equal(c, len(info.Cluster.Spec.CAConfig.ExternalCAs), 2)
	assert.Equal(c, info.Cluster.Spec.CAConfig.ExternalCAs[0].CACert, "")
	assert.Equal(c, info.Cluster.Spec.CAConfig.ExternalCAs[1].CACert, "cacert")
}

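// TestAPISwarmCAHash verifies that joining with a token carrying a wrong CA
// fingerprint is rejected.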
func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *testing.T) {
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, false, false)
	splitToken := strings.Split(d1.JoinTokens(c).Worker, "-")
	splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e"
	replacementToken := strings.Join(splitToken, "-")
	c2 := d2.NewClientT(c)
	err := c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		JoinToken:   replacementToken,
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	assert.ErrorContains(c, err, "remote CA does not match fingerprint")
}

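// TestAPISwarmPromoteDemote exercises promoting a worker to manager and back,
// and verifies that the last manager of the swarm cannot be demoted.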
func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *testing.T) {
	d1 := s.AddDaemon(c, false, false)
	d1.SwarmInit(c, swarm.InitRequest{})
	d2 := s.AddDaemon(c, true, false)

	info := d2.SwarmInfo(c)
	assert.Equal(c, info.ControlAvailable, false)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)

	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
		n.Spec.Role = swarm.NodeRoleManager
	})

	poll.WaitOn(c, pollCheck(c, d2.CheckControlAvailable, checker.True()), poll.WithTimeout(defaultReconciliationTimeout))

	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
		n.Spec.Role = swarm.NodeRoleWorker
	})

	poll.WaitOn(c, pollCheck(c, d2.CheckControlAvailable, checker.False()), poll.WithTimeout(defaultReconciliationTimeout))

	// Wait for the role to change to worker in the cert. This is done partly
	// because it is worth testing in its own right, and partly because
	// changing the role from manager to worker and back to manager too
	// quickly might cause the node to pause for a while waiting for the role
	// to change to worker, and the test can time out during that interval.
	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
		certBytes, err := os.ReadFile(filepath.Join(d2.Folder, "root", "swarm", "certificates", "swarm-node.crt"))
		if err != nil {
			return "", fmt.Sprintf("error: %v", err)
		}
		certs, err := helpers.ParseCertificatesPEM(certBytes)
		if err == nil && len(certs) > 0 && len(certs[0].Subject.OrganizationalUnit) > 0 {
			return certs[0].Subject.OrganizationalUnit[0], ""
		}
		return "", "could not get organizational unit from certificate"
	}, checker.Equals("swarm-worker")), poll.WithTimeout(defaultReconciliationTimeout))

	// Demoting the last manager should fail
	node := d1.GetNode(c, d1.NodeID())
	node.Spec.Role = swarm.NodeRoleWorker
	url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index)
	res, body, err := request.Post(url, request.Host(d1.Sock()), request.JSONBody(node.Spec))
	assert.NilError(c, err)
	b, err := request.ReadBody(body)
	assert.NilError(c, err)
	assert.Equal(c, res.StatusCode, http.StatusBadRequest, "output: %q", string(b))

	// The warning specific to demoting the last manager is best-effort and
	// won't appear until the Role field of the demoted manager has been
	// updated.
	// Yes, this looks silly, but checker.Matches is broken: it anchors the
	// regexp, contrary to its documentation, which makes it impossible to
	// match anything that includes a line break.
	if !strings.Contains(string(b), "last manager of the swarm") {
		assert.Assert(c, strings.Contains(string(b), "this would result in a loss of quorum"))
	}
	info = d1.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
	assert.Equal(c, info.ControlAvailable, true)

	// Promote the demoted node back to manager
	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
		n.Spec.Role = swarm.NodeRoleManager
	})

	poll.WaitOn(c, pollCheck(c, d2.CheckControlAvailable, checker.True()), poll.WithTimeout(defaultReconciliationTimeout))
}

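// TestAPISwarmLeaderProxy verifies that control requests sent to any manager
// are proxied to the leader.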
func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *testing.T) {
	// add three managers, one of which is the leader
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)
	d3 := s.AddDaemon(c, true, true)

	// start a service by hitting each of the 3 managers
	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "test1"
	})
	d2.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "test2"
	})
	d3.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "test3"
	})

	// 3 services should now be running, because the requests were proxied to
	// the leader; query each node and make sure it returns all 3
	for _, d := range []*daemon.Daemon{d1, d2, d3} {
		services := d.ListServices(c)
		assert.Equal(c, len(services), 3)
	}
}

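// TestAPISwarmLeaderElection verifies that a new leader is elected when the
// current leader goes down, and that leadership is stable once the old
// leader rejoins.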
func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *testing.T) {
	if runtime.GOARCH == "s390x" {
		c.Skip("Disabled on s390x")
	}
	if runtime.GOARCH == "ppc64le" {
		c.Skip("Disabled on ppc64le")
	}

	// Create 3 nodes
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)
	d3 := s.AddDaemon(c, true, true)

	// assert that the first node we made is the leader, and the other two are followers
	assert.Equal(c, d1.GetNode(c, d1.NodeID()).ManagerStatus.Leader, true)
	assert.Equal(c, d1.GetNode(c, d2.NodeID()).ManagerStatus.Leader, false)
	assert.Equal(c, d1.GetNode(c, d3.NodeID()).ManagerStatus.Leader, false)

	d1.Stop(c)

	var (
		leader    *daemon.Daemon   // keep track of the leader
		followers []*daemon.Daemon // keep track of the followers
	)
	var lastErr error
	checkLeader := func(nodes ...*daemon.Daemon) checkF {
		return func(c *testing.T) (interface{}, string) {
			// clear these out before each run
			leader = nil
			followers = nil
			for _, d := range nodes {
				n := d.GetNode(c, d.NodeID(), func(err error) bool {
					if strings.Contains(err.Error(), context.DeadlineExceeded.Error()) || strings.Contains(err.Error(), "swarm does not have a leader") {
						lastErr = err
						return true
					}
					return false
				})
				if n == nil {
					return false, fmt.Sprintf("failed to get node: %v", lastErr)
				}
				if n.ManagerStatus.Leader {
					leader = d
				} else {
					followers = append(followers, d)
				}
			}

			if leader == nil {
				return false, "no leader elected"
			}

			return true, fmt.Sprintf("elected %v", leader.ID())
		}
	}

	// wait for an election to occur
	c.Logf("Waiting for election to occur...")
	poll.WaitOn(c, pollCheck(c, checkLeader(d2, d3), checker.True()), poll.WithTimeout(defaultReconciliationTimeout))

	// assert that we have a new leader
	assert.Assert(c, leader != nil)

	// Keep track of the current leader, since we expect it to remain the
	// leader after d1 rejoins.
	stableleader := leader

	// add d1, the initial leader, back
	d1.StartNode(c)

	// wait for a possible election
	c.Logf("Waiting for possible election...")
	poll.WaitOn(c, pollCheck(c, checkLeader(d1, d2, d3), checker.True()), poll.WithTimeout(defaultReconciliationTimeout))
	// checkLeader picked out the leader and the followers again

	// verify that we still only have 1 leader and 2 followers
	assert.Assert(c, leader != nil)
	assert.Equal(c, len(followers), 2)
	// and that after we added d1 back, the leader hasn't changed
	assert.Equal(c, leader.NodeID(), stableleader.NodeID())
}

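// TestAPISwarmRaftQuorum verifies that the cluster rejects writes once the
// raft quorum is lost and recovers when enough managers come back.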
func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *testing.T) {
	if runtime.GOARCH == "s390x" {
		c.Skip("Disabled on s390x")
	}
	if runtime.GOARCH == "ppc64le" {
		c.Skip("Disabled on ppc64le")
	}

	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)
	d3 := s.AddDaemon(c, true, true)

	d1.CreateService(c, simpleTestService)

	d2.Stop(c)

	// make sure there is a leader
	poll.WaitOn(c, pollCheck(c, d1.CheckLeader, checker.IsNil()), poll.WithTimeout(defaultReconciliationTimeout))

	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "top1"
	})

	d3.Stop(c)

	var service swarm.Service
	simpleTestService(&service)
	service.Spec.Name = "top2"
	cli := d1.NewClientT(c)
	defer cli.Close()

	// d1 will eventually step down as leader because there is no longer an
	// active quorum; wait for that to happen
	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
		_, err := cli.ServiceCreate(context.Background(), service.Spec, types.ServiceCreateOptions{})
		if err == nil {
			// guard against a nil error, which would panic on err.Error()
			return "", "service create unexpectedly succeeded without a quorum"
		}
		return err.Error(), ""
	}, checker.Contains("Make sure more than half of the managers are online.")), poll.WithTimeout(defaultReconciliationTimeout*2))

	d2.StartNode(c)

	// make sure there is a leader
	poll.WaitOn(c, pollCheck(c, d1.CheckLeader, checker.IsNil()), poll.WithTimeout(defaultReconciliationTimeout))

	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "top3"
	})
}

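// TestAPISwarmLeaveRemovesContainer verifies that force-leaving the swarm
// removes service task containers but keeps plain containers running.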
func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *testing.T) {
	d := s.AddDaemon(c, true, true)

	instances := 2
	d.CreateService(c, simpleTestService, setInstances(instances))

	id, err := d.Cmd("run", "-d", "busybox", "top")
	assert.NilError(c, err, id)
	id = strings.TrimSpace(id)

	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances+1)), poll.WithTimeout(defaultReconciliationTimeout))

	assert.ErrorContains(c, d.SwarmLeave(c, false), "")
	assert.NilError(c, d.SwarmLeave(c, true))

	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))

	id2, err := d.Cmd("ps", "-q")
	assert.NilError(c, err, id2)
	assert.Assert(c, strings.HasPrefix(id, strings.TrimSpace(id2)))
}

// Regression test for #23629: a node whose join is still pending can
// force-leave the swarm without losing its local containers.
func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *testing.T) {
	testRequires(c, Network)
	s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, false, false)

	id, err := d2.Cmd("run", "-d", "busybox", "top")
	assert.NilError(c, err, id)
	id = strings.TrimSpace(id)

	c2 := d2.NewClientT(c)
	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		RemoteAddrs: []string{"123.123.123.123:1234"},
	})
	assert.ErrorContains(c, err, "Timeout was reached")

	info := d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStatePending)

	assert.NilError(c, d2.SwarmLeave(c, true))

	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))

	id2, err := d2.Cmd("ps", "-q")
	assert.NilError(c, err, id2)
	assert.Assert(c, strings.HasPrefix(id, strings.TrimSpace(id2)))
}

// Regression test for #23705: a daemon restarted while its swarm join is
// still pending comes back up in the inactive state.
func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *testing.T) {
	testRequires(c, Network)
	d := s.AddDaemon(c, false, false)
	client := d.NewClientT(c)
	err := client.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d.SwarmListenAddr(),
		RemoteAddrs: []string{"123.123.123.123:1234"},
	})
	assert.ErrorContains(c, err, "Timeout was reached")

	poll.WaitOn(c, pollCheck(c, d.CheckLocalNodeState, checker.Equals(swarm.LocalNodeStatePending)), poll.WithTimeout(defaultReconciliationTimeout))

	d.RestartNode(c)

	info := d.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
}

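// TestAPISwarmManagerRestore verifies that managers keep serving existing
// services across restarts, and after being killed and started again.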
func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *testing.T) {
	d1 := s.AddDaemon(c, true, true)

	instances := 2
	id := d1.CreateService(c, simpleTestService, setInstances(instances))

	d1.GetService(c, id)
	d1.RestartNode(c)
	d1.GetService(c, id)

	d2 := s.AddDaemon(c, true, true)
	d2.GetService(c, id)
	d2.RestartNode(c)
	d2.GetService(c, id)

	d3 := s.AddDaemon(c, true, true)
	d3.GetService(c, id)
	d3.RestartNode(c)
	d3.GetService(c, id)

	err := d3.Kill()
	assert.NilError(c, err)
	time.Sleep(1 * time.Second) // time to handle signal
	d3.StartNode(c)
	d3.GetService(c, id)
}

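// TestAPISwarmScaleNoRollingUpdate verifies that scaling a service up only
// adds tasks and does not replace the containers that are already running.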
func (s *DockerSwarmSuite) TestAPISwarmScaleNoRollingUpdate(c *testing.T) {
	d := s.AddDaemon(c, true, true)

	instances := 2
	id := d.CreateService(c, simpleTestService, setInstances(instances))

	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
	containers := d.ActiveContainers(c)
	instances = 4
	d.UpdateService(c, d.GetService(c, id), setInstances(instances))
	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
	containers2 := d.ActiveContainers(c)

loop0:
	for _, c1 := range containers {
		for _, c2 := range containers2 {
			if c1 == c2 {
				continue loop0
			}
		}
		c.Errorf("container %v not found in new set %#v", c1, containers2)
	}
}

func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *testing.T) {
	d := s.AddDaemon(c, false, false)
	req := swarm.InitRequest{
		ListenAddr: "",
	}
	res, _, err := request.Post("/swarm/init", request.Host(d.Sock()), request.JSONBody(req))
	assert.NilError(c, err)
	assert.Equal(c, res.StatusCode, http.StatusBadRequest)

	req2 := swarm.JoinRequest{
		ListenAddr:  "0.0.0.0:2377",
		RemoteAddrs: []string{""},
	}
	res, _, err = request.Post("/swarm/join", request.Host(d.Sock()), request.JSONBody(req2))
	assert.NilError(c, err)
	assert.Equal(c, res.StatusCode, http.StatusBadRequest)
}

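// TestAPISwarmForceNewCluster verifies that a single manager can recover a
// cluster with ForceNewCluster, and that new nodes can join it afterwards.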
func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *testing.T) {
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)

	instances := 2
	id := d1.CreateService(c, simpleTestService, setInstances(instances))
	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))

	// drain d2; all containers should move to d1
	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
		n.Spec.Availability = swarm.NodeAvailabilityDrain
	})
	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))

	d2.Stop(c)

	d1.SwarmInit(c, swarm.InitRequest{
		ForceNewCluster: true,
		Spec:            swarm.Spec{},
	})

	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))

	d3 := s.AddDaemon(c, true, true)
	info := d3.SwarmInfo(c)
	assert.Equal(c, info.ControlAvailable, true)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)

	instances = 4
	d3.UpdateService(c, d3.GetService(c, id), setInstances(instances))

	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
}

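// simpleTestService is a ServiceConstructor that fills in a minimal
// single-replica busybox service running /bin/top, named "top".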
func simpleTestService(s *swarm.Service) {
	ureplicas := uint64(1)
	restartDelay := 100 * time.Millisecond

	s.Spec = swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{
				Image:   "busybox:latest",
				Command: []string{"/bin/top"},
			},
			RestartPolicy: &swarm.RestartPolicy{
				Delay: &restartDelay,
			},
		},
		Mode: swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{
				Replicas: &ureplicas,
			},
		},
	}
	s.Spec.Name = "top"
}

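// serviceForUpdate is like simpleTestService, but also fills in update and
// rollback configs for tests that exercise rolling updates, and names the
// service "updatetest".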
func serviceForUpdate(s *swarm.Service) {
	ureplicas := uint64(1)
	restartDelay := 100 * time.Millisecond

	s.Spec = swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{
				Image:   "busybox:latest",
				Command: []string{"/bin/top"},
			},
			RestartPolicy: &swarm.RestartPolicy{
				Delay: &restartDelay,
			},
		},
		Mode: swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{
				Replicas: &ureplicas,
			},
		},
		UpdateConfig: &swarm.UpdateConfig{
			Parallelism:   2,
			Delay:         4 * time.Second,
			FailureAction: swarm.UpdateFailureActionContinue,
		},
		RollbackConfig: &swarm.UpdateConfig{
			Parallelism:   3,
			Delay:         4 * time.Second,
			FailureAction: swarm.UpdateFailureActionContinue,
		},
	}
	s.Spec.Name = "updatetest"
}

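// setInstances returns a ServiceConstructor that sets the replica count.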
func setInstances(replicas int) testdaemon.ServiceConstructor {
	ureplicas := uint64(replicas)
	return func(s *swarm.Service) {
		s.Spec.Mode = swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{
				Replicas: &ureplicas,
			},
		}
	}
}

func setUpdateOrder(order string) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.UpdateConfig == nil {
			s.Spec.UpdateConfig = &swarm.UpdateConfig{}
		}
		s.Spec.UpdateConfig.Order = order
	}
}

func setRollbackOrder(order string) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.RollbackConfig == nil {
			s.Spec.RollbackConfig = &swarm.UpdateConfig{}
		}
		s.Spec.RollbackConfig.Order = order
	}
}

func setImage(image string) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.TaskTemplate.ContainerSpec == nil {
			s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
		}
		s.Spec.TaskTemplate.ContainerSpec.Image = image
	}
}

func setFailureAction(failureAction string) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		s.Spec.UpdateConfig.FailureAction = failureAction
	}
}

func setMaxFailureRatio(maxFailureRatio float32) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		s.Spec.UpdateConfig.MaxFailureRatio = maxFailureRatio
	}
}

func setParallelism(parallelism uint64) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		s.Spec.UpdateConfig.Parallelism = parallelism
	}
}

func setConstraints(constraints []string) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.TaskTemplate.Placement == nil {
			s.Spec.TaskTemplate.Placement = &swarm.Placement{}
		}
		s.Spec.TaskTemplate.Placement.Constraints = constraints
	}
}

func setPlacementPrefs(prefs []swarm.PlacementPreference) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.TaskTemplate.Placement == nil {
			s.Spec.TaskTemplate.Placement = &swarm.Placement{}
		}
		s.Spec.TaskTemplate.Placement.Preferences = prefs
	}
}

func setGlobalMode(s *swarm.Service) {
	s.Spec.Mode = swarm.ServiceMode{
		Global: &swarm.GlobalService{},
	}
}

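// checkClusterHealth polls every daemon in cl until the cluster reports a
// leader and all nodes are ready and active, then asserts that the manager
// and worker counts match the expected values.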
func checkClusterHealth(c *testing.T, cl []*daemon.Daemon, managerCount, workerCount int) {
	var totalMCount, totalWCount int

	for _, d := range cl {
		var info swarm.Info

		// check info in a poll.WaitOn(), because if the cluster doesn't have a leader, `info` will return an error
		checkInfo := func(c *testing.T) (interface{}, string) {
			client := d.NewClientT(c)
			daemonInfo, err := client.Info(context.Background())
			info = daemonInfo.Swarm
			return err, "cluster not ready in time"
		}
		poll.WaitOn(c, pollCheck(c, checkInfo, checker.IsNil()), poll.WithTimeout(defaultReconciliationTimeout))
		if !info.ControlAvailable {
			totalWCount++
			continue
		}

		var leaderFound bool
		totalMCount++
		var mCount, wCount int

		for _, n := range d.ListNodes(c) {
			waitReady := func(c *testing.T) (interface{}, string) {
				if n.Status.State == swarm.NodeStateReady {
					return true, ""
				}
				nn := d.GetNode(c, n.ID)
				n = *nn
				return n.Status.State == swarm.NodeStateReady, fmt.Sprintf("state of node %s, reported by %s", n.ID, d.NodeID())
			}
			poll.WaitOn(c, pollCheck(c, waitReady, checker.True()), poll.WithTimeout(defaultReconciliationTimeout))

			waitActive := func(c *testing.T) (interface{}, string) {
				if n.Spec.Availability == swarm.NodeAvailabilityActive {
					return true, ""
				}
				nn := d.GetNode(c, n.ID)
				n = *nn
				return n.Spec.Availability == swarm.NodeAvailabilityActive, fmt.Sprintf("availability of node %s, reported by %s", n.ID, d.NodeID())
			}
			poll.WaitOn(c, pollCheck(c, waitActive, checker.True()), poll.WithTimeout(defaultReconciliationTimeout))

			if n.Spec.Role == swarm.NodeRoleManager {
				assert.Assert(c, n.ManagerStatus != nil, "manager status of node %s (manager), reported by %s", n.ID, d.NodeID())
				if n.ManagerStatus.Leader {
					leaderFound = true
				}
				mCount++
			} else {
				assert.Assert(c, n.ManagerStatus == nil, "manager status of node %s (worker), reported by %s", n.ID, d.NodeID())
				wCount++
			}
		}
		assert.Equal(c, leaderFound, true, "lack of leader reported by node %s", info.NodeID)
		assert.Equal(c, mCount, managerCount, "managers count reported by node %s", info.NodeID)
		assert.Equal(c, wCount, workerCount, "workers count reported by node %s", info.NodeID)
	}
	assert.Equal(c, totalMCount, managerCount)
	assert.Equal(c, totalWCount, workerCount)
}

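// TestAPISwarmRestartCluster verifies that a whole cluster, stopped and
// started in parallel, converges back to a healthy state.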
func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *testing.T) {
	mCount, wCount := 5, 1

	var nodes []*daemon.Daemon
	for i := 0; i < mCount; i++ {
		manager := s.AddDaemon(c, true, true)
		info := manager.SwarmInfo(c)
		assert.Equal(c, info.ControlAvailable, true)
		assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
		nodes = append(nodes, manager)
	}

	for i := 0; i < wCount; i++ {
		worker := s.AddDaemon(c, true, false)
		info := worker.SwarmInfo(c)
		assert.Equal(c, info.ControlAvailable, false)
		assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
		nodes = append(nodes, worker)
	}

	// stop whole cluster
	{
		var wg sync.WaitGroup
		wg.Add(len(nodes))
		errs := make(chan error, len(nodes))

		for _, d := range nodes {
			go func(daemon *daemon.Daemon) {
				defer wg.Done()
				if err := daemon.StopWithError(); err != nil {
					errs <- err
				}
			}(d)
		}
		wg.Wait()
		close(errs)
		for err := range errs {
			assert.NilError(c, err)
		}
	}

	// start whole cluster
	{
		var wg sync.WaitGroup
		wg.Add(len(nodes))
		errs := make(chan error, len(nodes))

		for _, d := range nodes {
			go func(daemon *daemon.Daemon) {
				defer wg.Done()
				if err := daemon.StartWithError("--iptables=false"); err != nil {
					errs <- err
				}
			}(d)
		}
		wg.Wait()
		close(errs)
		for err := range errs {
			assert.NilError(c, err)
		}
	}

	checkClusterHealth(c, nodes, mCount, wCount)
}

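// TestAPISwarmServicesUpdateWithName verifies that a service can be updated
// by name rather than by ID.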
func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *testing.T) {
	d := s.AddDaemon(c, true, true)

	instances := 2
	id := d.CreateService(c, simpleTestService, setInstances(instances))
	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))

	service := d.GetService(c, id)
	instances = 5

	setInstances(instances)(service)
	cli := d.NewClientT(c)
	defer cli.Close()
	_, err := cli.ServiceUpdate(context.Background(), service.Spec.Name, service.Version, service.Spec, types.ServiceUpdateOptions{})
	assert.NilError(c, err)
	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
}

// Unlocking an unlocked swarm results in an error
func (s *DockerSwarmSuite) TestAPISwarmUnlockNotLocked(c *testing.T) {
	d := s.AddDaemon(c, true, true)
	err := d.SwarmUnlock(c, swarm.UnlockRequest{UnlockKey: "wrong-key"})
	assert.ErrorContains(c, err, "swarm is not locked")
}

// Regression test for #29885: initializing a swarm on an address that is
// already in use returns an "address already in use" error.
func (s *DockerSwarmSuite) TestAPISwarmErrorHandling(c *testing.T) {
	ln, err := net.Listen("tcp", fmt.Sprintf(":%d", defaultSwarmPort))
	assert.NilError(c, err)
	defer ln.Close()
	d := s.AddDaemon(c, false, false)
	client := d.NewClientT(c)
	_, err = client.SwarmInit(context.Background(), swarm.InitRequest{
		ListenAddr: d.SwarmListenAddr(),
	})
	assert.ErrorContains(c, err, "address already in use")
}

// Regression test for #30242: duplicate networks with different drivers
// (`bridge` and `overlay`) caused both to report scope `swarm` in
// `docker network inspect` and `docker network ls`.
// This test makes sure the fix reports the correct scope for each network.
func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *testing.T) {
	d := s.AddDaemon(c, true, true)
	cli := d.NewClientT(c)
	defer cli.Close()

	name := "foo"
	networkCreate := types.NetworkCreate{
		CheckDuplicate: false,
	}

	networkCreate.Driver = "bridge"

	n1, err := cli.NetworkCreate(context.Background(), name, networkCreate)
	assert.NilError(c, err)

	networkCreate.Driver = "overlay"

	n2, err := cli.NetworkCreate(context.Background(), name, networkCreate)
	assert.NilError(c, err)

	r1, err := cli.NetworkInspect(context.Background(), n1.ID, types.NetworkInspectOptions{})
	assert.NilError(c, err)
	assert.Equal(c, r1.Scope, "local")

	r2, err := cli.NetworkInspect(context.Background(), n2.ID, types.NetworkInspectOptions{})
	assert.NilError(c, err)
	assert.Equal(c, r2.Scope, "swarm")
}

// Regression test for #30178
func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *testing.T) {
	// Issue #36386 may be an independent problem and is worth further investigation.
	c.Skip("Root cause of Issue #36386 is needed")
	d := s.AddDaemon(c, true, true)

	out, err := d.Cmd("network", "create", "-d", "overlay", "lb")
	assert.NilError(c, err, out)

	instances := 1
	d.CreateService(c, simpleTestService, setInstances(instances), func(s *swarm.Service) {
		if s.Spec.TaskTemplate.ContainerSpec == nil {
			s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
		}
		s.Spec.TaskTemplate.ContainerSpec.Healthcheck = &container.HealthConfig{}
		s.Spec.TaskTemplate.Networks = []swarm.NetworkAttachmentConfig{
			{Target: "lb"},
		}
	})

	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))

	containers := d.ActiveContainers(c)

	out, err = d.Cmd("exec", containers[0], "ping", "-c1", "-W3", "top")
	assert.NilError(c, err, out)
}

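// TestSwarmRepeatedRootRotation rotates the cluster root CA several times,
// alternating between externally provided and swarm-generated roots, and
// verifies that all nodes converge on each new trust root.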
func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *testing.T) {
	m := s.AddDaemon(c, true, true)
	w := s.AddDaemon(c, true, false)

	info := m.SwarmInfo(c)

	currentTrustRoot := info.Cluster.TLSInfo.TrustRoot

	// rotate multiple times
	for i := 0; i < 4; i++ {
		var err error
		var cert, key []byte
		if i%2 != 0 {
			cert, _, key, err = initca.New(&csr.CertificateRequest{
				CN:         "newRoot",
				KeyRequest: csr.NewBasicKeyRequest(),
				CA:         &csr.CAConfig{Expiry: ca.RootCAExpiration},
			})
			assert.NilError(c, err)
		}
		expectedCert := string(cert)
		m.UpdateSwarm(c, func(s *swarm.Spec) {
			s.CAConfig.SigningCACert = expectedCert
			s.CAConfig.SigningCAKey = string(key)
			s.CAConfig.ForceRotate++
		})

		// poll to make sure the update succeeds
		var clusterTLSInfo swarm.TLSInfo
		for j := 0; j < 18; j++ {
			info := m.SwarmInfo(c)

			// the desired CA cert and key are always redacted
			assert.Equal(c, info.Cluster.Spec.CAConfig.SigningCAKey, "")
			assert.Equal(c, info.Cluster.Spec.CAConfig.SigningCACert, "")

			clusterTLSInfo = info.Cluster.TLSInfo

			// if the root rotation is done and the trust root has changed, we don't have to poll anymore
			if !info.Cluster.RootRotationInProgress && clusterTLSInfo.TrustRoot != currentTrustRoot {
				break
			}

			// root rotation not done yet
			time.Sleep(250 * time.Millisecond)
		}
		if cert != nil {
			assert.Equal(c, clusterTLSInfo.TrustRoot, expectedCert)
		}
		// It could take another second or two for the nodes to trust the new
		// roots after they've all gotten new TLS certificates.
		for j := 0; j < 18; j++ {
			mInfo := m.GetNode(c, m.NodeID()).Description.TLSInfo
			wInfo := m.GetNode(c, w.NodeID()).Description.TLSInfo

			if mInfo.TrustRoot == clusterTLSInfo.TrustRoot && wInfo.TrustRoot == clusterTLSInfo.TrustRoot {
				break
			}

			// the nodes don't trust the new root certs yet
			time.Sleep(250 * time.Millisecond)
		}

		assert.DeepEqual(c, m.GetNode(c, m.NodeID()).Description.TLSInfo, clusterTLSInfo)
		assert.DeepEqual(c, m.GetNode(c, w.NodeID()).Description.TLSInfo, clusterTLSInfo)
		currentTrustRoot = clusterTLSInfo.TrustRoot
	}
}

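// TestAPINetworkInspectWithScope verifies that NetworkInspect can be scoped,
// and that a swarm-scoped network is not found under the local scope.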
func (s *DockerSwarmSuite) TestAPINetworkInspectWithScope(c *testing.T) {
	d := s.AddDaemon(c, true, true)

	name := "test-scoped-network"
	ctx := context.Background()
	apiclient := d.NewClientT(c)

	resp, err := apiclient.NetworkCreate(ctx, name, types.NetworkCreate{Driver: "overlay"})
	assert.NilError(c, err)

	network, err := apiclient.NetworkInspect(ctx, name, types.NetworkInspectOptions{})
	assert.NilError(c, err)
	assert.Check(c, is.Equal("swarm", network.Scope))
	assert.Check(c, is.Equal(resp.ID, network.ID))

	_, err = apiclient.NetworkInspect(ctx, name, types.NetworkInspectOptions{Scope: "local"})
	assert.Check(c, client.IsErrNotFound(err))
}