github.com/afbjorklund/moby@v20.10.5+incompatible/integration-cli/docker_api_swarm_test.go

// +build !windows

package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/cloudflare/cfssl/csr"
	"github.com/cloudflare/cfssl/helpers"
	"github.com/cloudflare/cfssl/initca"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
	"github.com/docker/docker/integration-cli/checker"
	"github.com/docker/docker/integration-cli/daemon"
	testdaemon "github.com/docker/docker/testutil/daemon"
	"github.com/docker/docker/testutil/request"
	"github.com/docker/swarmkit/ca"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
	"gotest.tools/v3/poll"
)

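// defaultReconciliationTimeout bounds how long the poll.WaitOn calls in this
// file wait for the cluster to reconcile to an expected state.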
var defaultReconciliationTimeout = 30 * time.Second

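// TestAPISwarmInit verifies basic init, join, and leave behavior, and that
// each node's swarm state is restored after a daemon restart.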
func (s *DockerSwarmSuite) TestAPISwarmInit(c *testing.T) {
	// todo: should find a better way to verify that components are running than /info
	d1 := s.AddDaemon(c, true, true)
	info := d1.SwarmInfo(c)
	assert.Equal(c, info.ControlAvailable, true)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
	assert.Equal(c, info.Cluster.RootRotationInProgress, false)

	d2 := s.AddDaemon(c, true, false)
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.ControlAvailable, false)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)

	// Leaving cluster
	assert.NilError(c, d2.SwarmLeave(c, false))

	info = d2.SwarmInfo(c)
	assert.Equal(c, info.ControlAvailable, false)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)

	d2.SwarmJoin(c, swarm.JoinRequest{
		ListenAddr:  d1.SwarmListenAddr(),
		JoinToken:   d1.JoinTokens(c).Worker,
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})

	info = d2.SwarmInfo(c)
	assert.Equal(c, info.ControlAvailable, false)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)

	// Verify that the current state is restored after restarts
	d1.Stop(c)
	d2.Stop(c)

	d1.StartNode(c)
	d2.StartNode(c)

	info = d1.SwarmInfo(c)
	assert.Equal(c, info.ControlAvailable, true)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)

	info = d2.SwarmInfo(c)
	assert.Equal(c, info.ControlAvailable, false)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
}

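// TestAPISwarmJoinToken verifies that joining requires a valid join token,
// that rotating tokens invalidates the old ones, and that updating the swarm
// spec alone does not rotate tokens.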
func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *testing.T) {
	d1 := s.AddDaemon(c, false, false)
	d1.SwarmInit(c, swarm.InitRequest{})

	// todo: the error message differs depending on whether some components of the token are valid

	d2 := s.AddDaemon(c, false, false)
	c2 := d2.NewClientT(c)
	err := c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	assert.ErrorContains(c, err, "join token is necessary")
	info := d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)

	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		JoinToken:   "foobaz",
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	assert.ErrorContains(c, err, "invalid join token")
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)

	workerToken := d1.JoinTokens(c).Worker

	d2.SwarmJoin(c, swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		JoinToken:   workerToken,
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
	assert.NilError(c, d2.SwarmLeave(c, false))
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)

	// change tokens
	d1.RotateTokens(c)

	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		JoinToken:   workerToken,
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	assert.ErrorContains(c, err, "join token is necessary")
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)

	workerToken = d1.JoinTokens(c).Worker

	d2.SwarmJoin(c, swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.SwarmListenAddr()}})
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
	assert.NilError(c, d2.SwarmLeave(c, false))
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)

	// change spec, don't change tokens
	d1.UpdateSwarm(c, func(s *swarm.Spec) {})

	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	assert.ErrorContains(c, err, "join token is necessary")
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)

	d2.SwarmJoin(c, swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.SwarmListenAddr()}})
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
	assert.NilError(c, d2.SwarmLeave(c, false))
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
}

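// TestUpdateSwarmAddExternalCA verifies that external CAs can be added via a
// spec update and that the CACert field round-trips exactly as provided.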
func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *testing.T) {
	d1 := s.AddDaemon(c, false, false)
	d1.SwarmInit(c, swarm.InitRequest{})
	d1.UpdateSwarm(c, func(s *swarm.Spec) {
		s.CAConfig.ExternalCAs = []*swarm.ExternalCA{
			{
				Protocol: swarm.ExternalCAProtocolCFSSL,
				URL:      "https://thishasnoca.org",
			},
			{
				Protocol: swarm.ExternalCAProtocolCFSSL,
				URL:      "https://thishasacacert.org",
				CACert:   "cacert",
			},
		}
	})
	info := d1.SwarmInfo(c)
	assert.Equal(c, len(info.Cluster.Spec.CAConfig.ExternalCAs), 2)
	assert.Equal(c, info.Cluster.Spec.CAConfig.ExternalCAs[0].CACert, "")
	assert.Equal(c, info.Cluster.Spec.CAConfig.ExternalCAs[1].CACert, "cacert")
}

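// TestAPISwarmCAHash verifies that joining fails when the CA-hash component
// of the join token does not match the cluster's root CA.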
func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *testing.T) {
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, false, false)
	splitToken := strings.Split(d1.JoinTokens(c).Worker, "-")
	splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e"
	replacementToken := strings.Join(splitToken, "-")
	c2 := d2.NewClientT(c)
	err := c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		JoinToken:   replacementToken,
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	assert.ErrorContains(c, err, "remote CA does not match fingerprint")
}

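// TestAPISwarmPromoteDemote exercises promoting a worker to manager and
// demoting it back, and checks that demoting the only manager is rejected.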
func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *testing.T) {
	d1 := s.AddDaemon(c, false, false)
	d1.SwarmInit(c, swarm.InitRequest{})
	d2 := s.AddDaemon(c, true, false)

	info := d2.SwarmInfo(c)
	assert.Equal(c, info.ControlAvailable, false)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)

	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
		n.Spec.Role = swarm.NodeRoleManager
	})

	poll.WaitOn(c, pollCheck(c, d2.CheckControlAvailable, checker.True()), poll.WithTimeout(defaultReconciliationTimeout))

	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
		n.Spec.Role = swarm.NodeRoleWorker
	})

	poll.WaitOn(c, pollCheck(c, d2.CheckControlAvailable, checker.False()), poll.WithTimeout(defaultReconciliationTimeout))

	// Wait for the role to change to worker in the cert. This is partially
	// done because it's something worth testing in its own right, and
	// partially because changing the role from manager to worker and then
	// back to manager quickly might cause the node to pause for a while
	// while waiting for the role to change to worker, and the test can
	// time out during this interval.
	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
		certBytes, err := ioutil.ReadFile(filepath.Join(d2.Folder, "root", "swarm", "certificates", "swarm-node.crt"))
		if err != nil {
			return "", fmt.Sprintf("error: %v", err)
		}
		certs, err := helpers.ParseCertificatesPEM(certBytes)
		if err == nil && len(certs) > 0 && len(certs[0].Subject.OrganizationalUnit) > 0 {
			return certs[0].Subject.OrganizationalUnit[0], ""
		}
		return "", "could not get organizational unit from certificate"
	}, checker.Equals("swarm-worker")), poll.WithTimeout(defaultReconciliationTimeout))

	// Demoting the last manager should fail
	node := d1.GetNode(c, d1.NodeID())
	node.Spec.Role = swarm.NodeRoleWorker
	url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index)
	res, body, err := request.Post(url, request.Host(d1.Sock()), request.JSONBody(node.Spec))
	assert.NilError(c, err)
	b, err := request.ReadBody(body)
	assert.NilError(c, err)
	assert.Equal(c, res.StatusCode, http.StatusBadRequest, "output: %q", string(b))

	// The warning specific to demoting the last manager is best-effort and
	// won't appear until the Role field of the demoted manager has been
	// updated.
	// Yes, I know this looks silly, but checker.Matches is broken, since
	// it anchors the regexp contrary to the documentation, and this makes
	// it impossible to match something that includes a line break.
	if !strings.Contains(string(b), "last manager of the swarm") {
		assert.Assert(c, strings.Contains(string(b), "this would result in a loss of quorum"))
	}
	info = d1.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
	assert.Equal(c, info.ControlAvailable, true)

	// Promote the already-demoted node again
	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
		n.Spec.Role = swarm.NodeRoleManager
	})

	poll.WaitOn(c, pollCheck(c, d2.CheckControlAvailable, checker.True()), poll.WithTimeout(defaultReconciliationTimeout))
}

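// TestAPISwarmLeaderProxy verifies that control API requests sent to any
// manager are proxied to the leader.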
func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *testing.T) {
	// add three managers; one of them is the leader
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)
	d3 := s.AddDaemon(c, true, true)

	// start a service by hitting each of the 3 managers
	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "test1"
	})
	d2.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "test2"
	})
	d3.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "test3"
	})

	// 3 services should be started now, because the requests were proxied to the leader
	// query each node and make sure it returns 3 services
	for _, d := range []*daemon.Daemon{d1, d2, d3} {
		services := d.ListServices(c)
		assert.Equal(c, len(services), 3)
	}
}

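// TestAPISwarmLeaderElection verifies that stopping the leader triggers an
// election among the remaining managers, and that the new leader stays in
// place once the old leader rejoins.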
func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *testing.T) {
	if runtime.GOARCH == "s390x" {
		c.Skip("Disabled on s390x")
	}
	if runtime.GOARCH == "ppc64le" {
		c.Skip("Disabled on ppc64le")
	}

	// Create 3 nodes
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)
	d3 := s.AddDaemon(c, true, true)

	// assert that the first node we made is the leader, and the other two are followers
	assert.Equal(c, d1.GetNode(c, d1.NodeID()).ManagerStatus.Leader, true)
	assert.Equal(c, d1.GetNode(c, d2.NodeID()).ManagerStatus.Leader, false)
	assert.Equal(c, d1.GetNode(c, d3.NodeID()).ManagerStatus.Leader, false)

	d1.Stop(c)

	var (
		leader    *daemon.Daemon   // keep track of the leader
		followers []*daemon.Daemon // keep track of the followers
	)
	var lastErr error
	checkLeader := func(nodes ...*daemon.Daemon) checkF {
		return func(c *testing.T) (interface{}, string) {
			// clear these out before each run
			leader = nil
			followers = nil
			for _, d := range nodes {
				n := d.GetNode(c, d.NodeID(), func(err error) bool {
					if strings.Contains(err.Error(), context.DeadlineExceeded.Error()) || strings.Contains(err.Error(), "swarm does not have a leader") {
						lastErr = err
						return true
					}
					return false
				})
				if n == nil {
					return false, fmt.Sprintf("failed to get node: %v", lastErr)
				}
				if n.ManagerStatus.Leader {
					leader = d
				} else {
					followers = append(followers, d)
				}
			}

			if leader == nil {
				return false, "no leader elected"
			}

			return true, fmt.Sprintf("elected %v", leader.ID())
		}
	}

	// wait for an election to occur
	c.Logf("Waiting for election to occur...")
	poll.WaitOn(c, pollCheck(c, checkLeader(d2, d3), checker.True()), poll.WithTimeout(defaultReconciliationTimeout))

	// assert that we have a new leader
	assert.Assert(c, leader != nil)

	// Keep track of the current leader, since we want that to be chosen.
	stableLeader := leader

	// add d1, the initial leader, back
	d1.StartNode(c)

	// wait for a possible election
	c.Logf("Waiting for possible election...")
	poll.WaitOn(c, pollCheck(c, checkLeader(d1, d2, d3), checker.True()), poll.WithTimeout(defaultReconciliationTimeout))

	// checkLeader has picked out the leader and the followers again;
	// verify that we still have only one leader and two followers
	assert.Assert(c, leader != nil)
	assert.Equal(c, len(followers), 2)
	// and that after we added d1 back, the leader hasn't changed
	assert.Equal(c, leader.NodeID(), stableLeader.NodeID())
}

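// TestAPISwarmRaftQuorum verifies raft quorum behavior: with only one of
// three managers up, service creation fails with a quorum error, and
// restoring a second manager makes the cluster writable again.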
func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *testing.T) {
	if runtime.GOARCH == "s390x" {
		c.Skip("Disabled on s390x")
	}
	if runtime.GOARCH == "ppc64le" {
		c.Skip("Disabled on ppc64le")
	}

	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)
	d3 := s.AddDaemon(c, true, true)

	d1.CreateService(c, simpleTestService)

	d2.Stop(c)

	// make sure there is a leader
	poll.WaitOn(c, pollCheck(c, d1.CheckLeader, checker.IsNil()), poll.WithTimeout(defaultReconciliationTimeout))

	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "top1"
	})

	d3.Stop(c)

	var service swarm.Service
	simpleTestService(&service)
	service.Spec.Name = "top2"
	cli := d1.NewClientT(c)
	defer cli.Close()

	// d1 will eventually step down as leader because there is no longer an active quorum; wait for that to happen
	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
		_, err := cli.ServiceCreate(context.Background(), service.Spec, types.ServiceCreateOptions{})
		if err == nil {
			// guard against a nil-pointer panic if the create unexpectedly succeeds
			return "", "service create unexpectedly succeeded without quorum"
		}
		return err.Error(), ""
	}, checker.Contains("Make sure more than half of the managers are online.")), poll.WithTimeout(defaultReconciliationTimeout*2))

	d2.StartNode(c)

	// make sure there is a leader
	poll.WaitOn(c, pollCheck(c, d1.CheckLeader, checker.IsNil()), poll.WithTimeout(defaultReconciliationTimeout))

	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "top3"
	})
}

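// TestAPISwarmLeaveRemovesContainer verifies that leaving the swarm removes
// service task containers but leaves standalone containers running.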
func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *testing.T) {
	d := s.AddDaemon(c, true, true)

	instances := 2
	d.CreateService(c, simpleTestService, setInstances(instances))

	id, err := d.Cmd("run", "-d", "busybox", "top")
	assert.NilError(c, err, id)
	id = strings.TrimSpace(id)

	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances+1)), poll.WithTimeout(defaultReconciliationTimeout))

	assert.ErrorContains(c, d.SwarmLeave(c, false), "")
	assert.NilError(c, d.SwarmLeave(c, true))

	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))

	id2, err := d.Cmd("ps", "-q")
	assert.NilError(c, err, id2)
	assert.Assert(c, strings.HasPrefix(id, strings.TrimSpace(id2)))
}

// #23629
func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *testing.T) {
	testRequires(c, Network)
	s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, false, false)

	id, err := d2.Cmd("run", "-d", "busybox", "top")
	assert.NilError(c, err, id)
	id = strings.TrimSpace(id)

	c2 := d2.NewClientT(c)
	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		RemoteAddrs: []string{"123.123.123.123:1234"},
	})
	assert.ErrorContains(c, err, "Timeout was reached")

	info := d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStatePending)

	assert.NilError(c, d2.SwarmLeave(c, true))

	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))

	id2, err := d2.Cmd("ps", "-q")
	assert.NilError(c, err, id2)
	assert.Assert(c, strings.HasPrefix(id, strings.TrimSpace(id2)))
}

// #23705
func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *testing.T) {
	testRequires(c, Network)
	d := s.AddDaemon(c, false, false)
	client := d.NewClientT(c)
	err := client.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d.SwarmListenAddr(),
		RemoteAddrs: []string{"123.123.123.123:1234"},
	})
	assert.ErrorContains(c, err, "Timeout was reached")

	poll.WaitOn(c, pollCheck(c, d.CheckLocalNodeState, checker.Equals(swarm.LocalNodeStatePending)), poll.WithTimeout(defaultReconciliationTimeout))

	d.RestartNode(c)

	info := d.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
}

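// TestAPISwarmManagerRestore verifies that service state survives manager
// restarts, including an unclean kill.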
func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *testing.T) {
	d1 := s.AddDaemon(c, true, true)

	instances := 2
	id := d1.CreateService(c, simpleTestService, setInstances(instances))

	d1.GetService(c, id)
	d1.RestartNode(c)
	d1.GetService(c, id)

	d2 := s.AddDaemon(c, true, true)
	d2.GetService(c, id)
	d2.RestartNode(c)
	d2.GetService(c, id)

	d3 := s.AddDaemon(c, true, true)
	d3.GetService(c, id)
	d3.RestartNode(c)
	d3.GetService(c, id)

	err := d3.Kill()
	assert.NilError(c, err)
	time.Sleep(1 * time.Second) // time to handle signal
	d3.StartNode(c)
	d3.GetService(c, id)
}

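// TestAPISwarmScaleNoRollingUpdate verifies that scaling a service up keeps
// the existing containers running rather than replacing them.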
func (s *DockerSwarmSuite) TestAPISwarmScaleNoRollingUpdate(c *testing.T) {
	d := s.AddDaemon(c, true, true)

	instances := 2
	id := d.CreateService(c, simpleTestService, setInstances(instances))

	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
	containers := d.ActiveContainers(c)
	instances = 4
	d.UpdateService(c, d.GetService(c, id), setInstances(instances))
	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
	containers2 := d.ActiveContainers(c)

loop0:
	for _, c1 := range containers {
		for _, c2 := range containers2 {
			if c1 == c2 {
				continue loop0
			}
		}
		c.Errorf("container %v not found in new set %#v", c1, containers2)
	}
}

func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *testing.T) {
	d := s.AddDaemon(c, false, false)
	req := swarm.InitRequest{
		ListenAddr: "",
	}
	res, _, err := request.Post("/swarm/init", request.Host(d.Sock()), request.JSONBody(req))
	assert.NilError(c, err)
	assert.Equal(c, res.StatusCode, http.StatusBadRequest)

	req2 := swarm.JoinRequest{
		ListenAddr:  "0.0.0.0:2377",
		RemoteAddrs: []string{""},
	}
	res, _, err = request.Post("/swarm/join", request.Host(d.Sock()), request.JSONBody(req2))
	assert.NilError(c, err)
	assert.Equal(c, res.StatusCode, http.StatusBadRequest)
}

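// TestAPISwarmForceNewCluster verifies that a manager that has lost quorum
// can recover by re-initializing with ForceNewCluster, keeping its services.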
func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *testing.T) {
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)

	instances := 2
	id := d1.CreateService(c, simpleTestService, setInstances(instances))
	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))

	// drain d2, all containers should move to d1
	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
		n.Spec.Availability = swarm.NodeAvailabilityDrain
	})
	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))

	d2.Stop(c)

	d1.SwarmInit(c, swarm.InitRequest{
		ForceNewCluster: true,
		Spec:            swarm.Spec{},
	})

	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))

	d3 := s.AddDaemon(c, true, true)
	info := d3.SwarmInfo(c)
	assert.Equal(c, info.ControlAvailable, true)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)

	instances = 4
	d3.UpdateService(c, d3.GetService(c, id), setInstances(instances))

	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
}

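// simpleTestService populates a service spec with a single replicated
// busybox task running top; it is the baseline service used throughout
// these tests.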
func simpleTestService(s *swarm.Service) {
	ureplicas := uint64(1)
	restartDelay := 100 * time.Millisecond

	s.Spec = swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{
				Image:   "busybox:latest",
				Command: []string{"/bin/top"},
			},
			RestartPolicy: &swarm.RestartPolicy{
				Delay: &restartDelay,
			},
		},
		Mode: swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{
				Replicas: &ureplicas,
			},
		},
	}
	s.Spec.Name = "top"
}

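// serviceForUpdate is like simpleTestService but also sets update and
// rollback policies so rolling-update behavior can be exercised.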
func serviceForUpdate(s *swarm.Service) {
	ureplicas := uint64(1)
	restartDelay := 100 * time.Millisecond

	s.Spec = swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{
				Image:   "busybox:latest",
				Command: []string{"/bin/top"},
			},
			RestartPolicy: &swarm.RestartPolicy{
				Delay: &restartDelay,
			},
		},
		Mode: swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{
				Replicas: &ureplicas,
			},
		},
		UpdateConfig: &swarm.UpdateConfig{
			Parallelism:   2,
			Delay:         4 * time.Second,
			FailureAction: swarm.UpdateFailureActionContinue,
		},
		RollbackConfig: &swarm.UpdateConfig{
			Parallelism:   3,
			Delay:         4 * time.Second,
			FailureAction: swarm.UpdateFailureActionContinue,
		},
	}
	s.Spec.Name = "updatetest"
}

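// The helpers below are testdaemon.ServiceConstructor building blocks; each
// tweaks one field of a service spec, and they can be combined freely, as in
// d.CreateService(c, simpleTestService, setInstances(3)).
// setInstances sets the replica count of the replicated service mode.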
func setInstances(replicas int) testdaemon.ServiceConstructor {
	ureplicas := uint64(replicas)
	return func(s *swarm.Service) {
		s.Spec.Mode = swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{
				Replicas: &ureplicas,
			},
		}
	}
}

func setUpdateOrder(order string) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.UpdateConfig == nil {
			s.Spec.UpdateConfig = &swarm.UpdateConfig{}
		}
		s.Spec.UpdateConfig.Order = order
	}
}

func setRollbackOrder(order string) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.RollbackConfig == nil {
			s.Spec.RollbackConfig = &swarm.UpdateConfig{}
		}
		s.Spec.RollbackConfig.Order = order
	}
}

func setImage(image string) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.TaskTemplate.ContainerSpec == nil {
			s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
		}
		s.Spec.TaskTemplate.ContainerSpec.Image = image
	}
}

func setFailureAction(failureAction string) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		s.Spec.UpdateConfig.FailureAction = failureAction
	}
}

func setMaxFailureRatio(maxFailureRatio float32) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		s.Spec.UpdateConfig.MaxFailureRatio = maxFailureRatio
	}
}

func setParallelism(parallelism uint64) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		s.Spec.UpdateConfig.Parallelism = parallelism
	}
}

func setConstraints(constraints []string) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.TaskTemplate.Placement == nil {
			s.Spec.TaskTemplate.Placement = &swarm.Placement{}
		}
		s.Spec.TaskTemplate.Placement.Constraints = constraints
	}
}

func setPlacementPrefs(prefs []swarm.PlacementPreference) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.TaskTemplate.Placement == nil {
			s.Spec.TaskTemplate.Placement = &swarm.Placement{}
		}
		s.Spec.TaskTemplate.Placement.Preferences = prefs
	}
}

func setGlobalMode(s *swarm.Service) {
	s.Spec.Mode = swarm.ServiceMode{
		Global: &swarm.GlobalService{},
	}
}
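
// setEnv is not part of the original suite; it is a minimal sketch, in the
// same style as the constructors above, of how an additional spec field
// (here the container environment) could be layered on.
func setEnv(env []string) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.TaskTemplate.ContainerSpec == nil {
			s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
		}
		s.Spec.TaskTemplate.ContainerSpec.Env = env
	}
}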
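// checkClusterHealth polls every daemon until Info succeeds, then verifies
// that the reported node states, availabilities, roles, and manager/worker
// counts match the expected cluster shape.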
func checkClusterHealth(c *testing.T, cl []*daemon.Daemon, managerCount, workerCount int) {
	var totalMCount, totalWCount int

	for _, d := range cl {
		var (
			info swarm.Info
		)

		// check info in a poll.WaitOn(), because Info can return an error while the cluster has no leader
		checkInfo := func(c *testing.T) (interface{}, string) {
			client := d.NewClientT(c)
			daemonInfo, err := client.Info(context.Background())
			info = daemonInfo.Swarm
			return err, "cluster not ready in time"
		}
		poll.WaitOn(c, pollCheck(c, checkInfo, checker.IsNil()), poll.WithTimeout(defaultReconciliationTimeout))
		if !info.ControlAvailable {
			totalWCount++
			continue
		}

		var leaderFound bool
		totalMCount++
		var mCount, wCount int

		for _, n := range d.ListNodes(c) {
			waitReady := func(c *testing.T) (interface{}, string) {
				if n.Status.State == swarm.NodeStateReady {
					return true, ""
				}
				nn := d.GetNode(c, n.ID)
				n = *nn
				return n.Status.State == swarm.NodeStateReady, fmt.Sprintf("state of node %s, reported by %s", n.ID, d.NodeID())
			}
			poll.WaitOn(c, pollCheck(c, waitReady, checker.True()), poll.WithTimeout(defaultReconciliationTimeout))

			waitActive := func(c *testing.T) (interface{}, string) {
				if n.Spec.Availability == swarm.NodeAvailabilityActive {
					return true, ""
				}
				nn := d.GetNode(c, n.ID)
				n = *nn
				return n.Spec.Availability == swarm.NodeAvailabilityActive, fmt.Sprintf("availability of node %s, reported by %s", n.ID, d.NodeID())
			}
			poll.WaitOn(c, pollCheck(c, waitActive, checker.True()), poll.WithTimeout(defaultReconciliationTimeout))

			if n.Spec.Role == swarm.NodeRoleManager {
				assert.Assert(c, n.ManagerStatus != nil, "manager status of node %s (manager), reported by %s", n.ID, d.NodeID())
				if n.ManagerStatus.Leader {
					leaderFound = true
				}
				mCount++
			} else {
				assert.Assert(c, n.ManagerStatus == nil, "manager status of node %s (worker), reported by %s", n.ID, d.NodeID())
				wCount++
			}
		}
		assert.Equal(c, leaderFound, true, "lack of leader reported by node %s", info.NodeID)
		assert.Equal(c, mCount, managerCount, "managers count reported by node %s", info.NodeID)
		assert.Equal(c, wCount, workerCount, "workers count reported by node %s", info.NodeID)
	}
	assert.Equal(c, totalMCount, managerCount)
	assert.Equal(c, totalWCount, workerCount)
}

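// TestAPISwarmRestartCluster stops every node in parallel and then restarts
// them all, verifying that the cluster comes back healthy.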
func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *testing.T) {
	mCount, wCount := 5, 1

	var nodes []*daemon.Daemon
	for i := 0; i < mCount; i++ {
		manager := s.AddDaemon(c, true, true)
		info := manager.SwarmInfo(c)
		assert.Equal(c, info.ControlAvailable, true)
		assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
		nodes = append(nodes, manager)
	}

	for i := 0; i < wCount; i++ {
		worker := s.AddDaemon(c, true, false)
		info := worker.SwarmInfo(c)
		assert.Equal(c, info.ControlAvailable, false)
		assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
		nodes = append(nodes, worker)
	}

	// stop the whole cluster
	{
		var wg sync.WaitGroup
		wg.Add(len(nodes))
		errs := make(chan error, len(nodes))

		for _, d := range nodes {
			go func(d *daemon.Daemon) {
				defer wg.Done()
				if err := d.StopWithError(); err != nil {
					errs <- err
				}
			}(d)
		}
		wg.Wait()
		close(errs)
		for err := range errs {
			assert.NilError(c, err)
		}
	}

	// start the whole cluster
	{
		var wg sync.WaitGroup
		wg.Add(len(nodes))
		errs := make(chan error, len(nodes))

		for _, d := range nodes {
			go func(d *daemon.Daemon) {
				defer wg.Done()
				if err := d.StartWithError("--iptables=false"); err != nil {
					errs <- err
				}
			}(d)
		}
		wg.Wait()
		close(errs)
		for err := range errs {
			assert.NilError(c, err)
		}
	}

	checkClusterHealth(c, nodes, mCount, wCount)
}

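// TestAPISwarmServicesUpdateWithName verifies that a service can be updated
// by name as well as by ID.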
func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *testing.T) {
	d := s.AddDaemon(c, true, true)

	instances := 2
	id := d.CreateService(c, simpleTestService, setInstances(instances))
	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))

	service := d.GetService(c, id)
	instances = 5

	setInstances(instances)(service)
	cli := d.NewClientT(c)
	defer cli.Close()
	_, err := cli.ServiceUpdate(context.Background(), service.Spec.Name, service.Version, service.Spec, types.ServiceUpdateOptions{})
	assert.NilError(c, err)
	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
}

// Unlocking an unlocked swarm results in an error
func (s *DockerSwarmSuite) TestAPISwarmUnlockNotLocked(c *testing.T) {
	d := s.AddDaemon(c, true, true)
	err := d.SwarmUnlock(c, swarm.UnlockRequest{UnlockKey: "wrong-key"})
	assert.ErrorContains(c, err, "swarm is not locked")
}

// #29885
func (s *DockerSwarmSuite) TestAPISwarmErrorHandling(c *testing.T) {
	ln, err := net.Listen("tcp", fmt.Sprintf(":%d", defaultSwarmPort))
	assert.NilError(c, err)
	defer ln.Close()
	d := s.AddDaemon(c, false, false)
	client := d.NewClientT(c)
	_, err = client.SwarmInit(context.Background(), swarm.InitRequest{
		ListenAddr: d.SwarmListenAddr(),
	})
	assert.ErrorContains(c, err, "address already in use")
}

// Test case for 30242, where duplicate networks with different drivers
// (`bridge` and `overlay`) caused both to be reported with scope `swarm` by
// `docker network inspect` and `docker network ls`.
// This test makes sure the fix reports the correct scope for each network.
func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *testing.T) {
	d := s.AddDaemon(c, true, true)
	cli := d.NewClientT(c)
	defer cli.Close()

	name := "foo"
	networkCreate := types.NetworkCreate{
		CheckDuplicate: false,
	}

	networkCreate.Driver = "bridge"

	n1, err := cli.NetworkCreate(context.Background(), name, networkCreate)
	assert.NilError(c, err)

	networkCreate.Driver = "overlay"

	n2, err := cli.NetworkCreate(context.Background(), name, networkCreate)
	assert.NilError(c, err)

	r1, err := cli.NetworkInspect(context.Background(), n1.ID, types.NetworkInspectOptions{})
	assert.NilError(c, err)
	assert.Equal(c, r1.Scope, "local")

	r2, err := cli.NetworkInspect(context.Background(), n2.ID, types.NetworkInspectOptions{})
	assert.NilError(c, err)
	assert.Equal(c, r2.Scope, "swarm")
}

// Test case for 30178
func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *testing.T) {
	// Issue #36386 may be an independent one and is worth further investigation.
	c.Skip("Root cause of Issue #36386 is needed")
	d := s.AddDaemon(c, true, true)

	out, err := d.Cmd("network", "create", "-d", "overlay", "lb")
	assert.NilError(c, err, out)

	instances := 1
	d.CreateService(c, simpleTestService, setInstances(instances), func(s *swarm.Service) {
		if s.Spec.TaskTemplate.ContainerSpec == nil {
			s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
		}
		s.Spec.TaskTemplate.ContainerSpec.Healthcheck = &container.HealthConfig{}
		s.Spec.TaskTemplate.Networks = []swarm.NetworkAttachmentConfig{
			{Target: "lb"},
		}
	})

	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))

	containers := d.ActiveContainers(c)

	out, err = d.Cmd("exec", containers[0], "ping", "-c1", "-W3", "top")
	assert.NilError(c, err, out)
}

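// TestSwarmRepeatedRootRotation rotates the cluster root CA several times,
// alternating between caller-provided and server-generated root certificates,
// and waits for every node to trust each new root.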
func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *testing.T) {
	m := s.AddDaemon(c, true, true)
	w := s.AddDaemon(c, true, false)

	info := m.SwarmInfo(c)

	currentTrustRoot := info.Cluster.TLSInfo.TrustRoot

	// rotate multiple times
	for i := 0; i < 4; i++ {
		var err error
		var cert, key []byte
		if i%2 != 0 {
			cert, _, key, err = initca.New(&csr.CertificateRequest{
				CN:         "newRoot",
				KeyRequest: csr.NewBasicKeyRequest(),
				CA:         &csr.CAConfig{Expiry: ca.RootCAExpiration},
			})
			assert.NilError(c, err)
		}
		expectedCert := string(cert)
		m.UpdateSwarm(c, func(s *swarm.Spec) {
			s.CAConfig.SigningCACert = expectedCert
			s.CAConfig.SigningCAKey = string(key)
			s.CAConfig.ForceRotate++
		})

		// poll to make sure the update succeeds
		var clusterTLSInfo swarm.TLSInfo
		for j := 0; j < 18; j++ {
			info := m.SwarmInfo(c)

			// the desired CA cert and key are always redacted
			assert.Equal(c, info.Cluster.Spec.CAConfig.SigningCAKey, "")
			assert.Equal(c, info.Cluster.Spec.CAConfig.SigningCACert, "")

			clusterTLSInfo = info.Cluster.TLSInfo

			// if the root rotation is done and the trust root has changed, we don't have to poll anymore
			if !info.Cluster.RootRotationInProgress && clusterTLSInfo.TrustRoot != currentTrustRoot {
				break
			}

			// root rotation is not done yet
			time.Sleep(250 * time.Millisecond)
		}
		if cert != nil {
			assert.Equal(c, clusterTLSInfo.TrustRoot, expectedCert)
		}
		// it could take another second or two for the nodes to trust the new roots after they've all gotten
		// new TLS certificates
		for j := 0; j < 18; j++ {
			mInfo := m.GetNode(c, m.NodeID()).Description.TLSInfo
			wInfo := m.GetNode(c, w.NodeID()).Description.TLSInfo

			if mInfo.TrustRoot == clusterTLSInfo.TrustRoot && wInfo.TrustRoot == clusterTLSInfo.TrustRoot {
				break
			}

			// the nodes don't trust the new root certs yet
			time.Sleep(250 * time.Millisecond)
		}

		assert.DeepEqual(c, m.GetNode(c, m.NodeID()).Description.TLSInfo, clusterTLSInfo)
		assert.DeepEqual(c, m.GetNode(c, w.NodeID()).Description.TLSInfo, clusterTLSInfo)
		currentTrustRoot = clusterTLSInfo.TrustRoot
	}
}

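// TestAPINetworkInspectWithScope verifies that NetworkInspect honors the
// Scope option: a swarm-scoped network is reported with scope "swarm" and is
// not found when inspected with scope "local".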
func (s *DockerSwarmSuite) TestAPINetworkInspectWithScope(c *testing.T) {
	d := s.AddDaemon(c, true, true)

	name := "test-scoped-network"
	ctx := context.Background()
	apiclient := d.NewClientT(c)

	resp, err := apiclient.NetworkCreate(ctx, name, types.NetworkCreate{Driver: "overlay"})
	assert.NilError(c, err)

	network, err := apiclient.NetworkInspect(ctx, name, types.NetworkInspectOptions{})
	assert.NilError(c, err)
	assert.Check(c, is.Equal("swarm", network.Scope))
	assert.Check(c, is.Equal(resp.ID, network.ID))

	_, err = apiclient.NetworkInspect(ctx, name, types.NetworkInspectOptions{Scope: "local"})
	assert.Check(c, client.IsErrNotFound(err))
}