github.com/jwhonce/docker@v0.6.7-0.20190327063223-da823cf3a5a3/integration-cli/docker_api_swarm_test.go (about)

     1  // +build !windows
     2  
     3  package main
     4  
     5  import (
     6  	"context"
     7  	"fmt"
     8  	"io/ioutil"
     9  	"net"
    10  	"net/http"
    11  	"path/filepath"
    12  	"runtime"
    13  	"strings"
    14  	"sync"
    15  	"time"
    16  
    17  	"github.com/cloudflare/cfssl/csr"
    18  	"github.com/cloudflare/cfssl/helpers"
    19  	"github.com/cloudflare/cfssl/initca"
    20  	"github.com/docker/docker/api/types"
    21  	"github.com/docker/docker/api/types/container"
    22  	"github.com/docker/docker/api/types/swarm"
    23  	"github.com/docker/docker/client"
    24  	"github.com/docker/docker/integration-cli/checker"
    25  	"github.com/docker/docker/integration-cli/daemon"
    26  	testdaemon "github.com/docker/docker/internal/test/daemon"
    27  	"github.com/docker/docker/internal/test/request"
    28  	"github.com/docker/swarmkit/ca"
    29  	"github.com/go-check/check"
    30  	"gotest.tools/assert"
    31  	is "gotest.tools/assert/cmp"
    32  )
    33  
    34  var defaultReconciliationTimeout = 30 * time.Second
    35  
    36  func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) {
    37  	// todo: should find a better way to verify that components are running than /info
    38  	d1 := s.AddDaemon(c, true, true)
    39  	info := d1.SwarmInfo(c)
    40  	c.Assert(info.ControlAvailable, checker.True)
    41  	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
    42  	c.Assert(info.Cluster.RootRotationInProgress, checker.False)
    43  
    44  	d2 := s.AddDaemon(c, true, false)
    45  	info = d2.SwarmInfo(c)
    46  	c.Assert(info.ControlAvailable, checker.False)
    47  	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
    48  
    49  	// Leaving cluster
    50  	c.Assert(d2.SwarmLeave(c, false), checker.IsNil)
    51  
    52  	info = d2.SwarmInfo(c)
    53  	c.Assert(info.ControlAvailable, checker.False)
    54  	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
    55  
    56  	d2.SwarmJoin(c, swarm.JoinRequest{
    57  		ListenAddr:  d1.SwarmListenAddr(),
    58  		JoinToken:   d1.JoinTokens(c).Worker,
    59  		RemoteAddrs: []string{d1.SwarmListenAddr()},
    60  	})
    61  
    62  	info = d2.SwarmInfo(c)
    63  	c.Assert(info.ControlAvailable, checker.False)
    64  	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
    65  
    66  	// Current state restoring after restarts
    67  	d1.Stop(c)
    68  	d2.Stop(c)
    69  
    70  	d1.StartNode(c)
    71  	d2.StartNode(c)
    72  
    73  	info = d1.SwarmInfo(c)
    74  	c.Assert(info.ControlAvailable, checker.True)
    75  	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
    76  
    77  	info = d2.SwarmInfo(c)
    78  	c.Assert(info.ControlAvailable, checker.False)
    79  	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
    80  }
    81  
// TestAPISwarmJoinToken verifies join-token validation: joining without a
// token or with a malformed token is rejected, the current worker token is
// accepted, rotating the tokens invalidates the old worker token, and a
// plain spec update leaves the existing tokens usable.
func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *check.C) {
	d1 := s.AddDaemon(c, false, false)
	d1.SwarmInit(c, swarm.InitRequest{})

	// todo: error message differs depending if some components of token are valid

	// Joining without any token must be refused and leave d2 inactive.
	d2 := s.AddDaemon(c, false, false)
	c2 := d2.NewClientT(c)
	err := c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "join token is necessary")
	info := d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	// A syntactically invalid token is refused as well.
	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		JoinToken:   "foobaz",
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "invalid join token")
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	workerToken := d1.JoinTokens(c).Worker

	// The current worker token lets d2 join; leave again so the next join
	// attempts start from the inactive state.
	d2.SwarmJoin(c, swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		JoinToken:   workerToken,
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
	c.Assert(d2.SwarmLeave(c, false), checker.IsNil)
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	// change tokens
	d1.RotateTokens(c)

	// The pre-rotation token must no longer be accepted.
	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		JoinToken:   workerToken,
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "join token is necessary")
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	// Refresh to the rotated worker token; it must work again.
	workerToken = d1.JoinTokens(c).Worker

	d2.SwarmJoin(c, swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.SwarmListenAddr()}})
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
	c.Assert(d2.SwarmLeave(c, false), checker.IsNil)
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	// change spec, don't change tokens
	d1.UpdateSwarm(c, func(s *swarm.Spec) {})

	// A join without a token still fails after the spec update...
	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "join token is necessary")
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	// ...but the token obtained before the spec update still works.
	d2.SwarmJoin(c, swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.SwarmListenAddr()}})
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
	c.Assert(d2.SwarmLeave(c, false), checker.IsNil)
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
}
   163  
   164  func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *check.C) {
   165  	d1 := s.AddDaemon(c, false, false)
   166  	d1.SwarmInit(c, swarm.InitRequest{})
   167  	d1.UpdateSwarm(c, func(s *swarm.Spec) {
   168  		s.CAConfig.ExternalCAs = []*swarm.ExternalCA{
   169  			{
   170  				Protocol: swarm.ExternalCAProtocolCFSSL,
   171  				URL:      "https://thishasnoca.org",
   172  			},
   173  			{
   174  				Protocol: swarm.ExternalCAProtocolCFSSL,
   175  				URL:      "https://thishasacacert.org",
   176  				CACert:   "cacert",
   177  			},
   178  		}
   179  	})
   180  	info := d1.SwarmInfo(c)
   181  	c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs, checker.HasLen, 2)
   182  	c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs[0].CACert, checker.Equals, "")
   183  	c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs[1].CACert, checker.Equals, "cacert")
   184  }
   185  
   186  func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *check.C) {
   187  	d1 := s.AddDaemon(c, true, true)
   188  	d2 := s.AddDaemon(c, false, false)
   189  	splitToken := strings.Split(d1.JoinTokens(c).Worker, "-")
   190  	splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e"
   191  	replacementToken := strings.Join(splitToken, "-")
   192  	c2 := d2.NewClientT(c)
   193  	err := c2.SwarmJoin(context.Background(), swarm.JoinRequest{
   194  		ListenAddr:  d2.SwarmListenAddr(),
   195  		JoinToken:   replacementToken,
   196  		RemoteAddrs: []string{d1.SwarmListenAddr()},
   197  	})
   198  	c.Assert(err, checker.NotNil)
   199  	c.Assert(err.Error(), checker.Contains, "remote CA does not match fingerprint")
   200  }
   201  
// TestAPISwarmPromoteDemote verifies node role changes: promoting a worker
// to manager, demoting it back (including waiting for the role change to be
// reflected in its TLS certificate), and that demoting the last remaining
// manager is rejected by the API.
func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *check.C) {
	d1 := s.AddDaemon(c, false, false)
	d1.SwarmInit(c, swarm.InitRequest{})
	d2 := s.AddDaemon(c, true, false)

	// d2 starts out as a plain worker.
	info := d2.SwarmInfo(c)
	c.Assert(info.ControlAvailable, checker.False)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)

	// Promote d2 to manager and wait for it to gain control.
	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
		n.Spec.Role = swarm.NodeRoleManager
	})

	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True)

	// Demote d2 back to worker and wait for control to be revoked.
	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
		n.Spec.Role = swarm.NodeRoleWorker
	})

	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.False)

	// Wait for the role to change to worker in the cert. This is partially
	// done because it's something worth testing in its own right, and
	// partially because changing the role from manager to worker and then
	// back to manager quickly might cause the node to pause for awhile
	// while waiting for the role to change to worker, and the test can
	// time out during this interval.
	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
		certBytes, err := ioutil.ReadFile(filepath.Join(d2.Folder, "root", "swarm", "certificates", "swarm-node.crt"))
		if err != nil {
			return "", check.Commentf("error: %v", err)
		}
		certs, err := helpers.ParseCertificatesPEM(certBytes)
		if err == nil && len(certs) > 0 && len(certs[0].Subject.OrganizationalUnit) > 0 {
			// The node's role is encoded in the cert's OU field.
			return certs[0].Subject.OrganizationalUnit[0], nil
		}
		return "", check.Commentf("could not get organizational unit from certificate")
	}, checker.Equals, "swarm-worker")

	// Demoting last node should fail
	// Use the raw HTTP API rather than the helper so the expected 400
	// response can be inspected instead of failing the test.
	node := d1.GetNode(c, d1.NodeID())
	node.Spec.Role = swarm.NodeRoleWorker
	url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index)
	res, body, err := request.Post(url, request.Host(d1.Sock()), request.JSONBody(node.Spec))
	c.Assert(err, checker.IsNil)
	b, err := request.ReadBody(body)
	c.Assert(err, checker.IsNil)
	c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest, check.Commentf("output: %q", string(b)))

	// The warning specific to demoting the last manager is best-effort and
	// won't appear until the Role field of the demoted manager has been
	// updated.
	// Yes, I know this looks silly, but checker.Matches is broken, since
	// it anchors the regexp contrary to the documentation, and this makes
	// it impossible to match something that includes a line break.
	if !strings.Contains(string(b), "last manager of the swarm") {
		c.Assert(string(b), checker.Contains, "this would result in a loss of quorum")
	}
	// d1 must still be an active manager after the rejected demotion.
	info = d1.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
	c.Assert(info.ControlAvailable, checker.True)

	// Promote already demoted node
	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
		n.Spec.Role = swarm.NodeRoleManager
	})

	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True)
}
   271  
   272  func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *check.C) {
   273  	// add three managers, one of these is leader
   274  	d1 := s.AddDaemon(c, true, true)
   275  	d2 := s.AddDaemon(c, true, true)
   276  	d3 := s.AddDaemon(c, true, true)
   277  
   278  	// start a service by hitting each of the 3 managers
   279  	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
   280  		s.Spec.Name = "test1"
   281  	})
   282  	d2.CreateService(c, simpleTestService, func(s *swarm.Service) {
   283  		s.Spec.Name = "test2"
   284  	})
   285  	d3.CreateService(c, simpleTestService, func(s *swarm.Service) {
   286  		s.Spec.Name = "test3"
   287  	})
   288  
   289  	// 3 services should be started now, because the requests were proxied to leader
   290  	// query each node and make sure it returns 3 services
   291  	for _, d := range []*daemon.Daemon{d1, d2, d3} {
   292  		services := d.ListServices(c)
   293  		c.Assert(services, checker.HasLen, 3)
   294  	}
   295  }
   296  
// TestAPISwarmLeaderElection verifies that stopping the leader of a
// three-manager cluster triggers election of a new leader, and that adding
// the old leader back does not depose the newly elected one.
func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) {
	if runtime.GOARCH == "s390x" {
		c.Skip("Disabled on s390x")
	}
	if runtime.GOARCH == "ppc64le" {
		c.Skip("Disabled on  ppc64le")
	}

	// Create 3 nodes
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)
	d3 := s.AddDaemon(c, true, true)

	// assert that the first node we made is the leader, and the other two are followers
	c.Assert(d1.GetNode(c, d1.NodeID()).ManagerStatus.Leader, checker.True)
	c.Assert(d1.GetNode(c, d2.NodeID()).ManagerStatus.Leader, checker.False)
	c.Assert(d1.GetNode(c, d3.NodeID()).ManagerStatus.Leader, checker.False)

	// Take the current leader down to force an election.
	d1.Stop(c)

	var (
		leader    *daemon.Daemon   // keep track of leader
		followers []*daemon.Daemon // keep track of followers
	)
	// checkLeader returns a poll function that classifies the given nodes
	// into leader/followers (written to the outer variables above) and
	// reports whether a leader currently exists.
	checkLeader := func(nodes ...*daemon.Daemon) checkF {
		return func(c *check.C) (interface{}, check.CommentInterface) {
			// clear these out before each run
			leader = nil
			followers = nil
			for _, d := range nodes {
				if d.GetNode(c, d.NodeID()).ManagerStatus.Leader {
					leader = d
				} else {
					followers = append(followers, d)
				}
			}

			if leader == nil {
				return false, check.Commentf("no leader elected")
			}

			return true, check.Commentf("elected %v", leader.ID())
		}
	}

	// wait for an election to occur
	c.Logf("Waiting for election to occur...")
	waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d2, d3), checker.True)

	// assert that we have a new leader
	c.Assert(leader, checker.NotNil)

	// Keep track of the current leader, since we want that to be chosen.
	stableleader := leader

	// add the d1, the initial leader, back
	d1.StartNode(c)

	// wait for possible election
	c.Logf("Waiting for possible election...")
	waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d1, d2, d3), checker.True)
	// pick out the leader and the followers again

	// verify that we still only have 1 leader and 2 followers
	c.Assert(leader, checker.NotNil)
	c.Assert(followers, checker.HasLen, 2)
	// and that after we added d1 back, the leader hasn't changed
	c.Assert(leader.NodeID(), checker.Equals, stableleader.NodeID())
}
   366  
   367  func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) {
   368  	if runtime.GOARCH == "s390x" {
   369  		c.Skip("Disabled on s390x")
   370  	}
   371  	if runtime.GOARCH == "ppc64le" {
   372  		c.Skip("Disabled on  ppc64le")
   373  	}
   374  
   375  	d1 := s.AddDaemon(c, true, true)
   376  	d2 := s.AddDaemon(c, true, true)
   377  	d3 := s.AddDaemon(c, true, true)
   378  
   379  	d1.CreateService(c, simpleTestService)
   380  
   381  	d2.Stop(c)
   382  
   383  	// make sure there is a leader
   384  	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)
   385  
   386  	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
   387  		s.Spec.Name = "top1"
   388  	})
   389  
   390  	d3.Stop(c)
   391  
   392  	var service swarm.Service
   393  	simpleTestService(&service)
   394  	service.Spec.Name = "top2"
   395  	cli := d1.NewClientT(c)
   396  	defer cli.Close()
   397  
   398  	// d1 will eventually step down from leader because there is no longer an active quorum, wait for that to happen
   399  	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
   400  		_, err := cli.ServiceCreate(context.Background(), service.Spec, types.ServiceCreateOptions{})
   401  		return err.Error(), nil
   402  	}, checker.Contains, "Make sure more than half of the managers are online.")
   403  
   404  	d2.StartNode(c)
   405  
   406  	// make sure there is a leader
   407  	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)
   408  
   409  	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
   410  		s.Spec.Name = "top3"
   411  	})
   412  }
   413  
   414  func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *check.C) {
   415  	d := s.AddDaemon(c, true, true)
   416  
   417  	instances := 2
   418  	d.CreateService(c, simpleTestService, setInstances(instances))
   419  
   420  	id, err := d.Cmd("run", "-d", "busybox", "top")
   421  	c.Assert(err, checker.IsNil, check.Commentf("%s", id))
   422  	id = strings.TrimSpace(id)
   423  
   424  	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances+1)
   425  
   426  	c.Assert(d.SwarmLeave(c, false), checker.NotNil)
   427  	c.Assert(d.SwarmLeave(c, true), checker.IsNil)
   428  
   429  	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)
   430  
   431  	id2, err := d.Cmd("ps", "-q")
   432  	c.Assert(err, checker.IsNil, check.Commentf("%s", id2))
   433  	c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2))
   434  }
   435  
   436  // #23629
   437  func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *check.C) {
   438  	testRequires(c, Network)
   439  	s.AddDaemon(c, true, true)
   440  	d2 := s.AddDaemon(c, false, false)
   441  
   442  	id, err := d2.Cmd("run", "-d", "busybox", "top")
   443  	c.Assert(err, checker.IsNil, check.Commentf("%s", id))
   444  	id = strings.TrimSpace(id)
   445  
   446  	c2 := d2.NewClientT(c)
   447  	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
   448  		ListenAddr:  d2.SwarmListenAddr(),
   449  		RemoteAddrs: []string{"123.123.123.123:1234"},
   450  	})
   451  	c.Assert(err, check.NotNil)
   452  	c.Assert(err.Error(), checker.Contains, "Timeout was reached")
   453  
   454  	info := d2.SwarmInfo(c)
   455  	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending)
   456  
   457  	c.Assert(d2.SwarmLeave(c, true), checker.IsNil)
   458  
   459  	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 1)
   460  
   461  	id2, err := d2.Cmd("ps", "-q")
   462  	c.Assert(err, checker.IsNil, check.Commentf("%s", id2))
   463  	c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2))
   464  }
   465  
   466  // #23705
   467  func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *check.C) {
   468  	testRequires(c, Network)
   469  	d := s.AddDaemon(c, false, false)
   470  	client := d.NewClientT(c)
   471  	err := client.SwarmJoin(context.Background(), swarm.JoinRequest{
   472  		ListenAddr:  d.SwarmListenAddr(),
   473  		RemoteAddrs: []string{"123.123.123.123:1234"},
   474  	})
   475  	c.Assert(err, check.NotNil)
   476  	c.Assert(err.Error(), checker.Contains, "Timeout was reached")
   477  
   478  	waitAndAssert(c, defaultReconciliationTimeout, d.CheckLocalNodeState, checker.Equals, swarm.LocalNodeStatePending)
   479  
   480  	d.RestartNode(c)
   481  
   482  	info := d.SwarmInfo(c)
   483  	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
   484  }
   485  
   486  func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *check.C) {
   487  	d1 := s.AddDaemon(c, true, true)
   488  
   489  	instances := 2
   490  	id := d1.CreateService(c, simpleTestService, setInstances(instances))
   491  
   492  	d1.GetService(c, id)
   493  	d1.RestartNode(c)
   494  	d1.GetService(c, id)
   495  
   496  	d2 := s.AddDaemon(c, true, true)
   497  	d2.GetService(c, id)
   498  	d2.RestartNode(c)
   499  	d2.GetService(c, id)
   500  
   501  	d3 := s.AddDaemon(c, true, true)
   502  	d3.GetService(c, id)
   503  	d3.RestartNode(c)
   504  	d3.GetService(c, id)
   505  
   506  	err := d3.Kill()
   507  	assert.NilError(c, err)
   508  	time.Sleep(1 * time.Second) // time to handle signal
   509  	d3.StartNode(c)
   510  	d3.GetService(c, id)
   511  }
   512  
   513  func (s *DockerSwarmSuite) TestAPISwarmScaleNoRollingUpdate(c *check.C) {
   514  	d := s.AddDaemon(c, true, true)
   515  
   516  	instances := 2
   517  	id := d.CreateService(c, simpleTestService, setInstances(instances))
   518  
   519  	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
   520  	containers := d.ActiveContainers(c)
   521  	instances = 4
   522  	d.UpdateService(c, d.GetService(c, id), setInstances(instances))
   523  	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
   524  	containers2 := d.ActiveContainers(c)
   525  
   526  loop0:
   527  	for _, c1 := range containers {
   528  		for _, c2 := range containers2 {
   529  			if c1 == c2 {
   530  				continue loop0
   531  			}
   532  		}
   533  		c.Errorf("container %v not found in new set %#v", c1, containers2)
   534  	}
   535  }
   536  
   537  func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *check.C) {
   538  	d := s.AddDaemon(c, false, false)
   539  	req := swarm.InitRequest{
   540  		ListenAddr: "",
   541  	}
   542  	res, _, err := request.Post("/swarm/init", request.Host(d.Sock()), request.JSONBody(req))
   543  	c.Assert(err, checker.IsNil)
   544  	c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest)
   545  
   546  	req2 := swarm.JoinRequest{
   547  		ListenAddr:  "0.0.0.0:2377",
   548  		RemoteAddrs: []string{""},
   549  	}
   550  	res, _, err = request.Post("/swarm/join", request.Host(d.Sock()), request.JSONBody(req2))
   551  	c.Assert(err, checker.IsNil)
   552  	c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest)
   553  }
   554  
// TestAPISwarmForceNewCluster verifies that re-initializing with
// ForceNewCluster on a surviving manager recreates a single-manager cluster
// that keeps the existing services running, and that new managers can join
// and scale those services afterwards.
func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) {
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)

	instances := 2
	id := d1.CreateService(c, simpleTestService, setInstances(instances))
	waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances)

	// drain d2, all containers should move to d1
	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
		n.Spec.Availability = swarm.NodeAvailabilityDrain
	})
	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances)
	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 0)

	// Stopping d2 leaves d1 without quorum; ForceNewCluster recovers it as
	// a fresh single-manager cluster.
	d2.Stop(c)

	d1.SwarmInit(c, swarm.InitRequest{
		ForceNewCluster: true,
		Spec:            swarm.Spec{},
	})

	// The existing service tasks must survive the force-new-cluster.
	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances)

	// A new manager can join the recovered cluster...
	d3 := s.AddDaemon(c, true, true)
	info := d3.SwarmInfo(c)
	c.Assert(info.ControlAvailable, checker.True)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)

	// ...and scale the pre-existing service.
	instances = 4
	d3.UpdateService(c, d3.GetService(c, id), setInstances(instances))

	waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances)
}
   589  
   590  func simpleTestService(s *swarm.Service) {
   591  	ureplicas := uint64(1)
   592  	restartDelay := time.Duration(100 * time.Millisecond)
   593  
   594  	s.Spec = swarm.ServiceSpec{
   595  		TaskTemplate: swarm.TaskSpec{
   596  			ContainerSpec: &swarm.ContainerSpec{
   597  				Image:   "busybox:latest",
   598  				Command: []string{"/bin/top"},
   599  			},
   600  			RestartPolicy: &swarm.RestartPolicy{
   601  				Delay: &restartDelay,
   602  			},
   603  		},
   604  		Mode: swarm.ServiceMode{
   605  			Replicated: &swarm.ReplicatedService{
   606  				Replicas: &ureplicas,
   607  			},
   608  		},
   609  	}
   610  	s.Spec.Name = "top"
   611  }
   612  
   613  func serviceForUpdate(s *swarm.Service) {
   614  	ureplicas := uint64(1)
   615  	restartDelay := time.Duration(100 * time.Millisecond)
   616  
   617  	s.Spec = swarm.ServiceSpec{
   618  		TaskTemplate: swarm.TaskSpec{
   619  			ContainerSpec: &swarm.ContainerSpec{
   620  				Image:   "busybox:latest",
   621  				Command: []string{"/bin/top"},
   622  			},
   623  			RestartPolicy: &swarm.RestartPolicy{
   624  				Delay: &restartDelay,
   625  			},
   626  		},
   627  		Mode: swarm.ServiceMode{
   628  			Replicated: &swarm.ReplicatedService{
   629  				Replicas: &ureplicas,
   630  			},
   631  		},
   632  		UpdateConfig: &swarm.UpdateConfig{
   633  			Parallelism:   2,
   634  			Delay:         4 * time.Second,
   635  			FailureAction: swarm.UpdateFailureActionContinue,
   636  		},
   637  		RollbackConfig: &swarm.UpdateConfig{
   638  			Parallelism:   3,
   639  			Delay:         4 * time.Second,
   640  			FailureAction: swarm.UpdateFailureActionContinue,
   641  		},
   642  	}
   643  	s.Spec.Name = "updatetest"
   644  }
   645  
   646  func setInstances(replicas int) testdaemon.ServiceConstructor {
   647  	ureplicas := uint64(replicas)
   648  	return func(s *swarm.Service) {
   649  		s.Spec.Mode = swarm.ServiceMode{
   650  			Replicated: &swarm.ReplicatedService{
   651  				Replicas: &ureplicas,
   652  			},
   653  		}
   654  	}
   655  }
   656  
   657  func setUpdateOrder(order string) testdaemon.ServiceConstructor {
   658  	return func(s *swarm.Service) {
   659  		if s.Spec.UpdateConfig == nil {
   660  			s.Spec.UpdateConfig = &swarm.UpdateConfig{}
   661  		}
   662  		s.Spec.UpdateConfig.Order = order
   663  	}
   664  }
   665  
   666  func setRollbackOrder(order string) testdaemon.ServiceConstructor {
   667  	return func(s *swarm.Service) {
   668  		if s.Spec.RollbackConfig == nil {
   669  			s.Spec.RollbackConfig = &swarm.UpdateConfig{}
   670  		}
   671  		s.Spec.RollbackConfig.Order = order
   672  	}
   673  }
   674  
   675  func setImage(image string) testdaemon.ServiceConstructor {
   676  	return func(s *swarm.Service) {
   677  		if s.Spec.TaskTemplate.ContainerSpec == nil {
   678  			s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
   679  		}
   680  		s.Spec.TaskTemplate.ContainerSpec.Image = image
   681  	}
   682  }
   683  
   684  func setFailureAction(failureAction string) testdaemon.ServiceConstructor {
   685  	return func(s *swarm.Service) {
   686  		s.Spec.UpdateConfig.FailureAction = failureAction
   687  	}
   688  }
   689  
   690  func setMaxFailureRatio(maxFailureRatio float32) testdaemon.ServiceConstructor {
   691  	return func(s *swarm.Service) {
   692  		s.Spec.UpdateConfig.MaxFailureRatio = maxFailureRatio
   693  	}
   694  }
   695  
   696  func setParallelism(parallelism uint64) testdaemon.ServiceConstructor {
   697  	return func(s *swarm.Service) {
   698  		s.Spec.UpdateConfig.Parallelism = parallelism
   699  	}
   700  }
   701  
   702  func setConstraints(constraints []string) testdaemon.ServiceConstructor {
   703  	return func(s *swarm.Service) {
   704  		if s.Spec.TaskTemplate.Placement == nil {
   705  			s.Spec.TaskTemplate.Placement = &swarm.Placement{}
   706  		}
   707  		s.Spec.TaskTemplate.Placement.Constraints = constraints
   708  	}
   709  }
   710  
   711  func setPlacementPrefs(prefs []swarm.PlacementPreference) testdaemon.ServiceConstructor {
   712  	return func(s *swarm.Service) {
   713  		if s.Spec.TaskTemplate.Placement == nil {
   714  			s.Spec.TaskTemplate.Placement = &swarm.Placement{}
   715  		}
   716  		s.Spec.TaskTemplate.Placement.Preferences = prefs
   717  	}
   718  }
   719  
   720  func setGlobalMode(s *swarm.Service) {
   721  	s.Spec.Mode = swarm.ServiceMode{
   722  		Global: &swarm.GlobalService{},
   723  	}
   724  }
   725  
// checkClusterHealth asserts, from the point of view of every daemon in cl,
// that the cluster eventually converges to exactly managerCount manager
// nodes and workerCount worker nodes, that all nodes become ready and
// active, and that each manager sees a leader.
func checkClusterHealth(c *check.C, cl []*daemon.Daemon, managerCount, workerCount int) {
	var totalMCount, totalWCount int

	for _, d := range cl {
		var (
			info swarm.Info
		)

		// check info in a waitAndAssert, because if the cluster doesn't have a leader, `info` will return an error
		checkInfo := func(c *check.C) (interface{}, check.CommentInterface) {
			client := d.NewClientT(c)
			daemonInfo, err := client.Info(context.Background())
			info = daemonInfo.Swarm
			return err, check.Commentf("cluster not ready in time")
		}
		waitAndAssert(c, defaultReconciliationTimeout, checkInfo, checkerIsNilPlaceholder)
		if !info.ControlAvailable {
			totalWCount++
			continue
		}

		var leaderFound bool
		totalMCount++
		var mCount, wCount int

		for _, n := range d.ListNodes(c) {
			// Both closures below deliberately re-fetch the node and
			// overwrite the loop variable n so that later checks in this
			// iteration see the freshest node state.
			waitReady := func(c *check.C) (interface{}, check.CommentInterface) {
				if n.Status.State == swarm.NodeStateReady {
					return true, nil
				}
				nn := d.GetNode(c, n.ID)
				n = *nn
				return n.Status.State == swarm.NodeStateReady, check.Commentf("state of node %s, reported by %s", n.ID, d.NodeID())
			}
			waitAndAssert(c, defaultReconciliationTimeout, waitReady, checker.True)

			waitActive := func(c *check.C) (interface{}, check.CommentInterface) {
				if n.Spec.Availability == swarm.NodeAvailabilityActive {
					return true, nil
				}
				nn := d.GetNode(c, n.ID)
				n = *nn
				return n.Spec.Availability == swarm.NodeAvailabilityActive, check.Commentf("availability of node %s, reported by %s", n.ID, d.NodeID())
			}
			waitAndAssert(c, defaultReconciliationTimeout, waitActive, checker.True)

			if n.Spec.Role == swarm.NodeRoleManager {
				c.Assert(n.ManagerStatus, checker.NotNil, check.Commentf("manager status of node %s (manager), reported by %s", n.ID, d.NodeID()))
				if n.ManagerStatus.Leader {
					leaderFound = true
				}
				mCount++
			} else {
				c.Assert(n.ManagerStatus, checker.IsNil, check.Commentf("manager status of node %s (worker), reported by %s", n.ID, d.NodeID()))
				wCount++
			}
		}
		// Per-daemon view: counts and leader as reported by this node.
		c.Assert(leaderFound, checker.True, check.Commentf("lack of leader reported by node %s", info.NodeID))
		c.Assert(mCount, checker.Equals, managerCount, check.Commentf("managers count reported by node %s", info.NodeID))
		c.Assert(wCount, checker.Equals, workerCount, check.Commentf("workers count reported by node %s", info.NodeID))
	}
	// Cluster-wide totals across all daemons' self-reports.
	c.Assert(totalMCount, checker.Equals, managerCount)
	c.Assert(totalWCount, checker.Equals, workerCount)
}
   790  
   791  func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) {
   792  	mCount, wCount := 5, 1
   793  
   794  	var nodes []*daemon.Daemon
   795  	for i := 0; i < mCount; i++ {
   796  		manager := s.AddDaemon(c, true, true)
   797  		info := manager.SwarmInfo(c)
   798  		c.Assert(info.ControlAvailable, checker.True)
   799  		c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
   800  		nodes = append(nodes, manager)
   801  	}
   802  
   803  	for i := 0; i < wCount; i++ {
   804  		worker := s.AddDaemon(c, true, false)
   805  		info := worker.SwarmInfo(c)
   806  		c.Assert(info.ControlAvailable, checker.False)
   807  		c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
   808  		nodes = append(nodes, worker)
   809  	}
   810  
   811  	// stop whole cluster
   812  	{
   813  		var wg sync.WaitGroup
   814  		wg.Add(len(nodes))
   815  		errs := make(chan error, len(nodes))
   816  
   817  		for _, d := range nodes {
   818  			go func(daemon *daemon.Daemon) {
   819  				defer wg.Done()
   820  				if err := daemon.StopWithError(); err != nil {
   821  					errs <- err
   822  				}
   823  			}(d)
   824  		}
   825  		wg.Wait()
   826  		close(errs)
   827  		for err := range errs {
   828  			c.Assert(err, check.IsNil)
   829  		}
   830  	}
   831  
   832  	// start whole cluster
   833  	{
   834  		var wg sync.WaitGroup
   835  		wg.Add(len(nodes))
   836  		errs := make(chan error, len(nodes))
   837  
   838  		for _, d := range nodes {
   839  			go func(daemon *daemon.Daemon) {
   840  				defer wg.Done()
   841  				if err := daemon.StartWithError("--iptables=false"); err != nil {
   842  					errs <- err
   843  				}
   844  			}(d)
   845  		}
   846  		wg.Wait()
   847  		close(errs)
   848  		for err := range errs {
   849  			c.Assert(err, check.IsNil)
   850  		}
   851  	}
   852  
   853  	checkClusterHealth(c, nodes, mCount, wCount)
   854  }
   855  
   856  func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *check.C) {
   857  	d := s.AddDaemon(c, true, true)
   858  
   859  	instances := 2
   860  	id := d.CreateService(c, simpleTestService, setInstances(instances))
   861  	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
   862  
   863  	service := d.GetService(c, id)
   864  	instances = 5
   865  
   866  	setInstances(instances)(service)
   867  	cli := d.NewClientT(c)
   868  	defer cli.Close()
   869  	_, err := cli.ServiceUpdate(context.Background(), service.Spec.Name, service.Version, service.Spec, types.ServiceUpdateOptions{})
   870  	c.Assert(err, checker.IsNil)
   871  	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
   872  }
   873  
   874  // Unlocking an unlocked swarm results in an error
   875  func (s *DockerSwarmSuite) TestAPISwarmUnlockNotLocked(c *check.C) {
   876  	d := s.AddDaemon(c, true, true)
   877  	err := d.SwarmUnlock(c, swarm.UnlockRequest{UnlockKey: "wrong-key"})
   878  	c.Assert(err, checker.NotNil)
   879  	c.Assert(err.Error(), checker.Contains, "swarm is not locked")
   880  }
   881  
   882  // #29885
   883  func (s *DockerSwarmSuite) TestAPISwarmErrorHandling(c *check.C) {
   884  	ln, err := net.Listen("tcp", fmt.Sprintf(":%d", defaultSwarmPort))
   885  	c.Assert(err, checker.IsNil)
   886  	defer ln.Close()
   887  	d := s.AddDaemon(c, false, false)
   888  	client := d.NewClientT(c)
   889  	_, err = client.SwarmInit(context.Background(), swarm.InitRequest{
   890  		ListenAddr: d.SwarmListenAddr(),
   891  	})
   892  	c.Assert(err, checker.NotNil)
   893  	c.Assert(err.Error(), checker.Contains, "address already in use")
   894  }
   895  
   896  // Test case for 30242, where duplicate networks, with different drivers `bridge` and `overlay`,
   897  // caused both scopes to be `swarm` for `docker network inspect` and `docker network ls`.
   898  // This test makes sure the fixes correctly output scopes instead.
   899  func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *check.C) {
   900  	d := s.AddDaemon(c, true, true)
   901  	cli := d.NewClientT(c)
   902  	defer cli.Close()
   903  
   904  	name := "foo"
   905  	networkCreate := types.NetworkCreate{
   906  		CheckDuplicate: false,
   907  	}
   908  
   909  	networkCreate.Driver = "bridge"
   910  
   911  	n1, err := cli.NetworkCreate(context.Background(), name, networkCreate)
   912  	c.Assert(err, checker.IsNil)
   913  
   914  	networkCreate.Driver = "overlay"
   915  
   916  	n2, err := cli.NetworkCreate(context.Background(), name, networkCreate)
   917  	c.Assert(err, checker.IsNil)
   918  
   919  	r1, err := cli.NetworkInspect(context.Background(), n1.ID, types.NetworkInspectOptions{})
   920  	c.Assert(err, checker.IsNil)
   921  	c.Assert(r1.Scope, checker.Equals, "local")
   922  
   923  	r2, err := cli.NetworkInspect(context.Background(), n2.ID, types.NetworkInspectOptions{})
   924  	c.Assert(err, checker.IsNil)
   925  	c.Assert(r2.Scope, checker.Equals, "swarm")
   926  }
   927  
   928  // Test case for 30178
   929  func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *check.C) {
   930  	// Issue #36386 can be a independent one, which is worth further investigation.
   931  	c.Skip("Root cause of Issue #36386 is needed")
   932  	d := s.AddDaemon(c, true, true)
   933  
   934  	out, err := d.Cmd("network", "create", "-d", "overlay", "lb")
   935  	c.Assert(err, checker.IsNil, check.Commentf("%s", out))
   936  
   937  	instances := 1
   938  	d.CreateService(c, simpleTestService, setInstances(instances), func(s *swarm.Service) {
   939  		if s.Spec.TaskTemplate.ContainerSpec == nil {
   940  			s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
   941  		}
   942  		s.Spec.TaskTemplate.ContainerSpec.Healthcheck = &container.HealthConfig{}
   943  		s.Spec.TaskTemplate.Networks = []swarm.NetworkAttachmentConfig{
   944  			{Target: "lb"},
   945  		}
   946  	})
   947  
   948  	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
   949  
   950  	containers := d.ActiveContainers(c)
   951  
   952  	out, err = d.Cmd("exec", containers[0], "ping", "-c1", "-W3", "top")
   953  	c.Assert(err, checker.IsNil, check.Commentf("%s", out))
   954  }
   955  
// TestSwarmRepeatedRootRotation rotates the swarm root CA four times in a row,
// alternating between a caller-supplied root (generated with cfssl initca on
// odd iterations) and a swarm-generated one (empty cert/key on even
// iterations), and verifies after each rotation that the cluster and both
// nodes converge on the new trust root.
func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *check.C) {
	m := s.AddDaemon(c, true, true)
	w := s.AddDaemon(c, true, false)

	info := m.SwarmInfo(c)

	currentTrustRoot := info.Cluster.TLSInfo.TrustRoot

	// rotate multiple times
	for i := 0; i < 4; i++ {
		var err error
		var cert, key []byte
		// Odd iterations: supply our own CA cert/key. Even iterations:
		// cert/key stay nil so the rotation uses a swarm-generated root.
		if i%2 != 0 {
			cert, _, key, err = initca.New(&csr.CertificateRequest{
				CN:         "newRoot",
				KeyRequest: csr.NewBasicKeyRequest(),
				CA:         &csr.CAConfig{Expiry: ca.RootCAExpiration},
			})
			c.Assert(err, checker.IsNil)
		}
		// expectedCert is "" when swarm generates the root itself.
		expectedCert := string(cert)
		// Bumping ForceRotate triggers the rotation even when cert/key are
		// unchanged (the swarm-generated case).
		m.UpdateSwarm(c, func(s *swarm.Spec) {
			s.CAConfig.SigningCACert = expectedCert
			s.CAConfig.SigningCAKey = string(key)
			s.CAConfig.ForceRotate++
		})

		// poll to make sure update succeeds
		var clusterTLSInfo swarm.TLSInfo
		// Up to 18 * 250ms = 4.5s of polling per rotation.
		for j := 0; j < 18; j++ {
			// NOTE: shadows the outer `info` deliberately; only this
			// iteration's snapshot is inspected.
			info := m.SwarmInfo(c)

			// the desired CA cert and key is always redacted
			c.Assert(info.Cluster.Spec.CAConfig.SigningCAKey, checker.Equals, "")
			c.Assert(info.Cluster.Spec.CAConfig.SigningCACert, checker.Equals, "")

			clusterTLSInfo = info.Cluster.TLSInfo

			// if root rotation is done and the trust root has changed, we don't have to poll anymore
			if !info.Cluster.RootRotationInProgress && clusterTLSInfo.TrustRoot != currentTrustRoot {
				break
			}

			// root rotation not done
			time.Sleep(250 * time.Millisecond)
		}
		// When we supplied the cert, the cluster must now trust exactly it.
		if cert != nil {
			c.Assert(clusterTLSInfo.TrustRoot, checker.Equals, expectedCert)
		}
		// could take another second or two for the nodes to trust the new roots after they've all gotten
		// new TLS certificates
		for j := 0; j < 18; j++ {
			mInfo := m.GetNode(c, m.NodeID()).Description.TLSInfo
			wInfo := m.GetNode(c, w.NodeID()).Description.TLSInfo

			if mInfo.TrustRoot == clusterTLSInfo.TrustRoot && wInfo.TrustRoot == clusterTLSInfo.TrustRoot {
				break
			}

			// nodes don't trust root certs yet
			time.Sleep(250 * time.Millisecond)
		}

		// Both nodes' reported TLS info must fully match the cluster's.
		c.Assert(m.GetNode(c, m.NodeID()).Description.TLSInfo, checker.DeepEquals, clusterTLSInfo)
		c.Assert(m.GetNode(c, w.NodeID()).Description.TLSInfo, checker.DeepEquals, clusterTLSInfo)
		// Carry the new root forward as the baseline for the next rotation.
		currentTrustRoot = clusterTLSInfo.TrustRoot
	}
}
  1024  
  1025  func (s *DockerSwarmSuite) TestAPINetworkInspectWithScope(c *check.C) {
  1026  	d := s.AddDaemon(c, true, true)
  1027  
  1028  	name := "test-scoped-network"
  1029  	ctx := context.Background()
  1030  	apiclient := d.NewClientT(c)
  1031  
  1032  	resp, err := apiclient.NetworkCreate(ctx, name, types.NetworkCreate{Driver: "overlay"})
  1033  	assert.NilError(c, err)
  1034  
  1035  	network, err := apiclient.NetworkInspect(ctx, name, types.NetworkInspectOptions{})
  1036  	assert.NilError(c, err)
  1037  	assert.Check(c, is.Equal("swarm", network.Scope))
  1038  	assert.Check(c, is.Equal(resp.ID, network.ID))
  1039  
  1040  	_, err = apiclient.NetworkInspect(ctx, name, types.NetworkInspectOptions{Scope: "local"})
  1041  	assert.Check(c, client.IsErrNotFound(err))
  1042  }