github.com/rita33cool1/iot-system-gateway@v0.0.0-20200911033302-e65bde238cc5/docker-engine/integration-cli/docker_api_swarm_test.go (about)

     1  // +build !windows
     2  
     3  package main
     4  
     5  import (
     6  	"fmt"
     7  	"io/ioutil"
     8  	"net"
     9  	"net/http"
    10  	"os"
    11  	"path/filepath"
    12  	"strings"
    13  	"sync"
    14  	"time"
    15  
    16  	"github.com/cloudflare/cfssl/csr"
    17  	"github.com/cloudflare/cfssl/helpers"
    18  	"github.com/cloudflare/cfssl/initca"
    19  	"github.com/docker/docker/api/types"
    20  	"github.com/docker/docker/api/types/container"
    21  	"github.com/docker/docker/api/types/swarm"
    22  	"github.com/docker/docker/client"
    23  	"github.com/docker/docker/integration-cli/checker"
    24  	"github.com/docker/docker/integration-cli/daemon"
    25  	"github.com/docker/docker/integration-cli/request"
    26  	testdaemon "github.com/docker/docker/internal/test/daemon"
    27  	"github.com/docker/swarmkit/ca"
    28  	"github.com/go-check/check"
    29  	"github.com/gotestyourself/gotestyourself/assert"
    30  	is "github.com/gotestyourself/gotestyourself/assert/cmp"
    31  	"golang.org/x/net/context"
    32  )
    33  
// defaultReconciliationTimeout bounds how long waitAndAssert polls for the
// swarm to reconcile to the expected state in these tests.
var defaultReconciliationTimeout = 30 * time.Second
    35  
// TestAPISwarmInit exercises the basic swarm lifecycle: init a manager,
// join a worker, leave, re-join, and verify that node state is restored
// after both daemons restart.
func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) {
	// todo: should find a better way to verify that components are running than /info
	d1 := s.AddDaemon(c, true, true) // manager that performed the init
	info := d1.SwarmInfo(c)
	c.Assert(info.ControlAvailable, checker.True)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
	c.Assert(info.Cluster.RootRotationInProgress, checker.False)

	d2 := s.AddDaemon(c, true, false) // worker: active, but no control plane
	info = d2.SwarmInfo(c)
	c.Assert(info.ControlAvailable, checker.False)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)

	// Leaving cluster
	c.Assert(d2.SwarmLeave(false), checker.IsNil)

	info = d2.SwarmInfo(c)
	c.Assert(info.ControlAvailable, checker.False)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	// Re-join as a worker using d1's worker join token.
	d2.SwarmJoin(c, swarm.JoinRequest{
		ListenAddr:  d1.SwarmListenAddr(),
		JoinToken:   d1.JoinTokens(c).Worker,
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})

	info = d2.SwarmInfo(c)
	c.Assert(info.ControlAvailable, checker.False)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)

	// Current state restoring after restarts
	d1.Stop(c)
	d2.Stop(c)

	d1.Start(c)
	d2.Start(c)

	info = d1.SwarmInfo(c)
	c.Assert(info.ControlAvailable, checker.True)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)

	info = d2.SwarmInfo(c)
	c.Assert(info.ControlAvailable, checker.False)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
}
    81  
// TestAPISwarmJoinToken verifies join-token validation: joining without a
// token or with a malformed/stale token is rejected, a valid worker token
// succeeds, rotating tokens invalidates the old one, and updating the swarm
// spec alone does NOT rotate tokens.
func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *check.C) {
	d1 := s.AddDaemon(c, false, false)
	d1.SwarmInit(c, swarm.InitRequest{})

	// todo: error message differs depending if some components of token are valid

	d2 := s.AddDaemon(c, false, false)
	c2 := d2.NewClientT(c)
	// joining with no token at all must be rejected
	err := c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "join token is necessary")
	info := d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	// joining with a malformed token must be rejected
	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		JoinToken:   "foobaz",
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "invalid join token")
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	workerToken := d1.JoinTokens(c).Worker

	// a valid worker token allows the join; leave again to reuse d2 below
	d2.SwarmJoin(c, swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		JoinToken:   workerToken,
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
	c.Assert(d2.SwarmLeave(false), checker.IsNil)
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	// change tokens
	d1.RotateTokens(c)

	// the pre-rotation token must now be rejected
	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		JoinToken:   workerToken,
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "join token is necessary")
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	workerToken = d1.JoinTokens(c).Worker

	d2.SwarmJoin(c, swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.SwarmListenAddr()}})
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
	c.Assert(d2.SwarmLeave(false), checker.IsNil)
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	// change spec, don't change tokens
	d1.UpdateSwarm(c, func(s *swarm.Spec) {})

	// a spec update must not have rotated the tokens: a token-less join
	// still fails, and the previously fetched token still works
	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "join token is necessary")
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	d2.SwarmJoin(c, swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.SwarmListenAddr()}})
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
	c.Assert(d2.SwarmLeave(false), checker.IsNil)
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
}
   163  
// TestUpdateSwarmAddExternalCA checks that external CAs configured via a
// swarm-spec update are stored and reported back by /info, with the
// optional CACert field kept only where it was provided.
func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *check.C) {
	d1 := s.AddDaemon(c, false, false)
	d1.SwarmInit(c, swarm.InitRequest{})
	d1.UpdateSwarm(c, func(s *swarm.Spec) {
		s.CAConfig.ExternalCAs = []*swarm.ExternalCA{
			{
				Protocol: swarm.ExternalCAProtocolCFSSL,
				URL:      "https://thishasnoca.org",
			},
			{
				Protocol: swarm.ExternalCAProtocolCFSSL,
				URL:      "https://thishasacacert.org",
				CACert:   "cacert",
			},
		}
	})
	info := d1.SwarmInfo(c)
	c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs, checker.HasLen, 2)
	// first CA had no CACert set, so it must come back empty
	c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs[0].CACert, checker.Equals, "")
	c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs[1].CACert, checker.Equals, "cacert")
}
   185  
// TestAPISwarmCAHash verifies that a join token whose CA-digest segment
// does not match the cluster's actual root CA fingerprint is rejected.
func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *check.C) {
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, false, false)
	// corrupt the CA-digest segment (third "-"-separated field) of an
	// otherwise valid worker token
	splitToken := strings.Split(d1.JoinTokens(c).Worker, "-")
	splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e"
	replacementToken := strings.Join(splitToken, "-")
	c2 := d2.NewClientT(c)
	err := c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		JoinToken:   replacementToken,
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "remote CA does not match fingerprint")
}
   201  
// TestAPISwarmPromoteDemote promotes a worker to manager and back, checks
// that the node's TLS certificate reflects the demotion, verifies that
// demoting the last remaining manager is refused, and finally re-promotes
// the demoted node.
func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *check.C) {
	d1 := s.AddDaemon(c, false, false)
	d1.SwarmInit(c, swarm.InitRequest{})
	d2 := s.AddDaemon(c, true, false)

	info := d2.SwarmInfo(c)
	c.Assert(info.ControlAvailable, checker.False)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)

	// promote d2 to manager
	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
		n.Spec.Role = swarm.NodeRoleManager
	})

	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True)

	// demote d2 back to worker
	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
		n.Spec.Role = swarm.NodeRoleWorker
	})

	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.False)

	// Wait for the role to change to worker in the cert. This is partially
	// done because it's something worth testing in its own right, and
	// partially because changing the role from manager to worker and then
	// back to manager quickly might cause the node to pause for awhile
	// while waiting for the role to change to worker, and the test can
	// time out during this interval.
	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
		certBytes, err := ioutil.ReadFile(filepath.Join(d2.Folder, "root", "swarm", "certificates", "swarm-node.crt"))
		if err != nil {
			return "", check.Commentf("error: %v", err)
		}
		certs, err := helpers.ParseCertificatesPEM(certBytes)
		if err == nil && len(certs) > 0 && len(certs[0].Subject.OrganizationalUnit) > 0 {
			// the node's role is encoded in the cert's OU field
			return certs[0].Subject.OrganizationalUnit[0], nil
		}
		return "", check.Commentf("could not get organizational unit from certificate")
	}, checker.Equals, "swarm-worker")

	// Demoting last node should fail
	node := d1.GetNode(c, d1.NodeID())
	node.Spec.Role = swarm.NodeRoleWorker
	url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index)
	res, body, err := request.DoOnHost(d1.Sock(), url, request.Method("POST"), request.JSONBody(node.Spec))
	c.Assert(err, checker.IsNil)
	b, err := request.ReadBody(body)
	c.Assert(err, checker.IsNil)
	c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest, check.Commentf("output: %q", string(b)))

	// The warning specific to demoting the last manager is best-effort and
	// won't appear until the Role field of the demoted manager has been
	// updated.
	// Yes, I know this looks silly, but checker.Matches is broken, since
	// it anchors the regexp contrary to the documentation, and this makes
	// it impossible to match something that includes a line break.
	if !strings.Contains(string(b), "last manager of the swarm") {
		c.Assert(string(b), checker.Contains, "this would result in a loss of quorum")
	}
	info = d1.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
	c.Assert(info.ControlAvailable, checker.True)

	// Promote already demoted node
	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
		n.Spec.Role = swarm.NodeRoleManager
	})

	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True)
}
   271  
   272  func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *check.C) {
   273  	// add three managers, one of these is leader
   274  	d1 := s.AddDaemon(c, true, true)
   275  	d2 := s.AddDaemon(c, true, true)
   276  	d3 := s.AddDaemon(c, true, true)
   277  
   278  	// start a service by hitting each of the 3 managers
   279  	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
   280  		s.Spec.Name = "test1"
   281  	})
   282  	d2.CreateService(c, simpleTestService, func(s *swarm.Service) {
   283  		s.Spec.Name = "test2"
   284  	})
   285  	d3.CreateService(c, simpleTestService, func(s *swarm.Service) {
   286  		s.Spec.Name = "test3"
   287  	})
   288  
   289  	// 3 services should be started now, because the requests were proxied to leader
   290  	// query each node and make sure it returns 3 services
   291  	for _, d := range []*daemon.Daemon{d1, d2, d3} {
   292  		services := d.ListServices(c)
   293  		c.Assert(services, checker.HasLen, 3)
   294  	}
   295  }
   296  
// TestAPISwarmLeaderElection stops the initial leader of a 3-manager
// cluster, waits for a new leader to be elected among the remaining two,
// then restarts the old leader and checks that the election result is
// stable (the new leader keeps its role).
func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) {
	// Create 3 nodes
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)
	d3 := s.AddDaemon(c, true, true)

	// assert that the first node we made is the leader, and the other two are followers
	c.Assert(d1.GetNode(c, d1.NodeID()).ManagerStatus.Leader, checker.True)
	c.Assert(d1.GetNode(c, d2.NodeID()).ManagerStatus.Leader, checker.False)
	c.Assert(d1.GetNode(c, d3.NodeID()).ManagerStatus.Leader, checker.False)

	d1.Stop(c)

	var (
		leader    *daemon.Daemon   // keep track of leader
		followers []*daemon.Daemon // keep track of followers
	)
	// checkLeader builds a poll function that classifies the given nodes
	// into leader/followers (stored in the captured variables above) and
	// reports whether a leader has been elected.
	checkLeader := func(nodes ...*daemon.Daemon) checkF {
		return func(c *check.C) (interface{}, check.CommentInterface) {
			// clear these out before each run
			leader = nil
			followers = nil
			for _, d := range nodes {
				if d.GetNode(c, d.NodeID()).ManagerStatus.Leader {
					leader = d
				} else {
					followers = append(followers, d)
				}
			}

			if leader == nil {
				return false, check.Commentf("no leader elected")
			}

			return true, check.Commentf("elected %v", leader.ID())
		}
	}

	// wait for an election to occur
	waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d2, d3), checker.True)

	// assert that we have a new leader
	c.Assert(leader, checker.NotNil)

	// Keep track of the current leader, since we want that to be chosen.
	stableleader := leader

	// add the d1, the initial leader, back
	d1.Start(c)

	// TODO(stevvooe): may need to wait for rejoin here

	// wait for possible election
	waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d1, d2, d3), checker.True)
	// pick out the leader and the followers again

	// verify that we still only have 1 leader and 2 followers
	c.Assert(leader, checker.NotNil)
	c.Assert(followers, checker.HasLen, 2)
	// and that after we added d1 back, the leader hasn't changed
	c.Assert(leader.NodeID(), checker.Equals, stableleader.NodeID())
}
   359  
   360  func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) {
   361  	d1 := s.AddDaemon(c, true, true)
   362  	d2 := s.AddDaemon(c, true, true)
   363  	d3 := s.AddDaemon(c, true, true)
   364  
   365  	d1.CreateService(c, simpleTestService)
   366  
   367  	d2.Stop(c)
   368  
   369  	// make sure there is a leader
   370  	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)
   371  
   372  	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
   373  		s.Spec.Name = "top1"
   374  	})
   375  
   376  	d3.Stop(c)
   377  
   378  	var service swarm.Service
   379  	simpleTestService(&service)
   380  	service.Spec.Name = "top2"
   381  	cli, err := d1.NewClient()
   382  	c.Assert(err, checker.IsNil)
   383  	defer cli.Close()
   384  
   385  	// d1 will eventually step down from leader because there is no longer an active quorum, wait for that to happen
   386  	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
   387  		_, err = cli.ServiceCreate(context.Background(), service.Spec, types.ServiceCreateOptions{})
   388  		return err.Error(), nil
   389  	}, checker.Contains, "Make sure more than half of the managers are online.")
   390  
   391  	d2.Start(c)
   392  
   393  	// make sure there is a leader
   394  	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)
   395  
   396  	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
   397  		s.Spec.Name = "top3"
   398  	})
   399  }
   400  
// TestAPISwarmLeaveRemovesContainer checks that force-leaving the swarm
// removes the service task containers but leaves a plain "docker run"
// container untouched.
func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *check.C) {
	d := s.AddDaemon(c, true, true)

	instances := 2
	d.CreateService(c, simpleTestService, setInstances(instances))

	// one non-swarm container alongside the service tasks
	id, err := d.Cmd("run", "-d", "busybox", "top")
	c.Assert(err, checker.IsNil)
	id = strings.TrimSpace(id)

	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances+1)

	// leave must be refused while the node is a manager unless forced
	c.Assert(d.SwarmLeave(false), checker.NotNil)
	c.Assert(d.SwarmLeave(true), checker.IsNil)

	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)

	// the surviving container must be the one started with "docker run"
	id2, err := d.Cmd("ps", "-q")
	c.Assert(err, checker.IsNil)
	c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2))
}
   422  
   423  // #23629
// #23629
// TestAPISwarmLeaveOnPendingJoin checks that a node stuck in the "pending"
// join state (join to an unreachable remote) can still force-leave, and
// that doing so does not remove its existing containers.
func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *check.C) {
	testRequires(c, Network)
	s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, false, false)

	id, err := d2.Cmd("run", "-d", "busybox", "top")
	c.Assert(err, checker.IsNil)
	id = strings.TrimSpace(id)

	// join against an unreachable address so the node hangs in "pending"
	c2 := d2.NewClientT(c)
	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		RemoteAddrs: []string{"123.123.123.123:1234"},
	})
	c.Assert(err, check.NotNil)
	c.Assert(err.Error(), checker.Contains, "Timeout was reached")

	info := d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending)

	c.Assert(d2.SwarmLeave(true), checker.IsNil)

	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 1)

	// the pre-existing container must have survived the leave
	id2, err := d2.Cmd("ps", "-q")
	c.Assert(err, checker.IsNil)
	c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2))
}
   452  
   453  // #23705
// #23705
// TestAPISwarmRestoreOnPendingJoin checks that a node left in the
// "pending" join state comes back as "inactive" (not stuck pending)
// after a daemon restart.
func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *check.C) {
	testRequires(c, Network)
	d := s.AddDaemon(c, false, false)
	client := d.NewClientT(c)
	// join against an unreachable address so the node hangs in "pending"
	err := client.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d.SwarmListenAddr(),
		RemoteAddrs: []string{"123.123.123.123:1234"},
	})
	c.Assert(err, check.NotNil)
	c.Assert(err.Error(), checker.Contains, "Timeout was reached")

	waitAndAssert(c, defaultReconciliationTimeout, d.CheckLocalNodeState, checker.Equals, swarm.LocalNodeStatePending)

	d.Stop(c)
	d.Start(c)

	info := d.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
}
   473  
   474  func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *check.C) {
   475  	d1 := s.AddDaemon(c, true, true)
   476  
   477  	instances := 2
   478  	id := d1.CreateService(c, simpleTestService, setInstances(instances))
   479  
   480  	d1.GetService(c, id)
   481  	d1.Stop(c)
   482  	d1.Start(c)
   483  	d1.GetService(c, id)
   484  
   485  	d2 := s.AddDaemon(c, true, true)
   486  	d2.GetService(c, id)
   487  	d2.Stop(c)
   488  	d2.Start(c)
   489  	d2.GetService(c, id)
   490  
   491  	d3 := s.AddDaemon(c, true, true)
   492  	d3.GetService(c, id)
   493  	d3.Stop(c)
   494  	d3.Start(c)
   495  	d3.GetService(c, id)
   496  
   497  	d3.Kill()
   498  	time.Sleep(1 * time.Second) // time to handle signal
   499  	d3.Start(c)
   500  	d3.GetService(c, id)
   501  }
   502  
   503  func (s *DockerSwarmSuite) TestAPISwarmScaleNoRollingUpdate(c *check.C) {
   504  	d := s.AddDaemon(c, true, true)
   505  
   506  	instances := 2
   507  	id := d.CreateService(c, simpleTestService, setInstances(instances))
   508  
   509  	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
   510  	containers := d.ActiveContainers()
   511  	instances = 4
   512  	d.UpdateService(c, d.GetService(c, id), setInstances(instances))
   513  	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
   514  	containers2 := d.ActiveContainers()
   515  
   516  loop0:
   517  	for _, c1 := range containers {
   518  		for _, c2 := range containers2 {
   519  			if c1 == c2 {
   520  				continue loop0
   521  			}
   522  		}
   523  		c.Errorf("container %v not found in new set %#v", c1, containers2)
   524  	}
   525  }
   526  
   527  func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *check.C) {
   528  	d := s.AddDaemon(c, false, false)
   529  	req := swarm.InitRequest{
   530  		ListenAddr: "",
   531  	}
   532  	res, _, err := request.DoOnHost(d.Sock(), "/swarm/init", request.Method("POST"), request.JSONBody(req))
   533  	c.Assert(err, checker.IsNil)
   534  	c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest)
   535  
   536  	req2 := swarm.JoinRequest{
   537  		ListenAddr:  "0.0.0.0:2377",
   538  		RemoteAddrs: []string{""},
   539  	}
   540  	res, _, err = request.DoOnHost(d.Sock(), "/swarm/join", request.Method("POST"), request.JSONBody(req2))
   541  	c.Assert(err, checker.IsNil)
   542  	c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest)
   543  }
   544  
// TestAPISwarmForceNewCluster drains and stops one of two managers, then
// re-inits d1 with ForceNewCluster and verifies the existing service keeps
// running and the recreated cluster accepts a new manager and scale-ups.
func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) {
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)

	instances := 2
	id := d1.CreateService(c, simpleTestService, setInstances(instances))
	waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances)

	// drain d2, all containers should move to d1
	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
		n.Spec.Availability = swarm.NodeAvailabilityDrain
	})
	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances)
	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 0)

	d2.Stop(c)

	// recover a single-node cluster from the loss of d2
	d1.SwarmInit(c, swarm.InitRequest{
		ForceNewCluster: true,
		Spec:            swarm.Spec{},
	})

	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances)

	d3 := s.AddDaemon(c, true, true)
	info := d3.SwarmInfo(c)
	c.Assert(info.ControlAvailable, checker.True)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)

	// the pre-existing service can still be scaled through the new manager
	instances = 4
	d3.UpdateService(c, d3.GetService(c, id), setInstances(instances))

	waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances)
}
   579  
   580  func simpleTestService(s *swarm.Service) {
   581  	ureplicas := uint64(1)
   582  	restartDelay := time.Duration(100 * time.Millisecond)
   583  
   584  	s.Spec = swarm.ServiceSpec{
   585  		TaskTemplate: swarm.TaskSpec{
   586  			ContainerSpec: &swarm.ContainerSpec{
   587  				Image:   "busybox:latest",
   588  				Command: []string{"/bin/top"},
   589  			},
   590  			RestartPolicy: &swarm.RestartPolicy{
   591  				Delay: &restartDelay,
   592  			},
   593  		},
   594  		Mode: swarm.ServiceMode{
   595  			Replicated: &swarm.ReplicatedService{
   596  				Replicas: &ureplicas,
   597  			},
   598  		},
   599  	}
   600  	s.Spec.Name = "top"
   601  }
   602  
   603  func serviceForUpdate(s *swarm.Service) {
   604  	ureplicas := uint64(1)
   605  	restartDelay := time.Duration(100 * time.Millisecond)
   606  
   607  	s.Spec = swarm.ServiceSpec{
   608  		TaskTemplate: swarm.TaskSpec{
   609  			ContainerSpec: &swarm.ContainerSpec{
   610  				Image:   "busybox:latest",
   611  				Command: []string{"/bin/top"},
   612  			},
   613  			RestartPolicy: &swarm.RestartPolicy{
   614  				Delay: &restartDelay,
   615  			},
   616  		},
   617  		Mode: swarm.ServiceMode{
   618  			Replicated: &swarm.ReplicatedService{
   619  				Replicas: &ureplicas,
   620  			},
   621  		},
   622  		UpdateConfig: &swarm.UpdateConfig{
   623  			Parallelism:   2,
   624  			Delay:         4 * time.Second,
   625  			FailureAction: swarm.UpdateFailureActionContinue,
   626  		},
   627  		RollbackConfig: &swarm.UpdateConfig{
   628  			Parallelism:   3,
   629  			Delay:         4 * time.Second,
   630  			FailureAction: swarm.UpdateFailureActionContinue,
   631  		},
   632  	}
   633  	s.Spec.Name = "updatetest"
   634  }
   635  
   636  func setInstances(replicas int) testdaemon.ServiceConstructor {
   637  	ureplicas := uint64(replicas)
   638  	return func(s *swarm.Service) {
   639  		s.Spec.Mode = swarm.ServiceMode{
   640  			Replicated: &swarm.ReplicatedService{
   641  				Replicas: &ureplicas,
   642  			},
   643  		}
   644  	}
   645  }
   646  
   647  func setUpdateOrder(order string) testdaemon.ServiceConstructor {
   648  	return func(s *swarm.Service) {
   649  		if s.Spec.UpdateConfig == nil {
   650  			s.Spec.UpdateConfig = &swarm.UpdateConfig{}
   651  		}
   652  		s.Spec.UpdateConfig.Order = order
   653  	}
   654  }
   655  
   656  func setRollbackOrder(order string) testdaemon.ServiceConstructor {
   657  	return func(s *swarm.Service) {
   658  		if s.Spec.RollbackConfig == nil {
   659  			s.Spec.RollbackConfig = &swarm.UpdateConfig{}
   660  		}
   661  		s.Spec.RollbackConfig.Order = order
   662  	}
   663  }
   664  
   665  func setImage(image string) testdaemon.ServiceConstructor {
   666  	return func(s *swarm.Service) {
   667  		if s.Spec.TaskTemplate.ContainerSpec == nil {
   668  			s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
   669  		}
   670  		s.Spec.TaskTemplate.ContainerSpec.Image = image
   671  	}
   672  }
   673  
   674  func setFailureAction(failureAction string) testdaemon.ServiceConstructor {
   675  	return func(s *swarm.Service) {
   676  		s.Spec.UpdateConfig.FailureAction = failureAction
   677  	}
   678  }
   679  
   680  func setMaxFailureRatio(maxFailureRatio float32) testdaemon.ServiceConstructor {
   681  	return func(s *swarm.Service) {
   682  		s.Spec.UpdateConfig.MaxFailureRatio = maxFailureRatio
   683  	}
   684  }
   685  
   686  func setParallelism(parallelism uint64) testdaemon.ServiceConstructor {
   687  	return func(s *swarm.Service) {
   688  		s.Spec.UpdateConfig.Parallelism = parallelism
   689  	}
   690  }
   691  
   692  func setConstraints(constraints []string) testdaemon.ServiceConstructor {
   693  	return func(s *swarm.Service) {
   694  		if s.Spec.TaskTemplate.Placement == nil {
   695  			s.Spec.TaskTemplate.Placement = &swarm.Placement{}
   696  		}
   697  		s.Spec.TaskTemplate.Placement.Constraints = constraints
   698  	}
   699  }
   700  
   701  func setPlacementPrefs(prefs []swarm.PlacementPreference) testdaemon.ServiceConstructor {
   702  	return func(s *swarm.Service) {
   703  		if s.Spec.TaskTemplate.Placement == nil {
   704  			s.Spec.TaskTemplate.Placement = &swarm.Placement{}
   705  		}
   706  		s.Spec.TaskTemplate.Placement.Preferences = prefs
   707  	}
   708  }
   709  
   710  func setGlobalMode(s *swarm.Service) {
   711  	s.Spec.Mode = swarm.ServiceMode{
   712  		Global: &swarm.GlobalService{},
   713  	}
   714  }
   715  
// checkClusterHealth asserts, from the point of view of every daemon in cl,
// that the cluster converges to exactly managerCount managers and
// workerCount workers, with all nodes ready and active and a single leader
// visible from each manager.
func checkClusterHealth(c *check.C, cl []*daemon.Daemon, managerCount, workerCount int) {
	var totalMCount, totalWCount int

	for _, d := range cl {
		var (
			info swarm.Info
		)

		// check info in a waitAndAssert, because if the cluster doesn't have a leader, `info` will return an error
		checkInfo := func(c *check.C) (interface{}, check.CommentInterface) {
			client := d.NewClientT(c)
			daemonInfo, err := client.Info(context.Background())
			info = daemonInfo.Swarm
			return err, check.Commentf("cluster not ready in time")
		}
		waitAndAssert(c, defaultReconciliationTimeout, checkInfo, checkChecker := checker.IsNil)
		if !info.ControlAvailable {
			// worker nodes can't list cluster nodes; just count them
			totalWCount++
			continue
		}

		var leaderFound bool
		totalMCount++
		var mCount, wCount int

		for _, n := range d.ListNodes(c) {
			// NOTE: both closures below refresh the loop variable n
			// (n = *nn) so the role/manager-status checks further down
			// operate on up-to-date node state.
			waitReady := func(c *check.C) (interface{}, check.CommentInterface) {
				if n.Status.State == swarm.NodeStateReady {
					return true, nil
				}
				nn := d.GetNode(c, n.ID)
				n = *nn
				return n.Status.State == swarm.NodeStateReady, check.Commentf("state of node %s, reported by %s", n.ID, d.NodeID())
			}
			waitAndAssert(c, defaultReconciliationTimeout, waitReady, checker.True)

			waitActive := func(c *check.C) (interface{}, check.CommentInterface) {
				if n.Spec.Availability == swarm.NodeAvailabilityActive {
					return true, nil
				}
				nn := d.GetNode(c, n.ID)
				n = *nn
				return n.Spec.Availability == swarm.NodeAvailabilityActive, check.Commentf("availability of node %s, reported by %s", n.ID, d.NodeID())
			}
			waitAndAssert(c, defaultReconciliationTimeout, waitActive, checker.True)

			if n.Spec.Role == swarm.NodeRoleManager {
				c.Assert(n.ManagerStatus, checker.NotNil, check.Commentf("manager status of node %s (manager), reported by %s", n.ID, d.NodeID()))
				if n.ManagerStatus.Leader {
					leaderFound = true
				}
				mCount++
			} else {
				c.Assert(n.ManagerStatus, checker.IsNil, check.Commentf("manager status of node %s (worker), reported by %s", n.ID, d.NodeID()))
				wCount++
			}
		}
		c.Assert(leaderFound, checker.True, check.Commentf("lack of leader reported by node %s", info.NodeID))
		c.Assert(mCount, checker.Equals, managerCount, check.Commentf("managers count reported by node %s", info.NodeID))
		c.Assert(wCount, checker.Equals, workerCount, check.Commentf("workers count reported by node %s", info.NodeID))
	}
	c.Assert(totalMCount, checker.Equals, managerCount)
	c.Assert(totalWCount, checker.Equals, workerCount)
}
   780  
// TestAPISwarmRestartCluster stops every daemon of a 5-manager/1-worker
// cluster concurrently, restarts them all concurrently, and asserts the
// cluster comes back healthy with the same manager/worker counts.
func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) {
	mCount, wCount := 5, 1

	var nodes []*daemon.Daemon
	for i := 0; i < mCount; i++ {
		manager := s.AddDaemon(c, true, true)
		info := manager.SwarmInfo(c)
		c.Assert(info.ControlAvailable, checker.True)
		c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
		nodes = append(nodes, manager)
	}

	for i := 0; i < wCount; i++ {
		worker := s.AddDaemon(c, true, false)
		info := worker.SwarmInfo(c)
		c.Assert(info.ControlAvailable, checker.False)
		c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
		nodes = append(nodes, worker)
	}

	// stop whole cluster
	{
		var wg sync.WaitGroup
		wg.Add(len(nodes))
		// buffered to len(nodes) so goroutines never block on send
		errs := make(chan error, len(nodes))

		for _, d := range nodes {
			go func(daemon *daemon.Daemon) {
				defer wg.Done()
				if err := daemon.StopWithError(); err != nil {
					errs <- err
				}
				// FIXME(vdemeester) This is duplicated…
				if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
					daemon.Root = filepath.Dir(daemon.Root)
				}
			}(d)
		}
		wg.Wait()
		close(errs)
		for err := range errs {
			c.Assert(err, check.IsNil)
		}
	}

	// start whole cluster
	{
		var wg sync.WaitGroup
		wg.Add(len(nodes))
		errs := make(chan error, len(nodes))

		for _, d := range nodes {
			go func(daemon *daemon.Daemon) {
				defer wg.Done()
				if err := daemon.StartWithError("--iptables=false"); err != nil {
					errs <- err
				}
			}(d)
		}
		wg.Wait()
		close(errs)
		for err := range errs {
			c.Assert(err, check.IsNil)
		}
	}

	checkClusterHealth(c, nodes, mCount, wCount)
}
   849  
   850  func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *check.C) {
   851  	d := s.AddDaemon(c, true, true)
   852  
   853  	instances := 2
   854  	id := d.CreateService(c, simpleTestService, setInstances(instances))
   855  	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
   856  
   857  	service := d.GetService(c, id)
   858  	instances = 5
   859  
   860  	setInstances(instances)(service)
   861  	cli, err := d.NewClient()
   862  	c.Assert(err, checker.IsNil)
   863  	defer cli.Close()
   864  	_, err = cli.ServiceUpdate(context.Background(), service.Spec.Name, service.Version, service.Spec, types.ServiceUpdateOptions{})
   865  	c.Assert(err, checker.IsNil)
   866  	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
   867  }
   868  
   869  // Unlocking an unlocked swarm results in an error
   870  func (s *DockerSwarmSuite) TestAPISwarmUnlockNotLocked(c *check.C) {
   871  	d := s.AddDaemon(c, true, true)
   872  	err := d.SwarmUnlock(swarm.UnlockRequest{UnlockKey: "wrong-key"})
   873  	c.Assert(err, checker.NotNil)
   874  	c.Assert(err.Error(), checker.Contains, "swarm is not locked")
   875  }
   876  
   877  // #29885
   878  func (s *DockerSwarmSuite) TestAPISwarmErrorHandling(c *check.C) {
   879  	ln, err := net.Listen("tcp", fmt.Sprintf(":%d", defaultSwarmPort))
   880  	c.Assert(err, checker.IsNil)
   881  	defer ln.Close()
   882  	d := s.AddDaemon(c, false, false)
   883  	client := d.NewClientT(c)
   884  	_, err = client.SwarmInit(context.Background(), swarm.InitRequest{
   885  		ListenAddr: d.SwarmListenAddr(),
   886  	})
   887  	c.Assert(err, checker.NotNil)
   888  	c.Assert(err.Error(), checker.Contains, "address already in use")
   889  }
   890  
   891  // Test case for 30242, where duplicate networks, with different drivers `bridge` and `overlay`,
   892  // caused both scopes to be `swarm` for `docker network inspect` and `docker network ls`.
   893  // This test makes sure the fixes correctly output scopes instead.
   894  func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *check.C) {
   895  	d := s.AddDaemon(c, true, true)
   896  	cli, err := d.NewClient()
   897  	c.Assert(err, checker.IsNil)
   898  	defer cli.Close()
   899  
   900  	name := "foo"
   901  	networkCreate := types.NetworkCreate{
   902  		CheckDuplicate: false,
   903  	}
   904  
   905  	networkCreate.Driver = "bridge"
   906  
   907  	n1, err := cli.NetworkCreate(context.Background(), name, networkCreate)
   908  	c.Assert(err, checker.IsNil)
   909  
   910  	networkCreate.Driver = "overlay"
   911  
   912  	n2, err := cli.NetworkCreate(context.Background(), name, networkCreate)
   913  	c.Assert(err, checker.IsNil)
   914  
   915  	r1, err := cli.NetworkInspect(context.Background(), n1.ID, types.NetworkInspectOptions{})
   916  	c.Assert(err, checker.IsNil)
   917  	c.Assert(r1.Scope, checker.Equals, "local")
   918  
   919  	r2, err := cli.NetworkInspect(context.Background(), n2.ID, types.NetworkInspectOptions{})
   920  	c.Assert(err, checker.IsNil)
   921  	c.Assert(r2.Scope, checker.Equals, "swarm")
   922  }
   923  
   924  // Test case for 30178
   925  func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *check.C) {
   926  	// Issue #36386 can be a independent one, which is worth further investigation.
   927  	c.Skip("Root cause of Issue #36386 is needed")
   928  	d := s.AddDaemon(c, true, true)
   929  
   930  	out, err := d.Cmd("network", "create", "-d", "overlay", "lb")
   931  	c.Assert(err, checker.IsNil, check.Commentf(out))
   932  
   933  	instances := 1
   934  	d.CreateService(c, simpleTestService, setInstances(instances), func(s *swarm.Service) {
   935  		if s.Spec.TaskTemplate.ContainerSpec == nil {
   936  			s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
   937  		}
   938  		s.Spec.TaskTemplate.ContainerSpec.Healthcheck = &container.HealthConfig{}
   939  		s.Spec.TaskTemplate.Networks = []swarm.NetworkAttachmentConfig{
   940  			{Target: "lb"},
   941  		}
   942  	})
   943  
   944  	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
   945  
   946  	containers := d.ActiveContainers()
   947  
   948  	out, err = d.Cmd("exec", containers[0], "ping", "-c1", "-W3", "top")
   949  	c.Assert(err, checker.IsNil, check.Commentf(out))
   950  }
   951  
// TestSwarmRepeatedRootRotation rotates the swarm's CA trust root four times
// in a row — alternating between a caller-supplied root (odd iterations) and
// a swarm-generated one (even iterations, empty cert/key) — and verifies that
// the cluster and both nodes converge on each new trust root.
func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *check.C) {
	m := s.AddDaemon(c, true, true)
	w := s.AddDaemon(c, true, false)

	info := m.SwarmInfo(c)

	currentTrustRoot := info.Cluster.TLSInfo.TrustRoot

	// rotate multiple times
	for i := 0; i < 4; i++ {
		var err error
		var cert, key []byte
		// On odd iterations generate an external root CA to rotate to; on
		// even iterations cert/key stay nil so the swarm generates its own.
		if i%2 != 0 {
			cert, _, key, err = initca.New(&csr.CertificateRequest{
				CN:         "newRoot",
				KeyRequest: csr.NewBasicKeyRequest(),
				CA:         &csr.CAConfig{Expiry: ca.RootCAExpiration},
			})
			c.Assert(err, checker.IsNil)
		}
		// expectedCert is "" when cert is nil (swarm-generated root).
		expectedCert := string(cert)
		// Bumping ForceRotate triggers a rotation even with empty cert/key.
		m.UpdateSwarm(c, func(s *swarm.Spec) {
			s.CAConfig.SigningCACert = expectedCert
			s.CAConfig.SigningCAKey = string(key)
			s.CAConfig.ForceRotate++
		})

		// poll to make sure update succeeds
		var clusterTLSInfo swarm.TLSInfo
		for j := 0; j < 18; j++ {
			info := m.SwarmInfo(c)

			// the desired CA cert and key is always redacted
			c.Assert(info.Cluster.Spec.CAConfig.SigningCAKey, checker.Equals, "")
			c.Assert(info.Cluster.Spec.CAConfig.SigningCACert, checker.Equals, "")

			clusterTLSInfo = info.Cluster.TLSInfo

			// if root rotation is done and the trust root has changed, we don't have to poll anymore
			if !info.Cluster.RootRotationInProgress && clusterTLSInfo.TrustRoot != currentTrustRoot {
				break
			}

			// root rotation not done
			time.Sleep(250 * time.Millisecond)
		}
		// Only when we supplied the root can we predict the exact trust root.
		if cert != nil {
			c.Assert(clusterTLSInfo.TrustRoot, checker.Equals, expectedCert)
		}
		// could take another second or two for the nodes to trust the new roots after they've all gotten
		// new TLS certificates
		for j := 0; j < 18; j++ {
			mInfo := m.GetNode(c, m.NodeID()).Description.TLSInfo
			wInfo := m.GetNode(c, w.NodeID()).Description.TLSInfo

			if mInfo.TrustRoot == clusterTLSInfo.TrustRoot && wInfo.TrustRoot == clusterTLSInfo.TrustRoot {
				break
			}

			// nodes don't trust root certs yet
			time.Sleep(250 * time.Millisecond)
		}

		// Final hard assertions: both nodes must report the cluster's TLS info.
		c.Assert(m.GetNode(c, m.NodeID()).Description.TLSInfo, checker.DeepEquals, clusterTLSInfo)
		c.Assert(m.GetNode(c, w.NodeID()).Description.TLSInfo, checker.DeepEquals, clusterTLSInfo)
		// Carry the new root forward so the next rotation is detected as a change.
		currentTrustRoot = clusterTLSInfo.TrustRoot
	}
}
  1020  
  1021  func (s *DockerSwarmSuite) TestAPINetworkInspectWithScope(c *check.C) {
  1022  	d := s.AddDaemon(c, true, true)
  1023  
  1024  	name := "test-scoped-network"
  1025  	ctx := context.Background()
  1026  	apiclient, err := d.NewClient()
  1027  	assert.NilError(c, err)
  1028  
  1029  	resp, err := apiclient.NetworkCreate(ctx, name, types.NetworkCreate{Driver: "overlay"})
  1030  	assert.NilError(c, err)
  1031  
  1032  	network, err := apiclient.NetworkInspect(ctx, name, types.NetworkInspectOptions{})
  1033  	assert.NilError(c, err)
  1034  	assert.Check(c, is.Equal("swarm", network.Scope))
  1035  	assert.Check(c, is.Equal(resp.ID, network.ID))
  1036  
  1037  	_, err = apiclient.NetworkInspect(ctx, name, types.NetworkInspectOptions{Scope: "local"})
  1038  	assert.Check(c, client.IsErrNotFound(err))
  1039  }