github.com/docker/docker-ce@v17.12.1-ce-rc2+incompatible/components/engine/integration-cli/docker_api_swarm_test.go (about)

     1  // +build !windows
     2  
     3  package main
     4  
     5  import (
     6  	"encoding/json"
     7  	"fmt"
     8  	"io/ioutil"
     9  	"net"
    10  	"net/http"
    11  	"net/url"
    12  	"os"
    13  	"path/filepath"
    14  	"strings"
    15  	"sync"
    16  	"time"
    17  
    18  	"github.com/cloudflare/cfssl/csr"
    19  	"github.com/cloudflare/cfssl/helpers"
    20  	"github.com/cloudflare/cfssl/initca"
    21  	"github.com/docker/docker/api/types"
    22  	"github.com/docker/docker/api/types/container"
    23  	"github.com/docker/docker/api/types/swarm"
    24  	"github.com/docker/docker/integration-cli/checker"
    25  	"github.com/docker/docker/integration-cli/daemon"
    26  	"github.com/docker/docker/integration-cli/request"
    27  	"github.com/docker/swarmkit/ca"
    28  	"github.com/go-check/check"
    29  	"golang.org/x/net/context"
    30  )
    31  
// defaultReconciliationTimeout is the maximum time waitAndAssert polls for
// the swarm cluster to converge on an expected state before failing a test.
var defaultReconciliationTimeout = 30 * time.Second
    33  
// TestAPISwarmInit exercises the basic swarm lifecycle: init on one daemon,
// join/leave/rejoin on a second, and verification that both nodes restore
// their previous swarm role and state after a daemon restart.
func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) {
	// todo: should find a better way to verify that components are running than /info
	// d1 initializes the swarm: it must be a manager (ControlAvailable),
	// active, and with no root CA rotation in progress.
	d1 := s.AddDaemon(c, true, true)
	info, err := d1.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.ControlAvailable, checker.True)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
	c.Assert(info.Cluster.RootRotationInProgress, checker.False)

	// d2 joins as a worker: active, but without manager control.
	d2 := s.AddDaemon(c, true, false)
	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.ControlAvailable, checker.False)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)

	// Leaving cluster
	c.Assert(d2.Leave(false), checker.IsNil)

	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.ControlAvailable, checker.False)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	// Rejoin with the current worker token; the node becomes active again.
	c.Assert(d2.Join(swarm.JoinRequest{JoinToken: d1.JoinTokens(c).Worker, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil)

	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.ControlAvailable, checker.False)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)

	// Current state restoring after restarts
	d1.Stop(c)
	d2.Stop(c)

	d1.Start(c)
	d2.Start(c)

	// Both nodes must come back in the same role they had before the restart.
	info, err = d1.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.ControlAvailable, checker.True)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)

	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.ControlAvailable, checker.False)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
}
    81  
// TestAPISwarmJoinToken verifies join-token validation: joining without a
// token or with a garbage token is rejected, a valid worker token works,
// rotating tokens invalidates the old one, and updating the swarm spec
// without rotating leaves the existing token usable.
func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *check.C) {
	d1 := s.AddDaemon(c, false, false)
	c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil)

	// todo: error message differs depending if some components of token are valid

	// Joining with no token at all must be rejected and leave d2 inactive.
	d2 := s.AddDaemon(c, false, false)
	err := d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.ListenAddr}})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "join token is necessary")
	info, err := d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	// A syntactically invalid token is rejected with a different message.
	err = d2.Join(swarm.JoinRequest{JoinToken: "foobaz", RemoteAddrs: []string{d1.ListenAddr}})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "invalid join token")
	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	workerToken := d1.JoinTokens(c).Worker

	// The genuine worker token lets d2 join; leave again to reset state.
	c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil)
	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
	c.Assert(d2.Leave(false), checker.IsNil)
	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	// change tokens
	d1.RotateTokens(c)

	// The pre-rotation token must no longer be accepted.
	err = d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "join token is necessary")
	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	workerToken = d1.JoinTokens(c).Worker

	// The post-rotation token works; leave again for the next phase.
	c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil)
	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
	c.Assert(d2.Leave(false), checker.IsNil)
	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	// change spec, don't change tokens
	d1.UpdateSwarm(c, func(s *swarm.Spec) {})

	// A token is still required after a no-op spec update…
	err = d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.ListenAddr}})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "join token is necessary")
	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	// …but the unrotated token remains valid.
	c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil)
	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
	c.Assert(d2.Leave(false), checker.IsNil)
	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
}
   154  
// TestUpdateSwarmAddExternalCA updates the swarm spec with two external CFSSL
// CA entries (one with an explicit CA cert, one without) and verifies the
// cluster spec reports both entries with their CACert fields preserved.
func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *check.C) {
	d1 := s.AddDaemon(c, false, false)
	c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil)
	d1.UpdateSwarm(c, func(s *swarm.Spec) {
		s.CAConfig.ExternalCAs = []*swarm.ExternalCA{
			{
				Protocol: swarm.ExternalCAProtocolCFSSL,
				URL:      "https://thishasnoca.org",
			},
			{
				Protocol: swarm.ExternalCAProtocolCFSSL,
				URL:      "https://thishasacacert.org",
				CACert:   "cacert",
			},
		}
	})
	info, err := d1.SwarmInfo()
	c.Assert(err, checker.IsNil)
	// The entry without a CACert stays empty; the explicit cert round-trips.
	c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs, checker.HasLen, 2)
	c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs[0].CACert, checker.Equals, "")
	c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs[1].CACert, checker.Equals, "cacert")
}
   177  
   178  func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *check.C) {
   179  	d1 := s.AddDaemon(c, true, true)
   180  	d2 := s.AddDaemon(c, false, false)
   181  	splitToken := strings.Split(d1.JoinTokens(c).Worker, "-")
   182  	splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e"
   183  	replacementToken := strings.Join(splitToken, "-")
   184  	err := d2.Join(swarm.JoinRequest{JoinToken: replacementToken, RemoteAddrs: []string{d1.ListenAddr}})
   185  	c.Assert(err, checker.NotNil)
   186  	c.Assert(err.Error(), checker.Contains, "remote CA does not match fingerprint")
   187  }
   188  
// TestAPISwarmPromoteDemote promotes a worker to manager, demotes it back,
// waits for the role change to land in the node's TLS certificate, verifies
// that demoting the only remaining manager is rejected, and finally
// re-promotes the demoted node.
func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *check.C) {
	d1 := s.AddDaemon(c, false, false)
	c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil)
	d2 := s.AddDaemon(c, true, false)

	// d2 starts as a plain worker.
	info, err := d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.ControlAvailable, checker.False)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)

	// Promote d2 and wait until it actually gains manager control.
	d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) {
		n.Spec.Role = swarm.NodeRoleManager
	})

	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True)

	// Demote d2 back to worker.
	d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) {
		n.Spec.Role = swarm.NodeRoleWorker
	})

	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.False)

	// Wait for the role to change to worker in the cert. This is partially
	// done because it's something worth testing in its own right, and
	// partially because changing the role from manager to worker and then
	// back to manager quickly might cause the node to pause for awhile
	// while waiting for the role to change to worker, and the test can
	// time out during this interval.
	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
		certBytes, err := ioutil.ReadFile(filepath.Join(d2.Folder, "root", "swarm", "certificates", "swarm-node.crt"))
		if err != nil {
			return "", check.Commentf("error: %v", err)
		}
		certs, err := helpers.ParseCertificatesPEM(certBytes)
		if err == nil && len(certs) > 0 && len(certs[0].Subject.OrganizationalUnit) > 0 {
			// The node's role is encoded as the cert's OU.
			return certs[0].Subject.OrganizationalUnit[0], nil
		}
		return "", check.Commentf("could not get organizational unit from certificate")
	}, checker.Equals, "swarm-worker")

	// Demoting last node should fail
	node := d1.GetNode(c, d1.NodeID)
	node.Spec.Role = swarm.NodeRoleWorker
	url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index)
	res, body, err := request.DoOnHost(d1.Sock(), url, request.Method("POST"), request.JSONBody(node.Spec))
	c.Assert(err, checker.IsNil)
	b, err := request.ReadBody(body)
	c.Assert(err, checker.IsNil)
	c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest, check.Commentf("output: %q", string(b)))

	// The warning specific to demoting the last manager is best-effort and
	// won't appear until the Role field of the demoted manager has been
	// updated.
	// Yes, I know this looks silly, but checker.Matches is broken, since
	// it anchors the regexp contrary to the documentation, and this makes
	// it impossible to match something that includes a line break.
	if !strings.Contains(string(b), "last manager of the swarm") {
		c.Assert(string(b), checker.Contains, "this would result in a loss of quorum")
	}
	// d1 must still be an active manager after the rejected demotion.
	info, err = d1.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
	c.Assert(info.ControlAvailable, checker.True)

	// Promote already demoted node
	d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) {
		n.Spec.Role = swarm.NodeRoleManager
	})

	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True)
}
   260  
   261  func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *check.C) {
   262  	// add three managers, one of these is leader
   263  	d1 := s.AddDaemon(c, true, true)
   264  	d2 := s.AddDaemon(c, true, true)
   265  	d3 := s.AddDaemon(c, true, true)
   266  
   267  	// start a service by hitting each of the 3 managers
   268  	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
   269  		s.Spec.Name = "test1"
   270  	})
   271  	d2.CreateService(c, simpleTestService, func(s *swarm.Service) {
   272  		s.Spec.Name = "test2"
   273  	})
   274  	d3.CreateService(c, simpleTestService, func(s *swarm.Service) {
   275  		s.Spec.Name = "test3"
   276  	})
   277  
   278  	// 3 services should be started now, because the requests were proxied to leader
   279  	// query each node and make sure it returns 3 services
   280  	for _, d := range []*daemon.Swarm{d1, d2, d3} {
   281  		services := d.ListServices(c)
   282  		c.Assert(services, checker.HasLen, 3)
   283  	}
   284  }
   285  
// TestAPISwarmLeaderElection stops the initial leader of a 3-manager cluster,
// waits for a new leader to be elected among the remaining two, then restarts
// the old leader and verifies the newly elected leader is not displaced.
func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) {
	// Create 3 nodes
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)
	d3 := s.AddDaemon(c, true, true)

	// assert that the first node we made is the leader, and the other two are followers
	c.Assert(d1.GetNode(c, d1.NodeID).ManagerStatus.Leader, checker.True)
	c.Assert(d1.GetNode(c, d2.NodeID).ManagerStatus.Leader, checker.False)
	c.Assert(d1.GetNode(c, d3.NodeID).ManagerStatus.Leader, checker.False)

	d1.Stop(c)

	var (
		leader    *daemon.Swarm   // keep track of leader
		followers []*daemon.Swarm // keep track of followers
	)
	// checkLeader returns a poll function that classifies the given nodes
	// into the shared leader/followers variables above; it reports true
	// once exactly one node claims leadership.
	checkLeader := func(nodes ...*daemon.Swarm) checkF {
		return func(c *check.C) (interface{}, check.CommentInterface) {
			// clear these out before each run
			leader = nil
			followers = nil
			for _, d := range nodes {
				if d.GetNode(c, d.NodeID).ManagerStatus.Leader {
					leader = d
				} else {
					followers = append(followers, d)
				}
			}

			if leader == nil {
				return false, check.Commentf("no leader elected")
			}

			return true, check.Commentf("elected %v", leader.ID())
		}
	}

	// wait for an election to occur
	waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d2, d3), checker.True)

	// assert that we have a new leader
	c.Assert(leader, checker.NotNil)

	// Keep track of the current leader, since we want that to be chosen.
	stableleader := leader

	// add the d1, the initial leader, back
	d1.Start(c)

	// TODO(stevvooe): may need to wait for rejoin here

	// wait for possible election
	waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d1, d2, d3), checker.True)
	// pick out the leader and the followers again

	// verify that we still only have 1 leader and 2 followers
	c.Assert(leader, checker.NotNil)
	c.Assert(followers, checker.HasLen, 2)
	// and that after we added d1 back, the leader hasn't changed
	c.Assert(leader.NodeID, checker.Equals, stableleader.NodeID)
}
   348  
   349  func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) {
   350  	d1 := s.AddDaemon(c, true, true)
   351  	d2 := s.AddDaemon(c, true, true)
   352  	d3 := s.AddDaemon(c, true, true)
   353  
   354  	d1.CreateService(c, simpleTestService)
   355  
   356  	d2.Stop(c)
   357  
   358  	// make sure there is a leader
   359  	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)
   360  
   361  	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
   362  		s.Spec.Name = "top1"
   363  	})
   364  
   365  	d3.Stop(c)
   366  
   367  	var service swarm.Service
   368  	simpleTestService(&service)
   369  	service.Spec.Name = "top2"
   370  	cli, err := d1.NewClient()
   371  	c.Assert(err, checker.IsNil)
   372  	defer cli.Close()
   373  
   374  	// d1 will eventually step down from leader because there is no longer an active quorum, wait for that to happen
   375  	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
   376  		_, err = cli.ServiceCreate(context.Background(), service.Spec, types.ServiceCreateOptions{})
   377  		return err.Error(), nil
   378  	}, checker.Contains, "Make sure more than half of the managers are online.")
   379  
   380  	d2.Start(c)
   381  
   382  	// make sure there is a leader
   383  	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)
   384  
   385  	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
   386  		s.Spec.Name = "top3"
   387  	})
   388  }
   389  
// TestAPISwarmLeaveRemovesContainer checks that force-leaving the swarm
// removes all task containers while a container started outside the swarm
// (plain `docker run`) survives.
func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *check.C) {
	d := s.AddDaemon(c, true, true)

	instances := 2
	d.CreateService(c, simpleTestService, setInstances(instances))

	// Start one non-swarm container alongside the service tasks.
	id, err := d.Cmd("run", "-d", "busybox", "top")
	c.Assert(err, checker.IsNil)
	id = strings.TrimSpace(id)

	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances+1)

	// A manager cannot leave without force; with force it must succeed.
	c.Assert(d.Leave(false), checker.NotNil)
	c.Assert(d.Leave(true), checker.IsNil)

	// Only the non-swarm container should remain.
	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)

	id2, err := d.Cmd("ps", "-q")
	c.Assert(err, checker.IsNil)
	c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2))
}
   411  
   412  // #23629
// TestAPISwarmLeaveOnPendingJoin (#23629) checks that a node stuck in the
// pending state after a timed-out join to an unreachable address can still
// force-leave, and that its pre-existing container is untouched.
func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *check.C) {
	testRequires(c, Network)
	s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, false, false)

	id, err := d2.Cmd("run", "-d", "busybox", "top")
	c.Assert(err, checker.IsNil)
	id = strings.TrimSpace(id)

	// Join an unreachable address so the node ends up pending.
	err = d2.Join(swarm.JoinRequest{
		RemoteAddrs: []string{"123.123.123.123:1234"},
	})
	c.Assert(err, check.NotNil)
	c.Assert(err.Error(), checker.Contains, "Timeout was reached")

	info, err := d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending)

	// Force-leave must work even from the pending state.
	c.Assert(d2.Leave(true), checker.IsNil)

	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 1)

	id2, err := d2.Cmd("ps", "-q")
	c.Assert(err, checker.IsNil)
	c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2))
}
   440  
   441  // #23705
// TestAPISwarmRestoreOnPendingJoin (#23705) checks that a daemon restarted
// while its join is still pending comes back up in the inactive state rather
// than remaining stuck in pending.
func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *check.C) {
	testRequires(c, Network)
	d := s.AddDaemon(c, false, false)
	// Join an unreachable address so the node ends up pending.
	err := d.Join(swarm.JoinRequest{
		RemoteAddrs: []string{"123.123.123.123:1234"},
	})
	c.Assert(err, check.NotNil)
	c.Assert(err.Error(), checker.Contains, "Timeout was reached")

	waitAndAssert(c, defaultReconciliationTimeout, d.CheckLocalNodeState, checker.Equals, swarm.LocalNodeStatePending)

	d.Stop(c)
	d.Start(c)

	info, err := d.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
}
   460  
// TestAPISwarmManagerRestore verifies that manager state (the created
// service) survives graceful restarts of each of three managers in turn,
// and also an ungraceful kill/restart of the third manager.
func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *check.C) {
	d1 := s.AddDaemon(c, true, true)

	instances := 2
	id := d1.CreateService(c, simpleTestService, setInstances(instances))

	// The service must still be resolvable after a clean restart.
	d1.GetService(c, id)
	d1.Stop(c)
	d1.Start(c)
	d1.GetService(c, id)

	d2 := s.AddDaemon(c, true, true)
	d2.GetService(c, id)
	d2.Stop(c)
	d2.Start(c)
	d2.GetService(c, id)

	d3 := s.AddDaemon(c, true, true)
	d3.GetService(c, id)
	d3.Stop(c)
	d3.Start(c)
	d3.GetService(c, id)

	// NOTE(review): the error return of Kill() is ignored here — consider
	// asserting on it so a failed kill doesn't silently skip the scenario.
	d3.Kill()
	time.Sleep(1 * time.Second) // time to handle signal
	d3.Start(c)
	d3.GetService(c, id)
}
   489  
// TestAPISwarmScaleNoRollingUpdate checks that scaling a service up does not
// restart its existing tasks: every container present before the scale-up
// must still be present afterwards.
func (s *DockerSwarmSuite) TestAPISwarmScaleNoRollingUpdate(c *check.C) {
	d := s.AddDaemon(c, true, true)

	instances := 2
	id := d.CreateService(c, simpleTestService, setInstances(instances))

	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
	containers := d.ActiveContainers()
	instances = 4
	d.UpdateService(c, d.GetService(c, id), setInstances(instances))
	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
	containers2 := d.ActiveContainers()

	// For each original container, scan the scaled-up set; the labeled
	// continue jumps to the next original container on the first match.
loop0:
	for _, c1 := range containers {
		for _, c2 := range containers2 {
			if c1 == c2 {
				continue loop0
			}
		}
		c.Errorf("container %v not found in new set %#v", c1, containers2)
	}
}
   513  
   514  func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *check.C) {
   515  	d := s.AddDaemon(c, false, false)
   516  	req := swarm.InitRequest{
   517  		ListenAddr: "",
   518  	}
   519  	res, _, err := request.DoOnHost(d.Sock(), "/swarm/init", request.Method("POST"), request.JSONBody(req))
   520  	c.Assert(err, checker.IsNil)
   521  	c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest)
   522  
   523  	req2 := swarm.JoinRequest{
   524  		ListenAddr:  "0.0.0.0:2377",
   525  		RemoteAddrs: []string{""},
   526  	}
   527  	res, _, err = request.DoOnHost(d.Sock(), "/swarm/join", request.Method("POST"), request.JSONBody(req2))
   528  	c.Assert(err, checker.IsNil)
   529  	c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest)
   530  }
   531  
// TestAPISwarmForceNewCluster drains and stops one of two managers, then
// re-inits the surviving manager with ForceNewCluster and verifies the
// existing service keeps running and new nodes can join and scale it.
func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) {
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)

	instances := 2
	id := d1.CreateService(c, simpleTestService, setInstances(instances))
	waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances)

	// drain d2, all containers should move to d1
	d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) {
		n.Spec.Availability = swarm.NodeAvailabilityDrain
	})
	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances)
	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 0)

	d2.Stop(c)

	// Re-form a single-node cluster from d1's existing state.
	c.Assert(d1.Init(swarm.InitRequest{
		ForceNewCluster: true,
		Spec:            swarm.Spec{},
	}), checker.IsNil)

	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances)

	// A fresh manager can join the forced cluster and still sees the service.
	d3 := s.AddDaemon(c, true, true)
	info, err := d3.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.ControlAvailable, checker.True)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)

	instances = 4
	d3.UpdateService(c, d3.GetService(c, id), setInstances(instances))

	waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances)
}
   567  
   568  func simpleTestService(s *swarm.Service) {
   569  	ureplicas := uint64(1)
   570  	restartDelay := time.Duration(100 * time.Millisecond)
   571  
   572  	s.Spec = swarm.ServiceSpec{
   573  		TaskTemplate: swarm.TaskSpec{
   574  			ContainerSpec: &swarm.ContainerSpec{
   575  				Image:   "busybox:latest",
   576  				Command: []string{"/bin/top"},
   577  			},
   578  			RestartPolicy: &swarm.RestartPolicy{
   579  				Delay: &restartDelay,
   580  			},
   581  		},
   582  		Mode: swarm.ServiceMode{
   583  			Replicated: &swarm.ReplicatedService{
   584  				Replicas: &ureplicas,
   585  			},
   586  		},
   587  	}
   588  	s.Spec.Name = "top"
   589  }
   590  
   591  func serviceForUpdate(s *swarm.Service) {
   592  	ureplicas := uint64(1)
   593  	restartDelay := time.Duration(100 * time.Millisecond)
   594  
   595  	s.Spec = swarm.ServiceSpec{
   596  		TaskTemplate: swarm.TaskSpec{
   597  			ContainerSpec: &swarm.ContainerSpec{
   598  				Image:   "busybox:latest",
   599  				Command: []string{"/bin/top"},
   600  			},
   601  			RestartPolicy: &swarm.RestartPolicy{
   602  				Delay: &restartDelay,
   603  			},
   604  		},
   605  		Mode: swarm.ServiceMode{
   606  			Replicated: &swarm.ReplicatedService{
   607  				Replicas: &ureplicas,
   608  			},
   609  		},
   610  		UpdateConfig: &swarm.UpdateConfig{
   611  			Parallelism:   2,
   612  			Delay:         4 * time.Second,
   613  			FailureAction: swarm.UpdateFailureActionContinue,
   614  		},
   615  		RollbackConfig: &swarm.UpdateConfig{
   616  			Parallelism:   3,
   617  			Delay:         4 * time.Second,
   618  			FailureAction: swarm.UpdateFailureActionContinue,
   619  		},
   620  	}
   621  	s.Spec.Name = "updatetest"
   622  }
   623  
   624  func setInstances(replicas int) daemon.ServiceConstructor {
   625  	ureplicas := uint64(replicas)
   626  	return func(s *swarm.Service) {
   627  		s.Spec.Mode = swarm.ServiceMode{
   628  			Replicated: &swarm.ReplicatedService{
   629  				Replicas: &ureplicas,
   630  			},
   631  		}
   632  	}
   633  }
   634  
   635  func setUpdateOrder(order string) daemon.ServiceConstructor {
   636  	return func(s *swarm.Service) {
   637  		if s.Spec.UpdateConfig == nil {
   638  			s.Spec.UpdateConfig = &swarm.UpdateConfig{}
   639  		}
   640  		s.Spec.UpdateConfig.Order = order
   641  	}
   642  }
   643  
   644  func setRollbackOrder(order string) daemon.ServiceConstructor {
   645  	return func(s *swarm.Service) {
   646  		if s.Spec.RollbackConfig == nil {
   647  			s.Spec.RollbackConfig = &swarm.UpdateConfig{}
   648  		}
   649  		s.Spec.RollbackConfig.Order = order
   650  	}
   651  }
   652  
   653  func setImage(image string) daemon.ServiceConstructor {
   654  	return func(s *swarm.Service) {
   655  		if s.Spec.TaskTemplate.ContainerSpec == nil {
   656  			s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
   657  		}
   658  		s.Spec.TaskTemplate.ContainerSpec.Image = image
   659  	}
   660  }
   661  
   662  func setFailureAction(failureAction string) daemon.ServiceConstructor {
   663  	return func(s *swarm.Service) {
   664  		s.Spec.UpdateConfig.FailureAction = failureAction
   665  	}
   666  }
   667  
   668  func setMaxFailureRatio(maxFailureRatio float32) daemon.ServiceConstructor {
   669  	return func(s *swarm.Service) {
   670  		s.Spec.UpdateConfig.MaxFailureRatio = maxFailureRatio
   671  	}
   672  }
   673  
   674  func setParallelism(parallelism uint64) daemon.ServiceConstructor {
   675  	return func(s *swarm.Service) {
   676  		s.Spec.UpdateConfig.Parallelism = parallelism
   677  	}
   678  }
   679  
   680  func setConstraints(constraints []string) daemon.ServiceConstructor {
   681  	return func(s *swarm.Service) {
   682  		if s.Spec.TaskTemplate.Placement == nil {
   683  			s.Spec.TaskTemplate.Placement = &swarm.Placement{}
   684  		}
   685  		s.Spec.TaskTemplate.Placement.Constraints = constraints
   686  	}
   687  }
   688  
   689  func setPlacementPrefs(prefs []swarm.PlacementPreference) daemon.ServiceConstructor {
   690  	return func(s *swarm.Service) {
   691  		if s.Spec.TaskTemplate.Placement == nil {
   692  			s.Spec.TaskTemplate.Placement = &swarm.Placement{}
   693  		}
   694  		s.Spec.TaskTemplate.Placement.Preferences = prefs
   695  	}
   696  }
   697  
   698  func setGlobalMode(s *swarm.Service) {
   699  	s.Spec.Mode = swarm.ServiceMode{
   700  		Global: &swarm.GlobalService{},
   701  	}
   702  }
   703  
// checkClusterHealth asserts that the cluster formed by cl is healthy: every
// node eventually reports swarm info, every node is Ready and Active, each
// manager sees exactly one leader plus the expected manager/worker counts,
// and the totals across all daemons match managerCount/workerCount.
func checkClusterHealth(c *check.C, cl []*daemon.Swarm, managerCount, workerCount int) {
	var totalMCount, totalWCount int

	for _, d := range cl {
		var (
			info swarm.Info
			err  error
		)

		// check info in a waitAndAssert, because if the cluster doesn't have a leader, `info` will return an error
		checkInfo := func(c *check.C) (interface{}, check.CommentInterface) {
			info, err = d.SwarmInfo()
			return err, check.Commentf("cluster not ready in time")
		}
		waitAndAssert(c, defaultReconciliationTimeout, checkInfo, checkerIsNilPlaceholder)
		if !info.ControlAvailable {
			totalWCount++
			continue
		}

		var leaderFound bool
		totalMCount++
		var mCount, wCount int

		// Only managers can list nodes; validate the cluster as seen by d.
		for _, n := range d.ListNodes(c) {
			// Poll until the node is Ready, refreshing n from the API each
			// time (the closure mutates the loop variable n in place).
			waitReady := func(c *check.C) (interface{}, check.CommentInterface) {
				if n.Status.State == swarm.NodeStateReady {
					return true, nil
				}
				nn := d.GetNode(c, n.ID)
				n = *nn
				return n.Status.State == swarm.NodeStateReady, check.Commentf("state of node %s, reported by %s", n.ID, d.Info.NodeID)
			}
			waitAndAssert(c, defaultReconciliationTimeout, waitReady, checker.True)

			// Likewise poll until the node's availability is Active.
			waitActive := func(c *check.C) (interface{}, check.CommentInterface) {
				if n.Spec.Availability == swarm.NodeAvailabilityActive {
					return true, nil
				}
				nn := d.GetNode(c, n.ID)
				n = *nn
				return n.Spec.Availability == swarm.NodeAvailabilityActive, check.Commentf("availability of node %s, reported by %s", n.ID, d.Info.NodeID)
			}
			waitAndAssert(c, defaultReconciliationTimeout, waitActive, checker.True)

			// Managers must carry a ManagerStatus; workers must not.
			if n.Spec.Role == swarm.NodeRoleManager {
				c.Assert(n.ManagerStatus, checker.NotNil, check.Commentf("manager status of node %s (manager), reported by %s", n.ID, d.Info.NodeID))
				if n.ManagerStatus.Leader {
					leaderFound = true
				}
				mCount++
			} else {
				c.Assert(n.ManagerStatus, checker.IsNil, check.Commentf("manager status of node %s (worker), reported by %s", n.ID, d.Info.NodeID))
				wCount++
			}
		}
		c.Assert(leaderFound, checker.True, check.Commentf("lack of leader reported by node %s", info.NodeID))
		c.Assert(mCount, checker.Equals, managerCount, check.Commentf("managers count reported by node %s", info.NodeID))
		c.Assert(wCount, checker.Equals, workerCount, check.Commentf("workers count reported by node %s", info.NodeID))
	}
	c.Assert(totalMCount, checker.Equals, managerCount)
	c.Assert(totalWCount, checker.Equals, workerCount)
}
   767  
   768  func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) {
   769  	mCount, wCount := 5, 1
   770  
   771  	var nodes []*daemon.Swarm
   772  	for i := 0; i < mCount; i++ {
   773  		manager := s.AddDaemon(c, true, true)
   774  		info, err := manager.SwarmInfo()
   775  		c.Assert(err, checker.IsNil)
   776  		c.Assert(info.ControlAvailable, checker.True)
   777  		c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
   778  		nodes = append(nodes, manager)
   779  	}
   780  
   781  	for i := 0; i < wCount; i++ {
   782  		worker := s.AddDaemon(c, true, false)
   783  		info, err := worker.SwarmInfo()
   784  		c.Assert(err, checker.IsNil)
   785  		c.Assert(info.ControlAvailable, checker.False)
   786  		c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
   787  		nodes = append(nodes, worker)
   788  	}
   789  
   790  	// stop whole cluster
   791  	{
   792  		var wg sync.WaitGroup
   793  		wg.Add(len(nodes))
   794  		errs := make(chan error, len(nodes))
   795  
   796  		for _, d := range nodes {
   797  			go func(daemon *daemon.Swarm) {
   798  				defer wg.Done()
   799  				if err := daemon.StopWithError(); err != nil {
   800  					errs <- err
   801  				}
   802  				// FIXME(vdemeester) This is duplicated…
   803  				if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
   804  					daemon.Root = filepath.Dir(daemon.Root)
   805  				}
   806  			}(d)
   807  		}
   808  		wg.Wait()
   809  		close(errs)
   810  		for err := range errs {
   811  			c.Assert(err, check.IsNil)
   812  		}
   813  	}
   814  
   815  	// start whole cluster
   816  	{
   817  		var wg sync.WaitGroup
   818  		wg.Add(len(nodes))
   819  		errs := make(chan error, len(nodes))
   820  
   821  		for _, d := range nodes {
   822  			go func(daemon *daemon.Swarm) {
   823  				defer wg.Done()
   824  				if err := daemon.StartWithError("--iptables=false"); err != nil {
   825  					errs <- err
   826  				}
   827  			}(d)
   828  		}
   829  		wg.Wait()
   830  		close(errs)
   831  		for err := range errs {
   832  			c.Assert(err, check.IsNil)
   833  		}
   834  	}
   835  
   836  	checkClusterHealth(c, nodes, mCount, wCount)
   837  }
   838  
   839  func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *check.C) {
   840  	d := s.AddDaemon(c, true, true)
   841  
   842  	instances := 2
   843  	id := d.CreateService(c, simpleTestService, setInstances(instances))
   844  	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
   845  
   846  	service := d.GetService(c, id)
   847  	instances = 5
   848  
   849  	setInstances(instances)(service)
   850  	cli, err := d.NewClient()
   851  	c.Assert(err, checker.IsNil)
   852  	defer cli.Close()
   853  	_, err = cli.ServiceUpdate(context.Background(), service.Spec.Name, service.Version, service.Spec, types.ServiceUpdateOptions{})
   854  	c.Assert(err, checker.IsNil)
   855  	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
   856  }
   857  
   858  // Unlocking an unlocked swarm results in an error
   859  func (s *DockerSwarmSuite) TestAPISwarmUnlockNotLocked(c *check.C) {
   860  	d := s.AddDaemon(c, true, true)
   861  	err := d.Unlock(swarm.UnlockRequest{UnlockKey: "wrong-key"})
   862  	c.Assert(err, checker.NotNil)
   863  	c.Assert(err.Error(), checker.Contains, "swarm is not locked")
   864  }
   865  
   866  // #29885
   867  func (s *DockerSwarmSuite) TestAPISwarmErrorHandling(c *check.C) {
   868  	ln, err := net.Listen("tcp", fmt.Sprintf(":%d", defaultSwarmPort))
   869  	c.Assert(err, checker.IsNil)
   870  	defer ln.Close()
   871  	d := s.AddDaemon(c, false, false)
   872  	err = d.Init(swarm.InitRequest{})
   873  	c.Assert(err, checker.NotNil)
   874  	c.Assert(err.Error(), checker.Contains, "address already in use")
   875  }
   876  
   877  // Test case for 30242, where duplicate networks, with different drivers `bridge` and `overlay`,
   878  // caused both scopes to be `swarm` for `docker network inspect` and `docker network ls`.
   879  // This test makes sure the fixes correctly output scopes instead.
   880  func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *check.C) {
   881  	d := s.AddDaemon(c, true, true)
   882  	cli, err := d.NewClient()
   883  	c.Assert(err, checker.IsNil)
   884  	defer cli.Close()
   885  
   886  	name := "foo"
   887  	networkCreate := types.NetworkCreate{
   888  		CheckDuplicate: false,
   889  	}
   890  
   891  	networkCreate.Driver = "bridge"
   892  
   893  	n1, err := cli.NetworkCreate(context.Background(), name, networkCreate)
   894  	c.Assert(err, checker.IsNil)
   895  
   896  	networkCreate.Driver = "overlay"
   897  
   898  	n2, err := cli.NetworkCreate(context.Background(), name, networkCreate)
   899  	c.Assert(err, checker.IsNil)
   900  
   901  	r1, err := cli.NetworkInspect(context.Background(), n1.ID, types.NetworkInspectOptions{})
   902  	c.Assert(err, checker.IsNil)
   903  	c.Assert(r1.Scope, checker.Equals, "local")
   904  
   905  	r2, err := cli.NetworkInspect(context.Background(), n2.ID, types.NetworkInspectOptions{})
   906  	c.Assert(err, checker.IsNil)
   907  	c.Assert(r2.Scope, checker.Equals, "swarm")
   908  }
   909  
   910  // Test case for 30178
   911  func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *check.C) {
   912  	d := s.AddDaemon(c, true, true)
   913  
   914  	out, err := d.Cmd("network", "create", "-d", "overlay", "lb")
   915  	c.Assert(err, checker.IsNil, check.Commentf(out))
   916  
   917  	instances := 1
   918  	d.CreateService(c, simpleTestService, setInstances(instances), func(s *swarm.Service) {
   919  		if s.Spec.TaskTemplate.ContainerSpec == nil {
   920  			s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
   921  		}
   922  		s.Spec.TaskTemplate.ContainerSpec.Healthcheck = &container.HealthConfig{}
   923  		s.Spec.TaskTemplate.Networks = []swarm.NetworkAttachmentConfig{
   924  			{Target: "lb"},
   925  		}
   926  	})
   927  
   928  	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
   929  
   930  	containers := d.ActiveContainers()
   931  
   932  	out, err = d.Cmd("exec", containers[0], "ping", "-c1", "-W3", "top")
   933  	c.Assert(err, checker.IsNil, check.Commentf(out))
   934  }
   935  
// TestSwarmRepeatedRootRotation rotates the swarm root CA four times in a
// row — alternating between a caller-supplied root (generated with cfssl's
// initca) and a swarm-generated one — and after each rotation verifies that
// the cluster's trust root changed and that both the manager and the worker
// node converge to trusting it.
func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *check.C) {
	m := s.AddDaemon(c, true, true)
	w := s.AddDaemon(c, true, false)

	info, err := m.SwarmInfo()
	c.Assert(err, checker.IsNil)

	// Trust root before any rotation; updated at the end of each iteration.
	currentTrustRoot := info.Cluster.TLSInfo.TrustRoot

	// rotate multiple times
	for i := 0; i < 4; i++ {
		var cert, key []byte
		// On odd iterations supply an external CA cert/key; on even
		// iterations cert and key stay nil so swarm generates its own root.
		if i%2 != 0 {
			cert, _, key, err = initca.New(&csr.CertificateRequest{
				CN:         "newRoot",
				KeyRequest: csr.NewBasicKeyRequest(),
				CA:         &csr.CAConfig{Expiry: ca.RootCAExpiration},
			})
			c.Assert(err, checker.IsNil)
		}
		// Empty string when swarm generates the root itself.
		expectedCert := string(cert)
		m.UpdateSwarm(c, func(s *swarm.Spec) {
			s.CAConfig.SigningCACert = expectedCert
			s.CAConfig.SigningCAKey = string(key)
			s.CAConfig.ForceRotate++
		})

		// poll to make sure update succeeds
		var clusterTLSInfo swarm.TLSInfo
		// Up to 18 polls at 250ms each (~4.5s) for the rotation to finish.
		for j := 0; j < 18; j++ {
			info, err := m.SwarmInfo()
			c.Assert(err, checker.IsNil)

			// the desired CA cert and key is always redacted
			c.Assert(info.Cluster.Spec.CAConfig.SigningCAKey, checker.Equals, "")
			c.Assert(info.Cluster.Spec.CAConfig.SigningCACert, checker.Equals, "")

			clusterTLSInfo = info.Cluster.TLSInfo

			// if root rotation is done and the trust root has changed, we don't have to poll anymore
			if !info.Cluster.RootRotationInProgress && clusterTLSInfo.TrustRoot != currentTrustRoot {
				break
			}

			// root rotation not done
			time.Sleep(250 * time.Millisecond)
		}
		// Only when we supplied the root do we know the exact cert that the
		// cluster's trust root must now equal.
		if cert != nil {
			c.Assert(clusterTLSInfo.TrustRoot, checker.Equals, expectedCert)
		}
		// could take another second or two for the nodes to trust the new roots after they've all gotten
		// new TLS certificates
		for j := 0; j < 18; j++ {
			mInfo := m.GetNode(c, m.NodeID).Description.TLSInfo
			wInfo := m.GetNode(c, w.NodeID).Description.TLSInfo

			if mInfo.TrustRoot == clusterTLSInfo.TrustRoot && wInfo.TrustRoot == clusterTLSInfo.TrustRoot {
				break
			}

			// nodes don't trust root certs yet
			time.Sleep(250 * time.Millisecond)
		}

		// Both nodes must fully agree with the cluster-level TLS info, not
		// just share the trust root.
		c.Assert(m.GetNode(c, m.NodeID).Description.TLSInfo, checker.DeepEquals, clusterTLSInfo)
		c.Assert(m.GetNode(c, w.NodeID).Description.TLSInfo, checker.DeepEquals, clusterTLSInfo)
		currentTrustRoot = clusterTLSInfo.TrustRoot
	}
}
  1005  
  1006  func (s *DockerSwarmSuite) TestAPINetworkInspectWithScope(c *check.C) {
  1007  	d := s.AddDaemon(c, true, true)
  1008  
  1009  	name := "foo"
  1010  	networkCreateRequest := types.NetworkCreateRequest{
  1011  		Name: name,
  1012  	}
  1013  
  1014  	var n types.NetworkCreateResponse
  1015  	networkCreateRequest.NetworkCreate.Driver = "overlay"
  1016  
  1017  	status, out, err := d.SockRequest("POST", "/networks/create", networkCreateRequest)
  1018  	c.Assert(err, checker.IsNil, check.Commentf(string(out)))
  1019  	c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(out)))
  1020  	c.Assert(json.Unmarshal(out, &n), checker.IsNil)
  1021  
  1022  	var r types.NetworkResource
  1023  
  1024  	status, body, err := d.SockRequest("GET", "/networks/"+name, nil)
  1025  	c.Assert(err, checker.IsNil, check.Commentf(string(out)))
  1026  	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(out)))
  1027  	c.Assert(json.Unmarshal(body, &r), checker.IsNil)
  1028  	c.Assert(r.Scope, checker.Equals, "swarm")
  1029  	c.Assert(r.ID, checker.Equals, n.ID)
  1030  
  1031  	v := url.Values{}
  1032  	v.Set("scope", "local")
  1033  
  1034  	status, body, err = d.SockRequest("GET", "/networks/"+name+"?"+v.Encode(), nil)
  1035  	c.Assert(err, checker.IsNil, check.Commentf(string(out)))
  1036  	c.Assert(status, checker.Equals, http.StatusNotFound, check.Commentf(string(out)))
  1037  }