github.com/weaviate/weaviate@v1.24.6/usecases/backup/coordinator_test.go

//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2024 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package backup

import (
	"context"
	"testing"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/sirupsen/logrus/hooks/test"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/weaviate/weaviate/entities/backup"
	"github.com/weaviate/weaviate/usecases/config"
)

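// TestCoordinatedBackup drives the coordinator's two-phase backup protocol
// (CanCommit, then Commit, polled via Status) against the mocked nodes N1 and
// N2, covering the happy path and failures at each phase. The local variable
// `any` deliberately shadows the builtin as shorthand for mock.Anything.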
func TestCoordinatedBackup(t *testing.T) {
	t.Parallel()
	var (
		backendName = "s3"
		any         = mock.Anything
		backupID    = "1"
		ctx         = context.Background()
		nodes       = []string{"N1", "N2"}
		classes     = []string{"Class-A", "Class-B"}
		now         = time.Now().UTC()
		req         = newReq(classes, backendName, backupID)
		creq        = &Request{
			Method:      OpCreate,
			ID:          backupID,
			Backend:     backendName,
			Classes:     req.Classes,
			Duration:    _BookingPeriod,
			Compression: req.Compression,
		}
		cresp        = &CanCommitResponse{Method: OpCreate, ID: backupID, Timeout: 1}
		sReq         = &StatusRequest{OpCreate, backupID, backendName}
		sresp        = &StatusResponse{Status: backup.Success, ID: backupID, Method: OpCreate}
		abortReq     = &AbortRequest{OpCreate, backupID, backendName}
		nodeResolver = newFakeNodeResolver(nodes)
	)

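	// PutMeta: persisting the global backup metadata file fails, so Backup
	// must return the backend error to the caller.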
	t.Run("PutMeta", func(t *testing.T) {
		t.Parallel()
		fc := newFakeCoordinator(nodeResolver)
		fc.selector.On("Shards", ctx, classes[0]).Return(nodes, nil)
		fc.selector.On("Shards", ctx, classes[1]).Return(nodes, nil)

		fc.client.On("CanCommit", any, nodes[0], creq).Return(cresp, nil)
		fc.client.On("CanCommit", any, nodes[1], creq).Return(cresp, nil)
		fc.backend.On("HomeDir", backupID).Return("bucket/" + backupID)
		fc.backend.On("PutObject", any, backupID, GlobalBackupFile, any).Return(ErrAny).Once()

		coordinator := *fc.coordinator()
		req := newReq(classes, backendName, backupID)
		store := coordStore{objStore{fc.backend, req.ID}}
		err := coordinator.Backup(ctx, store, &req)
		assert.NotNil(t, err)
	})

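	// Success: both nodes accept, commit, and report success. The global
	// metadata file is written exactly twice (presumably once when the backup
	// starts and once when it completes), and the final descriptor marks both
	// nodes as successful.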
	t.Run("Success", func(t *testing.T) {
		t.Parallel()
		fc := newFakeCoordinator(nodeResolver)
		fc.client.On("CanCommit", any, nodes[0], creq).Return(cresp, nil)
		fc.client.On("CanCommit", any, nodes[1], creq).Return(cresp, nil)
		fc.client.On("Commit", any, nodes[0], sReq).Return(nil)
		fc.client.On("Commit", any, nodes[1], sReq).Return(nil)
		fc.client.On("Status", any, nodes[0], sReq).Return(sresp, nil)
		fc.client.On("Status", any, nodes[1], sReq).Return(sresp, nil)
		fc.backend.On("HomeDir", backupID).Return("bucket/" + backupID)
		fc.backend.On("PutObject", any, backupID, GlobalBackupFile, any).Return(nil).Twice()

		coordinator := *fc.coordinator()
		req := newReq(classes, backendName, backupID)
		store := coordStore{objStore{fc.backend, req.ID}}
		err := coordinator.Backup(ctx, store, &req)
		assert.Nil(t, err)
		<-fc.backend.doneChan

		got := fc.backend.glMeta
		assert.GreaterOrEqual(t, got.StartedAt, now)
		assert.Greater(t, got.CompletedAt, got.StartedAt)
		want := backup.DistributedBackupDescriptor{
			StartedAt:     got.StartedAt,
			CompletedAt:   got.CompletedAt,
			ID:            backupID,
			Status:        backup.Success,
			Version:       Version,
			ServerVersion: config.ServerVersion,
			Nodes: map[string]*backup.NodeDescriptor{
				nodes[0]: {
					Classes: classes,
					Status:  backup.Success,
				},
				nodes[1]: {
					Classes: classes,
					Status:  backup.Success,
				},
			},
		}
		assert.Equal(t, want, got)
	})

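	// SuccessOnShardsEmptyPhysical: classes[0] resolves to no physical shards,
	// yet the backup still succeeds. Note that, despite its name,
	// creqWithOneClass lists both classes: each node is still asked to back up
	// the full class list.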
	t.Run("SuccessOnShardsEmptyPhysical", func(t *testing.T) {
		t.Parallel()
		fc := newFakeCoordinator(nodeResolver)
		fc.selector.On("Shards", ctx, classes[0]).Return([]string{}, nil)
		fc.selector.On("Shards", ctx, classes[1]).Return(nodes, nil)

		creqWithOneClass := &Request{
			Method:   OpCreate,
			ID:       backupID,
			Backend:  backendName,
			Classes:  classes,
			Duration: _BookingPeriod,
			Compression: Compression{
				Level:         DefaultCompression,
				ChunkSize:     DefaultChunkSize,
				CPUPercentage: DefaultCPUPercentage,
			},
		}
		fc.client.On("CanCommit", any, nodes[0], creqWithOneClass).Return(cresp, nil)
		fc.client.On("CanCommit", any, nodes[1], creqWithOneClass).Return(cresp, nil)
		fc.client.On("Commit", any, nodes[0], sReq).Return(nil)
		fc.client.On("Commit", any, nodes[1], sReq).Return(nil)
		fc.client.On("Status", any, nodes[0], sReq).Return(sresp, nil)
		fc.client.On("Status", any, nodes[1], sReq).Return(sresp, nil)
		fc.backend.On("HomeDir", backupID).Return("bucket/" + backupID)
		fc.backend.On("PutObject", any, backupID, GlobalBackupFile, any).Return(nil).Twice()

		coordinator := *fc.coordinator()
		req := newReq(classes, backendName, backupID)
		store := coordStore{objStore{fc.backend, req.ID}}
		err := coordinator.Backup(ctx, store, &req)
		assert.Nil(t, err)
		<-fc.backend.doneChan

		got := fc.backend.glMeta
		assert.GreaterOrEqual(t, got.StartedAt, now)
		assert.Greater(t, got.CompletedAt, got.StartedAt)
		want := backup.DistributedBackupDescriptor{
			StartedAt:     got.StartedAt,
			CompletedAt:   got.CompletedAt,
			ID:            backupID,
			Status:        backup.Success,
			Version:       Version,
			ServerVersion: config.ServerVersion,
			Nodes: map[string]*backup.NodeDescriptor{
				nodes[0]: {
					Classes: classes,
					Status:  backup.Success,
				},
				nodes[1]: {
					Classes: classes,
					Status:  backup.Success,
				},
			},
		}
		assert.Equal(t, want, got)
	})

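	// CanCommit: the second node declines by returning an empty response, so
	// the coordinator aborts the first node and fails with errCannotCommit,
	// naming the node that refused.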
	t.Run("CanCommit", func(t *testing.T) {
		t.Parallel()

		fc := newFakeCoordinator(nodeResolver)
		fc.selector.On("Shards", ctx, classes[0]).Return(nodes, nil)
		fc.selector.On("Shards", ctx, classes[1]).Return(nodes, nil)

		fc.client.On("CanCommit", any, nodes[0], creq).Return(cresp, nil)
		fc.client.On("CanCommit", any, nodes[1], creq).Return(&CanCommitResponse{}, nil)
		fc.client.On("Abort", any, nodes[0], abortReq).Return(ErrAny)
		fc.backend.On("HomeDir", backupID).Return("bucket/" + backupID)

		coordinator := *fc.coordinator()
		req := newReq(classes, backendName, backupID)
		store := coordStore{objStore: objStore{fc.backend, req.ID}}
		err := coordinator.Backup(ctx, store, &req)
		assert.ErrorIs(t, err, errCannotCommit)
		assert.Contains(t, err.Error(), nodes[1])
	})

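	// NodeDown: nodes[1] keeps failing its status checks after committing.
	// With timeoutNodeDown set to zero the coordinator gives up on it right
	// away and records the failure in the global descriptor; Backup itself
	// still returns nil, since the failure surfaces asynchronously in the
	// metadata written after the commit phase is launched.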
	t.Run("NodeDown", func(t *testing.T) {
		t.Parallel()
		var (
			fc          = newFakeCoordinator(nodeResolver)
			coordinator = *fc.coordinator()
			req         = newReq(classes, backendName, backupID)
			store       = coordStore{objStore{fc.backend, req.ID}}
		)
		coordinator.timeoutNodeDown = 0
		fc.selector.On("Shards", ctx, classes[0]).Return(nodes, nil)
		fc.selector.On("Shards", ctx, classes[1]).Return(nodes, nil)

		fc.client.On("CanCommit", any, nodes[0], creq).Return(cresp, nil)
		fc.client.On("CanCommit", any, nodes[1], creq).Return(cresp, nil)
		fc.client.On("Commit", any, nodes[0], sReq).Return(nil)
		fc.client.On("Commit", any, nodes[1], sReq).Return(nil)
		fc.client.On("Status", any, nodes[0], sReq).Return(sresp, nil)
		fc.client.On("Status", any, nodes[1], sReq).Return(sresp, ErrAny)
		fc.backend.On("HomeDir", backupID).Return("bucket/" + backupID)
		fc.backend.On("PutObject", any, backupID, GlobalBackupFile, any).Return(nil).Twice()

		fc.client.On("Abort", any, nodes[0], abortReq).Return(nil)
		fc.client.On("Abort", any, nodes[1], abortReq).Return(nil)

		err := coordinator.Backup(ctx, store, &req)
		assert.Nil(t, err)
		<-fc.backend.doneChan

		got := fc.backend.glMeta
		assert.GreaterOrEqual(t, got.StartedAt, now)
		assert.Greater(t, got.CompletedAt, got.StartedAt)
		assert.Contains(t, got.Nodes[nodes[1]].Error, ErrAny.Error())
		want := backup.DistributedBackupDescriptor{
			StartedAt:     got.StartedAt,
			CompletedAt:   got.CompletedAt,
			ID:            backupID,
			Status:        backup.Failed,
			Error:         got.Nodes[nodes[1]].Error,
			Version:       Version,
			ServerVersion: config.ServerVersion,
			Nodes: map[string]*backup.NodeDescriptor{
				nodes[0]: {
					Classes: classes,
					Status:  backup.Success,
				},
				nodes[1]: {
					Classes: classes,
					Status:  backup.Failed,
					Error:   got.Nodes[nodes[1]].Error,
				},
			},
		}
		assert.Equal(t, want, got)
	})

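	// NodeDisconnect: the Commit call to nodes[0] fails outright. The global
	// descriptor records the commit error for nodes[0] and fails the backup,
	// while nodes[1] is left with an empty status, its success never being
	// recorded.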
	t.Run("NodeDisconnect", func(t *testing.T) {
		t.Parallel()
		var (
			fc          = newFakeCoordinator(nodeResolver)
			coordinator = *fc.coordinator()
		)
		coordinator.timeoutNodeDown = 0
		fc.selector.On("Shards", ctx, classes[0]).Return(nodes, nil)
		fc.selector.On("Shards", ctx, classes[1]).Return(nodes, nil)

		fc.client.On("CanCommit", any, nodes[0], creq).Return(cresp, nil)
		fc.client.On("CanCommit", any, nodes[1], creq).Return(cresp, nil)
		fc.client.On("Commit", any, nodes[0], sReq).Return(ErrAny)
		fc.client.On("Commit", any, nodes[1], sReq).Return(nil)
		fc.client.On("Status", any, nodes[1], sReq).Return(sresp, nil)
		fc.backend.On("HomeDir", backupID).Return("bucket/" + backupID)
		fc.backend.On("PutObject", any, backupID, GlobalBackupFile, any).Return(nil).Twice()

		fc.client.On("Abort", any, nodes[0], abortReq).Return(nil)
		fc.client.On("Abort", any, nodes[1], abortReq).Return(nil)

		req := newReq(classes, backendName, backupID)
		store := coordStore{objStore: objStore{fc.backend, req.ID}}
		err := coordinator.Backup(ctx, store, &req)
		assert.Nil(t, err)
		<-fc.backend.doneChan

		got := fc.backend.glMeta
		assert.GreaterOrEqual(t, got.StartedAt, now)
		assert.Greater(t, got.CompletedAt, got.StartedAt)
		assert.Contains(t, got.Nodes[nodes[0]].Error, ErrAny.Error())
		want := backup.DistributedBackupDescriptor{
			StartedAt:     got.StartedAt,
			CompletedAt:   got.CompletedAt,
			ID:            backupID,
			Status:        backup.Failed,
			Error:         got.Nodes[nodes[0]].Error,
			Version:       Version,
			ServerVersion: config.ServerVersion,
			Nodes: map[string]*backup.NodeDescriptor{
				nodes[1]: {
					Classes: classes,
					Status:  "",
				},
				nodes[0]: {
					Classes: classes,
					Status:  backup.Failed,
					Error:   got.Nodes[nodes[0]].Error,
				},
			},
		}
		assert.Equal(t, want, got)
	})
}

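// TestCoordinatedRestore exercises the restore side of the same two-phase
// protocol: the coordinator replays CanCommit/Commit/Status against a
// distributed backup descriptor of the kind produced by a successful backup.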
func TestCoordinatedRestore(t *testing.T) {
	t.Parallel()
	var (
		now          = time.Now().UTC()
		backendName  = "s3"
		any          = mock.Anything
		backupID     = "1"
		path         = "backups/1"
		ctx          = context.Background()
		nodes        = []string{"N1", "N2"}
		classes      = []string{"Class-A", "Class-B"}
		nodeResolver = newFakeNodeResolver(nodes)
		genReq       = func() *backup.DistributedBackupDescriptor {
			return &backup.DistributedBackupDescriptor{
				StartedAt:     now,
				CompletedAt:   now.Add(time.Second).UTC(),
				ID:            backupID,
				Status:        backup.Success,
				Version:       Version,
				ServerVersion: config.ServerVersion,
				Nodes: map[string]*backup.NodeDescriptor{
					nodes[0]: {
						Classes: classes,
						Status:  backup.Success,
					},
					nodes[1]: {
						Classes: classes,
						Status:  backup.Success,
					},
				},
			}
		}
		creq = &Request{
			Method:   OpRestore,
			ID:       backupID,
			Backend:  backendName,
			Classes:  classes,
			Duration: _BookingPeriod,
			Compression: Compression{
				Level:         DefaultCompression,
				ChunkSize:     DefaultChunkSize,
				CPUPercentage: DefaultCPUPercentage,
			},
		}
		cresp    = &CanCommitResponse{Method: OpRestore, ID: backupID, Timeout: 1}
		sReq     = &StatusRequest{OpRestore, backupID, backendName}
		sresp    = &StatusResponse{Status: backup.Success, ID: backupID, Method: OpRestore}
		abortReq = &AbortRequest{OpRestore, backupID, backendName}
	)

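	// Success: both nodes accept and complete the restore, and the global
	// restore file is written twice, mirroring the backup happy path.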
	t.Run("Success", func(t *testing.T) {
		t.Parallel()
		fc := newFakeCoordinator(nodeResolver)
		fc.selector.On("Shards", ctx, classes[0]).Return(nodes, nil)
		fc.selector.On("Shards", ctx, classes[1]).Return(nodes, nil)

		fc.client.On("CanCommit", any, nodes[0], creq).Return(cresp, nil)
		fc.client.On("CanCommit", any, nodes[1], creq).Return(cresp, nil)

		fc.client.On("Commit", any, nodes[0], sReq).Return(nil)
		fc.client.On("Commit", any, nodes[1], sReq).Return(nil)
		fc.client.On("Status", any, nodes[0], sReq).Return(sresp, nil)
		fc.client.On("Status", any, nodes[1], sReq).Return(sresp, nil)
		fc.backend.On("HomeDir", backupID).Return("bucket/" + backupID)
		fc.backend.On("PutObject", any, backupID, GlobalRestoreFile, any).Return(nil).Twice()

		coordinator := *fc.coordinator()
		store := coordStore{objStore{fc.backend, backupID}}

		req := newReq([]string{}, backendName, "")
		err := coordinator.Restore(ctx, store, &req, genReq())
		assert.Nil(t, err)
	})

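	// CanCommit: one node declines the restore, so the other node is aborted
	// and errCannotCommit is returned.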
	t.Run("CanCommit", func(t *testing.T) {
		t.Parallel()

		fc := newFakeCoordinator(nodeResolver)
		fc.client.On("CanCommit", any, nodes[0], creq).Return(cresp, nil)
		fc.client.On("CanCommit", any, nodes[1], creq).Return(&CanCommitResponse{}, nil)
		fc.backend.On("HomeDir", mock.Anything).Return(path)
		fc.client.On("Abort", any, nodes[0], abortReq).Return(nil)

		coordinator := *fc.coordinator()
		store := coordStore{objStore{fc.backend, backupID}}
		req := newReq([]string{}, backendName, "")
		err := coordinator.Restore(ctx, store, &req, genReq())
		assert.ErrorIs(t, err, errCannotCommit)
		assert.Contains(t, err.Error(), nodes[1])
	})

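	// PutInitialMeta: writing the initial restore metadata fails; the wrapped
	// error mentions "initial" and both nodes are told to abort.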
	t.Run("PutInitialMeta", func(t *testing.T) {
		t.Parallel()

		fc := newFakeCoordinator(nodeResolver)
		fc.client.On("CanCommit", any, nodes[0], creq).Return(cresp, nil)
		fc.client.On("CanCommit", any, nodes[1], creq).Return(cresp, nil)
		fc.backend.On("HomeDir", backupID).Return("bucket/" + backupID)
		fc.backend.On("PutObject", any, backupID, GlobalRestoreFile, any).Return(ErrAny).Once()
		fc.client.On("Abort", any, nodes[0], abortReq).Return(nil)
		fc.client.On("Abort", any, nodes[1], abortReq).Return(nil)

		coordinator := *fc.coordinator()
		store := coordStore{objStore{fc.backend, backupID}}
		req := newReq([]string{}, backendName, "")
		err := coordinator.Restore(ctx, store, &req, genReq())
		assert.ErrorIs(t, err, ErrAny)
		assert.Contains(t, err.Error(), "initial")
	})
}

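// TestCoordinatedRestoreWithNodeMapping restores a backup taken on Old-N1 and
// Old-N2 onto New-N1 and New-N2. The descriptor's NodeMapping redirects every
// node-level call, which is why the client mocks expect only the new names.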
func TestCoordinatedRestoreWithNodeMapping(t *testing.T) {
	t.Parallel()
	var (
		now         = time.Now().UTC()
		backendName = "s3"
		any         = mock.Anything
		backupID    = "1"
		ctx         = context.Background()
		nodes       = []string{"Old-N1", "Old-N2"}
		newNodes    = []string{"New-N1", "New-N2"}
		classes     = []string{"Dedicated-Class-A", "Dedicated-Class-B"}
		nodeMapping = map[string]string{nodes[0]: newNodes[0], nodes[1]: newNodes[1]}
		genReq      = func() *backup.DistributedBackupDescriptor {
			return &backup.DistributedBackupDescriptor{
				StartedAt:     now,
				CompletedAt:   now.Add(time.Second).UTC(),
				ID:            backupID,
				Status:        backup.Success,
				Version:       Version,
				ServerVersion: config.ServerVersion,
				Nodes: map[string]*backup.NodeDescriptor{
					nodes[0]: {
						Classes: classes,
						Status:  backup.Success,
					},
					nodes[1]: {
						Classes: classes,
						Status:  backup.Success,
					},
				},
				NodeMapping: nodeMapping,
			}
		}
		creq = &Request{
			Method:      OpRestore,
			ID:          backupID,
			Backend:     backendName,
			Classes:     classes,
			NodeMapping: nodeMapping,
			Duration:    _BookingPeriod,
			Compression: Compression{
				Level:         DefaultCompression,
				ChunkSize:     DefaultChunkSize,
				CPUPercentage: DefaultCPUPercentage,
			},
		}
		cresp = &CanCommitResponse{Method: OpRestore, ID: backupID, Timeout: 1}
		sReq  = &StatusRequest{OpRestore, backupID, backendName}
		sresp = &StatusResponse{Status: backup.Success, ID: backupID, Method: OpRestore}
	)

	t.Run("Success", func(t *testing.T) {
		t.Parallel()

		nodeResolverWithNodeMapping := newFakeNodeResolver(append(nodes, newNodes...))
		fc := newFakeCoordinator(nodeResolverWithNodeMapping)
		fc.selector.On("Shards", ctx, classes[0]).Return(nodes, nil)
		fc.selector.On("Shards", ctx, classes[1]).Return(nodes, nil)

		fc.client.On("CanCommit", any, newNodes[0], creq).Return(cresp, nil)
		fc.client.On("CanCommit", any, newNodes[1], creq).Return(cresp, nil)

		fc.client.On("Commit", any, newNodes[0], sReq).Return(nil)
		fc.client.On("Commit", any, newNodes[1], sReq).Return(nil)
		fc.client.On("Status", any, newNodes[0], sReq).Return(sresp, nil)
		fc.client.On("Status", any, newNodes[1], sReq).Return(sresp, nil)
		fc.backend.On("HomeDir", backupID).Return("bucket/" + backupID)
		fc.backend.On("PutObject", any, backupID, GlobalRestoreFile, any).Return(nil).Twice()

		coordinator := *fc.coordinator()
		descReq := genReq()
		store := coordStore{objStore{fc.backend, descReq.ID}}
		req := newReq([]string{}, backendName, "")
		err := coordinator.Restore(ctx, store, &req, descReq)
		assert.Nil(t, err)
	})
}

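// fakeSelector mocks the coordinator's selector dependency, which resolves the
// nodes hosting a class's shards and vets classes for backup.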
type fakeSelector struct {
	mock.Mock
}

func (s *fakeSelector) Shards(ctx context.Context, class string) ([]string, error) {
	args := s.Called(ctx, class)
	return args.Get(0).([]string), args.Error(1)
}

func (s *fakeSelector) ListClasses(ctx context.Context) []string {
	args := s.Called(ctx)
	return args.Get(0).([]string)
}

func (s *fakeSelector) Backupable(ctx context.Context, classes []string) error {
	args := s.Called(ctx, classes)
	return args.Error(0)
}

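// fakeCoordinator bundles the mocked selector, client, and backend together
// with a discarded logger, so each subtest can build a coordinator with its
// own isolated expectations:
//
//	fc := newFakeCoordinator(nodeResolver)
//	fc.client.On("CanCommit", mock.Anything, "N1", creq).Return(cresp, nil)
//	coordinator := *fc.coordinator()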
type fakeCoordinator struct {
	selector     fakeSelector
	client       fakeClient
	backend      *fakeBackend
	log          logrus.FieldLogger
	nodeResolver nodeResolver
}

func newFakeCoordinator(resolver nodeResolver) *fakeCoordinator {
	fc := fakeCoordinator{}
	fc.backend = newFakeBackend()
	logger, _ := test.NewNullLogger()
	fc.log = logger
	fc.nodeResolver = resolver
	return &fc
}

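// fakeNodeResolver maps each node name to itself as its hostname. NodeHostname
// always reports success, which is fine here since the tests only resolve
// nodes that were registered up front.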
type fakeNodeResolver struct {
	hosts map[string]string
}

func (r *fakeNodeResolver) NodeHostname(nodeName string) (string, bool) {
	return r.hosts[nodeName], true
}

func (r *fakeNodeResolver) NodeCount() int {
	if r.hosts != nil {
		return len(r.hosts)
	}
	return 1
}

func (r *fakeNodeResolver) AllNames() []string {
	xs := make([]string, 0, len(r.hosts))
	for k := range r.hosts {
		xs = append(xs, k)
	}
	return xs
}

func newFakeNodeResolver(nodes []string) *fakeNodeResolver {
	hosts := make(map[string]string)
	for _, node := range nodes {
		hosts[node] = node
	}
	return &fakeNodeResolver{hosts: hosts}
}

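// coordinator builds a real coordinator around the fakes, shortening the
// round-polling interval so the status loops in these tests finish quickly.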
func (fc *fakeCoordinator) coordinator() *coordinator {
	c := newCoordinator(&fc.selector, &fc.client, fc.log, fc.nodeResolver)
	c.timeoutNextRound = time.Millisecond * 200
	return c
}

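// fakeClient mocks the RPC client the coordinator uses to drive participant
// nodes through the CanCommit/Commit/Status/Abort exchange.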
type fakeClient struct {
	mock.Mock
}

func (f *fakeClient) CanCommit(ctx context.Context, node string, req *Request) (*CanCommitResponse, error) {
	args := f.Called(ctx, node, req)
	if args.Get(0) != nil {
		return args.Get(0).(*CanCommitResponse), args.Error(1)
	}
	return nil, args.Error(1)
}

func (f *fakeClient) Commit(ctx context.Context, node string, req *StatusRequest) error {
	args := f.Called(ctx, node, req)
	return args.Error(0)
}

func (f *fakeClient) Status(ctx context.Context, node string, req *StatusRequest) (*StatusResponse, error) {
	args := f.Called(ctx, node, req)
	if args.Get(0) != nil {
		return args.Get(0).(*StatusResponse), args.Error(1)
	}
	return nil, args.Error(1)
}

func (f *fakeClient) Abort(ctx context.Context, node string, req *AbortRequest) error {
	args := f.Called(ctx, node, req)
	return args.Error(0)
}

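// newReq builds a backup Request with the default compression settings used
// throughout these tests.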
func newReq(classes []string, backendName, backupID string) Request {
	return Request{
		ID:      backupID,
		Backend: backendName,
		Classes: classes,
		Compression: Compression{
			Level:         DefaultCompression,
			ChunkSize:     DefaultChunkSize,
			CPUPercentage: DefaultCPUPercentage,
		},
	}
}