github.com/weaviate/weaviate@v1.24.6/usecases/backup/scheduler_test.go

//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2024 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package backup

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/sirupsen/logrus/hooks/test"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/weaviate/weaviate/entities/backup"
	"github.com/weaviate/weaviate/entities/models"
)

func TestSchedulerValidateCreateBackup(t *testing.T) {
	t.Parallel()
	var (
		cls         = "C1"
		backendName = "s3"
		s           = newFakeScheduler(nil).scheduler()
		ctx         = context.Background()
		id          = "123"
		path        = "root/123"
	)
	t.Run("ValidateEmptyID", func(t *testing.T) {
		_, err := s.Backup(ctx, nil, &BackupRequest{
			Backend: backendName,
			ID:      "",
			Include: []string{cls},
		})
		assert.NotNil(t, err)
	})
	t.Run("ValidateID", func(t *testing.T) {
		_, err := s.Backup(ctx, nil, &BackupRequest{
			Backend: backendName,
			ID:      "A*:",
			Include: []string{cls},
		})
		assert.NotNil(t, err)
	})
	t.Run("IncludeExclude", func(t *testing.T) {
		_, err := s.Backup(ctx, nil, &BackupRequest{
			Backend: backendName,
			ID:      "1234",
			Include: []string{cls},
			Exclude: []string{cls},
		})
		assert.NotNil(t, err)
	})

	t.Run("RequestIncludeHasDuplicate", func(t *testing.T) {
		_, err := s.Backup(ctx, nil, &BackupRequest{
			Backend: backendName,
			ID:      "1234",
			Include: []string{"C2", "C2", "C1"},
			Exclude: []string{},
		})
		assert.NotNil(t, err)
		assert.ErrorContains(t, err, "C2")
	})

	t.Run("ResultingClassListIsEmpty", func(t *testing.T) {
		// return one class and exclude it in the request
		fs := newFakeScheduler(nil)
		fs.selector.On("ListClasses", ctx).Return([]string{cls})
		_, err := fs.scheduler().Backup(ctx, nil, &BackupRequest{
			Backend: backendName,
			ID:      "1234",
			Include: []string{},
			Exclude: []string{cls},
		})
		assert.NotNil(t, err)
	})
	t.Run("ClassNotBackupable", func(t *testing.T) {
		// return an error in case index doesn't exist or a shard has multiple nodes
		fs := newFakeScheduler(nil)
		fs.selector.On("ListClasses", ctx).Return([]string{cls})
		fs.selector.On("Backupable", ctx, []string{cls}).Return(ErrAny)
		_, err := fs.scheduler().Backup(ctx, nil, &BackupRequest{
			Backend: backendName,
			ID:      "1234",
			Include: []string{},
		})
		assert.NotNil(t, err)
	})

	t.Run("GetMetadataFails", func(t *testing.T) {
		fs := newFakeScheduler(nil)
		fs.selector.On("Backupable", ctx, []string{cls}).Return(nil)
		fs.backend.On("HomeDir", mock.Anything).Return(path)
		fs.backend.On("GetObject", ctx, id, GlobalBackupFile).Return(nil, errors.New("can not be read"))
		fs.backend.On("GetObject", ctx, id, BackupFile).Return(nil, backup.ErrNotFound{})

		meta, err := fs.scheduler().Backup(ctx, nil, &BackupRequest{
			Backend: backendName,
			ID:      id,
			Include: []string{cls},
		})

		assert.Nil(t, meta)
		assert.NotNil(t, err)
		assert.Contains(t, err.Error(), fmt.Sprintf("check if backup %q exists", id))
		assert.IsType(t, backup.ErrUnprocessable{}, err)
	})
	t.Run("MetadataNotFound", func(t *testing.T) {
		fs := newFakeScheduler(nil)
		fs.selector.On("Backupable", ctx, []string{cls}).Return(nil)
		fs.backend.On("HomeDir", mock.Anything).Return(path)
		bytes := marshalMeta(backup.BackupDescriptor{ID: id})
		fs.backend.On("GetObject", ctx, id, GlobalBackupFile).Return(bytes, nil)
		meta, err := fs.scheduler().Backup(ctx, nil, &BackupRequest{
			Backend: backendName,
			ID:      id,
			Include: []string{cls},
		})

		assert.Nil(t, meta)
		assert.NotNil(t, err)
		assert.Contains(t, err.Error(), fmt.Sprintf("backup %q already exists", id))
		assert.IsType(t, backup.ErrUnprocessable{}, err)
	})
}

func TestSchedulerBackupStatus(t *testing.T) {
	t.Parallel()
	var (
		backendName = "s3"
		id          = "1234"
		ctx         = context.Background()
		starTime    = time.Date(2022, 1, 1, 1, 0, 0, 0, time.UTC)
		nodeHome    = id + "/" + nodeName
		path        = "bucket/backups/" + nodeHome
		want        = &Status{
			Path:      path,
			StartedAt: starTime,
			Status:    backup.Transferring,
		}
	)

	t.Run("ActiveState", func(t *testing.T) {
		s := newFakeScheduler(nil).scheduler()
		s.backupper.lastOp.reqStat = reqStat{
			Starttime: starTime,
			ID:        id,
			Status:    backup.Transferring,
			Path:      path,
		}
		st, err := s.BackupStatus(ctx, nil, backendName, id)
		assert.Nil(t, err)
		assert.Equal(t, want, st)
	})

	t.Run("GetBackupProvider", func(t *testing.T) {
		fs := newFakeScheduler(nil)
		fs.backendErr = ErrAny
		_, err := fs.scheduler().BackupStatus(ctx, nil, backendName, id)
		assert.NotNil(t, err)
	})

	t.Run("MetadataNotFound", func(t *testing.T) {
		fs := newFakeScheduler(nil)
		fs.backend.On("GetObject", ctx, id, GlobalBackupFile).Return(nil, ErrAny)
		fs.backend.On("GetObject", ctx, id, BackupFile).Return(nil, backup.ErrNotFound{})

		_, err := fs.scheduler().BackupStatus(ctx, nil, backendName, id)
		assert.NotNil(t, err)
		nerr := backup.ErrNotFound{}
		if !errors.As(err, &nerr) {
			t.Errorf("error want=%v got=%v", nerr, err)
		}
	})

	t.Run("ReadFromMetadata", func(t *testing.T) {
		fs := newFakeScheduler(nil)
		completedAt := starTime.Add(time.Hour)
		bytes := marshalCoordinatorMeta(
			backup.DistributedBackupDescriptor{
				StartedAt: starTime, CompletedAt: completedAt,
				Nodes:  map[string]*backup.NodeDescriptor{"N1": {Classes: []string{"C1"}}},
				Status: backup.Success,
			})
		want := want
		want.CompletedAt = completedAt
		want.Status = backup.Success
		fs.backend.On("GetObject", ctx, id, GlobalBackupFile).Return(bytes, nil)
		fs.backend.On("HomeDir", mock.Anything).Return(path)
		got, err := fs.scheduler().BackupStatus(ctx, nil, backendName, id)
		assert.Nil(t, err)
		assert.Equal(t, want, got)
	})

	t.Run("ReadFromOldMetadata", func(t *testing.T) {
		fs := newFakeScheduler(nil)
		completedAt := starTime.Add(time.Hour)
		bytes := marshalMeta(backup.BackupDescriptor{StartedAt: starTime, CompletedAt: completedAt, Status: string(backup.Success)})
		want := want
		want.CompletedAt = completedAt
		want.Status = backup.Success
		fs.backend.On("GetObject", ctx, id, GlobalBackupFile).Return(nil, ErrAny)
		fs.backend.On("GetObject", ctx, id, BackupFile).Return(bytes, nil)
		fs.backend.On("HomeDir", mock.Anything).Return(path)
		got, err := fs.scheduler().BackupStatus(ctx, nil, backendName, id)
		assert.Nil(t, err)
		assert.Equal(t, want, got)
	})
}

func TestSchedulerRestorationStatus(t *testing.T) {
	t.Parallel()
	var (
		backendName = "s3"
		id          = "1234"
		ctx         = context.Background()
		starTime    = time.Date(2022, 1, 1, 1, 0, 0, 0, time.UTC)
		nodeHome    = id + "/" + nodeName
		path        = "bucket/backups/" + nodeHome
		want        = &Status{
			Path:      path,
			StartedAt: starTime,
			Status:    backup.Transferring,
		}
	)

	t.Run("ActiveState", func(t *testing.T) {
		s := newFakeScheduler(nil).scheduler()
		s.restorer.lastOp.reqStat = reqStat{
			Starttime: starTime,
			ID:        id,
			Status:    backup.Transferring,
			Path:      path,
		}
		st, err := s.RestorationStatus(ctx, nil, backendName, id)
		assert.Nil(t, err)
		assert.Equal(t, want, st)
	})

	t.Run("GetBackupProvider", func(t *testing.T) {
		fs := newFakeScheduler(nil)
		fs.backendErr = ErrAny
		_, err := fs.scheduler().RestorationStatus(ctx, nil, backendName, id)
		assert.NotNil(t, err)
	})

	t.Run("MetadataNotFound", func(t *testing.T) {
		fs := newFakeScheduler(nil)
		fs.backend.On("GetObject", ctx, id, GlobalRestoreFile).Return(nil, ErrAny)
		_, err := fs.scheduler().RestorationStatus(ctx, nil, backendName, id)
		assert.NotNil(t, err)
		nerr := backup.ErrNotFound{}
		if !errors.As(err, &nerr) {
			t.Errorf("error want=%v got=%v", nerr, err)
		}
	})

	t.Run("ReadFromMetadata", func(t *testing.T) {
		fs := newFakeScheduler(nil)
		completedAt := starTime.Add(time.Hour)
		bytes := marshalMeta(backup.BackupDescriptor{StartedAt: starTime, CompletedAt: completedAt, Status: string(backup.Success)})
		want := want
		want.CompletedAt = completedAt
		want.Status = backup.Success
		fs.backend.On("GetObject", ctx, id, GlobalRestoreFile).Return(bytes, nil)
		fs.backend.On("HomeDir", mock.Anything).Return(path)
		got, err := fs.scheduler().RestorationStatus(ctx, nil, backendName, id)
		assert.Nil(t, err)
		assert.Equal(t, want, got)
	})
}

func TestSchedulerCreateBackup(t *testing.T) {
	t.Parallel()
	var (
		cls         = "Class-A"
		node        = "Node-A"
		backendName = "gcs"
		backupID    = "1"
		any         = mock.Anything
		ctx         = context.Background()
		path        = "dst/path"
		req         = BackupRequest{
			ID:      backupID,
			Include: []string{cls},
			Backend: backendName,
		}
		cresp = &CanCommitResponse{Method: OpCreate, ID: backupID, Timeout: 1}
		sReq  = &StatusRequest{OpCreate, backupID, backendName}
		sresp = &StatusResponse{Status: backup.Success, ID: backupID, Method: OpCreate}
	)

	t.Run("AnotherBackupIsInProgress", func(t *testing.T) {
		req1 := BackupRequest{
			ID:      backupID,
			Include: []string{cls},
			Backend: backendName,
		}

		fs := newFakeScheduler(newFakeNodeResolver([]string{node}))
		// first
		fs.selector.On("Backupable", ctx, req1.Include).Return(nil)
		fs.selector.On("Shards", ctx, cls).Return([]string{node}, nil)

		fs.backend.On("GetObject", ctx, backupID, GlobalBackupFile).Return(nil, backup.ErrNotFound{})
		fs.backend.On("GetObject", ctx, backupID, BackupFile).Return(nil, backup.ErrNotFound{})
		fs.backend.On("HomeDir", mock.Anything).Return(path)
		fs.backend.On("Initialize", ctx, mock.Anything).Return(nil)
		fs.client.On("CanCommit", any, node, any).Return(cresp, nil)
		fs.client.On("Commit", any, node, sReq).Return(nil)
		fs.client.On("Status", any, node, sReq).Return(sresp, nil)
		fs.backend.On("PutObject", any, backupID, GlobalBackupFile, any).Return(nil).Twice()
		m := fs.scheduler()
		resp1, err := m.Backup(ctx, nil, &req1)
		assert.Nil(t, err)
		status1 := string(backup.Started)
		want1 := &models.BackupCreateResponse{
			Backend: backendName,
			Classes: req1.Include,
			ID:      backupID,
			Status:  &status1,
			Path:    path,
		}
		assert.Equal(t, resp1, want1)
		resp2, err := m.Backup(ctx, nil, &req1)
		assert.NotNil(t, err)
		assert.Contains(t, err.Error(), "already in progress")
		assert.IsType(t, backup.ErrUnprocessable{}, err)
		assert.Nil(t, resp2)
	})

	t.Run("BackendUnregistered", func(t *testing.T) {
		classes := []string{cls}
		backendError := errors.New("I do not exist")
		fs := newFakeScheduler(nil)
		fs.backendErr = backendError
		meta, err := fs.scheduler().Backup(ctx, nil, &BackupRequest{
			Backend: backendName,
			ID:      backupID,
			Include: classes,
		})

		assert.Nil(t, meta)
		assert.NotNil(t, err)
		assert.Contains(t, err.Error(), backendName)
		assert.IsType(t, backup.ErrUnprocessable{}, err)
	})

	t.Run("InitMetadata", func(t *testing.T) {
		classes := []string{cls}
		fs := newFakeScheduler(nil)
		fs.selector.On("Backupable", ctx, classes).Return(nil)
		fs.backend.On("HomeDir", mock.Anything).Return(path)
		fs.backend.On("GetObject", ctx, backupID, GlobalBackupFile).Return(nil, backup.NewErrNotFound(errors.New("not found")))
		fs.backend.On("GetObject", ctx, backupID, BackupFile).Return(nil, backup.ErrNotFound{})

		fs.backend.On("Initialize", ctx, backupID).Return(errors.New("init meta failed"))
		meta, err := fs.scheduler().Backup(ctx, nil, &BackupRequest{
			Backend: backendName,
			ID:      backupID,
			Include: classes,
		})

		assert.Nil(t, meta)
		assert.NotNil(t, err)
		assert.Contains(t, err.Error(), "init")
		assert.IsType(t, backup.ErrUnprocessable{}, err)
	})

	t.Run("Success", func(t *testing.T) {
		fs := newFakeScheduler(newFakeNodeResolver([]string{node}))
		fs.selector.On("Backupable", ctx, req.Include).Return(nil)
		fs.selector.On("Shards", ctx, cls).Return([]string{node}, nil)

		fs.backend.On("GetObject", ctx, backupID, GlobalBackupFile).Return(nil, backup.ErrNotFound{})
		fs.backend.On("GetObject", ctx, backupID, BackupFile).Return(nil, backup.ErrNotFound{})

		fs.backend.On("HomeDir", mock.Anything).Return(path)
		fs.backend.On("Initialize", ctx, mock.Anything).Return(nil)
		fs.client.On("CanCommit", any, node, any).Return(cresp, nil)
		fs.client.On("Commit", any, node, sReq).Return(nil)
		fs.client.On("Status", any, node, sReq).Return(sresp, nil)
		fs.backend.On("PutObject", any, backupID, GlobalBackupFile, any).Return(nil).Twice()
		s := fs.scheduler()
		resp, err := s.Backup(ctx, nil, &req)
		assert.Nil(t, err)
		status1 := string(backup.Started)
		want1 := &models.BackupCreateResponse{
			Backend: backendName,
			Classes: req.Include,
			ID:      backupID,
			Status:  &status1,
			Path:    path,
		}
		assert.Equal(t, resp, want1)

		for i := 0; i < 10; i++ {
			time.Sleep(time.Millisecond * 50)
			if i > 0 && s.backupper.lastOp.get().Status == "" {
				break
			}
		}
		assert.Equal(t, fs.backend.glMeta.Status, backup.Success)
		assert.Equal(t, fs.backend.glMeta.Error, "")
	})
}

func TestSchedulerRestoration(t *testing.T) {
	var (
		cls         = "MyClass-A"
		node        = "Node-A"
		any         = mock.Anything
		backendName = "gcs"
		backupID    = "1"
		timePt      = time.Now().UTC()
		ctx         = context.Background()
		path        = "bucket/backups/" + backupID
		cresp       = &CanCommitResponse{Method: OpRestore, ID: backupID, Timeout: 1}
		sReq        = &StatusRequest{OpRestore, backupID, backendName}
		sresp       = &StatusResponse{Status: backup.Success, ID: backupID, Method: OpRestore}
	)
	meta1 := backup.DistributedBackupDescriptor{
		ID:            backupID,
		StartedAt:     timePt,
		Version:       "1",
		ServerVersion: "1",
		Status:        backup.Success,
		Nodes: map[string]*backup.NodeDescriptor{
			node: {Classes: []string{cls}},
		},
	}

	t.Run("AnotherBackupIsInProgress", func(t *testing.T) {
		req1 := BackupRequest{
			ID:      backupID,
			Include: []string{cls},
			Backend: backendName,
		}
		fs := newFakeScheduler(newFakeNodeResolver([]string{node}))
		bytes := marshalCoordinatorMeta(meta1)
		fs.backend.On("Initialize", ctx, mock.Anything).Return(nil)
		fs.backend.On("GetObject", ctx, backupID, GlobalBackupFile).Return(bytes, nil)
		fs.backend.On("HomeDir", mock.Anything).Return(path)
		fs.backend.On("PutObject", mock.Anything, mock.Anything, GlobalRestoreFile, mock.AnythingOfType("[]uint8")).Return(nil)
		fs.client.On("CanCommit", any, node, any).Return(cresp, nil)
		fs.client.On("Commit", any, node, sReq).Return(nil)
		fs.client.On("Status", any, node, sReq).Return(sresp, nil).After(time.Minute)

		s := fs.scheduler()
		resp, err := s.Restore(ctx, nil, &req1)
		assert.Nil(t, err)
		status1 := string(backup.Started)
		want1 := &models.BackupRestoreResponse{
			Backend: backendName,
			Classes: req1.Include,
			ID:      backupID,
			Status:  &status1,
			Path:    path,
		}
		assert.Equal(t, resp, want1)

		resp, err = s.Restore(ctx, nil, &req1)
		assert.NotNil(t, err)
		assert.Contains(t, err.Error(), "already in progress")
		assert.IsType(t, backup.ErrUnprocessable{}, err)
		assert.Nil(t, resp)
	})

	t.Run("Success", func(t *testing.T) {
		req := BackupRequest{
			ID:      backupID,
			Include: []string{cls},
			Backend: backendName,
		}
		fs := newFakeScheduler(newFakeNodeResolver([]string{node}))
		bytes := marshalCoordinatorMeta(meta1)
		fs.backend.On("Initialize", ctx, mock.Anything).Return(nil)
		fs.backend.On("GetObject", ctx, backupID, GlobalBackupFile).Return(bytes, nil)
		fs.backend.On("HomeDir", mock.Anything).Return(path)
		// first for initial "STARTED", second for updated participant status
		fs.backend.On("PutObject", mock.Anything, mock.Anything, GlobalRestoreFile, mock.AnythingOfType("[]uint8")).Return(nil)
		fs.backend.On("PutObject", mock.Anything, mock.Anything, GlobalRestoreFile, mock.AnythingOfType("[]uint8")).Return(nil)
		fs.client.On("CanCommit", any, node, any).Return(cresp, nil)
		fs.client.On("Commit", any, node, sReq).Return(nil)
		fs.client.On("Status", any, node, sReq).Return(sresp, nil)
		fs.backend.On("PutObject", any, backupID, GlobalRestoreFile, any).Return(nil).Twice()
		s := fs.scheduler()
		resp, err := s.Restore(ctx, nil, &req)
		assert.Nil(t, err)
		status1 := string(backup.Started)
		want1 := &models.BackupRestoreResponse{
			Backend: backendName,
			Classes: req.Include,
			ID:      backupID,
			Status:  &status1,
			Path:    path,
		}
		assert.Equal(t, resp, want1)
		for i := 0; i < 10; i++ {
			time.Sleep(time.Millisecond * 60)
			if i > 0 && s.restorer.lastOp.get().Status == "" {
				break
			}
		}
		assert.Equal(t, fs.backend.glMeta.Status, backup.Success)
		assert.Equal(t, fs.backend.glMeta.Error, "")
	})
}

func TestSchedulerRestoreRequestValidation(t *testing.T) {
	var (
		cls         = "MyClass"
		backendName = "s3"
		s           = newFakeScheduler(nil).scheduler()
		id          = "1234"
		timePt      = time.Now().UTC()
		ctx         = context.Background()
		path        = "bucket/backups/" + id
		req         = &BackupRequest{
			Backend: backendName,
			ID:      id,
			Include: []string{cls},
			Exclude: []string{},
		}
	)
	meta := backup.DistributedBackupDescriptor{
		ID:            id,
		StartedAt:     timePt,
		Version:       "1",
		ServerVersion: "1",
		Status:        backup.Success,
		Nodes: map[string]*backup.NodeDescriptor{
			nodeName: {Classes: []string{cls}},
		},
	}

	t.Run("NonEmptyIncludeAndExclude", func(t *testing.T) {
		_, err := s.Restore(ctx, nil, &BackupRequest{
			Backend: backendName,
			ID:      id,
			Include: []string{cls},
			Exclude: []string{cls},
		})
		assert.NotNil(t, err)
	})

	t.Run("RequestIncludeHasDuplicates", func(t *testing.T) {
		_, err := s.Restore(ctx, nil, &BackupRequest{
			Backend: backendName,
			ID:      id,
			Include: []string{"C1", "C2", "C1"},
			Exclude: []string{},
		})
		assert.NotNil(t, err)
		assert.ErrorContains(t, err, "C1")
	})

	t.Run("BackendFailure", func(t *testing.T) { // backend provider fails
		fs := newFakeScheduler(nil)
		fs.backendErr = ErrAny
		_, err := fs.scheduler().Restore(ctx, nil, &BackupRequest{
			Backend: backendName,
			ID:      id,
			Include: []string{cls},
			Exclude: []string{},
		})
		assert.NotNil(t, err)
		assert.Contains(t, err.Error(), backendName)
	})

	t.Run("GetMetadataFile", func(t *testing.T) {
		fs := newFakeScheduler(nil)

		fs.backend.On("GetObject", ctx, id, GlobalBackupFile).Return(nil, ErrAny)
		fs.backend.On("GetObject", ctx, id, BackupFile).Return(nil, backup.ErrNotFound{})

		fs.backend.On("HomeDir", mock.Anything).Return(path)
		_, err := fs.scheduler().Restore(ctx, nil, req)
		if err == nil || !strings.Contains(err.Error(), "find") {
			t.Errorf("must return an error if it fails to get meta data: %v", err)
		}
		// meta data not found
		fs = newFakeScheduler(nil)
		fs.backend.On("HomeDir", mock.Anything).Return(path)
		fs.backend.On("GetObject", ctx, id, GlobalBackupFile).Return(nil, backup.ErrNotFound{})
		fs.backend.On("GetObject", ctx, id, BackupFile).Return(nil, backup.ErrNotFound{})

		_, err = fs.scheduler().Restore(ctx, nil, req)
		if _, ok := err.(backup.ErrNotFound); !ok {
			t.Errorf("must return an error if meta data doesn't exist: %v", err)
		}
	})

	t.Run("FailedBackup", func(t *testing.T) {
		fs := newFakeScheduler(nil)
		bytes := marshalMeta(backup.BackupDescriptor{ID: id, Status: string(backup.Failed)})
		fs.backend.On("GetObject", ctx, id, GlobalBackupFile).Return(bytes, nil)
		fs.backend.On("HomeDir", mock.Anything).Return(path)
		_, err := fs.scheduler().Restore(ctx, nil, req)
		assert.NotNil(t, err)
		assert.Contains(t, err.Error(), backup.Failed)
		assert.IsType(t, backup.ErrUnprocessable{}, err)
	})

	t.Run("BackupWithHigherVersion", func(t *testing.T) {
		fs := newFakeScheduler(nil)
		version := "3.0"
		meta := backup.DistributedBackupDescriptor{
			ID:            id,
			StartedAt:     timePt,
			Version:       version,
			ServerVersion: "2",
			Status:        backup.Success,
			Nodes: map[string]*backup.NodeDescriptor{
				nodeName: {Classes: []string{cls}},
			},
		}

		bytes := marshalCoordinatorMeta(meta)
		fs.backend.On("GetObject", ctx, id, GlobalBackupFile).Return(bytes, nil)
		fs.backend.On("HomeDir", mock.Anything).Return(path)
		_, err := fs.scheduler().Restore(ctx, nil, req)
		assert.NotNil(t, err)
		assert.Contains(t, err.Error(), errMsgHigherVersion)
		assert.IsType(t, backup.ErrUnprocessable{}, err)
	})

	t.Run("CorruptedBackupFile", func(t *testing.T) {
		fs := newFakeScheduler(nil)
		bytes := marshalMeta(backup.BackupDescriptor{ID: id, Status: string(backup.Success)})
		fs.backend.On("GetObject", ctx, id, GlobalBackupFile).Return(bytes, nil)
		fs.backend.On("HomeDir", mock.Anything).Return(path)
		_, err := fs.scheduler().Restore(ctx, nil, req)
		assert.NotNil(t, err)
		assert.IsType(t, backup.ErrUnprocessable{}, err)
		assert.Contains(t, err.Error(), "corrupted")
	})

	t.Run("WrongBackupFile", func(t *testing.T) {
		fs := newFakeScheduler(nil)

		bytes := marshalMeta(backup.BackupDescriptor{ID: "123", Status: string(backup.Success)})
		fs.backend.On("GetObject", ctx, id, GlobalBackupFile).Return(bytes, nil)
		fs.backend.On("HomeDir", mock.Anything).Return(path)
		_, err := fs.scheduler().Restore(ctx, nil, req)
		assert.NotNil(t, err)
		assert.IsType(t, backup.ErrUnprocessable{}, err)
		assert.Contains(t, err.Error(), "wrong backup file")
	})

	t.Run("UnknownClass", func(t *testing.T) {
		fs := newFakeScheduler(nil)

		bytes := marshalCoordinatorMeta(meta)
		fs.backend.On("GetObject", ctx, id, GlobalBackupFile).Return(bytes, nil)
		fs.backend.On("HomeDir", mock.Anything).Return(path)
		_, err := fs.scheduler().Restore(ctx, nil, &BackupRequest{ID: id, Include: []string{"unknown"}})
		assert.NotNil(t, err)
		assert.Contains(t, err.Error(), "unknown")
	})

	t.Run("EmptyResultClassList", func(t *testing.T) { // backup was successful but class list is empty
		fs := newFakeScheduler(&fakeNodeResolver{})

		bytes := marshalCoordinatorMeta(meta)
		fs.backend.On("GetObject", ctx, id, GlobalBackupFile).Return(bytes, nil)
		fs.backend.On("HomeDir", mock.Anything).Return(path)
		_, err := fs.scheduler().Restore(ctx, nil, &BackupRequest{ID: id, Exclude: []string{cls}})
		assert.NotNil(t, err)
		assert.Contains(t, err.Error(), cls)
	})
}

type fakeScheduler struct {
	selector     fakeSelector
	client       fakeClient
	backend      *fakeBackend
	backendErr   error
	auth         *fakeAuthorizer
	nodeResolver nodeResolver
	log          logrus.FieldLogger
}

func newFakeScheduler(resolver nodeResolver) *fakeScheduler {
	fc := fakeScheduler{}
	fc.backend = newFakeBackend()
	fc.backendErr = nil
	logger, _ := test.NewNullLogger()
	fc.auth = &fakeAuthorizer{}
	fc.log = logger
	if resolver == nil {
		fc.nodeResolver = &fakeNodeResolver{}
	} else {
		fc.nodeResolver = resolver
	}
	return &fc
}

func (f *fakeScheduler) scheduler() *Scheduler {
	provider := &fakeBackupBackendProvider{f.backend, f.backendErr}
	c := NewScheduler(f.auth, &f.client, &f.selector, provider,
		f.nodeResolver, f.log)
	c.backupper.timeoutNextRound = time.Millisecond * 200
	c.restorer.timeoutNextRound = time.Millisecond * 200
	return c
}

func marshalCoordinatorMeta(m backup.DistributedBackupDescriptor) []byte {
	bytes, _ := json.MarshalIndent(m, "", "")
	return bytes
}

func TestFirstDuplicate(t *testing.T) {
	tests := []struct {
		in   []string
		want string
	}{
		{},
		{[]string{"1"}, ""},
		{[]string{"1", "1"}, "1"},
		{[]string{"1", "2", "2", "1"}, "2"},
		{[]string{"1", "2", "3", "1"}, "1"},
	}
	for _, test := range tests {
		got := findDuplicate(test.in)
		if got != test.want {
			t.Errorf("firstDuplicate(%v) want=%s got=%s", test.in, test.want, got)
		}
	}
}