github.com/swiftstack/proxyfs@v0.0.0-20201223034610-5434d919416e/pfsworkout/main.go (about) 1 package main 2 3 import ( 4 "fmt" 5 "math/rand" 6 "os" 7 "strconv" 8 "time" 9 10 "github.com/swiftstack/ProxyFS/conf" 11 "github.com/swiftstack/ProxyFS/fs" 12 "github.com/swiftstack/ProxyFS/headhunter" 13 "github.com/swiftstack/ProxyFS/inode" 14 "github.com/swiftstack/ProxyFS/swiftclient" 15 "github.com/swiftstack/ProxyFS/trackedlock" 16 "github.com/swiftstack/ProxyFS/transitions" 17 "github.com/swiftstack/ProxyFS/utils" 18 ) 19 20 const ( 21 basenamePrefix = "__pfsworkout_" 22 ) 23 24 type rwTimesStruct struct { 25 writeDuration time.Duration 26 readDuration time.Duration 27 } 28 29 type rwSizeEachStruct struct { 30 name string 31 KiB uint64 32 dirTimes rwTimesStruct 33 fuseTimes rwTimesStruct 34 fsTimes rwTimesStruct 35 inodeTimes rwTimesStruct 36 swiftclientTimes rwTimesStruct 37 VolumeHandle fs.VolumeHandle // Only used if all threads use same file 38 FileInodeNumber inode.InodeNumber // Only used if all threads use same file 39 ObjectPath string // Only used if all threads use same object 40 } 41 42 var ( 43 dirPath string 44 doNextStepChan chan bool 45 mountPointName string 46 mutex trackedlock.Mutex 47 rwSizeTotal uint64 48 stepErrChan chan error 49 volumeList []string 50 volumeName string 51 headhunterVolumeHandle headhunter.VolumeHandle 52 ) 53 54 func usage(file *os.File) { 55 fmt.Fprintf(file, "Usage:\n") 56 fmt.Fprintf(file, " %v d threads rw-size-in-mb dir-path\n", os.Args[0]) 57 fmt.Fprintf(file, " %v [mfisru] threads rw-size-in-mb conf-file [section.option=value]*\n", os.Args[0]) 58 fmt.Fprintf(file, " where:\n") 59 fmt.Fprintf(file, " d run tests against specified target directory\n") 60 fmt.Fprintf(file, " m run tests against FUSE mount point\n") 61 fmt.Fprintf(file, " f run tests against package fs\n") 62 fmt.Fprintf(file, " i run tests against package inode\n") 63 fmt.Fprintf(file, " s run tests against package swiftclient\n") 64 fmt.Fprintf(file, " r run tests with random I/O instead of sequential\n") 65 fmt.Fprintf(file, " u run multiple readers/writers on same file against packages\n") 66 fmt.Fprintf(file, " threads number of threads (currently must be '1')\n") 67 fmt.Fprintf(file, " rw-size-in-mb number of MiB per thread per test case\n") 68 fmt.Fprintf(file, " dir-path target directory\n") 69 fmt.Fprintf(file, " conf-file input to conf.MakeConfMapFromFile()\n") 70 fmt.Fprintf(file, " [section.option=value]* optional input to conf.UpdateFromStrings()\n") 71 fmt.Fprintf(file, "\n") 72 fmt.Fprintf(file, "Note: At least one of f, i, or s must be specified\n") 73 fmt.Fprintf(file, "\n") 74 fmt.Fprintf(file, "The default is a sequential test on a different file per thread.\n") 75 fmt.Fprintf(file, " r specifies that the I/O is random for fs and inodee packages instead of sequential.\n") 76 fmt.Fprintf(file, " u specifies that all threads operate on the same file.\n") 77 } 78 79 func main() { 80 var ( 81 confMap conf.ConfMap 82 83 proxyfsRequired = false 84 85 doDirWorkout = false 86 doFuseWorkout = false 87 doFsWorkout = false 88 doInodeWorkout = false 89 doSwiftclientWorkout = false 90 doSameFile = false 91 doRandomIO = false 92 93 primaryPeer string 94 volumeGroupToCheck string 95 volumeGroupToUse string 96 volumeGroupList []string 97 volumeList []string 98 whoAmI string 99 100 timeBeforeWrites time.Time 101 timeAfterWrites time.Time 102 timeBeforeReads time.Time 103 timeAfterReads time.Time 104 105 bandwidthNumerator float64 106 107 rwSizeEachArray = [...]*rwSizeEachStruct{ 108 &rwSizeEachStruct{name: " 4 KiB", KiB: 4}, 109 &rwSizeEachStruct{name: " 8 KiB", KiB: 8}, 110 &rwSizeEachStruct{name: "16 KiB", KiB: 16}, 111 &rwSizeEachStruct{name: "32 KiB", KiB: 32}, 112 &rwSizeEachStruct{name: "64 KiB", KiB: 64}, 113 } 114 ) 115 116 // Parse arguments 117 118 if 5 > len(os.Args) { 119 usage(os.Stderr) 120 os.Exit(1) 121 } 122 123 for _, workoutSelector := range os.Args[1] { 124 switch workoutSelector { 125 case 'd': 126 doDirWorkout = true 127 case 'm': 128 proxyfsRequired = true 129 doFuseWorkout = true 130 case 'f': 131 proxyfsRequired = true 132 doFsWorkout = true 133 case 'i': 134 proxyfsRequired = true 135 doInodeWorkout = true 136 case 's': 137 proxyfsRequired = true 138 doSwiftclientWorkout = true 139 case 'r': 140 proxyfsRequired = true 141 doRandomIO = true 142 case 'u': 143 proxyfsRequired = true 144 doSameFile = true 145 default: 146 fmt.Fprintf(os.Stderr, "workoutSelector ('%v') must be one of 'd', 'm', 'f', 'i', or 's'\n", string(workoutSelector)) 147 os.Exit(1) 148 } 149 } 150 151 if doDirWorkout { 152 if doFuseWorkout || doFsWorkout || doInodeWorkout || doSwiftclientWorkout { 153 fmt.Fprintf(os.Stderr, "workoutSelectors cannot include both 'd' and any of 'm', 'f', 'i', or 's'\n") 154 os.Exit(1) 155 } 156 } else { 157 if !(doFuseWorkout || doFsWorkout || doInodeWorkout || doSwiftclientWorkout) { 158 fmt.Fprintf(os.Stderr, "workoutSelectors must include at least one of 'm', 'f', 'i', or 's' when 'd' is not selected") 159 os.Exit(1) 160 } 161 } 162 163 threads, err := strconv.ParseUint(os.Args[2], 10, 64) 164 if nil != err { 165 fmt.Fprintf(os.Stderr, "strconv.ParseUint(\"%v\", 10, 64) failed: %v\n", os.Args[2], err) 166 os.Exit(1) 167 } 168 if 0 == threads { 169 fmt.Fprintf(os.Stderr, "threads must be a positive number\n") 170 os.Exit(1) 171 } 172 173 rwSizeTotalMiB, err := strconv.ParseUint(os.Args[3], 10, 64) 174 if nil != err { 175 fmt.Fprintf(os.Stderr, "strconv.ParseUint(\"%v\", 10, 64) failed: %v\n", os.Args[3], err) 176 os.Exit(1) 177 } 178 179 rwSizeTotal = rwSizeTotalMiB * 1024 * 1024 180 181 if doDirWorkout { 182 dirPath = os.Args[4] 183 } else { 184 confMap, err = conf.MakeConfMapFromFile(os.Args[4]) 185 if nil != err { 186 fmt.Fprintf(os.Stderr, "conf.MakeConfMapFromFile(\"%v\") failed: %v\n", os.Args[4], err) 187 os.Exit(1) 188 } 189 190 if 5 < len(os.Args) { 191 err = confMap.UpdateFromStrings(os.Args[5:]) 192 if nil != err { 193 fmt.Fprintf(os.Stderr, "confMap.UpdateFromStrings(%#v) failed: %v\n", os.Args[5:], err) 194 os.Exit(1) 195 } 196 } 197 198 // Upgrade confMap if necessary 199 err = transitions.UpgradeConfMapIfNeeded(confMap) 200 if nil != err { 201 fmt.Fprintf(os.Stderr, "Failed to upgrade config: %v", err) 202 os.Exit(1) 203 } 204 205 // Select first Volume of the first "active" VolumeGroup in [FSGlobals]VolumeGroupList 206 207 whoAmI, err = confMap.FetchOptionValueString("Cluster", "WhoAmI") 208 if nil != err { 209 fmt.Fprintf(os.Stderr, "confMap.FetchOptionValueString(\"Cluster\", \"WhoAmI\") failed: %v\n", err) 210 os.Exit(1) 211 } 212 213 volumeGroupList, err = confMap.FetchOptionValueStringSlice("FSGlobals", "VolumeGroupList") 214 if nil != err { 215 fmt.Fprintf(os.Stderr, "confMap.FetchOptionValueStringSlice(\"FSGlobals\", \"VolumeGroupList\") failed: %v\n", err) 216 os.Exit(1) 217 } 218 219 volumeGroupToUse = "" 220 221 for _, volumeGroupToCheck = range volumeGroupList { 222 primaryPeer, err = confMap.FetchOptionValueString("VolumeGroup:"+volumeGroupToCheck, "PrimaryPeer") 223 if nil != err { 224 fmt.Fprintf(os.Stderr, "confMap.FetchOptionValueString(\"VolumeGroup:%s\", \"PrimaryPeer\") failed: %v\n", volumeGroupToCheck, err) 225 os.Exit(1) 226 } 227 if whoAmI == primaryPeer { 228 volumeGroupToUse = volumeGroupToCheck 229 break 230 } 231 } 232 233 if "" == volumeGroupToUse { 234 fmt.Fprintf(os.Stderr, "confMap didn't contain an \"active\" VolumeGroup") 235 os.Exit(1) 236 } 237 238 volumeList, err = confMap.FetchOptionValueStringSlice("VolumeGroup:"+volumeGroupToUse, "VolumeList") 239 if nil != err { 240 fmt.Fprintf(os.Stderr, "confMap.FetchOptionValueStringSlice(\"VolumeGroup:%s\", \"PrimaryPeer\") failed: %v\n", volumeGroupToUse, err) 241 os.Exit(1) 242 } 243 if 1 > len(volumeList) { 244 fmt.Fprintf(os.Stderr, "confMap.FetchOptionValueStringSlice(\"VolumeGroup:%s\", \"VolumeList\") returned empty volumeList", volumeGroupToUse) 245 os.Exit(1) 246 } 247 248 volumeName = volumeList[0] 249 250 mountPointName, err = confMap.FetchOptionValueString("Volume:"+volumeName, "FUSEMountPointName") 251 if nil != err { 252 fmt.Fprintf(os.Stderr, "confMap.FetchOptionValueString(\"Volume:%s\", \"FUSEMountPointName\") failed: %v\n", volumeName, err) 253 os.Exit(1) 254 } 255 } 256 257 if proxyfsRequired { 258 // Start up needed ProxyFS components 259 260 err = transitions.Up(confMap) 261 if nil != err { 262 fmt.Fprintf(os.Stderr, "transitions.Up() failed: %v\n", err) 263 os.Exit(1) 264 } 265 266 headhunterVolumeHandle, err = headhunter.FetchVolumeHandle(volumeName) 267 if nil != err { 268 fmt.Fprintf(os.Stderr, "headhunter.FetchVolumeHandle(\"%s\") failed: %v\n", volumeName, err) 269 os.Exit(1) 270 } 271 } 272 273 // Perform tests 274 275 stepErrChan = make(chan error, threads) 276 doNextStepChan = make(chan bool, threads) 277 278 if doDirWorkout { 279 for _, rwSizeEach := range rwSizeEachArray { 280 // Do initialization step 281 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 282 go dirWorkout(rwSizeEach, threadIndex) 283 } 284 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 285 err = <-stepErrChan 286 if nil != err { 287 fmt.Fprintf(os.Stderr, "dirWorkout() initialization step returned: %v\n", err) 288 os.Exit(1) 289 } 290 } 291 // Do writes step 292 timeBeforeWrites = time.Now() 293 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 294 doNextStepChan <- true 295 } 296 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 297 err = <-stepErrChan 298 if nil != err { 299 fmt.Fprintf(os.Stderr, "dirWorkout() write step returned: %v\n", err) 300 os.Exit(1) 301 } 302 } 303 timeAfterWrites = time.Now() 304 // Do reads step 305 timeBeforeReads = time.Now() 306 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 307 doNextStepChan <- true 308 } 309 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 310 err = <-stepErrChan 311 if nil != err { 312 fmt.Fprintf(os.Stderr, "dirWorkout() read step returned: %v\n", err) 313 os.Exit(1) 314 } 315 } 316 timeAfterReads = time.Now() 317 // Do shutdown step 318 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 319 doNextStepChan <- true 320 } 321 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 322 err = <-stepErrChan 323 if nil != err { 324 fmt.Fprintf(os.Stderr, "dirWorkout() shutdown step returned: %v\n", err) 325 os.Exit(1) 326 } 327 } 328 329 rwSizeEach.dirTimes.writeDuration = timeAfterWrites.Sub(timeBeforeWrites) 330 rwSizeEach.dirTimes.readDuration = timeAfterReads.Sub(timeBeforeReads) 331 } 332 } 333 334 if doFuseWorkout { 335 for _, rwSizeEach := range rwSizeEachArray { 336 // Do initialization step 337 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 338 go fuseWorkout(rwSizeEach, threadIndex) 339 } 340 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 341 err = <-stepErrChan 342 if nil != err { 343 fmt.Fprintf(os.Stderr, "fuseWorkout() initialization step returned: %v\n", err) 344 os.Exit(1) 345 } 346 } 347 // Do writes step 348 timeBeforeWrites = time.Now() 349 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 350 doNextStepChan <- true 351 } 352 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 353 err = <-stepErrChan 354 if nil != err { 355 fmt.Fprintf(os.Stderr, "fuseWorkout() write step returned: %v\n", err) 356 os.Exit(1) 357 } 358 } 359 timeAfterWrites = time.Now() 360 // Do reads step 361 timeBeforeReads = time.Now() 362 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 363 doNextStepChan <- true 364 } 365 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 366 err = <-stepErrChan 367 if nil != err { 368 fmt.Fprintf(os.Stderr, "fuseWorkout() read step returned: %v\n", err) 369 os.Exit(1) 370 } 371 } 372 timeAfterReads = time.Now() 373 // Do shutdown step 374 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 375 doNextStepChan <- true 376 } 377 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 378 err = <-stepErrChan 379 if nil != err { 380 fmt.Fprintf(os.Stderr, "fuseWorkout() shutdown step returned: %v\n", err) 381 os.Exit(1) 382 } 383 } 384 385 rwSizeEach.fuseTimes.writeDuration = timeAfterWrites.Sub(timeBeforeWrites) 386 rwSizeEach.fuseTimes.readDuration = timeAfterReads.Sub(timeBeforeReads) 387 } 388 } 389 390 if doFsWorkout { 391 for _, rwSizeEach := range rwSizeEachArray { 392 var fileName string 393 394 // If we are doing the operations on the same file for all threads, create the file now. 395 if doSameFile { 396 // Save off MountHandle and FileInodeNumber in rwSizeEach since all threads need this 397 err, rwSizeEach.VolumeHandle, rwSizeEach.FileInodeNumber, fileName = createFsFile() 398 if nil != err { 399 // In an error, no point in continuing. Just break from this for loop. 400 break 401 } 402 } 403 404 // Do initialization step 405 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 406 go fsWorkout(rwSizeEach, threadIndex, doSameFile, doRandomIO) 407 } 408 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 409 err = <-stepErrChan 410 if nil != err { 411 fmt.Fprintf(os.Stderr, "fsWorkout() initialization step returned: %v\n", err) 412 os.Exit(1) 413 } 414 } 415 // Do writes step 416 timeBeforeWrites = time.Now() 417 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 418 doNextStepChan <- true 419 } 420 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 421 err = <-stepErrChan 422 if nil != err { 423 fmt.Fprintf(os.Stderr, "fsWorkout() write step returned: %v\n", err) 424 os.Exit(1) 425 } 426 } 427 timeAfterWrites = time.Now() 428 // Do reads step 429 timeBeforeReads = time.Now() 430 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 431 doNextStepChan <- true 432 } 433 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 434 err = <-stepErrChan 435 if nil != err { 436 fmt.Fprintf(os.Stderr, "fsWorkout() read step returned: %v\n", err) 437 os.Exit(1) 438 } 439 } 440 timeAfterReads = time.Now() 441 // Do shutdown step 442 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 443 doNextStepChan <- true 444 } 445 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 446 err = <-stepErrChan 447 if nil != err { 448 fmt.Fprintf(os.Stderr, "fsWorkout() shutdown step returned: %v\n", err) 449 os.Exit(1) 450 } 451 } 452 453 // Remove file if all threads used same file 454 if doSameFile { 455 _ = unlinkFsFile(rwSizeEach.VolumeHandle, fileName) 456 } 457 458 rwSizeEach.fsTimes.writeDuration = timeAfterWrites.Sub(timeBeforeWrites) 459 rwSizeEach.fsTimes.readDuration = timeAfterReads.Sub(timeBeforeReads) 460 } 461 } 462 463 if doInodeWorkout { 464 for _, rwSizeEach := range rwSizeEachArray { 465 // If we are doing the operations on the same object for all threads, create the object now. 466 if doSameFile { 467 err, rwSizeEach.FileInodeNumber = createInode() 468 if nil != err { 469 // In an error, no point in continuing. Just break from this for loop. 470 break 471 } 472 } 473 474 // Do initialization step 475 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 476 go inodeWorkout(rwSizeEach, threadIndex, doSameFile, doRandomIO) 477 } 478 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 479 err = <-stepErrChan 480 if nil != err { 481 fmt.Fprintf(os.Stderr, "inodeWorkout() initialization step returned: %v\n", err) 482 os.Exit(1) 483 } 484 } 485 // Do writes step 486 timeBeforeWrites = time.Now() 487 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 488 doNextStepChan <- true 489 } 490 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 491 err = <-stepErrChan 492 if nil != err { 493 fmt.Fprintf(os.Stderr, "inodeWorkout() write step returned: %v\n", err) 494 os.Exit(1) 495 } 496 } 497 timeAfterWrites = time.Now() 498 // Do reads step 499 timeBeforeReads = time.Now() 500 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 501 doNextStepChan <- true 502 } 503 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 504 err = <-stepErrChan 505 if nil != err { 506 fmt.Fprintf(os.Stderr, "inodeWorkout() read step returned: %v\n", err) 507 os.Exit(1) 508 } 509 } 510 timeAfterReads = time.Now() 511 // Do shutdown step 512 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 513 doNextStepChan <- true 514 } 515 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 516 err = <-stepErrChan 517 if nil != err { 518 fmt.Fprintf(os.Stderr, "inodeWorkout() shutdown step returned: %v\n", err) 519 os.Exit(1) 520 } 521 } 522 523 // Remove inode if all threads use same inode 524 if doSameFile { 525 _ = destroyInode(rwSizeEach.FileInodeNumber) 526 } 527 528 rwSizeEach.inodeTimes.writeDuration = timeAfterWrites.Sub(timeBeforeWrites) 529 rwSizeEach.inodeTimes.readDuration = timeAfterReads.Sub(timeBeforeReads) 530 } 531 } 532 533 if doSwiftclientWorkout { 534 for _, rwSizeEach := range rwSizeEachArray { 535 536 // Create object used by all threads 537 if doSameFile { 538 err, rwSizeEach.ObjectPath = createObject() 539 if nil != err { 540 // In an error, no point in continuing. Just break from this for loop. 541 break 542 } 543 } 544 545 // Do initialization step 546 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 547 go swiftclientWorkout(rwSizeEach, threadIndex, doSameFile) 548 } 549 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 550 err = <-stepErrChan 551 if nil != err { 552 fmt.Fprintf(os.Stderr, "swiftclientWorkout() initialization step returned: %v\n", err) 553 os.Exit(1) 554 } 555 } 556 // Do writes step 557 timeBeforeWrites = time.Now() 558 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 559 doNextStepChan <- true 560 } 561 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 562 err = <-stepErrChan 563 if nil != err { 564 fmt.Fprintf(os.Stderr, "swiftclientWorkout() write step returned: %v\n", err) 565 os.Exit(1) 566 } 567 } 568 timeAfterWrites = time.Now() 569 // Do reads step 570 timeBeforeReads = time.Now() 571 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 572 doNextStepChan <- true 573 } 574 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 575 err = <-stepErrChan 576 if nil != err { 577 fmt.Fprintf(os.Stderr, "swiftclientWorkout() read step returned: %v\n", err) 578 os.Exit(1) 579 } 580 } 581 timeAfterReads = time.Now() 582 // Do shutdown step 583 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 584 doNextStepChan <- true 585 } 586 for threadIndex := uint64(0); threadIndex < threads; threadIndex++ { 587 err = <-stepErrChan 588 if nil != err { 589 fmt.Fprintf(os.Stderr, "swiftclientWorkout() shutdown step returned: %v\n", err) 590 os.Exit(1) 591 } 592 } 593 594 // Remove object if all threads use same object 595 if doSameFile { 596 _ = deleteObject(rwSizeEach.ObjectPath) 597 } 598 599 rwSizeEach.swiftclientTimes.writeDuration = timeAfterWrites.Sub(timeBeforeWrites) 600 rwSizeEach.swiftclientTimes.readDuration = timeAfterReads.Sub(timeBeforeReads) 601 } 602 } 603 604 if proxyfsRequired { 605 // Stop ProxyFS components launched above 606 607 err = transitions.Down(confMap) 608 if nil != err { 609 fmt.Fprintf(os.Stderr, "transitions.Down() failed: %v\n", err) 610 os.Exit(1) 611 } 612 } 613 614 // Report results 615 616 bandwidthNumerator = float64(threads*rwSizeTotal) / float64(1024*1024) 617 618 var fileAccess = "Sequential" 619 var threadFile = "different files" 620 if doSameFile { 621 threadFile = "same file" 622 } 623 if doRandomIO { 624 fileAccess = "Random" 625 } 626 627 fmt.Printf(" I/O type: %v and %v per thread\n", fileAccess, threadFile) 628 fmt.Printf(" (in MiB/sec) ") 629 for _, rwSizeEach := range rwSizeEachArray { 630 fmt.Printf(" %s", rwSizeEach.name) 631 } 632 fmt.Println() 633 634 if doDirWorkout { 635 fmt.Printf("dir read ") 636 for _, rwSizeEach := range rwSizeEachArray { 637 fmt.Printf(" %8.2f", bandwidthNumerator/rwSizeEach.dirTimes.readDuration.Seconds()) 638 } 639 fmt.Println() 640 fmt.Printf(" write ") 641 for _, rwSizeEach := range rwSizeEachArray { 642 fmt.Printf(" %8.2f", bandwidthNumerator/rwSizeEach.dirTimes.writeDuration.Seconds()) 643 } 644 fmt.Println() 645 } 646 647 if doFuseWorkout { 648 fmt.Printf("fuse read ") 649 for _, rwSizeEach := range rwSizeEachArray { 650 fmt.Printf(" %8.2f", bandwidthNumerator/rwSizeEach.fuseTimes.readDuration.Seconds()) 651 } 652 fmt.Println() 653 fmt.Printf(" write ") 654 for _, rwSizeEach := range rwSizeEachArray { 655 fmt.Printf(" %8.2f", bandwidthNumerator/rwSizeEach.fuseTimes.writeDuration.Seconds()) 656 } 657 fmt.Println() 658 } 659 660 if doFsWorkout { 661 fmt.Printf("fs read ") 662 for _, rwSizeEach := range rwSizeEachArray { 663 fmt.Printf(" %8.2f", bandwidthNumerator/rwSizeEach.fsTimes.readDuration.Seconds()) 664 } 665 fmt.Println() 666 fmt.Printf(" write ") 667 for _, rwSizeEach := range rwSizeEachArray { 668 fmt.Printf(" %8.2f", bandwidthNumerator/rwSizeEach.fsTimes.writeDuration.Seconds()) 669 } 670 fmt.Println() 671 } 672 673 if doInodeWorkout { 674 fmt.Printf("inode read ") 675 for _, rwSizeEach := range rwSizeEachArray { 676 fmt.Printf(" %8.2f", bandwidthNumerator/rwSizeEach.inodeTimes.readDuration.Seconds()) 677 } 678 fmt.Println() 679 fmt.Printf(" write ") 680 for _, rwSizeEach := range rwSizeEachArray { 681 fmt.Printf(" %8.2f", bandwidthNumerator/rwSizeEach.inodeTimes.writeDuration.Seconds()) 682 } 683 fmt.Println() 684 } 685 686 if doSwiftclientWorkout { 687 fmt.Printf("swiftclient read ") 688 for _, rwSizeEach := range rwSizeEachArray { 689 fmt.Printf(" %8.2f", bandwidthNumerator/rwSizeEach.swiftclientTimes.readDuration.Seconds()) 690 } 691 fmt.Println() 692 fmt.Printf(" write ") 693 for _, rwSizeEach := range rwSizeEachArray { 694 fmt.Printf(" %8.2f", bandwidthNumerator/rwSizeEach.swiftclientTimes.writeDuration.Seconds()) 695 } 696 fmt.Println() 697 } 698 } 699 700 func dirWorkout(rwSizeEach *rwSizeEachStruct, threadIndex uint64) { 701 fileName := fmt.Sprintf("%s/%s%016X", dirPath, basenamePrefix, threadIndex) 702 703 file, err := os.Create(fileName) 704 if nil != err { 705 stepErrChan <- fmt.Errorf("os.Create(\"%v\") failed: %v\n", fileName, err) 706 return 707 } 708 709 rwSizeRequested := rwSizeEach.KiB * 1024 710 711 bufWritten := make([]byte, rwSizeRequested) 712 for i := uint64(0); i < rwSizeRequested; i++ { 713 bufWritten[i] = 0 714 } 715 716 bufRead := make([]byte, rwSizeRequested) 717 718 stepErrChan <- nil 719 _ = <-doNextStepChan 720 721 for rwOffset := uint64(0); rwOffset < rwSizeTotal; rwOffset += rwSizeRequested { 722 _, err = file.WriteAt(bufWritten, int64(rwOffset)) 723 if nil != err { 724 stepErrChan <- fmt.Errorf("file.WriteAt(bufWritten, int64(rwOffset)) failed: %v\n", err) 725 return 726 } 727 } 728 729 err = file.Sync() 730 if nil != err { 731 stepErrChan <- fmt.Errorf("file.Sync() failed: %v\n", err) 732 return 733 } 734 735 stepErrChan <- nil 736 _ = <-doNextStepChan 737 738 for rwOffset := uint64(0); rwOffset < rwSizeTotal; rwOffset += rwSizeRequested { 739 _, err = file.ReadAt(bufRead, int64(rwOffset)) 740 if nil != err { 741 stepErrChan <- fmt.Errorf("file.ReadAt(bufRead, int64(rwOffset)) failed: %v\n", err) 742 return 743 } 744 } 745 746 stepErrChan <- nil 747 _ = <-doNextStepChan 748 749 err = file.Close() 750 if nil != err { 751 stepErrChan <- fmt.Errorf("file.Close() failed: %v\n", err) 752 return 753 } 754 err = os.Remove(fileName) 755 if nil != err { 756 stepErrChan <- fmt.Errorf("os.Remove(fileName) failed: %v\n", err) 757 return 758 } 759 760 stepErrChan <- nil 761 } 762 763 func fuseWorkout(rwSizeEach *rwSizeEachStruct, threadIndex uint64) { 764 nonce := headhunterVolumeHandle.FetchNonce() 765 766 fileName := fmt.Sprintf("%s/%s%016X", mountPointName, basenamePrefix, nonce) 767 768 file, err := os.Create(fileName) 769 if nil != err { 770 stepErrChan <- fmt.Errorf("os.Create(\"%v\") failed: %v\n", fileName, err) 771 return 772 } 773 774 rwSizeRequested := rwSizeEach.KiB * 1024 775 776 bufWritten := make([]byte, rwSizeRequested) 777 for i := uint64(0); i < rwSizeRequested; i++ { 778 bufWritten[i] = 0 779 } 780 781 bufRead := make([]byte, rwSizeRequested) 782 783 stepErrChan <- nil 784 _ = <-doNextStepChan 785 786 for rwOffset := uint64(0); rwOffset < rwSizeTotal; rwOffset += rwSizeRequested { 787 _, err = file.WriteAt(bufWritten, int64(rwOffset)) 788 if nil != err { 789 stepErrChan <- fmt.Errorf("file.WriteAt(bufWritten, int64(rwOffset)) failed: %v\n", err) 790 return 791 } 792 } 793 794 err = file.Sync() 795 if nil != err { 796 stepErrChan <- fmt.Errorf("file.Sync() failed: %v\n", err) 797 return 798 } 799 800 stepErrChan <- nil 801 _ = <-doNextStepChan 802 803 for rwOffset := uint64(0); rwOffset < rwSizeTotal; rwOffset += rwSizeRequested { 804 _, err = file.ReadAt(bufRead, int64(rwOffset)) 805 if nil != err { 806 stepErrChan <- fmt.Errorf("file.ReadAt(bufRead, int64(rwOffset)) failed: %v\n", err) 807 return 808 } 809 } 810 811 stepErrChan <- nil 812 _ = <-doNextStepChan 813 814 err = file.Close() 815 if nil != err { 816 stepErrChan <- fmt.Errorf("file.Close() failed: %v\n", err) 817 return 818 } 819 err = os.Remove(fileName) 820 if nil != err { 821 stepErrChan <- fmt.Errorf("os.Remove(fileName) failed: %v\n", err) 822 return 823 } 824 825 stepErrChan <- nil 826 } 827 828 func createFsFile() (err error, volumeHandle fs.VolumeHandle, fileInodeNumber inode.InodeNumber, fileName string) { 829 volumeHandle, err = fs.FetchVolumeHandleByVolumeName(volumeName) 830 if nil != err { 831 stepErrChan <- fmt.Errorf("fs.FetchVolumeHandleByVolumeName(\"%v\") failed: %v\n", volumeName, err) 832 return 833 } 834 835 nonce := headhunterVolumeHandle.FetchNonce() 836 837 fileName = fmt.Sprintf("%s%016X", basenamePrefix, nonce) 838 839 fileInodeNumber, err = volumeHandle.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, fileName, inode.PosixModePerm) 840 if nil != err { 841 stepErrChan <- fmt.Errorf("fs.Create(,,,, fileName==\"%s\", inode.PosixModePerm) failed: %v\n", fileName, err) 842 return 843 } 844 return 845 } 846 847 func unlinkFsFile(volumeHandle fs.VolumeHandle, fileName string) (err error) { 848 err = volumeHandle.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, fileName) 849 if nil != err { 850 stepErrChan <- fmt.Errorf("fs.Unlink(,,,, rootInodeNumber, \"%v\") failed: %v\n", fileName, err) 851 return 852 } 853 return 854 } 855 856 func fsWorkout(rwSizeEach *rwSizeEachStruct, threadIndex uint64, doSameFile bool, doRandomIO bool) { 857 var ( 858 err error 859 fileInodeNumber inode.InodeNumber 860 fileName string 861 volumeHandle fs.VolumeHandle 862 ) 863 864 if !doSameFile { 865 // Create the file for this thread 866 err, volumeHandle, fileInodeNumber, fileName = createFsFile() 867 if nil != err { 868 return 869 } 870 } else { 871 // File was already created during main() 872 volumeHandle = rwSizeEach.VolumeHandle 873 fileInodeNumber = rwSizeEach.FileInodeNumber 874 } 875 rwSizeRequested := rwSizeEach.KiB * 1024 876 877 bufWritten := make([]byte, rwSizeRequested) 878 for i := uint64(0); i < rwSizeRequested; i++ { 879 bufWritten[i] = 0 880 } 881 882 stepErrChan <- nil 883 _ = <-doNextStepChan 884 885 if doRandomIO { 886 var rwOffset int64 887 888 // Calculate number of I/Os to do since we cannot use size of file in the random case. 889 var numberIOsNeeded uint64 = rwSizeTotal / rwSizeRequested 890 for i := uint64(0); i < numberIOsNeeded; i++ { 891 // For the first I/O, we set it to (rwSizeTotal - rwSizeRequested). This guarantees that we write 892 // the full size of the buffer. 893 if i == 0 { 894 rwOffset = int64(rwSizeTotal - rwSizeRequested) 895 } else { 896 897 // Pick a random offset within the buffer. We back off from end of buffer by rwSizeRequested 898 // to make sure we do not go past end of file. 899 rwOffset = rand.Int63n(int64(rwSizeTotal - rwSizeRequested)) 900 } 901 rwSizeDelivered, err := volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInodeNumber, uint64(rwOffset), bufWritten, nil) 902 if nil != err { 903 stepErrChan <- fmt.Errorf("fs.Write(,,,, fileInodeNumber, rwOffset, bufWritten) failed: %v\n", err) 904 return 905 } 906 if rwSizeRequested != rwSizeDelivered { 907 stepErrChan <- fmt.Errorf("fs.Write(,,,, fileInodeNumber, rwOffset, bufWritten) failed to transfer all requested bytes\n") 908 return 909 } 910 } 911 } else { 912 913 for rwOffset := uint64(0); rwOffset < rwSizeTotal; rwOffset += rwSizeRequested { 914 rwSizeDelivered, err := volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInodeNumber, rwOffset, bufWritten, nil) 915 if nil != err { 916 stepErrChan <- fmt.Errorf("fs.Write(,,,, fileInodeNumber, rwOffset, bufWritten) failed: %v\n", err) 917 return 918 } 919 if rwSizeRequested != rwSizeDelivered { 920 stepErrChan <- fmt.Errorf("fs.Write(,,,, fileInodeNumber, rwOffset, bufWritten) failed to transfer all requested bytes\n") 921 return 922 } 923 } 924 } 925 926 err = volumeHandle.Flush(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInodeNumber) 927 if nil != err { 928 stepErrChan <- fmt.Errorf("fs.Flush(,,,, fileInodeNumber) failed: %v\n", err) 929 return 930 } 931 932 stepErrChan <- nil 933 _ = <-doNextStepChan 934 935 if doRandomIO { 936 // Calculate number of I/Os to do since we cannot use size of file in the random case. 937 var numberIOsNeeded uint64 = rwSizeTotal / rwSizeRequested 938 for i := uint64(0); i < numberIOsNeeded; i++ { 939 940 // Calculate random offset 941 rwOffset := uint64(rand.Int63n(int64(rwSizeTotal - rwSizeRequested))) 942 943 bufRead, err := volumeHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInodeNumber, rwOffset, rwSizeRequested, nil) 944 if nil != err { 945 stepErrChan <- fmt.Errorf("fs.Read(,,,, fileInodeNumber, rwOffset, rwSizeRequested) failed: %v\n", err) 946 return 947 } 948 if rwSizeRequested != uint64(len(bufRead)) { 949 stepErrChan <- fmt.Errorf("fs.Read(,,,, fileInodeNumber, rwOffset, rwSizeRequested) failed to transfer all requested bytes\n") 950 return 951 } 952 } 953 } else { 954 for rwOffset := uint64(0); rwOffset < rwSizeTotal; rwOffset += rwSizeRequested { 955 bufRead, err := volumeHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInodeNumber, rwOffset, rwSizeRequested, nil) 956 if nil != err { 957 stepErrChan <- fmt.Errorf("fs.Read(,,,, fileInodeNumber, rwOffset, rwSizeRequested) failed: %v\n", err) 958 return 959 } 960 if rwSizeRequested != uint64(len(bufRead)) { 961 stepErrChan <- fmt.Errorf("fs.Read(,,,, fileInodeNumber, rwOffset, rwSizeRequested) failed to transfer all requested bytes\n") 962 return 963 } 964 } 965 } 966 967 stepErrChan <- nil 968 _ = <-doNextStepChan 969 970 if !doSameFile { 971 err = unlinkFsFile(volumeHandle, fileName) 972 if nil != err { 973 return 974 } 975 } 976 977 stepErrChan <- nil 978 } 979 980 func createInode() (err error, fileInodeNumber inode.InodeNumber) { 981 mutex.Lock() 982 volumeHandle, err := inode.FetchVolumeHandle(volumeName) 983 mutex.Unlock() 984 if nil != err { 985 stepErrChan <- fmt.Errorf("inode.FetchVolumeHandle(\"%v\") failed: %v\n", volumeName, err) 986 return 987 } 988 989 fileInodeNumber, err = volumeHandle.CreateFile(inode.PosixModePerm, inode.InodeUserID(0), inode.InodeGroupID(0)) 990 if nil != err { 991 stepErrChan <- fmt.Errorf("volumeHandle.CreateFile(inode.PosixModePerm, inode.InodeUserID(0), inode.InodeGroupID(0)) failed: %v\n", err) 992 return 993 } 994 return 995 } 996 997 func destroyInode(fileInodeNumber inode.InodeNumber) (err error) { 998 mutex.Lock() 999 volumeHandle, err := inode.FetchVolumeHandle(volumeName) 1000 mutex.Unlock() 1001 if nil != err { 1002 stepErrChan <- fmt.Errorf("inode.FetchVolumeHandle(\"%v\") failed: %v\n", volumeName, err) 1003 return 1004 } 1005 err = volumeHandle.Purge(fileInodeNumber) 1006 if nil != err { 1007 stepErrChan <- fmt.Errorf("volumeHandle.Purge(fileInodeNumber) failed: %v\n", err) 1008 return 1009 } 1010 err = volumeHandle.Destroy(fileInodeNumber) 1011 if nil != err { 1012 stepErrChan <- fmt.Errorf("volumeHandle.Destroy(fileInodeNumber) failed: %v\n", err) 1013 return 1014 } 1015 return 1016 } 1017 1018 func inodeWorkout(rwSizeEach *rwSizeEachStruct, threadIndex uint64, doSameFile bool, doRandomIO bool) { 1019 mutex.Lock() 1020 volumeHandle, err := inode.FetchVolumeHandle(volumeName) 1021 mutex.Unlock() 1022 1023 if nil != err { 1024 stepErrChan <- fmt.Errorf("inode.FetchVolumeHandle(\"%v\") failed: %v\n", volumeName, err) 1025 return 1026 } 1027 1028 var fileInodeNumber inode.InodeNumber 1029 if !doSameFile { 1030 err, fileInodeNumber = createInode() 1031 if nil != err { 1032 return 1033 } 1034 } else { 1035 fileInodeNumber = rwSizeEach.FileInodeNumber 1036 } 1037 1038 rwSizeRequested := rwSizeEach.KiB * 1024 1039 1040 bufWritten := make([]byte, rwSizeRequested) 1041 for i := uint64(0); i < rwSizeRequested; i++ { 1042 bufWritten[i] = 0 1043 } 1044 1045 stepErrChan <- nil 1046 _ = <-doNextStepChan 1047 1048 if doRandomIO { 1049 var rwOffset int64 1050 1051 // Calculate number of I/Os to do since we cannot use size of file in the random case. 1052 var numberIOsNeeded uint64 = rwSizeTotal / rwSizeRequested 1053 for i := uint64(0); i < numberIOsNeeded; i++ { 1054 1055 // For the first I/O, we set it to (rwSizeTotal - rwSizeRequested). This guarantees that we write 1056 // the full size of the buffer. 1057 if i == 0 { 1058 rwOffset = int64(rwSizeTotal - rwSizeRequested) 1059 } else { 1060 1061 // Pick a random offset within the buffer. We back off from end of buffer by rwSizeRequested 1062 // to make sure we do not go past end of file. 1063 rwOffset = rand.Int63n(int64(rwSizeTotal - rwSizeRequested)) 1064 } 1065 err = volumeHandle.Write(fileInodeNumber, uint64(rwOffset), bufWritten, nil) 1066 if nil != err { 1067 stepErrChan <- fmt.Errorf("volumeHandle.Write(fileInodeNumber, rwOffset, bufWritten) failed: %v\n", err) 1068 return 1069 } 1070 } 1071 } else { 1072 for rwOffset := uint64(0); rwOffset < rwSizeTotal; rwOffset += rwSizeRequested { 1073 err = volumeHandle.Write(fileInodeNumber, rwOffset, bufWritten, nil) 1074 if nil != err { 1075 stepErrChan <- fmt.Errorf("volumeHandle.Write(fileInodeNumber, rwOffset, bufWritten) failed: %v\n", err) 1076 return 1077 } 1078 } 1079 } 1080 1081 err = volumeHandle.Flush(fileInodeNumber, false) 1082 if nil != err { 1083 stepErrChan <- fmt.Errorf("volumeHandle.Flush(rwSizeEach.FileInodeNumber, false) failed: %v\n", err) 1084 return 1085 } 1086 1087 stepErrChan <- nil 1088 _ = <-doNextStepChan 1089 1090 if doRandomIO { 1091 // Calculate number of I/Os to do since we cannot use size of file in the random case. 1092 var numberIOsNeeded uint64 = rwSizeTotal / rwSizeRequested 1093 for i := uint64(0); i < numberIOsNeeded; i++ { 1094 1095 // Calculate random offset 1096 rwOffset := uint64(rand.Int63n(int64(rwSizeTotal - rwSizeRequested))) 1097 bufRead, err := volumeHandle.Read(fileInodeNumber, rwOffset, rwSizeRequested, nil) 1098 if nil != err { 1099 stepErrChan <- fmt.Errorf("volumeHandle.Read(rwSizeEach.FileInodeNumber, rwOffset, rwSizeRequested) failed: %v\n", err) 1100 return 1101 } 1102 if rwSizeRequested != uint64(len(bufRead)) { 1103 stepErrChan <- fmt.Errorf("volumeHandle.Read(rwSizeEach.FileInodeNumber, rwOffset, rwSizeRequested) failed to transfer all requested bytes\n") 1104 return 1105 } 1106 } 1107 } else { 1108 for rwOffset := uint64(0); rwOffset < rwSizeTotal; rwOffset += rwSizeRequested { 1109 bufRead, err := volumeHandle.Read(fileInodeNumber, rwOffset, rwSizeRequested, nil) 1110 if nil != err { 1111 stepErrChan <- fmt.Errorf("volumeHandle.Read(rwSizeEach.FileInodeNumber, rwOffset, rwSizeRequested) failed: %v\n", err) 1112 return 1113 } 1114 if rwSizeRequested != uint64(len(bufRead)) { 1115 stepErrChan <- fmt.Errorf("volumeHandle.Read(rwSizeEach.FileInodeNumber, rwOffset, rwSizeRequested) failed to transfer all requested bytes\n") 1116 return 1117 } 1118 } 1119 } 1120 1121 stepErrChan <- nil 1122 _ = <-doNextStepChan 1123 1124 if !doSameFile { 1125 err = destroyInode(fileInodeNumber) 1126 if nil != err { 1127 return 1128 } 1129 } 1130 1131 stepErrChan <- nil 1132 } 1133 1134 func createObject() (err error, objectPath string) { 1135 mutex.Lock() 1136 volumeHandle, err := inode.FetchVolumeHandle(volumeName) 1137 mutex.Unlock() 1138 if nil != err { 1139 stepErrChan <- fmt.Errorf("inode.FetchVolumeHandle(\"%v\") failed: %v\n", volumeName, err) 1140 return 1141 } 1142 1143 objectPath, err = volumeHandle.ProvisionObject() 1144 if nil != err { 1145 stepErrChan <- fmt.Errorf("volumeHandle.ProvisionObject() failed: %v\n", err) 1146 return 1147 } 1148 return 1149 } 1150 1151 func deleteObject(objectPath string) (err error) { 1152 accountName, containerName, objectName, err := utils.PathToAcctContObj(objectPath) 1153 if nil != err { 1154 stepErrChan <- fmt.Errorf("utils.PathToAcctContObj(\"%v\") failed: %v\n", objectPath, err) 1155 return 1156 } 1157 1158 err = swiftclient.ObjectDelete(accountName, containerName, objectName, swiftclient.SkipRetry) 1159 if nil != err { 1160 stepErrChan <- fmt.Errorf("swiftclient.ObjectDelete(\"%v\", \"%v\", \"%v\", swiftclient.SkipRetry) failed: %v\n", accountName, containerName, objectName, err) 1161 return 1162 } 1163 return 1164 } 1165 1166 func swiftclientWorkout(rwSizeEach *rwSizeEachStruct, threadIndex uint64, doSameFile bool) { 1167 var err error 1168 var objectPath string 1169 if !doSameFile { 1170 err, objectPath = createObject() 1171 if nil != err { 1172 return 1173 } 1174 } else { 1175 objectPath = rwSizeEach.ObjectPath 1176 } 1177 1178 accountName, containerName, objectName, err := utils.PathToAcctContObj(objectPath) 1179 if nil != err { 1180 stepErrChan <- fmt.Errorf("utils.PathToAcctContObj(\"%v\") failed: %v\n", objectPath, err) 1181 return 1182 } 1183 1184 rwSizeRequested := rwSizeEach.KiB * 1024 1185 1186 bufWritten := make([]byte, rwSizeRequested) 1187 for i := uint64(0); i < rwSizeRequested; i++ { 1188 bufWritten[i] = 0 1189 } 1190 1191 stepErrChan <- nil 1192 _ = <-doNextStepChan 1193 1194 chunkedPutContext, err := swiftclient.ObjectFetchChunkedPutContext(accountName, containerName, objectName, "") 1195 if nil != err { 1196 stepErrChan <- fmt.Errorf("swiftclient.ObjectFetchChunkedPutContext(\"%v\", \"%v\", \"%v\") failed: %v\n", accountName, containerName, objectName, err) 1197 return 1198 } 1199 1200 for rwOffset := uint64(0); rwOffset < rwSizeTotal; rwOffset += rwSizeRequested { 1201 err = chunkedPutContext.SendChunk(bufWritten) 1202 if nil != err { 1203 stepErrChan <- fmt.Errorf("chunkedPutContext.SendChunk(bufWritten) failed: %v\n", err) 1204 return 1205 } 1206 } 1207 1208 err = chunkedPutContext.Close() 1209 if nil != err { 1210 stepErrChan <- fmt.Errorf("chunkedPutContext.Close() failed: %v\n", err) 1211 return 1212 } 1213 1214 stepErrChan <- nil 1215 _ = <-doNextStepChan 1216 1217 for rwOffset := uint64(0); rwOffset < rwSizeTotal; rwOffset += rwSizeRequested { 1218 bufRead, err := swiftclient.ObjectGet(accountName, containerName, objectName, rwOffset, rwSizeRequested) 1219 if nil != err { 1220 stepErrChan <- fmt.Errorf("swiftclient.ObjectGet(\"%v\", \"%v\", \"%v\", rwOffset, rwSizeRequested) failed: %v\n", accountName, containerName, objectName, err) 1221 return 1222 } 1223 if rwSizeRequested != uint64(len(bufRead)) { 1224 stepErrChan <- fmt.Errorf("swiftclient.ObjectGet(\"%v\", \"%v\", \"%v\", rwOffset, rwSizeRequested) failed to transfer all requested bytes\n", accountName, containerName, objectName) 1225 return 1226 } 1227 } 1228 1229 stepErrChan <- nil 1230 _ = <-doNextStepChan 1231 1232 if !doSameFile { 1233 err = deleteObject(objectPath) 1234 } 1235 1236 stepErrChan <- nil 1237 }