github.com/0chain/gosdk@v1.17.11/zboxcore/sdk/chunked_upload_bench_test.go

package sdk

import (
	"context"
	"crypto/rand"
	"fmt"
	"strings"
	"testing"
	"testing/fstest"

	"github.com/0chain/gosdk/dev"
	"github.com/0chain/gosdk/zboxcore/blockchain"
	"github.com/0chain/gosdk/zboxcore/logger"
	"github.com/0chain/gosdk/zboxcore/zboxutil"
)

type nopeChunkedUploadProgressStorer struct {
	up *UploadProgress
}

func (nope *nopeChunkedUploadProgressStorer) Load(id string) *UploadProgress {
	return nope.up
}

func (nope *nopeChunkedUploadProgressStorer) Save(up UploadProgress) {
	nope.up = &up
}

func (nope *nopeChunkedUploadProgressStorer) Remove(id string) error {
	nope.up = nil
	return nil
}

func (nope *nopeChunkedUploadProgressStorer) Update(id string, chunkIndex int, upMask zboxutil.Uint128) {
}

func generateRandomBytes(n int64) []byte {
	b := make([]byte, n)
	_, err := rand.Read(b)
	// Note that err == nil only if we read len(b) bytes.
	if err != nil {
		return nil
	}

	return b
}

func BenchmarkChunkedUpload(b *testing.B) {

	SetLogFile("cmdlog.log", false)

	logger.Logger.SetLevel(2)

	server := dev.NewBlobberServer(nil)
	defer server.Close()

	benchmarks := []struct {
		Name            string
		Size            int64
		ChunkSize       int
		EncryptOnUpload bool
	}{
		{Name: "1M 1K", Size: MB * 1, ChunkSize: KB * 1, EncryptOnUpload: false},
		{Name: "1M 64K", Size: MB * 1, ChunkSize: KB * 64, EncryptOnUpload: false},

		{Name: "10M 64K", Size: MB * 10, ChunkSize: KB * 64, EncryptOnUpload: false},
		{Name: "10M 6M", Size: MB * 10, ChunkSize: MB * 6, EncryptOnUpload: false},

		{Name: "100M 64K", Size: MB * 100, ChunkSize: KB * 64, EncryptOnUpload: false},
		{Name: "100M 6M", Size: MB * 100, ChunkSize: MB * 6, EncryptOnUpload: false},

		{Name: "500M 64K", Size: MB * 500, ChunkSize: KB * 64, EncryptOnUpload: false},
		{Name: "500M 6M", Size: MB * 500, ChunkSize: MB * 6, EncryptOnUpload: false},

		// {Name: "1G 64K", Size: GB * 1, ChunkSize: KB * 64, EncryptOnUpload: false},
		// {Name: "1G 60M", Size: GB * 1, ChunkSize: MB * 60, EncryptOnUpload: false},
	}

	for n, bm := range benchmarks {
		b.Run(bm.Name, func(b *testing.B) {

			buf := generateRandomBytes(bm.Size)

			b.ResetTimer()

			a := &Allocation{
				ID:           "1a0190c411f3e742c881b7b84c964dc1bb435d459bd3beca74a6c0ae8ececd92",
				Tx:           "1a0190c411f3e742c881b7b84c964dc1bb435d459bd3beca74a6c0ae8ececd92",
				DataShards:   2,
				ParityShards: 1,
				ctx:          context.TODO(),
			}
			a.fullconsensus, a.consensusThreshold = a.getConsensuses()
			for i := 0; i < (a.DataShards + a.ParityShards); i++ {

				a.Blobbers = append(a.Blobbers, &blockchain.StorageNode{
					ID:      fmt.Sprintf("blobber_%v_%v_", n, i),
					Baseurl: server.URL,
				})
			}

			for i := 0; i < b.N; i++ {
				name := strings.Replace(bm.Name, " ", "_", -1)

				fileName := "test_" + name + ".txt"

				m := fstest.MapFS{
					fileName: {
						Data: buf,
					},
				}

				reader, err := m.Open(fileName)

				if err != nil {
					b.Fatal(err)
					return
				}

				fi, _ := reader.Stat()

				fileMeta := FileMeta{
					Path:       "/tmp/" + fileName,
					ActualSize: fi.Size(),

					MimeType:   "plain/text",
					RemoteName: "/test.txt",
					RemotePath: "/test.txt",
				}

				chunkedUpload, err := CreateChunkedUpload(a.ctx, "/tmp", a, fileMeta, reader, false, false, false, zboxutil.NewConnectionId())
				if err != nil {
					b.Fatal(err)
					return
				}
				chunkedUpload.progressStorer = &nopeChunkedUploadProgressStorer{}

				err = chunkedUpload.Start()
				if err != nil {
					b.Fatal(err)
					return
				}

			}
		})
	}
}
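
// One possible way to run these benchmarks locally, assuming standard Go
// tooling and that the in-process dev blobber server created above is all
// that is needed (no real blobbers are contacted):
//
//	go test -run='^$' -bench=BenchmarkChunkedUpload -benchmem ./zboxcore/sdk/...
//
// The payload for each case is held entirely in memory via
// generateRandomBytes, so the 100M and 500M cases need several hundred MB of
// RAM per iteration; trim the benchmarks slice if that is a problem.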