gitlab.com/SkynetLabs/skyd@v1.6.9/skymodules/renter/skyfile.go

package renter

// skyfile.go provides the tools for creating and uploading skyfiles, and then
// receiving the associated skylinks to recover the files. The skyfile is the
// fundamental data structure underpinning Skynet.
//
// The primary trick of the skyfile is that the initial data is stored entirely
// in a single sector which is put on the Sia network using 1-of-N redundancy.
// Every replica has an identical Merkle root, meaning that someone attempting
// to fetch the file only needs the Merkle root and then some way to ask hosts
// on the network whether they have access to the Merkle root.
//
// That single sector then contains all of the other information that is
// necessary to recover the rest of the file. If the file is small enough, the
// entire file will be stored within the single sector. If the file is larger,
// the Merkle roots that are needed to download the remaining data get encoded
// into something called a 'fanout'. While the base chunk is required to use
// 1-of-N redundancy, the fanout chunks can use more sophisticated redundancy.
//
// The 1-of-N redundancy requirement really stems from the fact that Skylinks
// are only 34 bytes of raw data, meaning that there's only enough room in a
// Skylink to encode a single root. The fanout however has much more data to
// work with, meaning there is space to describe much fancier redundancy schemes
// and data fetching patterns.
//
// Skyfiles also contain some metadata which gets encoded as json. The
// intention is to allow uploaders to put any arbitrary metadata fields into
// their file and know that users will be able to see that metadata after
// downloading. A couple of fields such as the mode of the file are supported at
// the base level by Sia.

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"sync"
	"time"

	"github.com/opentracing/opentracing-go"
	"gitlab.com/SkynetLabs/skyd/build"
	"gitlab.com/SkynetLabs/skyd/fixtures"

	"gitlab.com/NebulousLabs/errors"
	"gitlab.com/SkynetLabs/skyd/skykey"
	"gitlab.com/SkynetLabs/skyd/skymodules"
	"gitlab.com/SkynetLabs/skyd/skymodules/renter/filesystem"
	"go.sia.tech/siad/crypto"
	"go.sia.tech/siad/modules"
	"go.sia.tech/siad/types"
)

var (
	// MaxSkylinkV2ResolvingDepth defines the maximum recursion depth the
	// renter tries to resolve when downloading v2 skylinks.
	MaxSkylinkV2ResolvingDepth = build.Select(build.Var{
		Standard: uint8(3),
		Dev:      uint8(5),
		Testing:  uint8(5),
	}).(uint8)
)

var (
	// SkyfileDefaultBaseChunkRedundancy establishes the default redundancy for
	// the base chunk of a skyfile.
	SkyfileDefaultBaseChunkRedundancy = build.Select(build.Var{
		Dev:      uint8(2),
		Standard: uint8(10),
		Testing:  uint8(2),
	}).(uint8)

	// maxHasSectorBatchSize is the maximum number of hasSector jobs within a
	// single batch.
	maxHasSectorBatchSize = build.Select(build.Var{
		Dev:      uint64(50),
		Standard: uint64(100),
		Testing:  uint64(2),
	}).(uint64)
)
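
// The file header above describes how a skylink packs everything needed to
// locate a base sector into 34 bytes. The sketch below is illustrative only
// and not part of the upload or download flow; it simply shows what a caller
// can recover from a skylink using helpers that already appear in this file
// (LoadString, MerkleRoot and OffsetAndFetchSize). The function name and the
// idea of printing the fields are assumptions made for the example.
func exampleInspectSkylink(link string) error {
	var sl skymodules.Skylink
	if err := sl.LoadString(link); err != nil {
		return errors.AddContext(err, "unable to load skylink")
	}
	offset, fetchSize, err := sl.OffsetAndFetchSize()
	if err != nil {
		return errors.AddContext(err, "unable to parse skylink")
	}
	// The Merkle root identifies the base sector on the network; offset and
	// fetchSize describe which part of that sector to request from a host.
	fmt.Printf("root: %v offset: %v fetchSize: %v\n", sl.MerkleRoot(), offset, fetchSize)
	return nil
}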

var (
	// ErrEncryptionNotSupported is the error returned when Skykey encryption is
	// not supported for a Skynet action.
	ErrEncryptionNotSupported = errors.New("skykey encryption not supported")

	// ErrInvalidMetadata is the error returned when the metadata is not valid.
	ErrInvalidMetadata = errors.New("metadata is invalid")

	// ErrMetadataTooBig is the error returned when the metadata exceeds a
	// sector size.
	ErrMetadataTooBig = errors.New("metadata exceeds sectorsize")

	// ErrSkylinkBlocked is the error returned when a skylink is blocked.
	ErrSkylinkBlocked = errors.New("skylink is blocked")

	// ErrSkylinkNesting is the error returned when a skylink is nested more
	// times than MaxSkylinkV2ResolvingDepth.
	ErrSkylinkNesting = errors.New("skylink is nested more times than is supported")

	// ErrInvalidSkylinkVersion is returned when an operation fails due to the
	// skylink having the wrong version.
	ErrInvalidSkylinkVersion = errors.New("skylink had unexpected version")
)

// baseSectorUploadParamsFromSUP will derive the FileUploadParams to use when
// uploading the base chunk siafile of a skyfile using the skyfile's upload
// parameters.
func baseSectorUploadParamsFromSUP(sup skymodules.SkyfileUploadParameters) (skymodules.FileUploadParams, error) {
	// Establish defaults
	skyfileEstablishDefaults(&sup)

	// Create parameters to upload the file with 1-of-N erasure coding and no
	// encryption. This should cause all of the pieces to have the same Merkle
	// root, which is critical to making the file discoverable to viewnodes and
	// also resilient to host failures.
	return fileUploadParams(sup.SiaPath, 1, int(sup.BaseChunkRedundancy)-1, sup.Force, crypto.TypePlain)
}

// isCompressedFanout is a helper to tell whether a fanout is expected to be
// compressed based on some inputs.
func isCompressedFanout(ec skymodules.ErasureCoder, cType crypto.CipherType) bool {
	return ec.MinPieces() == 1 && cType == crypto.TypePlain
}

// fileUploadParams will create an erasure coder and return the FileUploadParams
// to use when uploading using the provided parameters.
func fileUploadParams(siaPath skymodules.SiaPath, dataPieces, parityPieces int, force bool, ct crypto.CipherType) (skymodules.FileUploadParams, error) {
	// Create the erasure coder
	ec, err := skymodules.NewRSSubCode(dataPieces, parityPieces, crypto.SegmentSize)
	if err != nil {
		return skymodules.FileUploadParams{}, errors.AddContext(err, "unable to create erasure coder")
	}

	// Return the FileUploadParams
	return skymodules.FileUploadParams{
		SiaPath:     siaPath,
		ErasureCode: ec,
		Force:       force,
		Repair:      false, // indicates whether this is a repair operation
		CipherType:  ct,
	}, nil
}

// skyfileEstablishDefaults will set any zero values in the sup to be equal to
// the desired defaults.
func skyfileEstablishDefaults(sup *skymodules.SkyfileUploadParameters) {
	if sup.BaseChunkRedundancy == 0 {
		sup.BaseChunkRedundancy = SkyfileDefaultBaseChunkRedundancy
	}
}

// streamerFromReader wraps a bytes.Reader to give it a Close() method, which
// allows it to satisfy the skymodules.Streamer interface.
type streamerFromReader struct {
	*bytes.Reader
}

// skylinkStreamerFromReader wraps a streamerFromReader to give it a Metadata()
// method, which allows it to satisfy the skymodules.SkyfileStreamer interface.
type skylinkStreamerFromReader struct {
	modules.Streamer
	staticLayout  skymodules.SkyfileLayout
	staticMD      skymodules.SkyfileMetadata
	staticRawMD   []byte
	staticSkylink skymodules.Skylink
}

// Close is a no-op because a bytes.Reader doesn't need to be closed.
func (sfr *streamerFromReader) Close() error {
	return nil
}

// StreamerFromSlice returns a skymodules.Streamer given a slice. This is
// non-trivial because a bytes.Reader does not implement Close.
func StreamerFromSlice(b []byte) skymodules.Streamer {
	reader := bytes.NewReader(b)
	return &streamerFromReader{
		Reader: reader,
	}
}

// SkylinkStreamerFromSlice creates a skymodules.SkyfileStreamer from a byte
// slice.
func SkylinkStreamerFromSlice(b []byte, md skymodules.SkyfileMetadata, rawMD []byte, skylink skymodules.Skylink, layout skymodules.SkyfileLayout) skymodules.SkyfileStreamer {
	streamer := StreamerFromSlice(b)
	return &skylinkStreamerFromReader{
		Streamer:      streamer,
		staticLayout:  layout,
		staticMD:      md,
		staticRawMD:   rawMD,
		staticSkylink: skylink,
	}
}

// CacheRatio returns 100% for the streamer created from the reader since the
// data already exists in memory at the time it is created.
func (sfr *skylinkStreamerFromReader) CacheRatio() float64 {
	return 1.0
}

// Layout implements the skymodules.SkyfileStreamer interface.
func (sfr *skylinkStreamerFromReader) Layout() skymodules.SkyfileLayout {
	return sfr.staticLayout
}

// RawLayout implements the skymodules.SkyfileStreamer interface.
func (sfr *skylinkStreamerFromReader) RawLayout() (skymodules.SkyfileLayout, []byte, []crypto.Hash) {
	return sfr.staticLayout, nil, nil
}

// Metadata implements the skymodules.SkyfileStreamer interface.
func (sfr *skylinkStreamerFromReader) Metadata() skymodules.SkyfileMetadata {
	return sfr.staticMD
}

// RawMetadata implements the skymodules.SkyfileStreamer interface.
func (sfr *skylinkStreamerFromReader) RawMetadata() []byte {
	return sfr.staticRawMD
}

// Skylink implements the skymodules.SkyfileStreamer interface.
func (sfr *skylinkStreamerFromReader) Skylink() skymodules.Skylink {
	return sfr.staticSkylink
}

// TrustlessWrite implements the skymodules.SkyfileStreamer interface.
func (sfr *skylinkStreamerFromReader) TrustlessWrite(io.Writer, uint64) error {
	return errors.New("not supported")
}

// CreateSkylinkFromSiafile creates a skyfile from a siafile. This requires
// uploading a new skyfile which contains fanout information pointing to the
// siafile data. The SiaPath provided in 'sup' indicates where the new base
// sector skyfile will be placed, and the siaPath provided as its own input is
// the siaPath of the file that is being used to create the skyfile.
func (r *Renter) CreateSkylinkFromSiafile(sup skymodules.SkyfileUploadParameters, siaPath skymodules.SiaPath) (_ skymodules.Skylink, err error) {
	// Encryption is not supported for SiaFile conversion.
	if encryptionEnabled(&sup) {
		return skymodules.Skylink{}, errors.AddContext(ErrEncryptionNotSupported, "unable to convert siafile")
	}
	// Set reasonable default values for any sup fields that are blank.
	skyfileEstablishDefaults(&sup)

	// Grab the filenode for the provided siapath.
	fileNode, err := r.staticFileSystem.OpenSiaFile(siaPath)
	if err != nil {
		return skymodules.Skylink{}, errors.AddContext(err, "unable to open siafile")
	}
	defer func() {
		err = errors.Compose(err, fileNode.Close())
	}()

	// Override the metadata with the info from the fileNode.
	metadata := skymodules.SkyfileMetadata{
		Filename: siaPath.Name(),
		Mode:     fileNode.Mode(),
		Length:   fileNode.Size(),
	}

	// Generate the fanoutBytes
	cipherType := fileNode.Metadata().StaticMasterKeyType
	onlyOnePieceNeeded := isCompressedFanout(fileNode.ErasureCode(), cipherType)
	fanoutBytes, err := skyfileEncodeFanoutFromFileNode(fileNode, onlyOnePieceNeeded)
	if err != nil {
		return skymodules.Skylink{}, errors.AddContext(err, "unable to generate the fanout bytes")
	}

	return r.managedCreateSkylinkFromFileNode(r.tg.StopCtx(), sup, metadata, fileNode, fanoutBytes)
}

// managedCreateSkylink creates a skylink from the provided parameters.
func (r *Renter) managedCreateSkylink(ctx context.Context, sup skymodules.SkyfileUploadParameters, skyfileMetadata skymodules.SkyfileMetadata, fanoutBytes []byte, size uint64, masterKey crypto.CipherKey, ec skymodules.ErasureCoder) (skymodules.Skylink, error) {
	// Check if the given metadata is valid
	err := skymodules.ValidateSkyfileMetadata(skyfileMetadata)
	if err != nil {
		return skymodules.Skylink{}, errors.Compose(ErrInvalidMetadata, err)
	}
	// Marshal the metadata.
	metadataBytes, err := skymodules.SkyfileMetadataBytes(skyfileMetadata)
	if err != nil {
		return skymodules.Skylink{}, errors.AddContext(err, "error retrieving skyfile metadata bytes")
	}
	return r.managedCreateSkylinkRawMD(ctx, sup, metadataBytes, fanoutBytes, size, masterKey, ec)
}

// managedCreateSkylinkRawMD creates a skylink from the provided parameters
// using already encoded metadata.
func (r *Renter) managedCreateSkylinkRawMD(ctx context.Context, sup skymodules.SkyfileUploadParameters, metadataBytes, fanoutBytes []byte, size uint64, masterKey crypto.CipherKey, ec skymodules.ErasureCoder) (skymodules.Skylink, error) {
	// Check that the encryption key and erasure code are compatible with the
	// skyfile format. This is intentionally done before any heavy computation
	// to catch errors early on.
	var sl skymodules.SkyfileLayout
	if len(masterKey.Key()) > len(sl.KeyData) {
		return skymodules.Skylink{}, errors.New("cipher key is not supported by the skyfile format")
	}
	if ec.Type() != skymodules.ECReedSolomonSubShards64 {
		return skymodules.Skylink{}, errors.New("siafile has unsupported erasure code type")
	}

	// Assemble the first chunk of the skyfile.
	sl = skymodules.NewSkyfileLayout(size, uint64(len(metadataBytes)), uint64(len(fanoutBytes)), ec, masterKey.Type())

	// If we're uploading in plaintext, we put the key in the baseSector
	if !encryptionEnabled(&sup) {
		copy(sl.KeyData[:], masterKey.Key())
	}
	// Create the base sector.
	baseSector, fetchSize, baseSectorExtension := skymodules.BuildBaseSector(sl.Encode(), fanoutBytes, metadataBytes, nil)

	// We need to pin the extended fanout as well so we just add it to the
	// base sector.
	for _, ext := range baseSectorExtension {
		baseSector = append(baseSector, ext...)
	}

	// Encrypt the base sector if necessary.
	if encryptionEnabled(&sup) {
		err := encryptBaseSectorWithSkykey(baseSector[:modules.SectorSize], sl, sup.FileSpecificSkykey)
		if err != nil {
			return skymodules.Skylink{}, errors.AddContext(err, "Failed to encrypt base sector for upload")
		}
	}

	// Create the skylink.
	baseSectorRoot := crypto.MerkleRoot(baseSector[:modules.SectorSize])
	skylink, err := skymodules.NewSkylinkV1(baseSectorRoot, 0, fetchSize)
	if err != nil {
		return skymodules.Skylink{}, errors.AddContext(err, "unable to build skylink")
	}
	if sup.DryRun {
		return skylink, nil
	}

	// Check if the new skylink is blocked
	err = r.managedHandleIsBlockedCheck(ctx, skylink, sup.SiaPath)
	if err != nil {
		return skymodules.Skylink{}, err
	}

	// Upload the base sector.
	err = r.managedUploadBaseSector(ctx, sup, baseSector, skylink)
	if err != nil {
		return skymodules.Skylink{}, errors.AddContext(err, "Unable to upload base sector for file node. ")
	}

	return skylink, nil
}

// managedCreateSkylinkFromFileNode creates a skylink from a file node.
//
// The name needs to be passed in explicitly because a file node does not track
// its own name, which allows the file to be renamed concurrently without
// causing any race conditions.
func (r *Renter) managedCreateSkylinkFromFileNode(ctx context.Context, sup skymodules.SkyfileUploadParameters, skyfileMetadata skymodules.SkyfileMetadata, fileNode *filesystem.FileNode, fanoutBytes []byte) (skymodules.Skylink, error) {
	// Check if any of the skylinks associated with the siafile are blocked
	if err := r.managedHandleFileNodeBlockedCheck(fileNode, sup.SiaPath); err != nil {
		return skymodules.Skylink{}, err
	}

	// Create the skylink.
	skylink, err := r.managedCreateSkylink(ctx, sup, skyfileMetadata, fanoutBytes, fileNode.Size(), fileNode.MasterKey(), fileNode.ErasureCode())
	if err != nil {
		return skymodules.Skylink{}, err
	}

	// Add the skylink to the siafiles.
	err = fileNode.AddSkylink(skylink)
	return skylink, errors.AddContext(err, "unable to add skylink to the sianodes")
}

// managedDownloadBaseSector downloads the base sector for a given skylink. If
// possible this will fetch the sector from the cache.
// NOTE: serving the base sector from the cache won't yield a worker state.
func (r *Renter) managedDownloadBaseSector(ctx context.Context, skylink skymodules.Skylink, pricePerMS types.Currency, disableCache bool) ([]byte, *pcwsWorkerState, error) {
	// Get the offset and fetchsize from the skylink
	offset, fetchSize, err := skylink.OffsetAndFetchSize()
	if err != nil {
		return nil, nil, errors.AddContext(err, "unable to parse skylink")
	}

	if !disableCache {
		// See if we have the base sector in the cache.
		dd, cached, err := r.staticStreamBufferSet.staticCache.Get(skylink.DataSourceID(), baseSectorSectionIndex)
		if err != nil {
			return nil, nil, errors.AddContext(err, "failed to access cache")
		}
		if cached {
			baseSector, err := dd.Recover()
			return baseSector, nil, errors.AddContext(err, "failed to recover base sector")
		}
	}

	// Download the base sector. The base sector contains the metadata, without
	// it we can't provide a completed data source.
	//
	// NOTE: we pass in the provided context here; if the user imposed a timeout
	// on the download request, this will fire if it takes too long.
	baseSector, _, ws, err := r.managedDownloadByRoot(ctx, skylink.MerkleRoot(), offset, fetchSize, pricePerMS)
	if err != nil {
		return nil, nil, errors.AddContext(err, "unable to download base sector")
	}
	return baseSector, ws, nil
}

// managedPopulateFileNodeFromReader takes the fileNode and a reader and returns
// a populated filenode without uploading any data. It is used to perform a
// dry-run of a skyfile upload.
func (r *Renter) managedPopulateFileNodeFromReader(fileNode *filesystem.FileNode, reader skymodules.ChunkReader) error {
	// Extract some helper variables
	hpk := types.SiaPublicKey{} // blank host key
	csize := fileNode.ChunkSize()

	for chunkIndex := uint64(0); ; chunkIndex++ {
		// Allocate data pieces and fill them with data from r.
		dataEncoded, total, errRead := reader.ReadChunk()
		if errors.Contains(errRead, io.EOF) {
			return nil
		}
		if errRead != nil {
			return errRead
		}
		// If no more data is read from the stream we are done.
		if total == 0 {
			return nil // done
		}

		// Grow the SiaFile to the right size.
		err := fileNode.SiaFile.GrowNumChunks(chunkIndex + 1)
		if err != nil {
			return err
		}

		for pieceIndex, dataPieceEnc := range dataEncoded {
			if err := fileNode.SiaFile.AddPiece(hpk, chunkIndex, uint64(pieceIndex), crypto.MerkleRoot(dataPieceEnc)); err != nil {
				return err
			}
		}

		adjustedSize := fileNode.Size() - csize + total
		if err := fileNode.SetFileSize(adjustedSize); err != nil {
			return errors.AddContext(err, "failed to adjust FileSize")
		}
	}
}

// Blocklist returns the merkleroots that are on the blocklist.
func (r *Renter) Blocklist() ([]crypto.Hash, error) {
	err := r.tg.Add()
	if err != nil {
		return []crypto.Hash{}, err
	}
	defer r.tg.Done()
	return r.staticSkynetBlocklist.Blocklist(), nil
}

// UpdateSkynetBlocklist updates the list of hashed merkleroots that are blocked.
func (r *Renter) UpdateSkynetBlocklist(ctx context.Context, additions, removals []string, isHash bool, probationaryPeriod int64) ([]skymodules.SkynetBlocklistInvalidInput, error) {
	err := r.tg.Add()
	if err != nil {
		return nil, err
	}
	defer r.tg.Done()

	// Parse the hashes that should be added to and removed from the blocklist
	addHashes, invalidAdds := r.managedParseBlocklistHashes(ctx, additions, isHash, true)
	removeHashes, invalidRemovals := r.managedParseBlocklistHashes(ctx, removals, isHash, true)

	// Update the blocklist
	err = r.staticSkynetBlocklist.UpdateBlocklist(addHashes, removeHashes, probationaryPeriod)
	if err != nil {
		return nil, err
	}

	// Return the invalid inputs
	return append(invalidAdds, invalidRemovals...), nil
}

// Portals returns the list of known skynet portals.
func (r *Renter) Portals() ([]skymodules.SkynetPortal, error) {
	err := r.tg.Add()
	if err != nil {
		return []skymodules.SkynetPortal{}, err
	}
	defer r.tg.Done()
	return r.staticSkynetPortals.Portals(), nil
}
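
// UpdateSkynetBlocklist above accepts either skylink strings or already hashed
// merkle roots, depending on isHash. A minimal caller sketch, assuming the
// renter r is running; the function name, the zero probationary period and the
// choice to pass a raw skylink string are assumptions made for the example.
func exampleBlockSkylink(r *Renter, skylink string) error {
	invalid, err := r.UpdateSkynetBlocklist(context.Background(), []string{skylink}, nil, false, 0)
	if err != nil {
		return errors.AddContext(err, "unable to update blocklist")
	}
	if len(invalid) > 0 {
		return fmt.Errorf("%v blocklist inputs could not be parsed", len(invalid))
	}
	return nil
}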

// UpdateSkynetPortals updates the list of known Skynet portals.
func (r *Renter) UpdateSkynetPortals(additions []skymodules.SkynetPortal, removals []modules.NetAddress) error {
	err := r.tg.Add()
	if err != nil {
		return err
	}
	defer r.tg.Done()
	return r.staticSkynetPortals.UpdatePortals(additions, removals)
}

// managedUploadBaseSector will take the raw baseSector bytes and upload them,
// creating a siafile that tracks the base sector and tagging it with the given
// skylink.
func (r *Renter) managedUploadBaseSector(ctx context.Context, sup skymodules.SkyfileUploadParameters, baseSector []byte, skylink skymodules.Skylink) (err error) {
	start := time.Now()
	// Trace the base sector upload in its own span if the given ctx already has
	// a span attached.
	span, ctx := opentracing.StartSpanFromContext(ctx, "managedUploadBaseSector")
	span.SetTag("skylink", skylink.String())
	defer func() {
		if err != nil {
			span.LogKV("err", err)
		}
		span.SetTag("success", err == nil)
		span.Finish()
	}()

	uploadParams, err := baseSectorUploadParamsFromSUP(sup)
	if err != nil {
		return errors.AddContext(err, "failed to create siafile upload parameters")
	}

	// Turn the base sector into a reader. The extended fanout is also
	// added. Make sure every piece of the extended fanout ends up in its
	// own chunk.
	reader := bytes.NewReader(baseSector)

	// Perform the actual upload.
	fileNode, err := r.callUploadStreamFromReader(ctx, uploadParams, reader)
	if err != nil {
		return errors.AddContext(err, "failed to stream upload base sector")
	}
	defer func() {
		// If there was an error, try and delete the file that was created
		if err != nil {
			deleteErr := r.DeleteFile(sup.SiaPath)
			// Don't bother returning an error if the file doesn't exist
			if !errors.Contains(deleteErr, filesystem.ErrNotExist) {
				err = errors.Compose(err, deleteErr)
			}
		}
		err = errors.Compose(err, fileNode.Close())
	}()

	// Add the skylink to the Siafile.
	err = fileNode.AddSkylink(skylink)

	// Update stats.
	if err == nil {
		r.staticBaseSectorUploadStats.AddDataPoint(time.Since(start))
	}
	return errors.AddContext(err, "unable to add skylink to siafile")
}
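
// managedUploadSkyfile below decides between the small and large upload paths
// by checking whether the layout, the marshaled metadata and the file data all
// fit into one sector together. A minimal sketch of that size check, mirroring
// the comparison made in managedUploadSkyfile; the helper name is an
// assumption made for the example.
func fitsInSingleSector(metadataBytes []byte, numFileBytes uint64) bool {
	// The header of a small skyfile is the fixed-size layout followed by the
	// marshaled metadata; the file bytes fill the remainder of the sector.
	headerSize := uint64(skymodules.SkyfileLayoutSize + len(metadataBytes))
	return numFileBytes+headerSize <= modules.SectorSize
}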

// managedUploadSkyfile uploads a file, choosing between the small file and
// large file paths, and returns the resulting skylink.
func (r *Renter) managedUploadSkyfile(ctx context.Context, sup skymodules.SkyfileUploadParameters, reader skymodules.SkyfileUploadReader) (skymodules.Skylink, error) {
	// see if we can fit the entire upload in a single chunk
	buf := make([]byte, modules.SectorSize)
	numBytes, err := io.ReadFull(reader, buf)
	buf = buf[:numBytes] // truncate the buffer

	// if we've reached EOF, we can safely fetch the metadata and calculate the
	// actual header size; if that fits in a single sector we can upload the
	// skyfile as a small file
	if errors.Contains(err, io.EOF) || errors.Contains(err, io.ErrUnexpectedEOF) {
		// get the skyfile metadata from the reader
		metadata, err := reader.SkyfileMetadata(ctx)
		if err != nil {
			return skymodules.Skylink{}, errors.AddContext(err, "unable to get skyfile metadata")
		}

		// check whether it's valid
		err = skymodules.ValidateSkyfileMetadata(metadata)
		if err != nil {
			return skymodules.Skylink{}, errors.Compose(ErrInvalidMetadata, err)
		}
		// marshal the skyfile metadata into bytes
		metadataBytes, err := skymodules.SkyfileMetadataBytes(metadata)
		if err != nil {
			return skymodules.Skylink{}, errors.AddContext(err, "unable to get skyfile metadata bytes")
		}

		// verify whether it fits in a single chunk
		headerSize := uint64(skymodules.SkyfileLayoutSize + len(metadataBytes))
		if uint64(numBytes)+headerSize <= modules.SectorSize {
			return r.managedUploadSkyfileSmallFile(ctx, sup, metadataBytes, buf)
		}
	}

	// if we reach this point it means either we have not reached the EOF or the
	// data combined with the header exceeds a single sector, so we add the data
	// we already read back to the reader and upload as a large file
	reader.SetReadBuffer(buf)
	// set buffer nil to allow for GC to pick it up before starting the upload.
	// That way it won't stick around until the upload is done.
	buf = nil
	return r.managedUploadSkyfileLargeFile(ctx, sup, reader)
}

// managedUploadSkyfileSmallFile uploads a file that fits entirely in the
// leading chunk of a skyfile to the Sia network and returns the skylink that
// can be used to access the file.
func (r *Renter) managedUploadSkyfileSmallFile(ctx context.Context, sup skymodules.SkyfileUploadParameters, metadataBytes, fileBytes []byte) (skylink skymodules.Skylink, err error) {
	// Fetch the span from our context and tag it as small (large=false).
	if span := opentracing.SpanFromContext(ctx); span != nil {
		defer func() {
			if err != nil {
				span.LogKV("err", err)
			}
			span.SetTag("large", false)
		}()
	}

	// Create the layout. Since this is a small upload it doesn't have a
	// fanout.
	sl := skymodules.NewSkyfileLayoutNoFanout(uint64(len(fileBytes)), uint64(len(metadataBytes)), crypto.TypePlain)

	// Create the base sector. This is done as late as possible so that any
	// errors are caught before a large block of memory is allocated.
	baseSector, fetchSize, extendedFanout := skymodules.BuildBaseSector(sl.Encode(), nil, metadataBytes, fileBytes) // 'nil' because there is no fanout
	if len(extendedFanout) > 0 {
		err = errors.New("shouldn't have an extended fanout in small file upload")
		build.Critical(err)
		return skymodules.Skylink{}, err
	}

	// Add encryption if required.
	if encryptionEnabled(&sup) {
		err = encryptBaseSectorWithSkykey(baseSector, sl, sup.FileSpecificSkykey)
		if err != nil {
			return skymodules.Skylink{}, errors.AddContext(err, "Failed to encrypt base sector for upload")
		}
	}

	// Create the skylink.
	baseSectorRoot := crypto.MerkleRoot(baseSector) // Should be identical to the sector roots for each sector in the siafile.
	skylink, err = skymodules.NewSkylinkV1(baseSectorRoot, 0, fetchSize)
	if err != nil {
		return skymodules.Skylink{}, errors.AddContext(err, "failed to build the skylink")
	}

	// If this is a dry-run, we do not need to upload the base sector
	if sup.DryRun {
		return skylink, nil
	}

	// Upload the base sector.
	err = r.managedUploadBaseSector(ctx, sup, baseSector, skylink)
	if err != nil {
		return skymodules.Skylink{}, errors.AddContext(err, "failed to upload base sector")
	}
	return skylink, nil
}

// managedUploadSkyfileLargeFile will accept a fileReader containing all of the
// data for a large siafile and upload it to the Sia network using
// 'callUploadStreamFromReader'. The final skylink is created by calling
// 'CreateSkylinkFromSiafile' on the resulting siafile.
func (r *Renter) managedUploadSkyfileLargeFile(ctx context.Context, sup skymodules.SkyfileUploadParameters, fileReader skymodules.SkyfileUploadReader) (skylink skymodules.Skylink, err error) {
	// Fetch the span from our context and tag it as large.
	if span := opentracing.SpanFromContext(ctx); span != nil {
		defer func() {
			if err != nil {
				span.LogKV("err", err)
			}
			span.SetTag("large", true)
		}()
	}

	// Create the siapath for the skyfile extra data. This is going to be the
	// same as the skyfile upload siapath, except with a suffix.
	siaPath, err := sup.SiaPath.AddSuffixStr(skymodules.ExtendedSuffix)
	if err != nil {
		return skymodules.Skylink{}, errors.AddContext(err, "unable to create SiaPath for large skyfile extended data")
	}

	// Disrupt and use custom redundancy if the StandardUploadRedundancy
	// dependency is set.
	dataPieces := skymodules.RenterDefaultDataPieces
	parityPieces := skymodules.RenterDefaultParityPieces
	if r.staticDeps.Disrupt("StandardUploadRedundancy") {
		dataPieces = 10
		parityPieces = 20
	} else if r.staticDeps.Disrupt("PDCFanoutNotFinished") {
		dataPieces = 2
	}

	// Create the FileUploadParams
	fup, err := fileUploadParams(siaPath, dataPieces, parityPieces, sup.Force, crypto.TypePlain)
	if err != nil {
		return skymodules.Skylink{}, errors.AddContext(err, "unable to create FileUploadParams for large file")
	}

	// Generate a Cipher Key for the FileUploadParams.
	err = generateCipherKey(&fup, sup)
	if err != nil {
		return skymodules.Skylink{}, errors.AddContext(err, "unable to create Cipher key for FileUploadParams")
	}

	// Check the upload params first and create an empty fileNode to upload
	// to.
	fileNode, err := r.managedInitFileNode(fup, 0)
	if err != nil {
		return skymodules.Skylink{}, err
	}
	// Defer closing the file
	defer func() {
		// If there was an error, try and delete the file that was created
		if err != nil {
			deleteErr := r.DeleteFile(sup.SiaPath)
			// Don't bother returning an error if the file doesn't exist
			if !errors.Contains(deleteErr, filesystem.ErrNotExist) {
				err = errors.Compose(err, deleteErr)
			}
		}
		err = errors.Compose(err, fileNode.Close())
	}()

	// Figure out how to create the fanout. If only one piece is needed, we
	// create it from the node directly after the upload.
	cipherType := fileNode.MasterKey().Type()
	onlyOnePieceNeeded := isCompressedFanout(fileNode.ErasureCode(), cipherType)

	// Wrap the reader in a FanoutChunkReader.
	cr := NewFanoutChunkReader(fileReader, fileNode.ErasureCode(), fileNode.MasterKey())
	if sup.DryRun || r.staticDeps.Disrupt("DoNotUploadFanout") {
		// In case of a dry-run we don't want to perform the actual upload,
		// instead we create a filenode that contains all of the data pieces and
		// their merkle roots.
		err = r.managedPopulateFileNodeFromReader(fileNode, cr)
	} else {
		// Upload the file using a streamer.
		_, err = r.callUploadStreamFromReaderWithFileNode(ctx, fileNode, cr, 0)
	}
	if err != nil {
		return skymodules.Skylink{}, errors.AddContext(err, "failed to upload file")
	}

	// If there was no reader then the fanout creation failed. We need to create
	// the fanout from the fileNode in that case.
	var fanout []byte
	if fileReader != nil {
		fanout = cr.Fanout()
	} else {
		fanout, err = skyfileEncodeFanoutFromFileNode(fileNode, onlyOnePieceNeeded)
	}
	if err != nil {
		return skymodules.Skylink{}, errors.AddContext(err, "failed to compute fanout")
	}

	// Get the SkyfileMetadata from the reader object.
	metadata, err := fileReader.SkyfileMetadata(ctx)
	if err != nil {
		return skymodules.Skylink{}, errors.AddContext(err, "unable to get skyfile metadata")
	}

	// Convert the new siafile we just uploaded into a skyfile using the
	// convert function.
	skylink, err = r.managedCreateSkylinkFromFileNode(ctx, sup, metadata, fileNode, fanout)
	if err != nil {
		return skymodules.Skylink{}, errors.AddContext(err, "unable to create skylink from filenode")
	}
	return skylink, nil
}
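
// managedUploadSkyfileLargeFile above stores the fanout data in a sibling
// siafile at the upload siapath plus skymodules.ExtendedSuffix, erasure coded
// with the renter defaults. A minimal sketch of how those upload parameters
// are derived, using only helpers from this file; the function name is an
// assumption made for the example and the disrupt-based overrides are omitted.
func exampleLargeFileUploadParams(sup skymodules.SkyfileUploadParameters) (skymodules.FileUploadParams, error) {
	// The fanout siafile lives next to the base sector siafile, marked with
	// the extended suffix.
	extendedPath, err := sup.SiaPath.AddSuffixStr(skymodules.ExtendedSuffix)
	if err != nil {
		return skymodules.FileUploadParams{}, errors.AddContext(err, "unable to create extended siapath")
	}
	// TypePlain here refers to the siafile layer; skyfile encryption is
	// handled separately via generateCipherKey.
	return fileUploadParams(extendedPath, skymodules.RenterDefaultDataPieces, skymodules.RenterDefaultParityPieces, sup.Force, crypto.TypePlain)
}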

// DownloadByRoot will fetch data using the merkle root of that data. This uses
// all of the async worker primitives to improve speed and throughput.
func (r *Renter) DownloadByRoot(ctx context.Context, root crypto.Hash, offset, length uint64, pricePerMS types.Currency) ([]byte, [][]byte, [][]crypto.Hash, error) {
	if err := r.tg.Add(); err != nil {
		return nil, nil, nil, err
	}
	defer r.tg.Done()

	// Check if the merkleroot is blocked
	if _, blocked := r.staticSkynetBlocklist.IsHashBlocked(crypto.HashObject(root)); blocked {
		return nil, nil, nil, ErrSkylinkBlocked
	}

	// Fetch the parent span
	span := opentracing.SpanFromContext(ctx)
	if span == nil {
		span = opentracing.StartSpan("DownloadByRoot")
	} else {
		spanRef := opentracing.ChildOf(span.Context())
		span = opentracing.StartSpan("DownloadByRoot", spanRef)
	}
	span.SetTag("root", root)
	defer span.Finish()

	// Attach the span to the ctx
	ctx = opentracing.ContextWithSpan(ctx, span)

	// Fetch the data
	start := time.Now()
	data, dd, _, err := r.managedDownloadByRoot(ctx, root, offset, length, pricePerMS)
	if errors.Contains(err, ErrProjectTimedOut) {
		elapsedInS := time.Since(start).Seconds()
		err = errors.AddContext(err, fmt.Sprintf("timed out after %vs", elapsedInS))
		return nil, nil, nil, err
	}
	if err != nil {
		return nil, nil, nil, err
	}
	return data, dd.LogicalChunkData, dd.Proofs, nil
}

// DownloadSkylink will take a link and turn it into the metadata and data of a
// download.
func (r *Renter) DownloadSkylink(link skymodules.Skylink, timeout time.Duration, pricePerMS types.Currency) (skymodules.SkyfileStreamer, []skymodules.RegistryEntry, error) {
	if err := r.tg.Add(); err != nil {
		return nil, nil, err
	}
	defer r.tg.Done()

	// Create a context
	ctx := r.tg.StopCtx()
	if timeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(r.tg.StopCtx(), timeout)
		defer cancel()
	}

	// Create a new span.
	span := opentracing.StartSpan("DownloadSkylink")
	span.SetTag("skylink", link.String())

	// Attach the span to the ctx
	ctx = opentracing.ContextWithSpan(ctx, span)

	// Check if link needs to be resolved from V2 to V1.
	link, srvs, err := r.managedTryResolveSkylinkV2(ctx, link, true, false)
	if err != nil {
		return nil, nil, err
	}

	// Download the data
	streamer, err := r.managedDownloadSkylink(ctx, link, timeout, pricePerMS, false)
	if errors.Contains(err, ErrProjectTimedOut) {
		span.LogKV("timeout", timeout)
		span.SetTag("timeout", true)
		err = errors.AddContext(err, fmt.Sprintf("timed out after %vs", timeout.Seconds()))
	}

	return streamer, srvs, err
}
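
// DownloadSkylink above returns a streamer over the full file contents once
// the link has been resolved and the base sector fetched. A minimal caller
// sketch, assuming the renter r is running; the one minute timeout, the zero
// price per millisecond and the function name are assumptions made for the
// example.
func exampleDownloadSkylink(r *Renter, linkStr string) ([]byte, error) {
	var link skymodules.Skylink
	if err := link.LoadString(linkStr); err != nil {
		return nil, errors.AddContext(err, "unable to load skylink")
	}
	streamer, _, err := r.DownloadSkylink(link, time.Minute, types.ZeroCurrency)
	if err != nil {
		return nil, errors.AddContext(err, "unable to download skylink")
	}
	defer func() {
		_ = streamer.Close()
	}()
	// Read the streamed file into memory; callers with large files would
	// stream the data instead of buffering everything.
	return io.ReadAll(streamer)
}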

// DownloadSkylinkBaseSector will take a link and turn it into the data of
// a basesector without any decoding of the metadata, fanout, or decryption.
func (r *Renter) DownloadSkylinkBaseSector(link skymodules.Skylink, timeout time.Duration, pricePerMS types.Currency) (skymodules.Streamer, []skymodules.RegistryEntry, skymodules.Skylink, error) {
	if err := r.tg.Add(); err != nil {
		return nil, nil, link, err
	}
	defer r.tg.Done()

	// Create the context
	ctx := r.tg.StopCtx()
	if timeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(r.tg.StopCtx(), timeout)
		defer cancel()
	}

	// Create a span
	span := opentracing.StartSpan("DownloadSkylinkBaseSector")
	span.SetTag("skylink", link.String())
	defer span.Finish()

	// Attach the span to the ctx
	ctx = opentracing.ContextWithSpan(ctx, span)

	// Check if link needs to be resolved from V2 to V1.
	link, srvs, err := r.managedTryResolveSkylinkV2(ctx, link, true, false)
	if err != nil {
		return nil, nil, link, err
	}

	// Download the base sector
	baseSector, _, err := r.managedDownloadBaseSector(ctx, link, pricePerMS, false)
	return StreamerFromSlice(baseSector), srvs, link, err
}

// managedDownloadSkylink will take a link and turn it into the metadata and
// data of a download.
func (r *Renter) managedDownloadSkylink(ctx context.Context, link skymodules.Skylink, streamReadTimeout time.Duration, pricePerMS types.Currency, repair bool) (skymodules.SkyfileStreamer, error) {
	if r.staticDeps.Disrupt("resolveSkylinkToFixture") {
		sf, err := fixtures.LoadSkylinkFixture(link)
		if err != nil {
			return nil, errors.AddContext(err, "failed to fetch fixture")
		}
		err = skymodules.ValidateSkyfileMetadata(sf.Metadata)
		if err != nil {
			return nil, errors.AddContext(err, "invalid metadata")
		}
		rawMD, err := json.Marshal(sf.Metadata)
		if err != nil {
			return nil, errors.AddContext(err, "failed to fetch fixture")
		}
		return SkylinkStreamerFromSlice(sf.Content, sf.Metadata, rawMD, link, skymodules.SkyfileLayout{}), err
	}

	// Get the span from our context and defer cached tag update.
	var exists bool
	span := opentracing.SpanFromContext(ctx)
	defer func() {
		span.SetTag("cached", exists)
	}()

	// Get the current number of cached datasections for the skylink before
	// we do any downloading.
	id := link.DataSourceID()
	cachedSections := r.staticStreamBufferSet.staticCache.NumCachedSections(id)

	// Check if this skylink is already in the stream buffer set. If so, we can
	// skip the lookup procedure and use any data that other threads have
	// cached.
	var stream *stream
	stream, exists = r.staticStreamBufferSet.callNewStreamFromID(ctx, id, 0, streamReadTimeout, cachedSections, repair)
	if exists {
		return stream, nil
	}

	// Create the data source and add it to the stream buffer set.
	dataSource, err := r.managedSkylinkDataSource(ctx, link, pricePerMS)
	if err != nil {
		return nil, errors.AddContext(err, "unable to create data source for skylink")
	}
	stream = r.staticStreamBufferSet.callNewStream(ctx, dataSource, 0, streamReadTimeout, pricePerMS, cachedSections, repair)
	return stream, nil
}

// managedPinFanoutBlocking pins the fanout for a skylink in a blocking way.
// That means it is downloaded and uploaded under this node's contracts and only
// returns once the uploaded data has become available on the network.
func (r *Renter) managedPinFanoutBlocking(ctx context.Context, skylink skymodules.Skylink, pricePerMS types.Currency, downloadTimeout time.Duration, fup skymodules.FileUploadParams, layout skymodules.SkyfileLayout) (_ *filesystem.FileNode, err error) {
	// Wrap the context to allow for a disrupt later.
	dlCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Get cached sections.
	cachedSections := r.staticStreamBufferSet.staticCache.NumCachedSections(skylink.DataSourceID())

	// Create the data source and add it to the stream buffer set.
	dataSource, err := r.managedSkylinkDataSource(dlCtx, skylink, pricePerMS)
	if err != nil {
		return nil, errors.AddContext(err, "unable to create data source for skylink")
	}
	stream := r.staticStreamBufferSet.callNewStream(dlCtx, dataSource, 0, downloadTimeout, pricePerMS, cachedSections, false)

	// If the dependency is set, we want the context to time out here, right
	// before the upload.
	if r.staticDeps.Disrupt("PinTimeout") {
		cancel()
	}

	// Upload directly from the stream.
	fileNode, err := r.callUploadStreamFromReader(ctx, fup, stream)
	if err != nil {
		return nil, errors.AddContext(err, "unable to upload large skyfile")
	}

	// Sanity check that the fileNode created matches the layout. This is to
	// protect against an edge case where a portal can download a basesector
	// but none of the fanout data. In this case the fileNode is
	// successfully created, but with no data uploaded.
	actual := fileNode.Metadata().FileSize
	expected := int64(layout.Filesize)
	if actual != expected {
		return nil, fmt.Errorf("pin unsuccessful, filesize %v does not match layout filesize %v", actual, expected)
	}

	// Add skylink to FileNode
	err = fileNode.AddSkylink(skylink)
	if err != nil {
		return nil, errors.AddContext(err, "unable to upload skyfile fanout")
	}
	return fileNode, nil
}

// managedPinFanoutNonBlocking pins a fanout in a non-blocking way by simply
// creating a fileNode for it with the right size and letting the repair loop
// repair it from the network.
func (r *Renter) managedPinFanoutNonBlocking(ctx context.Context, fup skymodules.FileUploadParams, layout skymodules.SkyfileLayout) (_ *filesystem.FileNode, err error) {
	fileNode, err := r.managedInitFileNode(fup, layout.FanoutSize)
	if err != nil {
		return nil, err
	}
	bubbleDir, err := fup.SiaPath.Dir()
	if err != nil {
		return nil, err
	}
	r.staticDirUpdateBatcher.callQueueDirUpdate(bubbleDir)
	return fileNode, fileNode.MarkAsLazyUpload()
}

// PinSkylink will fetch the file associated with the Skylink, and then pin all
// necessary content to maintain that Skylink.
func (r *Renter) PinSkylink(ctx context.Context, skylink skymodules.Skylink, lup skymodules.SkyfileUploadParameters, downloadTimeout time.Duration, pricePerMS types.Currency, lazy bool) (err error) {
	err = r.tg.Add()
	if err != nil {
		return err
	}
	defer r.tg.Done()
	return r.managedPinSkylink(ctx, skylink, lup, downloadTimeout, pricePerMS, false, lazy)
}
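
// PinSkylink above re-uploads the content behind an existing skylink under
// this node's contracts, either blocking until the fanout is re-uploaded or
// lazily via the repair loop. A minimal caller sketch, assuming the renter r
// is running; the random siapath, the one minute timeout, the zero price per
// millisecond and the function name are assumptions made for the example.
func examplePinSkylink(r *Renter, link skymodules.Skylink) error {
	lup := skymodules.SkyfileUploadParameters{
		// Pinned skyfiles get their own siapath; defaults for the remaining
		// fields are filled in by skyfileEstablishDefaults.
		SiaPath: skymodules.RandomSkynetFilePath(),
	}
	return r.PinSkylink(r.tg.StopCtx(), link, lup, time.Minute, types.ZeroCurrency, false)
}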

// RestoreSkyfile restores a skyfile from disk such that the skylink is
// preserved.
func (r *Renter) RestoreSkyfile(reader io.Reader) (skymodules.Skylink, error) {
	// Restore the skylink and baseSector from the reader
	skylinkStr, baseSector, err := skymodules.RestoreSkylink(reader)
	if err != nil {
		return skymodules.Skylink{}, errors.AddContext(err, "unable to restore skyfile from backup")
	}

	// Load the skylink
	var skylink skymodules.Skylink
	err = skylink.LoadString(skylinkStr)
	if err != nil {
		return skymodules.Skylink{}, errors.AddContext(err, "unable to load skylink")
	}

	// Check if the new skylink is blocked
	err = r.managedHandleIsBlockedCheck(r.tg.StopCtx(), skylink, skymodules.SiaPath{})
	if err != nil {
		return skymodules.Skylink{}, err
	}

	// Check if the base sector is encrypted, and attempt to decrypt it.
	// This will fail if we don't have the decryption key.
	var fileSpecificSkykey skykey.Skykey
	encrypted := skymodules.IsEncryptedBaseSector(baseSector)
	if encrypted {
		fileSpecificSkykey, err = r.managedDecryptBaseSector(baseSector)
		if err != nil {
			return skymodules.Skylink{}, errors.AddContext(err, "Unable to decrypt skyfile base sector")
		}
	}

	// Parse the baseSector.
	sl, _, sm, _, _, baseSectorExtension, err := r.ParseSkyfileMetadata(baseSector)
	if err != nil {
		return skymodules.Skylink{}, errors.AddContext(err, "error parsing the baseSector")
	}
	baseSector = append(baseSector, baseSectorExtension...)

	// Create the upload parameters
	sup := skymodules.SkyfileUploadParameters{
		BaseChunkRedundancy: sl.FanoutDataPieces + sl.FanoutParityPieces,
		SiaPath:             skymodules.RandomSkynetFilePath(),

		// Set filename and mode
		Filename: sm.Filename,
		Mode:     sm.Mode,

		// Set the default path params
		DefaultPath:        sm.DefaultPath,
		DisableDefaultPath: sm.DisableDefaultPath,

		TryFiles:   sm.TryFiles,
		ErrorPages: sm.ErrorPages,
	}
	skyfileEstablishDefaults(&sup)

	// Re-encrypt the baseSector for upload and set the Skykey fields of the
	// sup.
	if encrypted {
		err = encryptBaseSectorWithSkykey(baseSector[:modules.SectorSize], sl, fileSpecificSkykey)
		if err != nil {
			return skymodules.Skylink{}, errors.AddContext(err, "error re-encrypting base sector")
		}

		// Set the Skykey fields
		sup.SkykeyName = fileSpecificSkykey.Name
		sup.FileSpecificSkykey = fileSpecificSkykey
	}

	// Create the SkyfileUploadReader for the restoration
	var restoreReader skymodules.SkyfileUploadReader
	if len(sm.Subfiles) == 0 {
		restoreReader = skymodules.NewSkyfileReader(reader, sup)
	} else {
		// Create multipart reader from the subfiles
		multiReader, err := skymodules.NewMultipartReader(reader, sm.Subfiles)
		if err != nil {
			return skymodules.Skylink{}, errors.AddContext(err, "unable to create multireader")
		}
		restoreReader = skymodules.NewSkyfileMultipartReader(multiReader, sup)
	}

	// Upload the Base Sector of the skyfile
	err = r.managedUploadBaseSector(r.tg.StopCtx(), sup, baseSector, skylink)
	if err != nil {
		return skymodules.Skylink{}, errors.AddContext(err, "failed to upload base sector")
	}

	// If there was no fanout then we are done.
	if sl.FanoutSize == 0 {
		return skylink, nil
	}

	// Create erasure coder and FileUploadParams
	extendedPath, err := sup.SiaPath.AddSuffixStr(skymodules.ExtendedSuffix)
	if err != nil {
		return skymodules.Skylink{}, errors.AddContext(err, "unable to create extended siapath")
	}

	// Create the FileUploadParams
	fup, err := fileUploadParams(extendedPath, int(sl.FanoutDataPieces), int(sl.FanoutParityPieces), sup.Force, sl.CipherType)
	if err != nil {
		return skymodules.Skylink{}, errors.AddContext(err, "unable to create FileUploadParams for large file")
	}

	// Generate a Cipher Key for the FileUploadParams.
	//
	// NOTE: Specifically using TypeThreefish instead of TypeDefaultRenter for two
	// reasons. First, TypeThreefish was the CipherType of the siafiles when
	// Skyfiles were introduced. Second, this should make the tests fail if the
	// TypeDefaultRenter changes, ensuring we add compat code for older converted
	// siafiles.
	if sl.CipherType == crypto.TypeThreefish {
		// For converted files we need to generate a SiaKey
		fup.CipherKey, err = crypto.NewSiaKey(sl.CipherType, sl.KeyData[:])
		if err != nil {
			return skymodules.Skylink{}, errors.AddContext(err, "unable to create Cipher key from SkyfileLayout KeyData")
		}
	} else {
		err = generateCipherKey(&fup, sup)
		if err != nil {
			return skymodules.Skylink{}, errors.AddContext(err, "unable to create Cipher key for FileUploadParams")
		}
	}

	// Upload the file
	fileNode, err := r.callUploadStreamFromReader(r.tg.StopCtx(), fup, restoreReader)
	if err != nil {
		return skymodules.Skylink{}, errors.AddContext(err, "unable to upload large skyfile")
	}

	// Defer closing the file
	defer func() {
		if err := fileNode.Close(); err != nil {
			r.staticLog.Printf("Could not close node, err: %s\n", err.Error())
		}
	}()

	// Check if any of the skylinks associated with the siafile are blocked
	if err := r.managedHandleFileNodeBlockedCheck(fileNode, fup.SiaPath); err != nil {
		return skymodules.Skylink{}, err
	}

	// Add the skylink to the siafiles.
	err = fileNode.AddSkylink(skylink)
	if err != nil {
		err = errors.AddContext(err, "unable to add skylink to the sianodes")
		deleteErr := r.DeleteFile(sup.SiaPath)
		// Don't bother returning an error if the file doesn't exist
		if !errors.Contains(deleteErr, filesystem.ErrNotExist) {
			err = errors.Compose(err, deleteErr)
		}
		return skymodules.Skylink{}, err
	}

	return skylink, nil
}

// UploadSkyfile will upload the provided data with the provided metadata,
// returning a skylink which can be used by any portal to recover the full
// original file and metadata. The skylink will be unique to the combination of
// both the file data and metadata.
func (r *Renter) UploadSkyfile(ctx context.Context, sup skymodules.SkyfileUploadParameters, reader skymodules.SkyfileUploadReader) (skylink skymodules.Skylink, err error) {
	// Set reasonable default values for any sup fields that are blank.
	skyfileEstablishDefaults(&sup)

	// If a skykey name or ID was specified, generate a file-specific key for
	// this upload.
	err = r.managedGenerateFilekey(&sup, nil)
	if err != nil {
		return skymodules.Skylink{}, errors.AddContext(err, "unable to upload skyfile")
	}

	// defer a function that cleans up the siafiles after a failed upload
	// attempt or after a dry run
	defer func() {
		if err != nil || sup.DryRun {
			if err := r.DeleteFile(sup.SiaPath); err != nil && !errors.Contains(err, filesystem.ErrNotExist) {
				r.staticLog.Printf("error deleting siafile after upload error: %v", err)
			}

			extendedSiaPath, spErr := sup.SiaPath.AddSuffixStr(skymodules.ExtendedSuffix)
			if spErr == nil {
				if err := r.DeleteFile(extendedSiaPath); err != nil && !errors.Contains(err, filesystem.ErrNotExist) {
					r.staticLog.Printf("error deleting extended siafile after upload error: %v\n", err)
				}
			}
		}
	}()

	// Create a span and attach it to our context
	span := opentracing.StartSpan("UploadSkyfile")
	ctx = opentracing.ContextWithSpan(ctx, span)
	defer func() {
		if err != nil {
			span.LogKV("err", err)
		}
		span.SetTag("success", err == nil)
		span.SetTag("skylink", skylink.String())
		span.Finish()
	}()

	// Upload the skyfile
	skylink, err = r.managedUploadSkyfile(ctx, sup, reader)
	if err != nil {
		return skymodules.Skylink{}, errors.AddContext(err, "unable to upload skyfile")
	}
	if r.staticDeps.Disrupt("SkyfileUploadFail") {
		return skymodules.Skylink{}, errors.New("SkyfileUploadFail")
	}

	// After uploading the file we queue a bubble for the new files on disk.
	dirPath, err := sup.SiaPath.Dir()
	if err != nil {
		r.staticLog.Println("UploadSkyfile: failed to get path of path's parent folder", err)
	}
	r.staticDirUpdateBatcher.callQueueDirUpdate(dirPath)

	// Check if skylink is blocked
	err = r.managedHandleIsBlockedCheck(ctx, skylink, sup.SiaPath)
	if err != nil && !sup.DryRun {
		return skymodules.Skylink{}, err
	}

	return skylink, nil
}

// managedHandleFileNodeBlockedCheck checks if any of the skylinks associated
// with the siafile are blocked and deletes the file if necessary via
// managedHandleIsBlockedCheck.
func (r *Renter) managedHandleFileNodeBlockedCheck(fileNode *filesystem.FileNode, siaPath skymodules.SiaPath) error {
	skylinkstrs := fileNode.Metadata().Skylinks
	for _, skylinkstr := range skylinkstrs {
		var skylink skymodules.Skylink
		err := skylink.LoadString(skylinkstr)
		if err != nil {
			// If there is an error just continue as we shouldn't prevent the
			// conversion due to bad old skylinks
			//
			// Log the error for debugging purposes
			r.staticLog.Printf("WARN: previous skylink for siafile %v could not be loaded from string; potentially corrupt skylink: %v", fileNode.SiaFilePath(), skylinkstr)
			continue
		}
		// Check if skylink is blocked
		err = r.managedHandleIsBlockedCheck(r.tg.StopCtx(), skylink, siaPath)
		if err == ErrSkylinkBlocked {
			return err
		}
		if err != nil {
			r.staticLog.Printf("WARN: error checking if skylink (%v) is blocked: %v", skylink, err)
		}
	}
	return nil
}
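
// UploadSkyfile above is the top level entry point for uploads. A minimal
// caller sketch, assuming the renter r is running; the random siapath, the
// filename, the in-memory reader and the function name are assumptions made
// for the example, and NewSkyfileReader is used the same way RestoreSkyfile
// uses it above.
func exampleUploadSkyfile(r *Renter, data []byte) (skymodules.Skylink, error) {
	sup := skymodules.SkyfileUploadParameters{
		SiaPath:  skymodules.RandomSkynetFilePath(),
		Filename: "example.txt",
	}
	// Wrap the raw bytes in a SkyfileUploadReader so the uploader can extract
	// the metadata alongside the file data.
	reader := skymodules.NewSkyfileReader(bytes.NewReader(data), sup)
	return r.UploadSkyfile(r.tg.StopCtx(), sup, reader)
}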

// ResolveSkylinkV2 resolves a V2 skylink to a V1 skylink if possible.
func (r *Renter) ResolveSkylinkV2(ctx context.Context, sl skymodules.Skylink) (skymodules.Skylink, []skymodules.RegistryEntry, error) {
	if err := r.tg.Add(); err != nil {
		return skymodules.Skylink{}, nil, err
	}
	defer r.tg.Done()
	slResolved, srvs, err := r.managedTryResolveSkylinkV2(ctx, sl, true, false)
	if err != nil {
		return skymodules.Skylink{}, srvs, err
	}
	return slResolved, srvs, nil
}

// SkylinkHealth returns the health of a skylink on the network.
func (r *Renter) SkylinkHealth(ctx context.Context, sl skymodules.Skylink, ppms types.Currency) (skymodules.SkylinkHealth, error) {
	if err := r.tg.Add(); err != nil {
		return skymodules.SkylinkHealth{}, err
	}
	defer r.tg.Done()
	return r.managedSkylinkHealth(ctx, sl, ppms)
}

// managedPinSkylink will fetch the file associated with the Skylink, and then
// pin all necessary content to maintain that Skylink.
func (r *Renter) managedPinSkylink(ctx context.Context, skylink skymodules.Skylink, lup skymodules.SkyfileUploadParameters, downloadTimeout time.Duration, pricePerMS types.Currency, skipBaseSector, lazy bool) (err error) {
	// Check if link is v2.
	if skylink.IsSkylinkV2() {
		return errors.New("can't pin version 2 skylink")
	}

	// Create a context with the given timeout. This timeout applies to the
	// download of the basesector.
	dlCtx := ctx
	var cancel context.CancelFunc
	if downloadTimeout > 0 {
		dlCtx, cancel = context.WithTimeout(ctx, downloadTimeout)
		defer cancel()
	}

	// Check if link is blocked
	err = r.managedHandleIsBlockedCheck(dlCtx, skylink, lup.SiaPath)
	if err != nil {
		return err
	}

	// Create a span.
	span := opentracing.StartSpan("PinSkylink")
	span.SetTag("skylink", skylink.String())
	defer span.Finish()

	// Attach the span to both contexts
	ctx = opentracing.ContextWithSpan(ctx, span)
	dlCtx = opentracing.ContextWithSpan(dlCtx, span)

	// Fetch the leading chunk.
	baseSector, _, _, err := r.DownloadByRoot(dlCtx, skylink.MerkleRoot(), 0, modules.SectorSize, pricePerMS)
	if err != nil {
		return errors.AddContext(err, "unable to fetch base sector of skylink")
	}
	if uint64(len(baseSector)) != modules.SectorSize {
		return errors.New("download did not fetch enough data, file cannot be re-pinned")
	}

	// Check if the base sector is encrypted, and attempt to decrypt it.
	var fileSpecificSkykey skykey.Skykey
	encrypted := skymodules.IsEncryptedBaseSector(baseSector)
	if encrypted {
		fileSpecificSkykey, err = r.managedDecryptBaseSector(baseSector)
		if err != nil {
			return errors.AddContext(err, "Unable to decrypt skyfile base sector")
		}
	}

	// Parse out the metadata of the skyfile.
	layout, _, _, _, _, baseSectorExtension, err := r.ParseSkyfileMetadata(baseSector)
	if err != nil {
		return errors.AddContext(err, "error parsing skyfile metadata")
	}

	// We need to pin the extended fanout as well so we just add it to the
	// base sector.
	baseSector = append(baseSector, baseSectorExtension...)

	// Set sane defaults for unspecified values.
	skyfileEstablishDefaults(&lup)

	// Start setting up the FUP.
	fup := skymodules.FileUploadParams{
		Force:      lup.Force,
		Repair:     false, // indicates whether this is a repair operation
		CipherType: crypto.TypePlain,
	}

	// Re-encrypt the baseSector for upload and add the fanout key to the fup.
	if encrypted {
		err = encryptBaseSectorWithSkykey(baseSector[:modules.SectorSize], layout, fileSpecificSkykey)
		if err != nil {
			return errors.AddContext(err, "Error re-encrypting base sector")
		}

		// Derive the fanout key and add it to the fup.
		fanoutSkykey, err := fileSpecificSkykey.DeriveSubkey(skymodules.FanoutNonceDerivation[:])
		if err != nil {
			return errors.AddContext(err, "Error deriving fanout skykey")
		}
		fup.CipherKey, err = fanoutSkykey.CipherKey()
		if err != nil {
			return errors.AddContext(err, "Error getting fanout CipherKey")
		}
		fup.CipherType = fanoutSkykey.CipherType()

		// These fields aren't used yet, but we'll set them anyway to mimic
		// behavior in upload/download code for consistency.
		lup.SkykeyName = fileSpecificSkykey.Name
		lup.FileSpecificSkykey = fileSpecificSkykey
	}

	// Re-upload the baseSector.
	if !skipBaseSector {
		err = r.managedUploadBaseSector(ctx, lup, baseSector, skylink)
		if err != nil {
			return errors.AddContext(err, "unable to upload base sector")
		}
	}

	// If there is no fanout, there is nothing more to do; the pin is complete.
	if layout.FanoutSize == 0 {
		return nil
	}

	// If there was an error, try and delete the file that was created
	defer func() {
		if err != nil {
			// Delete skyfile
			deleteErr := r.DeleteFile(lup.SiaPath)
			// Don't bother returning an error if the file doesn't exist
			if !errors.Contains(deleteErr, filesystem.ErrNotExist) {
				err = errors.Compose(err, deleteErr)
			}

			// Delete extended file
			extendedSiaPath, pathErr := lup.SiaPath.AddSuffixStr(skymodules.ExtendedSuffix)
			if pathErr == nil {
				deleteErr = r.DeleteFile(extendedSiaPath)
				// Don't bother returning an error if the file doesn't exist
				if !errors.Contains(deleteErr, filesystem.ErrNotExist) {
					err = errors.Compose(err, deleteErr)
				}
			}
		}
	}()

	// Create the erasure coder to use when uploading the file in bulk.
	fup.ErasureCode, err = skymodules.NewRSSubCode(int(layout.FanoutDataPieces), int(layout.FanoutParityPieces), crypto.SegmentSize)
	if err != nil {
		return errors.AddContext(err, "unable to create erasure coder for large file")
	}
	// Create the siapath for the skyfile extra data. This is going to be the
	// same as the skyfile upload siapath, except with a suffix.
	fup.SiaPath, err = lup.SiaPath.AddSuffixStr(skymodules.ExtendedSuffix)
	if err != nil {
		return errors.AddContext(err, "unable to create SiaPath for large skyfile extended data")
	}

	// Pin the fanout.
1429 	var fileNode *filesystem.FileNode
1430 	if lazy {
1431 		fileNode, err = r.managedPinFanoutNonBlocking(ctx, fup, layout)
1432 	} else {
1433 		fileNode, err = r.managedPinFanoutBlocking(ctx, skylink, pricePerMS, downloadTimeout, fup, layout)
1434 	}
1435 	if err != nil {
1436 		return errors.AddContext(err, "pinning of fanout failed")
1437 	}
1438 
1439 	// Add skylink to FileNode
1440 	err = fileNode.AddSkylink(skylink)
1441 	if err != nil {
1442 		return errors.AddContext(err, "unable to upload skyfile fanout")
1443 	}
1444 	return nil
1445 }
1446 
1447 // managedResolveSkylinkV2 resolves a V2 skylink to a V1 skylink. If the skylink
1448 // is not a V2 skylink, the input link is returned.
1449 func (r *Renter) managedResolveSkylinkV2(ctx context.Context, sl skymodules.Skylink, blocklistCheck, ignoreCutoff bool) (skylink skymodules.Skylink, _ *skymodules.RegistryEntry, err error) {
1450 	// If the Skylink is a V1 Skylink, just return the skylink
1451 	if sl.IsSkylinkV1() {
1452 		return sl, nil, nil
1453 	}
1454 	// Future proof check that the Skylink is a V2 Skylink
1455 	if !sl.IsSkylinkV2() {
1456 		return skymodules.Skylink{}, nil, ErrInvalidSkylinkVersion
1457 	}
1458 
1459 	// Create a child span to capture the resolve for v2 skylinks.
1460 	span, ctx := opentracing.StartSpanFromContext(ctx, "managedTryResolveSkylinkV2")
1461 	defer func() {
1462 		if err != nil {
1463 			span.LogKV("error", err)
1464 		}
1465 		span.SetTag("success", err == nil)
1466 		span.SetTag("skylinkv2", skylink.String())
1467 		span.Finish()
1468 	}()
1469 
1470 	// Get link from registry entry but ignore the regular cutoff and
1471 	// instead apply a stricter timeout. We use the same timeout we use for
1472 	// checking the health of an entry on the network.
1473 	readCtx, cancel := context.WithTimeout(ctx, DefaultRegistryHealthTimeout)
1474 	defer cancel()
1475 
1476 	srv, err := r.managedReadRegistry(readCtx, sl.RegistryEntryID(), nil, nil, ignoreCutoff)
1477 	if err != nil {
1478 		return skymodules.Skylink{}, nil, err
1479 	}
1480 	if len(srv.Data) == 0 {
1481 		return skymodules.Skylink{}, nil, errors.New("failed to resolve skylink")
1482 	}
1483 
1484 	err = skylink.LoadBytes(srv.Data)
1485 	if err != nil {
1486 		return skymodules.Skylink{}, nil, errors.AddContext(err, "failed to parse resolved skylink")
1487 	}
1488 	// If the link resolves to an empty skylink, return ErrSkylinkDeleted so
1489 	// the API can treat the underlying file as deleted.
1490 	if skylink == (skymodules.Skylink{}) {
1491 		return skymodules.Skylink{}, &srv, ErrSkylinkDeleted
1492 	}
1493 
1494 	// See if we need to check the blocklist
1495 	if !blocklistCheck {
1496 		return skylink, &srv, nil
1497 	}
1498 
1499 	// Check if link is blocked
1500 	err = r.managedHandleIsBlockedCheck(ctx, skylink, skymodules.SiaPath{})
1501 	if err != nil {
1502 		return skymodules.Skylink{}, nil, err
1503 	}
1504 	return skylink, &srv, nil
1505 }
1506 
1507 // managedTryResolveSkylinkV2 tries to resolve a V2 skylink to a V1 skylink. If
1508 // the skylink is not a V2 skylink, the input link is returned. If the V2
1509 // skylink is a nested V2 skylink, it will continue to try to resolve down to a
1510 // V1 skylink until MaxSkylinkV2ResolvingDepth is met. If the skylink is nested
1511 // more times than MaxSkylinkV2ResolvingDepth then an error is returned.
1512 func (r *Renter) managedTryResolveSkylinkV2(ctx context.Context, link skymodules.Skylink, blocklistCheck, ignoreCutoff bool) (_ skymodules.Skylink, srvs []skymodules.RegistryEntry, err error) {
1513 	// Check if link needs to be resolved from V2 to V1.
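	// Each iteration reads the registry entry referenced by the current V2
	// link and replaces the link with the skylink stored in that entry,
	// collecting the visited entries in srvs. A chain like V2 -> V2 -> V1
	// therefore resolves after two iterations.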
1514 	for i := 0; i < int(MaxSkylinkV2ResolvingDepth) && link.IsSkylinkV2(); i++ {
1515 		var srv *skymodules.RegistryEntry
1516 		link, srv, err = r.managedResolveSkylinkV2(ctx, link, blocklistCheck, ignoreCutoff)
1517 		if srv != nil {
1518 			srvs = append(srvs, *srv)
1519 		}
1520 		if err != nil {
1521 			return skymodules.Skylink{}, srvs, err
1522 		}
1523 	}
1524 
1525 	// If the link is still a V2 skylink, it is nested more times than is currently supported, so return an error.
1526 	if link.IsSkylinkV2() {
1527 		return skymodules.Skylink{}, nil, ErrSkylinkNesting
1528 	}
1529 
1530 	// If we made it to a V1 link check if it is blocked.
1531 	if blocklistCheck {
1532 		err = r.managedHandleIsBlockedCheck(ctx, link, skymodules.SiaPath{})
1533 		if err != nil {
1534 			return skymodules.Skylink{}, nil, err
1535 		}
1536 	}
1537 	return link, srvs, nil
1538 }
1539 
1540 // managedSkylinkHealth returns the health of a skylink on the network.
1541 func (r *Renter) managedSkylinkHealth(ctx context.Context, sl skymodules.Skylink, ppms types.Currency) (skymodules.SkylinkHealth, error) {
1542 	// Resolve the skylink if necessary.
1543 	sl, _, err := r.managedTryResolveSkylinkV2(ctx, sl, true, false)
1544 	if err != nil {
1545 		return skymodules.SkylinkHealth{}, errors.AddContext(err, "failed to resolve skylink")
1546 	}
1547 
1548 	// Download the base sector, bypassing the regular caching so that we
1549 	// get a fresh pcwsWorkerState for the health check.
1550 	baseSector, ws, err := r.managedDownloadBaseSector(ctx, sl, ppms, true)
1551 	if err != nil {
1552 		return skymodules.SkylinkHealth{}, errors.AddContext(err, "unable to download base sector")
1553 	}
1554 
1555 	// Check if the base sector is encrypted, and attempt to decrypt it.
1556 	encrypted := skymodules.IsEncryptedBaseSector(baseSector)
1557 	if encrypted {
1558 		_, err = r.managedDecryptBaseSector(baseSector)
1559 		if err != nil {
1560 			return skymodules.SkylinkHealth{}, errors.AddContext(err, "failed to decrypt base sector")
1561 		}
1562 	}
1563 
1564 	// Parse out the metadata of the skyfile.
1565 	layout, fanoutBytes, _, _, _, _, err := r.ParseSkyfileMetadata(baseSector)
1566 	if err != nil {
1567 		return skymodules.SkylinkHealth{}, errors.AddContext(err, "error parsing skyfile metadata")
1568 	}
1569 	numPieces := int(layout.FanoutDataPieces + layout.FanoutParityPieces)
1570 
1571 	// Prepare the list of roots to ask the hosts for.
1572 	var roots []crypto.Hash
1573 
1574 	// If the file has a fanout, ask the hosts for the fanout as well.
1575 	rootIndexToChunkIndex := make(map[int]int)
1576 	numChunks := 0
1577 	if len(fanoutBytes) > 0 {
1578 		// Create the list of chunks from the fanout. Since we want to
1579 		// give an overview of the health of the file on the network, we
1580 		// don't compress the fanout.
1581 		fanoutChunks, err := layout.DecodeFanoutIntoChunks(fanoutBytes)
1582 		if err != nil {
1583 			return skymodules.SkylinkHealth{}, errors.AddContext(err, "error parsing skyfile fanout")
1584 		}
1585 
1586 		for chunkIndex, chunk := range fanoutChunks {
1587 			for _, root := range chunk {
1588 				rootIndexToChunkIndex[len(roots)] = chunkIndex
1589 				roots = append(roots, root)
1590 			}
1591 		}
1592 		numChunks = len(fanoutChunks)
1593 	}
1594 
1595 	// Get the workers.
1596 	workers := r.staticWorkerPool.callWorkers()
1597 
1598 	// Launch the jobs in batches. Each batch gets its own response channel.
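	// For example, if maxHasSectorBatchSize were 100 and the fanout had 250
	// roots, the roots would be split into batches of 100, 100 and 50, each
	// with its own response channel and launched-worker count.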
1599 	remainingRoots := roots
1600 	var responseChans []chan *jobHasSectorResponse
1601 	var launchedWorkerss []int
1602 	for batchIndex := 0; len(remainingRoots) > 0; batchIndex++ {
1603 		batch := remainingRoots
1604 		if uint64(len(remainingRoots)) > maxHasSectorBatchSize {
1605 			batch = batch[:maxHasSectorBatchSize]
1606 		}
1607 		remainingRoots = remainingRoots[len(batch):]
1608 		responseChan := make(chan *jobHasSectorResponse, len(workers))
1609 
1610 		launchedWorkers := 0
1611 		for _, worker := range workers {
1612 			// Check for gouging.
1613 			pt := worker.staticPriceTable().staticPriceTable
1614 			cache := worker.staticCache()
1615 			err := staticPCWSGougingCache.IsGouging(worker.staticHostPubKeyStr, pt, cache.staticRenterAllowance, len(workers), len(roots))
1616 			if err != nil {
1617 				continue // ignore
1618 			}
1619 
1620 			// Add job to worker.
1621 			jhs := worker.newJobHasSector(ctx, responseChan, numPieces, batch...)
1622 			if !worker.staticJobHasSectorQueue.callAdd(jhs) {
1623 				continue // ignore
1624 			}
1625 			launchedWorkers++
1626 		}
1627 		responseChans = append(responseChans, responseChan)
1628 		launchedWorkerss = append(launchedWorkerss, launchedWorkers)
1629 
1630 		// If no workers could be launched for a batch, the health check can't be completed, so abort with an error.
1631 		if launchedWorkers == 0 {
1632 			return skymodules.SkylinkHealth{}, errors.New("no workers were launched successfully")
1633 		}
1634 	}
1635 
1636 	// Each batch has its own waiting goroutine.
1637 	// TODO: Once Sia has upgraded to support larger MDM programs we don't
1638 	// need the batching here anymore.
1639 	var wg sync.WaitGroup
1640 	rootTotals := make([]uint64, len(roots))
1641 	for batchIndex, responseChan := range responseChans {
1642 		wg.Add(1)
1643 		go func(batchIndex uint64, responseChan chan *jobHasSectorResponse) {
1644 			defer wg.Done()
1645 
1646 			for i := 0; i < launchedWorkerss[batchIndex]; i++ {
1647 				var resp *jobHasSectorResponse
1648 				select {
1649 				case <-ctx.Done():
1650 					return
1651 				case resp = <-responseChan:
1652 				}
1653 
1654 				if resp.staticErr != nil {
1655 					continue
1656 				}
1657 				// Add the result to the totals.
1658 				for _, index := range resp.staticAvailbleIndices {
1659 					batchOffset := uint64(maxHasSectorBatchSize) * batchIndex
1660 					rootTotals[batchOffset+index]++
1661 				}
1662 			}
1663 		}(uint64(batchIndex), responseChan)
1664 	}
1665 	wg.Wait()
1666 
1667 	// Wait for the worker state results for the base sector.
1668 	resps := ws.WaitForResults(ctx)
1669 	var baseSectorRedundancy uint64
1670 	for _, resp := range resps {
1671 		if resp.err != nil {
1672 			continue
1673 		}
1674 		// Check > 0 because base sector only has 1 piece.
1675 		if len(resp.pieceIndices) > 0 {
1676 			baseSectorRedundancy++
1677 		}
1678 	}
1679 
1680 	// Create a slice of good pieces for each chunk. A chunk has a good
1681 	// piece if a root belonging to the chunk exists >0 times on the
1682 	// network.
1683 	chunkGoodPieces := make([]int, numChunks)
1684 	onlyOnePiecePerChunk := layout.FanoutDataPieces == 1 && layout.CipherType == crypto.TypePlain
1685 	for i := 0; i < len(rootTotals); i++ {
1686 		chunkIndex := rootIndexToChunkIndex[i]
1687 		if onlyOnePiecePerChunk {
1688 			// Special Case: If we only need one piece per chunk, we
1689 			// count all occurrences of that piece up until
1690 			// numPieces.
1691 			chunkGoodPieces[chunkIndex] += int(rootTotals[i])
1692 			if chunkGoodPieces[chunkIndex] > numPieces {
1693 				chunkGoodPieces[chunkIndex] = numPieces
1694 			}
1695 		} else if rootTotals[i] > 0 {
1696 			// Otherwise every piece only counts as 1 good piece.
1697 			chunkGoodPieces[chunkIndex]++
1698 		}
1699 	}
1700 
1701 	// Set the base sector redundancy.
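	// Below, chunk health is computed as goodPieces / FanoutDataPieces and
	// the file's effective fanout redundancy is the worst chunk. For
	// example, with 10 data pieces and 20 parity pieces, a chunk with 25 of
	// its 30 pieces found on the network has a health of 2.5.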
1702 	health := skymodules.SkylinkHealth{
1703 		BaseSectorRedundancy: baseSectorRedundancy,
1704 	}
1705 
1706 	// If the fanout datapieces are 0, there is no fanout and we are done.
1707 	if layout.FanoutDataPieces == 0 {
1708 		return health, nil
1709 	}
1710 
1711 	// Compute the health of all chunks and remember the worst one. That's
1712 	// the overall fanout health.
1713 	worstHealth := float64(numPieces / int(layout.FanoutDataPieces))
1714 	fanoutHealth := make([]float64, 0, numChunks)
1715 	for _, goodPieces := range chunkGoodPieces {
1716 		chunkHealth := float64(goodPieces) / float64(layout.FanoutDataPieces)
1717 		if chunkHealth < worstHealth {
1718 			worstHealth = chunkHealth
1719 		}
1720 		fanoutHealth = append(fanoutHealth, chunkHealth)
1721 	}
1722 	return skymodules.SkylinkHealth{
1723 		BaseSectorRedundancy:      baseSectorRedundancy,
1724 		FanoutEffectiveRedundancy: worstHealth,
1725 		FanoutRedundancy:          fanoutHealth,
1726 		FanoutDataPieces:          layout.FanoutDataPieces,
1727 		FanoutParityPieces:        layout.FanoutParityPieces,
1728 	}, nil
1729 }
1730 
1731 // ParseSkyfileMetadata parses all the information from a base sector similar to
1732 // skymodules.ParseSkyfileMetadata. The difference is that it can also parse a
1733 // recursive base sector.
1734 func (r *Renter) ParseSkyfileMetadata(baseSector []byte) (sl skymodules.SkyfileLayout, fanoutBytes []byte, sm skymodules.SkyfileMetadata, rawSM, baseSectorPayload, baseSectorExtension []byte, err error) {
1735 	if err = r.tg.Add(); err != nil {
1736 		return
1737 	}
1738 	defer r.tg.Done()
1739 
1740 	// Try parsing the metadata the regular way.
1741 	sl, fanoutBytes, sm, rawSM, baseSectorPayload, err = skymodules.ParseSkyfileMetadata(baseSector)
1742 	if err == nil || (err != nil && !errors.Contains(err, skymodules.ErrRecursiveBaseSector)) {
1743 		return
1744 	}
1745 
1746 	// TODO: Should we request memory here?
1747 
1748 	// Fanout is recursive. Parse only the layout for now.
1749 	sl = skymodules.ParseSkyfileLayout(baseSector)
1750 
1751 	// Get the size of the compressed payload.
1752 	payloadSize := sl.FanoutSize + sl.MetadataSize
1753 
1754 	// Figure out how many bytes of the base sector can be used. Should be
1755 	// all bytes except for the layout at the beginning.
1756 	maxSize := uint64(len(baseSector)) - skymodules.SkyfileLayoutSize
1757 
1758 	// To parse the metadata and fanout, we need to download the full
1759 	// payload.
1760 	translatedOffset, chunkSpans := skymodules.TranslateBaseSectorExtensionOffset(0, payloadSize, payloadSize, maxSize)
1761 
1762 	// Figure out how many hashes were stored in the base sector and grab
1763 	// them.
1764 	usedHashes, _ := skymodules.BaseSectorExtensionSize(payloadSize, maxSize)
1765 	hashesStart := uint64(skymodules.SkyfileLayoutSize)
1766 	hashesEnd := hashesStart + usedHashes*crypto.HashSize
1767 	if hashesEnd > uint64(len(baseSector)) {
1768 		err = fmt.Errorf("hashesEnd is out-of-bounds %v > %v", hashesEnd, len(baseSector))
1769 		build.Critical(err)
1770 		return
1771 	}
1772 	hashes := baseSector[hashesStart:hashesEnd]
1773 
1774 	var emptyRoot crypto.Hash
1775 	for i, span := range chunkSpans {
1776 		// Download each chunk in parallel.
1777 		sectors := make([][]byte, span.MaxIndex-span.MinIndex+1)
1778 		errs := make([]error, len(sectors))
1779 		resultIndex := 0
1780 		var wg sync.WaitGroup
1781 		for chunkIndex := span.MinIndex; chunkIndex <= span.MaxIndex; chunkIndex++ {
1782 			// Extract root.
1783 			var root crypto.Hash
1784 			copy(root[:], hashes[chunkIndex*crypto.HashSize:][:crypto.HashSize])
1785 
1786 			// If the root is empty we are done.
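			// An all-zero root marks the unused, zero-padded tail
			// of the hash region, so there are no further sectors
			// to fetch in this span.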
1787 			if root == emptyRoot {
1788 				break
1789 			}
1790 
1791 			wg.Add(1)
1792 			go func(root crypto.Hash, resultIndex int) {
1793 				defer wg.Done()
1794 				sectors[resultIndex], _, _, errs[resultIndex] = r.managedDownloadByRoot(r.tg.StopCtx(), root, 0, modules.SectorSize, skymodules.DefaultSkynetPricePerMS)
1795 			}(root, resultIndex)
1796 			resultIndex++
1797 		}
1798 		wg.Wait()
1799 
1800 		// Check errors.
1801 		for _, err := range errs {
1802 			if err != nil {
1803 				return skymodules.SkyfileLayout{}, nil, skymodules.SkyfileMetadata{}, nil, nil, nil, err
1804 			}
1805 		}
1806 
1807 		// The downloaded sectors become the new hashes.
1808 		hashes = bytes.Join(sectors, nil)
1809 
1810 		// Append the intermediary hashes to the extension.
1811 		if i < len(chunkSpans)-1 {
1812 			baseSectorExtension = append(baseSectorExtension, hashes...)
1813 		}
1814 	}
1815 
1816 	// In the last iteration 'hashes' is the actual payload. That's why we
1817 	// trim it and only then add it to the extension.
1818 	hashes = hashes[translatedOffset:][:payloadSize]
1819 	baseSectorExtension = append(baseSectorExtension, hashes...)
1820 
1821 	// Sanity check length.
1822 	if uint64(len(hashes)) != payloadSize {
1823 		err = fmt.Errorf("expected len(hashes) %v but got %v", payloadSize, len(hashes))
1824 		build.Critical(err)
1825 		return
1826 	}
1827 
1828 	// Return parsed data.
1829 	fanoutBytes = hashes[:sl.FanoutSize]
1830 	rawSM = hashes[sl.FanoutSize:]
1831 	err = json.Unmarshal(rawSM, &sm)
1832 	return sl, fanoutBytes, sm, rawSM, nil, baseSectorExtension, err
1833 }
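For orientation, the following is a minimal, illustrative sketch (not part of skyfile.go) of how a caller outside this package might combine the exported methods above to resolve a skylink and inspect its health. It assumes skymodules.Skylink exposes a LoadString method, mirroring the siad Skylink type, and reuses skymodules.DefaultSkynetPricePerMS, which this file also passes as a price hint.

package example

import (
	"context"
	"fmt"

	"gitlab.com/SkynetLabs/skyd/skymodules"
	"gitlab.com/SkynetLabs/skyd/skymodules/renter"
)

// checkSkylink resolves a (possibly V2) skylink and prints a short summary of
// its health on the network. The renter is assumed to be fully constructed
// and running; LoadString is assumed to parse the textual skylink.
func checkSkylink(ctx context.Context, r *renter.Renter, link string) error {
	var sl skymodules.Skylink
	if err := sl.LoadString(link); err != nil {
		return fmt.Errorf("invalid skylink %q: %w", link, err)
	}

	// Resolve V2 skylinks down to the underlying V1 skylink; V1 skylinks
	// are returned unchanged.
	resolved, _, err := r.ResolveSkylinkV2(ctx, sl)
	if err != nil {
		return fmt.Errorf("unable to resolve skylink: %w", err)
	}

	// Query the health of the resolved skylink.
	health, err := r.SkylinkHealth(ctx, resolved, skymodules.DefaultSkynetPricePerMS)
	if err != nil {
		return fmt.Errorf("unable to fetch skylink health: %w", err)
	}
	fmt.Printf("base sector redundancy: %d, effective fanout redundancy: %.2f\n",
		health.BaseSectorRedundancy, health.FanoutEffectiveRedundancy)
	return nil
}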