// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package feeder

import (
	"encoding/binary"

	"github.com/ethersphere/bee/v2/pkg/file/pipeline"
	"github.com/ethersphere/bee/v2/pkg/swarm"
)

// span is the size in bytes of the little-endian span prefix that is
// prepended to every chunk's payload before it is handed downstream.
const span = swarm.SpanSize

// chunkFeeder buffers incoming writes and releases them to the next
// pipeline writer in fixed chunk-sized quantities, each prefixed with
// its span (payload length).
type chunkFeeder struct {
	size      int                  // target chunk payload size in bytes
	next      pipeline.ChainWriter // downstream writer that receives full chunks
	buffer    []byte               // holds a partial chunk between Write calls
	bufferIdx int                  // number of valid bytes currently in buffer
	wrote     int64                // running count of data passed downstream; Sum only tests it against zero to detect an empty file
}

// NewChunkFeederWriter creates a new chunkFeeder that allows for partial
// writes into the pipeline. Any pending data in the buffer is flushed to
// subsequent writers when Sum() is called.
func NewChunkFeederWriter(size int, next pipeline.ChainWriter) pipeline.Interface {
	return &chunkFeeder{
		size:   size,
		next:   next,
		buffer: make([]byte, size),
	}
}

// Write writes data to the chunk feeder. It returns the number of bytes written
// to the feeder. The number of bytes written does not necessarily reflect how many
// bytes were actually flushed to subsequent writers, since the feeder is buffered
// and works in chunk-size quantiles.
func (f *chunkFeeder) Write(b []byte) (int, error) {
	l := len(b) // data length
	w := 0      // written

	// incoming data plus what is already pending does not complete a
	// chunk: no boundary is crossed, so just accumulate and return
	if l+f.bufferIdx < f.size {
		// write the data into the buffer and return
		n := copy(f.buffer[f.bufferIdx:], b)
		f.bufferIdx += n
		return n, nil
	}

	// if we are here it means we have to do at least one write.
	// d is a scratch chunk (span prefix + payload) reused for every
	// full chunk flushed during this call.
	d := make([]byte, f.size+span)
	var sp int // span of current write

	//copy from existing buffer to this one
	sp = copy(d[span:], f.buffer[:f.bufferIdx])

	// don't account what was already in the buffer when returning
	// number of written bytes
	if sp > 0 {
		w -= sp
	}

	var n int
	for i := 0; i < len(b); {
		// if we can't fill a whole write, buffer the rest and return
		if sp+len(b[i:]) < f.size {
			n = copy(f.buffer, b[i:])
			f.bufferIdx = n
			return w + n, nil
		}

		// fill stuff up from the incoming write.
		// invariant: f.bufferIdx == sp here — either the initial
		// carry-over from the buffer, or both zero after a flush.
		n = copy(d[span+f.bufferIdx:], b[i:])
		i += n
		sp += n

		binary.LittleEndian.PutUint64(d[:span], uint64(sp))
		args := &pipeline.PipeWriteArgs{Data: d[:span+sp], Span: d[:span]}
		err := f.next.ChainWrite(args)
		if err != nil {
			// NOTE(review): chunks flushed earlier in this call are not
			// reported in the returned count (0, err) — TODO confirm
			// callers never resume a partially failed Write.
			return 0, err
		}
		f.bufferIdx = 0
		w += sp
		sp = 0
	}
	// NOTE(review): the early returns above do not add to f.wrote; that
	// is harmless for the current use (Sum flushes the non-empty buffer
	// and bumps f.wrote itself), but f.wrote is not a byte-exact total.
	f.wrote += int64(w)
	return w, nil
}

// Sum flushes any pending data to subsequent writers and returns
// the cryptographic root-hash representing the data written to
// the feeder.
func (f *chunkFeeder) Sum() ([]byte, error) {
	// flush existing data in the buffer as a final, short chunk
	if f.bufferIdx > 0 {
		d := make([]byte, f.bufferIdx+span)
		copy(d[span:], f.buffer[:f.bufferIdx])
		binary.LittleEndian.PutUint64(d[:span], uint64(f.bufferIdx))
		args := &pipeline.PipeWriteArgs{Data: d, Span: d[:span]}
		err := f.next.ChainWrite(args)
		if err != nil {
			return nil, err
		}
		f.wrote += int64(len(d))
	}

	if f.wrote == 0 {
		// this is an empty file, we should write the span of
		// an empty file (0). d is zero-valued, so both the span
		// prefix and the (empty) payload are already correct.
		d := make([]byte, span)
		args := &pipeline.PipeWriteArgs{Data: d, Span: d}
		err := f.next.ChainWrite(args)
		if err != nil {
			return nil, err
		}
		f.wrote += int64(len(d))
	}

	return f.next.Sum()
}