git.sr.ht/~pingoo/stdx@v0.0.0-20240218134121-094174641f6e/crypto/internal/blake3/bao.go

package blake3

import (
	"bytes"
	"encoding/binary"
	"io"
	"math/bits"
)

// BaoEncodedSize returns the size of a Bao encoding for the provided quantity
// of data.
func BaoEncodedSize(dataLen int, outboard bool) int {
	size := 8
	if dataLen > 0 {
		chunks := (dataLen + chunkSize - 1) / chunkSize
		cvs := 2*chunks - 2 // a tree with chunks leaves has chunks-1 parent nodes, each storing two child CVs
		size += cvs * 32
	}
	if !outboard {
		size += dataLen
	}
	return size
}
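
// For example, a 4096-byte input spans four 1024-byte chunks, so its combined
// encoding holds the 8-byte length prefix, 2*4-2 = 6 chaining values
// (6*32 = 192 bytes), and the 4096 data bytes, 4296 bytes in total; the
// outboard encoding omits the data and is only 8 + 192 = 200 bytes.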

// BaoEncode computes the intermediate BLAKE3 tree hashes of data and writes
// them to dst. If outboard is false, the contents of data are also written to
// dst, interleaved with the tree hashes. It also returns the tree root, i.e.
// the 256-bit BLAKE3 hash.
//
// Note that dst is not written sequentially, and therefore must be initialized
// with sufficient capacity to hold the encoding; see BaoEncodedSize.
func BaoEncode(dst io.WriterAt, data io.Reader, dataLen int64, outboard bool) ([32]byte, error) {
	var counter uint64
	var chunkBuf [chunkSize]byte
	var err error
	read := func(p []byte) []byte {
		if err == nil {
			_, err = io.ReadFull(data, p)
		}
		return p
	}
	write := func(p []byte, off uint64) {
		if err == nil {
			_, err = dst.WriteAt(p, int64(off))
		}
	}

	// NOTE: unlike the reference implementation, we write directly in
	// pre-order, rather than writing in post-order and then flipping. This cuts
	// the I/O required in half, but also makes hashing multiple chunks in SIMD
	// a lot trickier. I'll save that optimization for a rainy day.
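	//
	// For example, a two-chunk combined encoding is laid out as
	//
	//   [8-byte length][left CV (32)][right CV (32)][chunk 0 (1024)][chunk 1 (1024)]
	//
	// because each parent's two CVs are written before its subtrees.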
	var rec func(bufLen uint64, flags uint32, off uint64) (uint64, [8]uint32)
	rec = func(bufLen uint64, flags uint32, off uint64) (uint64, [8]uint32) {
		if err != nil {
			return 0, [8]uint32{}
		} else if bufLen <= chunkSize {
			cv := chainingValue(compressChunk(read(chunkBuf[:bufLen]), &iv, counter, flags))
			counter++
			if !outboard {
				write(chunkBuf[:bufLen], off)
			}
			return 0, cv
		}
		mid := uint64(1) << (bits.Len64(bufLen-1) - 1)
		lchildren, l := rec(mid, 0, off+64)
		llen := lchildren * 32
		if !outboard {
			llen += (mid / chunkSize) * chunkSize
		}
		rchildren, r := rec(bufLen-mid, 0, off+64+llen)
		write(cvToBytes(&l)[:], off)
		write(cvToBytes(&r)[:], off+32)
		return 2 + lchildren + rchildren, chainingValue(parentNode(l, r, iv, flags))
	}

	binary.LittleEndian.PutUint64(chunkBuf[:8], uint64(dataLen))
	write(chunkBuf[:8], 0)
	_, root := rec(uint64(dataLen), flagRoot, 8)
	return *cvToBytes(&root), err
}
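
// baoOutboardSketch is an illustrative usage sketch (the helper name is made
// up and is not part of the package's API): it builds an outboard encoding,
// i.e. only the length prefix and the tree of chaining values, with the data
// kept separate. It relies on the bufferAt helper defined later in this file.
func baoOutboardSketch(data []byte) ([]byte, [32]byte, error) {
	// dst is written at arbitrary offsets, so it must be pre-sized.
	tree := bufferAt{buf: make([]byte, BaoEncodedSize(len(data), true))}
	root, err := BaoEncode(&tree, bytes.NewReader(data), int64(len(data)), true)
	return tree.buf, root, err
}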

// BaoDecode reads content and tree data from the provided reader(s), and
// streams the verified content to dst. It returns false if verification fails.
// If the content and tree data are interleaved, outboard should be nil.
func BaoDecode(dst io.Writer, data, outboard io.Reader, root [32]byte) (bool, error) {
	if outboard == nil {
		outboard = data
	}
	var counter uint64
	var buf [chunkSize]byte
	var err error
	read := func(r io.Reader, p []byte) []byte {
		if err == nil {
			_, err = io.ReadFull(r, p)
		}
		return p
	}
	readParent := func() (l, r [8]uint32) {
		read(outboard, buf[:64])
		return bytesToCV(buf[:32]), bytesToCV(buf[32:])
	}

	var rec func(cv [8]uint32, bufLen uint64, flags uint32) bool
	rec = func(cv [8]uint32, bufLen uint64, flags uint32) bool {
		if err != nil {
			return false
		} else if bufLen <= chunkSize {
			n := compressChunk(read(data, buf[:bufLen]), &iv, counter, flags)
			counter++
			if err != nil || cv != chainingValue(n) {
				return false
			}
			_, err = dst.Write(buf[:bufLen]) // chunk verified against its CV; stream it to dst
			return true
		}
		l, r := readParent()
		n := parentNode(l, r, iv, flags)
		mid := uint64(1) << (bits.Len64(bufLen-1) - 1)
		return chainingValue(n) == cv && rec(l, mid, 0) && rec(r, bufLen-mid, 0)
	}

	read(outboard, buf[:8])
	dataLen := binary.LittleEndian.Uint64(buf[:8])
	ok := rec(bytesToCV(root[:]), dataLen, flagRoot)
	return ok, err
}
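
// baoStreamSketch is an illustrative usage sketch (the helper name is made
// up and is not part of the package's API): it verifies a combined encoding
// against root while collecting the verified content in memory.
func baoStreamSketch(encoded []byte, root [32]byte) ([]byte, bool) {
	var content bytes.Buffer
	ok, err := BaoDecode(&content, bytes.NewReader(encoded), nil, root)
	return content.Bytes(), ok && err == nil
}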

type bufferAt struct {
	buf []byte
}

func (b *bufferAt) WriteAt(p []byte, off int64) (int, error) {
	if copy(b.buf[off:], p) != len(p) {
		panic("bad buffer size")
	}
	return len(p), nil
}

// BaoEncodeBuf returns the Bao encoding and root (i.e. BLAKE3 hash) for data.
func BaoEncodeBuf(data []byte, outboard bool) ([]byte, [32]byte) {
	buf := bufferAt{buf: make([]byte, BaoEncodedSize(len(data), outboard))}
	root, _ := BaoEncode(&buf, bytes.NewReader(data), int64(len(data)), outboard)
	return buf.buf, root
}

// BaoVerifyBuf verifies the Bao encoding and root (i.e. BLAKE3 hash) for data.
// If the content and tree data are interleaved, outboard should be nil.
func BaoVerifyBuf(data, outboard []byte, root [32]byte) bool {
	var or io.Reader = bytes.NewReader(outboard)
	if outboard == nil {
		or = nil
	}
	ok, _ := BaoDecode(io.Discard, bytes.NewReader(data), or, root)
	return ok
}
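
// baoRoundTripSketch is an illustrative usage sketch (the helper name is made
// up and is not part of the package's API): it encodes data both ways and
// verifies each encoding against the returned root.
func baoRoundTripSketch(data []byte) bool {
	// Combined mode interleaves tree and content, so outboard is nil on verify.
	combined, root := BaoEncodeBuf(data, false)
	okCombined := BaoVerifyBuf(combined, nil, root)

	// Outboard mode keeps the tree separate from the original data.
	tree, root2 := BaoEncodeBuf(data, true)
	okOutboard := BaoVerifyBuf(data, tree, root2)

	return okCombined && okOutboard && root == root2
}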