git.sr.ht/~pingoo/stdx@v0.0.0-20240218134121-094174641f6e/crypto/internal/blake3/blake3.go

// Package blake3 implements the BLAKE3 cryptographic hash function.
package blake3 // import "lukechampine.com/blake3"

import (
	"encoding/binary"
	"errors"
	"hash"
	"io"
	"math"
	"math/bits"
)

const (
	flagChunkStart = 1 << iota
	flagChunkEnd
	flagParent
	flagRoot
	flagKeyedHash
	flagDeriveKeyContext
	flagDeriveKeyMaterial

	blockSize = 64
	chunkSize = 1024

	maxSIMD = 16 // AVX-512 vectors can store 16 words
)

var iv = [8]uint32{
	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
	0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
}

// A node represents a chunk or parent in the BLAKE3 Merkle tree.
type node struct {
	cv       [8]uint32 // chaining value from previous node
	block    [16]uint32
	counter  uint64
	blockLen uint32
	flags    uint32
}

// parentNode returns a node that incorporates the chaining values of two child
// nodes.
func parentNode(left, right [8]uint32, key [8]uint32, flags uint32) node {
	n := node{
		cv:       key,
		counter:  0,         // counter is reset for parents
		blockLen: blockSize, // block is full
		flags:    flags | flagParent,
	}
	copy(n.block[:8], left[:])
	copy(n.block[8:], right[:])
	return n
}

// Hasher implements hash.Hash.
type Hasher struct {
	key   [8]uint32
	flags uint32
	size  int // output size, for Sum

	// log(n) set of Merkle subtree roots, at most one per height.
	stack   [50][8]uint32 // 2^50 * maxSIMD * chunkSize = 2^64
	counter uint64        // number of buffers hashed; also serves as a bit vector indicating which stack elements are occupied

	buf    [maxSIMD * chunkSize]byte
	buflen int
}

func (h *Hasher) hasSubtreeAtHeight(i int) bool {
	return h.counter&(1<<i) != 0
}

func (h *Hasher) pushSubtree(cv [8]uint32) {
	// seek to first open stack slot, merging subtrees as we go
	i := 0
	for h.hasSubtreeAtHeight(i) {
		cv = chainingValue(parentNode(h.stack[i], cv, h.key, h.flags))
		i++
	}
	h.stack[i] = cv
	h.counter++
}
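
// For intuition (illustrative, not normative): after five buffers have been
// hashed, counter is 0b101, so subtree roots occupy heights 0 and 2. Pushing
// a sixth chaining value merges it with stack[0] into a height-1 parent,
// leaving counter at 0b110, with roots at heights 1 and 2.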

// rootNode computes the root of the Merkle tree. It does not modify the
// stack.
func (h *Hasher) rootNode() node {
	n := compressBuffer(&h.buf, h.buflen, &h.key, h.counter*maxSIMD, h.flags)
	// merge the stored subtree roots into n, from the lowest occupied height
	// upward
	for i := bits.TrailingZeros64(h.counter); i < bits.Len64(h.counter); i++ {
		if h.hasSubtreeAtHeight(i) {
			n = parentNode(h.stack[i], chainingValue(n), h.key, h.flags)
		}
	}
	n.flags |= flagRoot
	return n
}

// Write implements hash.Hash.
func (h *Hasher) Write(p []byte) (int, error) {
	lenp := len(p)
	for len(p) > 0 {
		if h.buflen == len(h.buf) {
			n := compressBuffer(&h.buf, h.buflen, &h.key, h.counter*maxSIMD, h.flags)
			h.pushSubtree(chainingValue(n))
			h.buflen = 0
		}
		n := copy(h.buf[h.buflen:], p)
		h.buflen += n
		p = p[n:]
	}
	return lenp, nil
}

// Sum implements hash.Hash.
func (h *Hasher) Sum(b []byte) (sum []byte) {
	// We need to append h.Size() bytes to b. Reuse b's capacity if possible;
	// otherwise, allocate a new slice.
	if total := len(b) + h.Size(); cap(b) >= total {
		sum = b[:total]
	} else {
		sum = make([]byte, total)
		copy(sum, b)
	}
	// Read into the appended portion of sum. Use a low-latency-low-throughput
	// path for small digests (requiring a single compression), and a
	// high-latency-high-throughput path for large digests.
	if dst := sum[len(b):]; len(dst) <= 64 {
		var out [64]byte
		wordsToBytes(compressNode(h.rootNode()), &out)
		copy(dst, out[:])
	} else {
		h.XOF().Read(dst)
	}
	return
}
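
// Usage sketch (illustrative only, not part of the package; assumes an
// external caller importing this package as "blake3"):
//
//	h := blake3.New(32, nil)
//	h.Write([]byte("hello, "))
//	h.Write([]byte("world"))
//	digest := h.Sum(nil) // 32 bytes, per the size passed to New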

// Reset implements hash.Hash.
func (h *Hasher) Reset() {
	h.counter = 0
	h.buflen = 0
}

// BlockSize implements hash.Hash.
func (h *Hasher) BlockSize() int { return 64 }

// Size implements hash.Hash.
func (h *Hasher) Size() int { return h.size }

// XOF returns an OutputReader initialized with the current hash state.
func (h *Hasher) XOF() *OutputReader {
	return &OutputReader{
		n: h.rootNode(),
	}
}
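
// Extendable-output sketch (illustrative only; msg is a hypothetical input):
//
//	h := blake3.New(32, nil)
//	h.Write(msg)
//	out := make([]byte, 1000)
//	h.XOF().Read(out) // up to 2^64 - 1 output bytes are available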

func newHasher(key [8]uint32, flags uint32, size int) *Hasher {
	return &Hasher{
		key:   key,
		flags: flags,
		size:  size,
	}
}

// New returns a Hasher for the specified digest size and key. If key is nil,
// the hash is unkeyed. Otherwise, len(key) must be 32.
func New(size int, key []byte) *Hasher {
	if key == nil {
		return newHasher(iv, 0, size)
	}
	var keyWords [8]uint32
	for i := range keyWords {
		keyWords[i] = binary.LittleEndian.Uint32(key[i*4:])
	}
	return newHasher(keyWords, flagKeyedHash, size)
}
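
// Keyed-hash sketch (illustrative only; the key bytes are hypothetical):
//
//	key := make([]byte, 32) // must be exactly 32 bytes
//	mac := blake3.New(32, key)
//	mac.Write(msg)
//	tag := mac.Sum(nil)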

// Sum256 and Sum512 always use the same hasher state, so we can save some time
// when hashing small inputs by constructing the hasher ahead of time.
var defaultHasher = New(64, nil)

// Sum256 returns the unkeyed BLAKE3 hash of b, truncated to 256 bits.
func Sum256(b []byte) (out [32]byte) {
	out512 := Sum512(b)
	copy(out[:], out512[:])
	return
}

// Sum512 returns the unkeyed BLAKE3 hash of b, truncated to 512 bits.
func Sum512(b []byte) (out [64]byte) {
	var n node
	if len(b) <= blockSize {
		hashBlock(&out, b)
		return
	} else if len(b) <= chunkSize {
		n = compressChunk(b, &iv, 0, 0)
		n.flags |= flagRoot
	} else {
		h := *defaultHasher
		h.Write(b)
		n = h.rootNode()
	}
	wordsToBytes(compressNode(n), &out)
	return
}
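
// One-shot sketch (illustrative only):
//
//	d32 := blake3.Sum256([]byte("hello")) // [32]byte
//	d64 := blake3.Sum512([]byte("hello")) // [64]byte; d32 equals its first half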

// DeriveKey derives a subkey from ctx and srcKey. ctx should be hardcoded,
// globally unique, and application-specific. A good format for ctx strings is:
//
//	[application] [commit timestamp] [purpose]
//
// e.g.:
//
//	example.com 2019-12-25 16:18:03 session tokens v1
//
// The purpose of these requirements is to ensure that an attacker cannot trick
// two different applications into using the same context string.
func DeriveKey(subKey []byte, ctx string, srcKey []byte) {
	// construct the derivation Hasher
	const derivationIVLen = 32
	h := newHasher(iv, flagDeriveKeyContext, 32)
	h.Write([]byte(ctx))
	derivationIV := h.Sum(make([]byte, 0, derivationIVLen))
	var ivWords [8]uint32
	for i := range ivWords {
		ivWords[i] = binary.LittleEndian.Uint32(derivationIV[i*4:])
	}
	h = newHasher(ivWords, flagDeriveKeyMaterial, 0)
	// derive the subKey
	h.Write(srcKey)
	h.XOF().Read(subKey)
}
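
// Key-derivation sketch (illustrative only; masterKey is hypothetical):
//
//	subKey := make([]byte, 32)
//	blake3.DeriveKey(subKey, "example.com 2019-12-25 16:18:03 session tokens v1", masterKey)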

// An OutputReader produces a seekable stream of 2^64 - 1 pseudorandom output
// bytes.
type OutputReader struct {
	n   node
	buf [maxSIMD * blockSize]byte
	off uint64
}

// Read implements io.Reader. Callers may assume that Read returns len(p), nil
// unless the read would extend beyond the end of the stream.
func (or *OutputReader) Read(p []byte) (int, error) {
	if or.off == math.MaxUint64 {
		return 0, io.EOF
	} else if rem := math.MaxUint64 - or.off; uint64(len(p)) > rem {
		p = p[:rem]
	}
	lenp := len(p)
	for len(p) > 0 {
		if or.off%(maxSIMD*blockSize) == 0 {
			or.n.counter = or.off / blockSize
			compressBlocks(&or.buf, or.n)
		}
		n := copy(p, or.buf[or.off%(maxSIMD*blockSize):])
		p = p[n:]
		or.off += uint64(n)
	}
	return lenp, nil
}
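
// Streaming sketch (illustrative only; h is a hypothetical *Hasher):
//
//	xof := h.XOF()
//	buf := make([]byte, 4096)
//	xof.Read(buf) // fills buf; successive Reads continue the stream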

// Seek implements io.Seeker.
func (or *OutputReader) Seek(offset int64, whence int) (int64, error) {
	off := or.off
	switch whence {
	case io.SeekStart:
		if offset < 0 {
			return 0, errors.New("seek position cannot be negative")
		}
		off = uint64(offset)
	case io.SeekCurrent:
		if offset < 0 {
			if uint64(-offset) > off {
				return 0, errors.New("seek position cannot be negative")
			}
			off -= uint64(-offset)
		} else {
			off += uint64(offset)
		}
	case io.SeekEnd:
		// the stream ends at offset 2^64 - 1, so wrapping arithmetic yields
		// off = (2^64 - 1) + offset
		off = uint64(offset) - 1
	default:
		panic("invalid whence")
	}
	or.off = off
	or.n.counter = uint64(off) / blockSize
	if or.off%(maxSIMD*blockSize) != 0 {
		compressBlocks(&or.buf, or.n)
	}
	// NOTE: or.off >= 2^63 will result in a negative return value.
	// Nothing we can do about this.
	return int64(or.off), nil
}
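
// Seeking sketch (illustrative only; h is a hypothetical *Hasher):
//
//	xof := h.XOF()
//	xof.Seek(1<<20, io.SeekStart) // jump to byte offset 2^20
//	block := make([]byte, 64)
//	xof.Read(block)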

// ensure that Hasher implements hash.Hash
var _ hash.Hash = (*Hasher)(nil)