github.com/dolthub/dolt/go@v0.40.5-0.20240520175717-68db7794bea6/store/nbs/archive_reader.go

// Copyright 2024 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package nbs

import (
	"bufio"
	"crypto/sha512"
	"encoding/binary"
	"fmt"
	"io"
	"math"
	"math/bits"

	"github.com/dolthub/gozstd"
	"github.com/pkg/errors"

	"github.com/dolthub/dolt/go/store/hash"
)

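// archiveReader wraps an io.ReaderAt over an archive file together with the
// decoded index: chunk hash prefixes, byte spans, chunk references, hash
// suffixes, and the parsed footer.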
type archiveReader struct {
	reader    io.ReaderAt
	prefixes  []uint64
	byteSpans []byteSpan
	chunkRefs []chunkRef
	suffixes  []suffix
	footer    footer
}

// chunkRef identifies the byte spans needed to materialize a chunk. Both
// fields are indexes into archiveReader.byteSpans; a dict of 0 means the
// chunk was compressed without a dictionary.
type chunkRef struct {
	dict uint32
	data uint32
}

type suffix [hash.SuffixLen]byte

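// footer holds the fixed-size trailer of an archive file, which locates and
// checksums the data, index, and metadata sections.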
type footer struct {
	indexSize     uint32
	byteSpanCount uint32
	chunkCount    uint32
	metadataSize  uint32
	dataCheckSum  sha512Sum
	indexCheckSum sha512Sum
	metaCheckSum  sha512Sum
	formatVersion byte
	fileSignature string
	fileSize      uint64 // Not actually part of the footer, but necessary for calculating offsets.
}

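// The sections of an archive are laid out back to back, as implied by the
// span accessors below:
//
//	+------+-------------------------------+----------+--------+
//	| data | index (compressed | suffixes) | metadata | footer |
//	+------+-------------------------------+----------+--------+
//
// All offsets are derived from the file size and the section sizes recorded
// in the footer.
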
// dataSpan returns the span of the data section of the archive. This is not generally used directly since we usually
// read individual spans for each chunk.
func (f footer) dataSpan() byteSpan {
	return byteSpan{offset: 0, length: f.fileSize - archiveFooterSize - uint64(f.metadataSize) - uint64(f.indexSize)}
}

// totalIndexSpan returns the span of the entire index section of the archive. This span is not directly useful as
// the index is broken into a compressed section and an uncompressed section. Use indexCompressedSpan and
// indexSuffixSpan instead.
func (f footer) totalIndexSpan() byteSpan {
	return byteSpan{offset: f.fileSize - archiveFooterSize - uint64(f.metadataSize) - uint64(f.indexSize), length: uint64(f.indexSize)}
}

// indexCompressedSpan returns the span of the compressed portion of the index: everything in the index section except
// the trailing hash suffixes.
func (f footer) indexCompressedSpan() byteSpan {
	// Convert chunkCount to uint64 before multiplying to avoid uint32 overflow.
	suffixLen := uint64(f.chunkCount) * hash.SuffixLen
	totalIdx := f.totalIndexSpan()
	return byteSpan{offset: totalIdx.offset, length: totalIdx.length - suffixLen}
}

// indexSuffixSpan returns the span of the uncompressed hash suffixes at the end of the index section.
func (f footer) indexSuffixSpan() byteSpan {
	suffixLen := uint64(f.chunkCount) * hash.SuffixLen
	totalIdx := f.totalIndexSpan()
	compressedLen := totalIdx.length - suffixLen

	return byteSpan{totalIdx.offset + compressedLen, suffixLen}
}

// metadataSpan returns the span of the metadata section, which sits between the index and the footer.
func (f footer) metadataSpan() byteSpan {
	return byteSpan{offset: f.fileSize - archiveFooterSize - uint64(f.metadataSize), length: uint64(f.metadataSize)}
}

func newArchiveReader(reader io.ReaderAt, fileSize uint64) (archiveReader, error) {
	footer, err := loadFooter(reader, fileSize)
	if err != nil {
		return archiveReader{}, err
	}

	indexSpan := footer.indexCompressedSpan()
	secRdr := io.NewSectionReader(reader, int64(indexSpan.offset), int64(indexSpan.length))
	rawReader := bufio.NewReader(secRdr)

	// Stream-decompress the index through a pipe so we can consume it incrementally with a bufio.Reader.
	redr, wrtr := io.Pipe()
	defer redr.Close()
	go func() {
		err := gozstd.StreamDecompress(wrtr, rawReader)
		if err != nil {
			wrtr.CloseWithError(err)
		} else {
			wrtr.Close()
		}
	}()
	byteReader := bufio.NewReader(redr)

	// Index 0 is left as the zero-value null byteSpan to simplify logic below; real spans are 1-indexed.
	byteSpans := make([]byteSpan, footer.byteSpanCount+1)

	offset := uint64(0)
	for i := uint32(0); i < footer.byteSpanCount; i++ {
		length, err := binary.ReadUvarint(byteReader)
		if err != nil {
			return archiveReader{}, err
		}

		if length > math.MaxUint32 {
			return archiveReader{}, errors.New("invalid byte span length. Byte span lengths must be uint32s.")
		}

		byteSpans[i+1] = byteSpan{offset: offset, length: length}
		offset += length
	}

	lastPrefix := uint64(0)
	prefixes := make([]uint64, footer.chunkCount)
	for i := uint32(0); i < footer.chunkCount; i++ {
		delta := uint64(0)
		err := binary.Read(byteReader, binary.BigEndian, &delta)
		if err != nil {
			return archiveReader{}, err
		}

		nextDelta := lastPrefix + delta
		if nextDelta < lastPrefix || nextDelta < delta {
			return archiveReader{}, errors.New("invalid prefix delta. Overflow occurred.")
		}
		prefixes[i] = nextDelta
		lastPrefix = nextDelta
	}

	chunks := make([]chunkRef, footer.chunkCount)
	for i := uint32(0); i < footer.chunkCount; i++ {
		dict64, err := binary.ReadUvarint(byteReader)
		if err != nil {
			return archiveReader{}, err
		}
		data64, err := binary.ReadUvarint(byteReader)
		if err != nil {
			return archiveReader{}, err
		}

		if dict64 > math.MaxUint32 || data64 > math.MaxUint32 {
			return archiveReader{}, errors.New("invalid chunk reference. Chunk references must be 32-bit unsigned integers.")
		}

		chunks[i] = chunkRef{dict: uint32(dict64), data: uint32(data64)}
	}
	// Reading the compressed portion should be complete at this point.

	// Read the suffixes.
	suffixSpan := footer.indexSuffixSpan()
	sufRdr := io.NewSectionReader(reader, int64(suffixSpan.offset), int64(suffixSpan.length))
	sufReader := bufio.NewReader(sufRdr)
	suffixes := make([]suffix, footer.chunkCount)
	for i := uint32(0); i < footer.chunkCount; i++ {
		_, err := io.ReadFull(sufReader, suffixes[i][:])
		if err != nil {
			return archiveReader{}, err
		}
	}

	return archiveReader{
		reader:    reader,
		prefixes:  prefixes,
		byteSpans: byteSpans,
		chunkRefs: chunks,
		suffixes:  suffixes,
		footer:    footer,
	}, nil
}
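
// A minimal usage sketch for newArchiveReader (illustrative only; the file
// name and error handling here are assumptions, not part of this package):
//
//	f, err := os.Open("chunks.darc")
//	if err != nil { ... }
//	info, err := f.Stat()
//	if err != nil { ... }
//	rdr, err := newArchiveReader(f, uint64(info.Size()))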

// loadFooter reads and validates the fixed-size footer at the end of the file.
func loadFooter(reader io.ReaderAt, fileSize uint64) (f footer, err error) {
	section := io.NewSectionReader(reader, int64(fileSize-archiveFooterSize), int64(archiveFooterSize))

	buf := make([]byte, archiveFooterSize)
	_, err = io.ReadFull(section, buf)
	if err != nil {
		return
	}

	f.formatVersion = buf[afrVersionOffset]
	f.fileSignature = string(buf[afrSigOffset:])
	// Verify the file signature.
	if f.fileSignature != string(archiveFileSignature) {
		err = ErrInvalidFileSignature
		return
	}
	// Verify the format version. Currently only one version is supported, but we'll need to be more flexible in the future.
	if f.formatVersion != archiveFormatVersion {
		err = ErrInvalidFormatVersion
		return
	}

	f.indexSize = binary.BigEndian.Uint32(buf[afrIndexLenOffset : afrIndexLenOffset+uint32Size])
	f.byteSpanCount = binary.BigEndian.Uint32(buf[afrByteSpanOffset : afrByteSpanOffset+uint32Size])
	f.chunkCount = binary.BigEndian.Uint32(buf[afrChunkCountOffset : afrChunkCountOffset+uint32Size])
	f.metadataSize = binary.BigEndian.Uint32(buf[afrMetaLenOffset : afrMetaLenOffset+uint32Size])
	f.dataCheckSum = sha512Sum(buf[afrDataChkSumOffset : afrDataChkSumOffset+sha512.Size])
	f.indexCheckSum = sha512Sum(buf[afrIndexChkSumOffset : afrIndexChkSumOffset+sha512.Size])
	f.metaCheckSum = sha512Sum(buf[afrMetaChkSumOffset : afrMetaChkSumOffset+sha512.Size])
	f.fileSize = fileSize

	return
}

// search returns the index of the chunk with the given hash, and true if it was found. If the hash is not present,
// -1 and false are returned.
func (ai archiveReader) search(hash hash.Hash) (int, bool) {
	prefix := hash.Prefix()
	possibleMatch := prollyBinSearch(ai.prefixes, prefix)
	targetSfx := hash.Suffix()

	// Scan forward through all entries that share the target prefix, comparing suffixes.
	for idx := possibleMatch; idx < len(ai.prefixes) && ai.prefixes[idx] == prefix; idx++ {
		if ai.suffixes[idx] == suffix(targetSfx) {
			return idx, true
		}
	}
	return -1, false
}

// has returns true if the archive contains a chunk with the given hash.
func (ai archiveReader) has(hash hash.Hash) bool {
	_, found := ai.search(hash)
	return found
}

// get returns the decompressed data for the given hash. If the hash is not found, nil is returned (not an error).
func (ai archiveReader) get(hash hash.Hash) ([]byte, error) {
	dict, data, err := ai.getRaw(hash)
	if err != nil || data == nil {
		return nil, err
	}

	var result []byte
	if dict == nil {
		result, err = gozstd.Decompress(nil, data)
	} else {
		dDict, e2 := gozstd.NewDDict(dict)
		if e2 != nil {
			return nil, e2
		}
		result, err = gozstd.DecompressDict(nil, data, dDict)
	}
	if err != nil {
		return nil, err
	}
	return result, nil
}
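
// For example (illustrative sketch; rdr and h are assumed to come from the
// caller):
//
//	data, err := rdr.get(h)
//	if err != nil { ... }  // io or decompression failure
//	if data == nil { ... } // chunk not present in this archive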

// readByteSpan reads bs.length bytes from the underlying reader starting at bs.offset.
func (ai archiveReader) readByteSpan(bs byteSpan) ([]byte, error) {
	buff := make([]byte, bs.length)
	_, err := ai.reader.ReadAt(buff, int64(bs.offset))
	if err != nil {
		return nil, err
	}
	return buff, nil
}

// getRaw returns the raw data for the given hash. If the hash is not found, nil is returned for both slices, with
// no error. Errors are only returned if there is an io error.
//
// The data returned is still compressed, regardless of whether a dictionary is present.
func (ai archiveReader) getRaw(hash hash.Hash) (dict, data []byte, err error) {
	idx, found := ai.search(hash)
	if !found {
		return nil, nil, nil
	}

	chunkRef := ai.chunkRefs[idx]
	if chunkRef.dict != 0 {
		byteSpan := ai.byteSpans[chunkRef.dict]
		dict, err = ai.readByteSpan(byteSpan)
		if err != nil {
			return nil, nil, err
		}
	}

	byteSpan := ai.byteSpans[chunkRef.data]
	data, err = ai.readByteSpan(byteSpan)
	if err != nil {
		return nil, nil, err
	}
	return
}

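// getMetadata returns the raw contents of the archive's metadata section.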
func (ai archiveReader) getMetadata() ([]byte, error) {
	return ai.readByteSpan(ai.footer.metadataSpan())
}

// verifyDataCheckSum verifies the checksum of the data section of the archive. Note - this requires a full read of
// the data section, which could be sizable.
func (ai archiveReader) verifyDataCheckSum() error {
	return verifyCheckSum(ai.reader, ai.footer.dataSpan(), ai.footer.dataCheckSum)
}

// verifyIndexCheckSum verifies the checksum of the index section of the archive.
func (ai archiveReader) verifyIndexCheckSum() error {
	return verifyCheckSum(ai.reader, ai.footer.totalIndexSpan(), ai.footer.indexCheckSum)
}

// verifyMetaCheckSum verifies the checksum of the metadata section of the archive.
func (ai archiveReader) verifyMetaCheckSum() error {
	return verifyCheckSum(ai.reader, ai.footer.metadataSpan(), ai.footer.metaCheckSum)
}

// verifyCheckSum computes the SHA-512 hash of the given span and compares it to the expected checksum.
func verifyCheckSum(reader io.ReaderAt, span byteSpan, checkSum sha512Sum) error {
	hshr := sha512.New()
	_, err := io.Copy(hshr, io.NewSectionReader(reader, int64(span.offset), int64(span.length)))
	if err != nil {
		return err
	}

	if sha512Sum(hshr.Sum(nil)) != checkSum {
		return fmt.Errorf("checksum mismatch")
	}
	return nil
}
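
// A caller opening an untrusted archive might run all three checks up front
// (illustrative sketch; rdr is assumed to be an archiveReader):
//
//	for _, verify := range []func() error{
//		rdr.verifyDataCheckSum, rdr.verifyIndexCheckSum, rdr.verifyMetaCheckSum,
//	} {
//		if err := verify(); err != nil { ... }
//	}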

// prollyBinSearch is an interpolation search that returns the _best_ index of the target in the input slice. If the
// target exists one or more times, the index of the first instance is returned. If the target does not exist, the
// index at which it would be inserted is returned.
//
// A strong requirement for the proper behavior of this function is a sorted slice whose values are well distributed
// and not densely clustered. Crypto hashes are a good example of this.
//
// For our purposes, where we are just trying to get the index, we must compare the resulting index to our target to
// determine if it is a match.
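//
// For example (hypothetical values): searching for 50 in []uint64{0, 25, 50, 75, 100}
// first probes index 50*4/100 = 2 (target offset times index range over value range),
// finds slice[2] == 50, and converges on index 2 as the bounds tighten.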
func prollyBinSearch(slice []uint64, target uint64) int {
	items := len(slice)
	if items == 0 {
		return 0
	}
	lft, rht := 0, items
	lo, hi := slice[lft], slice[rht-1]
	if target > hi {
		return rht
	}
	if lo >= target {
		return lft
	}
	for lft < rht {
		valRangeSz := hi - lo
		idxRangeSz := uint64(rht - lft - 1)
		shiftedTgt := target - lo
		// Interpolate the probe index with a 128-bit multiply/divide to avoid uint64 overflow.
		mhi, mlo := bits.Mul64(shiftedTgt, idxRangeSz)
		dU64, _ := bits.Div64(mhi, mlo, valRangeSz)
		idx := int(dU64) + lft
		if slice[idx] < target {
			lft = idx + 1
			// No need to update lo if lft == items, since this loop will be ending.
			if lft < items {
				lo = slice[lft]
				// Interpolation doesn't like lo >= target, so if we're already there, just return lft.
				if lo >= target {
					return lft
				}
			}
		} else {
			rht = idx
			hi = slice[rht]
		}
	}
	return lft
}