github.com/dolthub/dolt/go@v0.40.5-0.20240520175717-68db7794bea6/store/nbs/aws_chunk_source.go (about)

     1  // Copyright 2019-2021 Dolthub, Inc.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  //
    15  // This file incorporates work covered by the following copyright and
    16  // permission notice:
    17  //
    18  // Copyright 2016 Attic Labs, Inc. All rights reserved.
    19  // Licensed under the Apache License, version 2.0:
    20  // http://www.apache.org/licenses/LICENSE-2.0
    21  
    22  package nbs
    23  
    24  import (
    25  	"bytes"
    26  	"context"
    27  	"errors"
    28  	"time"
    29  
    30  	"github.com/dolthub/dolt/go/store/hash"
    31  )
    32  
    33  func tableExistsInChunkSource(ctx context.Context, s3 *s3ObjectReader, al awsLimits, name hash.Hash, chunkCount uint32, q MemoryQuotaProvider, stats *Stats) (bool, error) {
    34  	magic := make([]byte, magicNumberSize)
    35  	n, _, err := s3.ReadFromEnd(ctx, name, magic, stats)
    36  	if err != nil {
    37  		return false, err
    38  	}
    39  	if n != len(magic) {
    40  		return false, errors.New("failed to read all data")
    41  	}
    42  	return bytes.Equal(magic, []byte(magicNumber)), nil
    43  }
    44  
    45  func newAWSChunkSource(ctx context.Context, s3 *s3ObjectReader, al awsLimits, name hash.Hash, chunkCount uint32, q MemoryQuotaProvider, stats *Stats) (cs chunkSource, err error) {
    46  	var tra tableReaderAt
    47  	index, err := loadTableIndex(ctx, stats, chunkCount, q, func(p []byte) error {
    48  		n, _, err := s3.ReadFromEnd(ctx, name, p, stats)
    49  		if err != nil {
    50  			return err
    51  		}
    52  		if len(p) != n {
    53  			return errors.New("failed to read all data")
    54  		}
    55  		tra = &s3TableReaderAt{h: name, s3: s3}
    56  		return nil
    57  	})
    58  	if err != nil {
    59  		return &chunkSourceAdapter{}, err
    60  	}
    61  
    62  	tr, err := newTableReader(index, tra, s3BlockSize)
    63  	if err != nil {
    64  		_ = index.Close()
    65  		return &chunkSourceAdapter{}, err
    66  	}
    67  	return &chunkSourceAdapter{tr, name}, nil
    68  }
    69  
// loadTableIndex acquires quota-tracked memory sized for a table file's index
// region plus an offsets scratch buffer, fills the index region via
// |loadIndexBytes|, and parses it into a tableIndex. On success, ownership of
// the acquired quota passes to the returned index (q is handed to the parser);
// on any error path the quota is released here before returning.
func loadTableIndex(ctx context.Context, stats *Stats, cnt uint32, q MemoryQuotaProvider, loadIndexBytes func(p []byte) error) (tableIndex, error) {
	// Bytes occupied by the on-disk index region: the index plus the footer.
	idxSz := int(indexSize(cnt) + footerSize)
	// Scratch space for (cnt - cnt/2) == ceil(cnt/2) offset entries.
	// NOTE(review): presumably sized to match what
	// parseTableIndexWithOffsetBuff requires for its offset buffer — confirm
	// against that function.
	offsetSz := int((cnt - (cnt / 2)) * offsetSize)
	buf, err := q.AcquireQuotaBytes(ctx, idxSz+offsetSz)
	if err != nil {
		return nil, err
	}

	t1 := time.Now()
	// Only the leading idxSz bytes hold raw index data; the tail is scratch.
	if err := loadIndexBytes(buf[:idxSz]); err != nil {
		q.ReleaseQuotaBytes(len(buf))
		return nil, err
	}
	stats.IndexReadLatency.SampleTimeSince(t1)
	stats.IndexBytesPerRead.Sample(uint64(len(buf)))

	idx, err := parseTableIndexWithOffsetBuff(buf[:idxSz], buf[idxSz:], q)
	if err != nil {
		// Parsing failed, so the index never took ownership of buf's quota;
		// release it here to avoid leaking quota.
		q.ReleaseQuotaBytes(len(buf))
	}
	return idx, err
}