github.com/jbendotnet/noms@v0.0.0-20190904222105-c43e4293ea92/go/nbs/aws_chunk_source.go

// Copyright 2016 Attic Labs, Inc. All rights reserved.
// Licensed under the Apache License, version 2.0:
// http://www.apache.org/licenses/LICENSE-2.0

package nbs

import (
	"sync"
	"time"

	"github.com/attic-labs/noms/go/d"
)

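// newAWSChunkSource builds a chunkSource for the table named |name|. It serves
// the table index from indexCache when present; otherwise it fetches the index
// bytes from DynamoDB (when awsLimits says the table may live there) or from
// the tail of the table file in S3, parses them, and caches the parsed index.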
func newAWSChunkSource(ddb *ddbTableStore, s3 *s3ObjectReader, al awsLimits, name addr, chunkCount uint32, indexCache *indexCache, stats *Stats) chunkSource {
	if indexCache != nil {
		indexCache.lockEntry(name)
		defer indexCache.unlockEntry(name)
		if index, found := indexCache.get(name); found {
			tra := &awsTableReaderAt{al: al, ddb: ddb, s3: s3, name: name, chunkCount: chunkCount}
			return &awsChunkSource{newTableReader(index, tra, s3BlockSize), name}
		}
	}

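	// Cache miss: load the raw index bytes, preferring DynamoDB when the table
	// may still live there and falling back to S3.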
	t1 := time.Now()
	indexBytes, tra := func() ([]byte, tableReaderAt) {
		if al.tableMayBeInDynamo(chunkCount) {
			data, err := ddb.ReadTable(name, stats)
			if data != nil {
				return data, &dynamoTableReaderAt{ddb: ddb, h: name}
			}
			d.PanicIfTrue(err == nil) // There MUST be either data or an error
			d.PanicIfNotType(err, tableNotInDynamoErr{})
		}

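		// Not in DynamoDB: read only the index and footer from the end of the
		// table object in S3.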
		size := indexSize(chunkCount) + footerSize
		buff := make([]byte, size)

		n, err := s3.ReadFromEnd(name, buff, stats)
		d.PanicIfError(err)
		d.PanicIfFalse(size == uint64(n))
		return buff, &s3TableReaderAt{s3: s3, h: name}
	}()
	stats.IndexBytesPerRead.Sample(uint64(len(indexBytes)))
	stats.IndexReadLatency.SampleTimeSince(t1)

	index := parseTableIndex(indexBytes)
	if indexCache != nil {
		indexCache.put(name, index)
	}
	return &awsChunkSource{newTableReader(index, tra, s3BlockSize), name}
}

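// awsChunkSource is a chunkSource whose table data is stored in DynamoDB or S3.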
type awsChunkSource struct {
	tableReader
	name addr
}

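// hash returns the address under which this table is stored.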
func (acs *awsChunkSource) hash() addr {
	return acs.name
}

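// awsTableReaderAt reads table data from either DynamoDB or S3, deciding which
// backing store to use lazily, on the first read.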
type awsTableReaderAt struct {
	once sync.Once
	tra  tableReaderAt

	al  awsLimits
	ddb *ddbTableStore
	s3  *s3ObjectReader

	name       addr
	chunkCount uint32
}

func (atra *awsTableReaderAt) hash() addr {
	return atra.name
}

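// ReadAtWithStats resolves the backing store on first use, then delegates the
// read to the resolved tableReaderAt.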
func (atra *awsTableReaderAt) ReadAtWithStats(p []byte, off int64, stats *Stats) (n int, err error) {
	atra.once.Do(func() { atra.tra = atra.getTableReaderAt(stats) })
	return atra.tra.ReadAtWithStats(p, off, stats)
}

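// getTableReaderAt probes DynamoDB first, when the table may be stored there,
// and otherwise falls back to reading from S3.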
func (atra *awsTableReaderAt) getTableReaderAt(stats *Stats) tableReaderAt {
	if atra.al.tableMayBeInDynamo(atra.chunkCount) {
		data, err := atra.ddb.ReadTable(atra.name, stats)
		if data != nil {
			return &dynamoTableReaderAt{ddb: atra.ddb, h: atra.name}
		}
		d.PanicIfTrue(err == nil) // There MUST be either data or an error
		d.PanicIfNotType(err, tableNotInDynamoErr{})
	}

	return &s3TableReaderAt{s3: atra.s3, h: atra.name}
}