github.com/hasnat/dolt/go@v0.0.0-20210628190320-9eb5d843fbb7/store/nbs/bs_persister.go (about)

     1  // Copyright 2019 Dolthub, Inc.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package nbs
    16  
    17  import (
    18  	"context"
    19  	"errors"
    20  	"io"
    21  	"time"
    22  
    23  	"github.com/dolthub/dolt/go/store/blobstore"
    24  	"github.com/dolthub/dolt/go/store/chunks"
    25  )
    26  
// blobstorePersister persists and opens NBS table files against a generic
// blobstore.Blobstore backend.
// NOTE(review): appears to implement the package's table persister interface
// (Persist / ConjoinAll / Open / PruneTableFiles) — confirm against its
// declaration elsewhere in the package.
type blobstorePersister struct {
	bs         blobstore.Blobstore // backing blob storage
	blockSize  uint64              // block size handed to table readers
	indexCache *indexCache         // optional cache of parsed table indexes; may be nil
}
    32  
    33  // Persist makes the contents of mt durable. Chunks already present in
    34  // |haver| may be dropped in the process.
    35  func (bsp *blobstorePersister) Persist(ctx context.Context, mt *memTable, haver chunkReader, stats *Stats) (chunkSource, error) {
    36  	name, data, chunkCount, err := mt.write(haver, stats)
    37  
    38  	if err != nil {
    39  		return emptyChunkSource{}, nil
    40  	}
    41  
    42  	if chunkCount == 0 {
    43  		return emptyChunkSource{}, nil
    44  	}
    45  
    46  	_, err = blobstore.PutBytes(ctx, bsp.bs, name.String(), data)
    47  
    48  	if err != nil {
    49  		return emptyChunkSource{}, err
    50  	}
    51  
    52  	bsTRA := &bsTableReaderAt{name.String(), bsp.bs}
    53  	return newReaderFromIndexData(bsp.indexCache, data, name, bsTRA, bsp.blockSize)
    54  }
    55  
    56  // ConjoinAll (Not currently implemented) conjoins all chunks in |sources| into a single,
    57  // new chunkSource.
    58  func (bsp *blobstorePersister) ConjoinAll(ctx context.Context, sources chunkSources, stats *Stats) (chunkSource, error) {
    59  	return emptyChunkSource{}, nil
    60  }
    61  
    62  // Open a table named |name|, containing |chunkCount| chunks.
    63  func (bsp *blobstorePersister) Open(ctx context.Context, name addr, chunkCount uint32, stats *Stats) (chunkSource, error) {
    64  	return newBSChunkSource(ctx, bsp.bs, name, chunkCount, bsp.blockSize, bsp.indexCache, stats)
    65  }
    66  
// bsTableReaderAt adapts a single blobstore object to random-access,
// offset-based reads (see its ReadAtWithStats method).
type bsTableReaderAt struct {
	key string              // blobstore key identifying the table blob
	bs  blobstore.Blobstore // backing blob storage
}
    71  
    72  // ReadAtWithStats is the bsTableReaderAt implementation of the tableReaderAt interface
    73  func (bsTRA *bsTableReaderAt) ReadAtWithStats(ctx context.Context, p []byte, off int64, stats *Stats) (int, error) {
    74  	br := blobstore.NewBlobRange(off, int64(len(p)))
    75  	rc, _, err := bsTRA.bs.Get(ctx, bsTRA.key, br)
    76  
    77  	if err != nil {
    78  		return 0, err
    79  	}
    80  	defer rc.Close()
    81  
    82  	totalRead := 0
    83  	for totalRead < len(p) {
    84  		n, err := rc.Read(p[totalRead:])
    85  
    86  		if err != nil && err != io.EOF {
    87  			return 0, err
    88  		}
    89  
    90  		totalRead += n
    91  
    92  		if err == io.EOF {
    93  			break
    94  		}
    95  	}
    96  
    97  	return totalRead, nil
    98  }
    99  
   100  func newBSChunkSource(ctx context.Context, bs blobstore.Blobstore, name addr, chunkCount uint32, blockSize uint64, indexCache *indexCache, stats *Stats) (cs chunkSource, err error) {
   101  	if indexCache != nil {
   102  		indexCache.lockEntry(name)
   103  		defer func() {
   104  			unlockErr := indexCache.unlockEntry(name)
   105  
   106  			if err != nil {
   107  				err = unlockErr
   108  			}
   109  		}()
   110  
   111  		if index, found := indexCache.get(name); found {
   112  			bsTRA := &bsTableReaderAt{name.String(), bs}
   113  			return &chunkSourceAdapter{newTableReader(index, bsTRA, blockSize), name}, nil
   114  		}
   115  	}
   116  
   117  	t1 := time.Now()
   118  	indexBytes, tra, err := func() ([]byte, tableReaderAt, error) {
   119  		size := int64(indexSize(chunkCount) + footerSize)
   120  		key := name.String()
   121  		buff, _, err := blobstore.GetBytes(ctx, bs, key, blobstore.NewBlobRange(-size, 0))
   122  
   123  		if err != nil {
   124  			return nil, nil, err
   125  		}
   126  
   127  		if size != int64(len(buff)) {
   128  			return nil, nil, errors.New("failed to read all data")
   129  		}
   130  
   131  		return buff, &bsTableReaderAt{key, bs}, nil
   132  	}()
   133  
   134  	if err != nil {
   135  		return nil, err
   136  	}
   137  
   138  	stats.IndexBytesPerRead.Sample(uint64(len(indexBytes)))
   139  	stats.IndexReadLatency.SampleTimeSince(t1)
   140  
   141  	index, err := parseTableIndex(indexBytes)
   142  
   143  	if err != nil {
   144  		return nil, err
   145  	}
   146  
   147  	if indexCache != nil {
   148  		indexCache.put(name, index)
   149  	}
   150  
   151  	return &chunkSourceAdapter{newTableReader(index, tra, s3BlockSize), name}, nil
   152  }
   153  
// PruneTableFiles is not supported for blobstore-backed persisters; it always
// returns chunks.ErrUnsupportedOperation.
func (bsp *blobstorePersister) PruneTableFiles(ctx context.Context, contents manifestContents) error {
	return chunks.ErrUnsupportedOperation
}