github.com/hasnat/dolt/go@v0.0.0-20210628190320-9eb5d843fbb7/store/cmd/noms/noms_blob_put.go

// Copyright 2019 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file incorporates work covered by the following copyright and
// permission notice:
//
// Copyright 2016 Attic Labs, Inc. All rights reserved.
// Licensed under the Apache License, version 2.0:
// http://www.apache.org/licenses/LICENSE-2.0

package main

import (
	"context"
	"fmt"
	"io"
	"os"

	"github.com/dolthub/dolt/go/store/cmd/noms/util"
	"github.com/dolthub/dolt/go/store/config"
	"github.com/dolthub/dolt/go/store/d"
	"github.com/dolthub/dolt/go/store/types"
	"github.com/dolthub/dolt/go/store/util/profile"
)

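// nomsBlobPut reads the file at filePath, splits it into roughly equal chunks
// (one per concurrent reader, but never smaller than 1 MiB), streams those
// chunks into a single Blob value in the database named by dsPath, and commits
// the blob as the new head of that dataset. It returns a process exit code:
// 0 on success, 1 on failure.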
func nomsBlobPut(ctx context.Context, filePath string, dsPath string, concurrency int) int {
	info, err := os.Stat(filePath)
	if err != nil {
		util.CheckError(fmt.Errorf("couldn't stat %s: %v", filePath, err))
	}

	defer profile.MaybeStartProfile().Stop()

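	// Split the file into one chunk per concurrent reader, but keep chunks at
	// least 1 MiB so small files aren't sliced into tiny reads.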
	fileSize := info.Size()
	chunkSize := fileSize / int64(concurrency)
	if chunkSize < (1 << 20) {
		chunkSize = 1 << 20
	}

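	// Each chunk gets its own *os.File seeked to the chunk's offset and wrapped
	// in an io.LimitReader, so the chunks can be consumed independently. The
	// last chunk also picks up any remainder bytes.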
	numChunks := fileSize / chunkSize
	if numChunks == 0 {
		// Files smaller than a single chunk still need one reader.
		numChunks = 1
	}

	readers := make([]io.Reader, numChunks)
	for i := 0; i < len(readers); i++ {
		r, err := os.Open(filePath)
		util.CheckErrorNoUsage(err)
		defer r.Close()
		_, err = r.Seek(int64(i)*chunkSize, 0)

		// TODO: fix panics
		d.PanicIfError(err)

		limit := chunkSize
		if i == len(readers)-1 {
			limit += fileSize % chunkSize // adjust size of last slice to include the final bytes.
		}
		lr := io.LimitReader(r, limit)
		readers[i] = lr
	}

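	// Resolve the dataset spec (e.g. "<database>::<dataset>") into an open
	// database and dataset handle.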
	cfg := config.NewResolver()
	db, ds, err := cfg.GetDataset(ctx, dsPath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Could not create dataset: %s\n", err)
		return 1
	}
	defer db.Close()

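	// Assemble a single Blob value from the chunk readers; passing multiple
	// readers lets the blob layer process the chunks concurrently, which is the
	// point of splitting the file above.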
	blob, err := types.NewBlob(ctx, db, readers...)

	// TODO: fix panics
	d.PanicIfError(err)

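	// Commit the blob as the new head value of the dataset.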
	_, err = db.CommitValue(ctx, ds, blob)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error committing: %s\n", err)
		return 1
	}
	return 0
}
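
// A minimal sketch of how this helper might be invoked (the CLI wiring for the
// `noms blob put` command lives elsewhere in this package); the file name and
// dataset spec below are hypothetical examples, not taken from this file:
//
//	exitCode := nomsBlobPut(context.Background(), "dump.bin", "/path/to/db::raw-dump", 8)
//	os.Exit(exitCode)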