github.com/dolthub/dolt/go@v0.40.5-0.20240520175717-68db7794bea6/store/cmd/noms/noms_blob_put.go

// Copyright 2019 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file incorporates work covered by the following copyright and
// permission notice:
//
// Copyright 2016 Attic Labs, Inc. All rights reserved.
// Licensed under the Apache License, version 2.0:
// http://www.apache.org/licenses/LICENSE-2.0

package main

import (
	"context"
	"fmt"
	"io"
	"os"

	"github.com/dolthub/dolt/go/store/cmd/noms/util"
	"github.com/dolthub/dolt/go/store/config"
	"github.com/dolthub/dolt/go/store/d"
	"github.com/dolthub/dolt/go/store/datas"
	"github.com/dolthub/dolt/go/store/types"
	"github.com/dolthub/dolt/go/store/util/profile"
)

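// nomsBlobPut streams the file at filePath into a Noms blob and commits the blob
// as the new head value of the dataset named by dsPath. The file is split into
// roughly `concurrency` chunk readers of at least 1 MiB each. Returns 0 on
// success and 1 on failure, suitable for use as an exit code.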
func nomsBlobPut(ctx context.Context, filePath string, dsPath string, concurrency int) int {
	info, err := os.Stat(filePath)
	if err != nil {
		util.CheckError(fmt.Errorf("couldn't stat file %s: %v", filePath, err))
	}

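	// Start profiling if it was requested via command-line flags; stop it when this function returns.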
	defer profile.MaybeStartProfile().Stop()

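	// Aim for `concurrency` chunks, but never make a chunk smaller than 1 MiB.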
	fileSize := info.Size()
	chunkSize := fileSize / int64(concurrency)
	if chunkSize < (1 << 20) {
		chunkSize = 1 << 20
	}

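	// Open an independent file handle per chunk, seek it to the chunk's offset,
	// and limit it to the chunk's length; the last chunk also covers any remainder.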
	numChunks := fileSize / chunkSize
	if numChunks == 0 {
		numChunks = 1 // a file smaller than one chunk still needs a reader
	}
	readers := make([]io.Reader, numChunks)
	for i := 0; i < len(readers); i++ {
		r, err := os.Open(filePath)
		util.CheckErrorNoUsage(err)
		defer r.Close()
		_, err = r.Seek(int64(i)*chunkSize, io.SeekStart)

		// TODO: fix panics
		d.PanicIfError(err)

		limit := chunkSize
		if i == len(readers)-1 {
			limit += fileSize % chunkSize // adjust the last chunk to include the final bytes.
		}
		lr := io.LimitReader(r, limit)
		readers[i] = lr
	}

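	// Resolve the dataset spec and open the database that contains it.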
	cfg := config.NewResolver()
	db, vrw, ds, err := cfg.GetDataset(ctx, dsPath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Could not get dataset: %s\n", err)
		return 1
	}
	defer db.Close()

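	// Concatenate the chunk readers into a single blob value.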
	blob, err := types.NewBlob(ctx, vrw, readers...)

	// TODO: fix panics
	d.PanicIfError(err)

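	// Commit the blob as the new head value of the dataset.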
	_, err = datas.CommitValue(ctx, db, ds, blob)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error committing: %s\n", err)
		return 1
	}
	return 0
}