github.com/gitbundle/modules@v0.0.0-20231025071548-85b91c5c3b01/nosql/manager_leveldb.go

// Copyright 2023 The GitBundle Inc. All rights reserved.
// Copyright 2017 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package nosql

import (
	"fmt"
	"path"
	"runtime/pprof"
	"strconv"
	"strings"

	"github.com/gitbundle/modules/log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

// CloseLevelDB releases a reference to a levelDB connection and closes the underlying database once the last reference is gone.
func (m *Manager) CloseLevelDB(connection string) error {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	db, ok := m.LevelDBConnections[connection]
	if !ok {
		// Try the full URI
		uri := ToLevelDBURI(connection)
		db, ok = m.LevelDBConnections[uri.String()]

		if !ok {
			// Try the datadir directly
			dataDir := path.Join(uri.Host, uri.Path)

			db, ok = m.LevelDBConnections[dataDir]
		}
	}
	if !ok {
		return nil
	}

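	// Release one reference; only close the underlying database once the last reference is gone.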
	db.count--
	if db.count > 0 {
		return nil
	}

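	// Drop every alias under which this connection was registered before closing it.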
	for _, name := range db.name {
		delete(m.LevelDBConnections, name)
	}
	return db.db.Close()
}

// GetLevelDB gets a levelDB for a particular connection
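//
// A minimal usage sketch, given a *Manager m (illustrative only: the connection
// string, its path and the nosync option are assumptions, not values mandated by
// this package):
//
//	db, err := m.GetLevelDB("leveldb:///var/lib/app/queue?nosync=true")
//	if err != nil {
//		return err
//	}
//	defer m.CloseLevelDB("leveldb:///var/lib/app/queue?nosync=true")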
func (m *Manager) GetLevelDB(connection string) (db *leveldb.DB, err error) {
	// Because we want to associate any goroutines created by this call with the main nosqldb context, we
	// need to wrap this call in a goroutine labelled with the nosqldb context
	done := make(chan struct{})
	var recovered interface{}
	go func() {
		defer func() {
			recovered = recover()
			if recovered != nil {
				log.Critical("PANIC during GetLevelDB: %v\nStacktrace: %s", recovered, log.Stack(2))
			}
			close(done)
		}()
		pprof.SetGoroutineLabels(m.ctx)

		db, err = m.getLevelDB(connection)
	}()
	<-done
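	// Any panic has already been logged inside the labelled goroutine; re-raise it on the calling goroutine.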
	if recovered != nil {
		panic(recovered)
	}
	return
}

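// getLevelDB opens the levelDB described by the connection string, reusing any
// existing connection registered under the same string, URI form or data
// directory, and increments the holder's reference count.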
func (m *Manager) getLevelDB(connection string) (*leveldb.DB, error) {
	// Convert the provided connection description to the common format
	uri := ToLevelDBURI(connection)

	// Get the datadir
	dataDir := path.Join(uri.Host, uri.Path)

	m.mutex.Lock()
	defer m.mutex.Unlock()
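	// Reuse an existing connection registered under either the raw connection string or its canonical URI form.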
	db, ok := m.LevelDBConnections[connection]
	if ok {
		db.count++

		return db.db, nil
	}

	db, ok = m.LevelDBConnections[uri.String()]
	if ok {
		db.count++

		return db.db, nil
	}

	// if there is already a connection to this leveldb reuse that
	// NOTE: if there are differing options then only the options of the first leveldb connection will be used
	db, ok = m.LevelDBConnections[dataDir]
	if ok {
		db.count++
		log.Warn("Duplicate connection to level db: %s with different connection strings. Initial connection: %s. This connection: %s", dataDir, db.name[0], connection)
		db.name = append(db.name, connection)
		m.LevelDBConnections[connection] = db
		return db.db, nil
	}
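	// No existing connection was found: create a new holder known under all of its alias forms.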
	db = &levelDBHolder{
		name: []string{connection, uri.String(), dataDir},
	}

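	// Map recognised query parameters onto goleveldb opt.Options fields. Keys are
	// matched case-insensitively (with separators stripped by replacer); values that
	// fail to parse are silently ignored and leave the zero value in place.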
	opts := &opt.Options{}
	for k, v := range uri.Query() {
		switch replacer.Replace(strings.ToLower(k)) {
		case "blockcachecapacity":
			opts.BlockCacheCapacity, _ = strconv.Atoi(v[0])
		case "blockcacheevictremoved":
			opts.BlockCacheEvictRemoved, _ = strconv.ParseBool(v[0])
		case "blockrestartinterval":
			opts.BlockRestartInterval, _ = strconv.Atoi(v[0])
		case "blocksize":
			opts.BlockSize, _ = strconv.Atoi(v[0])
		case "compactionexpandlimitfactor":
			opts.CompactionExpandLimitFactor, _ = strconv.Atoi(v[0])
		case "compactiongpoverlapsfactor":
			opts.CompactionGPOverlapsFactor, _ = strconv.Atoi(v[0])
		case "compactionl0trigger":
			opts.CompactionL0Trigger, _ = strconv.Atoi(v[0])
		case "compactionsourcelimitfactor":
			opts.CompactionSourceLimitFactor, _ = strconv.Atoi(v[0])
		case "compactiontablesize":
			opts.CompactionTableSize, _ = strconv.Atoi(v[0])
		case "compactiontablesizemultiplier":
			opts.CompactionTableSizeMultiplier, _ = strconv.ParseFloat(v[0], 64)
		case "compactiontablesizemultiplierperlevel":
			for _, val := range v {
				f, _ := strconv.ParseFloat(val, 64)
				opts.CompactionTableSizeMultiplierPerLevel = append(opts.CompactionTableSizeMultiplierPerLevel, f)
			}
		case "compactiontotalsize":
			opts.CompactionTotalSize, _ = strconv.Atoi(v[0])
		case "compactiontotalsizemultiplier":
			opts.CompactionTotalSizeMultiplier, _ = strconv.ParseFloat(v[0], 64)
		case "compactiontotalsizemultiplierperlevel":
			for _, val := range v {
				f, _ := strconv.ParseFloat(val, 64)
				opts.CompactionTotalSizeMultiplierPerLevel = append(opts.CompactionTotalSizeMultiplierPerLevel, f)
			}
		case "compression":
			val, _ := strconv.Atoi(v[0])
			opts.Compression = opt.Compression(val)
		case "disablebufferpool":
			opts.DisableBufferPool, _ = strconv.ParseBool(v[0])
		case "disableblockcache":
			opts.DisableBlockCache, _ = strconv.ParseBool(v[0])
		case "disablecompactionbackoff":
			opts.DisableCompactionBackoff, _ = strconv.ParseBool(v[0])
		case "disablelargebatchtransaction":
			opts.DisableLargeBatchTransaction, _ = strconv.ParseBool(v[0])
		case "errorifexist":
			opts.ErrorIfExist, _ = strconv.ParseBool(v[0])
		case "errorifmissing":
			opts.ErrorIfMissing, _ = strconv.ParseBool(v[0])
		case "iteratorsamplingrate":
			opts.IteratorSamplingRate, _ = strconv.Atoi(v[0])
		case "nosync":
			opts.NoSync, _ = strconv.ParseBool(v[0])
		case "nowritemerge":
			opts.NoWriteMerge, _ = strconv.ParseBool(v[0])
		case "openfilescachecapacity":
			opts.OpenFilesCacheCapacity, _ = strconv.Atoi(v[0])
		case "readonly":
			opts.ReadOnly, _ = strconv.ParseBool(v[0])
		case "strict":
			val, _ := strconv.Atoi(v[0])
			opts.Strict = opt.Strict(val)
		case "writebuffer":
			opts.WriteBuffer, _ = strconv.Atoi(v[0])
		case "writel0pausetrigger":
			opts.WriteL0PauseTrigger, _ = strconv.Atoi(v[0])
		case "writel0slowdowntrigger":
			opts.WriteL0SlowdownTrigger, _ = strconv.Atoi(v[0])
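		// clientname registers an additional alias under which this connection can later be looked up and closed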
		case "clientname":
			db.name = append(db.name, v[0])
		}
	}

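	// Open the database. A corrupted database is recovered in place via RecoverFile;
	// any other error (including a lock held by another process) is returned to the caller.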
	var err error
	db.db, err = leveldb.OpenFile(dataDir, opts)
	if err != nil {
		if !errors.IsCorrupted(err) {
			if strings.Contains(err.Error(), "resource temporarily unavailable") {
				err = fmt.Errorf("unable to lock level db at %s: %w", dataDir, err)
				return nil, err
			}

			err = fmt.Errorf("unable to open level db at %s: %w", dataDir, err)
			return nil, err
		}
		db.db, err = leveldb.RecoverFile(dataDir, opts)
	}

	if err != nil {
		return nil, err
	}

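	// Register the holder under every alias (connection string, URI form, data
	// directory and any clientname) so later lookups share this connection.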
	for _, name := range db.name {
		m.LevelDBConnections[name] = db
	}
	db.count++
	return db.db, nil
}