github.com/unigraph-dev/dgraph@v1.1.1-0.20200923154953-8b52b426f765/xidmap/xidmap.go

/*
 * Copyright 2017-2018 Dgraph Labs, Inc. and Contributors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package xidmap

import (
	"context"
	"encoding/binary"
	"math/rand"
	"sync"
	"sync/atomic"
	"time"

	"google.golang.org/grpc"

	"github.com/dgraph-io/badger"
	"github.com/dgraph-io/dgraph/protos/pb"
	"github.com/dgraph-io/dgraph/x"
	farm "github.com/dgryski/go-farm"
	"github.com/golang/glog"
)

// XidMap allocates and tracks mappings between Xids and Uids in a threadsafe
// manner. Mappings are sharded across in-memory maps for fast, concurrent
// lookups, and can optionally be persisted to a Badger DB.
type XidMap struct {
	shards     []*shard
	newRanges  chan *pb.AssignedIds
	zc         pb.ZeroClient
	maxUidSeen uint64

	// Optionally, this can be set to persist the mappings.
	writer *badger.WriteBatch
}

// shard guards one partition of the xid to uid map. Embedding block lets each
// shard hand out UIDs from its own leased range while holding only its own lock.
type shard struct {
	sync.RWMutex
	block

	uidMap map[string]uint64
}

// block is a contiguous range of UIDs leased from Zero. UIDs are handed out
// from start to end, inclusive.
type block struct {
	start, end uint64
}

// assign returns the next UID from the block, pulling a fresh range from ch
// once the current one is exhausted. It assumes the write lock is already
// acquired.
func (b *block) assign(ch <-chan *pb.AssignedIds) uint64 {
	if b.end == 0 || b.start > b.end {
		// The block is unused or exhausted; wait for the next leased range.
		newRange := <-ch
		b.start, b.end = newRange.StartId, newRange.EndId
	}
	x.AssertTrue(b.start <= b.end)
	uid := b.start
	b.start++
	return uid
}
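
// For illustration only (the numbers below are hypothetical): if Zero leases
// the range [1001, 2000], a block serves 1001, 1002, ... up to 2000, and only
// then reads the next range from the channel:
//
//	b := block{start: 1001, end: 2000}
//	first := b.assign(ch)  // 1001
//	second := b.assign(ch) // 1002; the channel is read again only after 2000 is used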

// New creates an XidMap. The zero connection must be valid for UID allocations
// to happen. Optionally, a badger.DB can be provided to persist the xid to uid
// allocations; this adds some latency to the assignment operations.
func New(zero *grpc.ClientConn, db *badger.DB) *XidMap {
	numShards := 32
	xm := &XidMap{
		newRanges: make(chan *pb.AssignedIds, numShards),
		shards:    make([]*shard, numShards),
	}
	for i := range xm.shards {
		xm.shards[i] = &shard{
			uidMap: make(map[string]uint64),
		}
	}
	if db != nil {
		// A DB was provided, so load all existing xid -> uid mappings into memory
		// and set up a write batch for persisting new ones.
		xm.writer = db.NewWriteBatch()

		err := db.View(func(txn *badger.Txn) error {
			var count int
			opt := badger.DefaultIteratorOptions
			opt.PrefetchValues = false
			itr := txn.NewIterator(opt)
			defer itr.Close()
			for itr.Rewind(); itr.Valid(); itr.Next() {
				item := itr.Item()
				key := string(item.Key())
				sh := xm.shardFor(key)
				err := item.Value(func(val []byte) error {
					uid := binary.BigEndian.Uint64(val)
					// No need to acquire a lock. This is all serial access.
					sh.uidMap[key] = uid
					return nil
				})
				if err != nil {
					return err
				}
				count++
			}
			glog.Infof("Loaded up %d xid to uid mappings", count)
			return nil
		})
		x.Check(err)
	}
	xm.zc = pb.NewZeroClient(zero)

	// Lease ranges of 10,000 UIDs at a time from Zero in the background, keeping
	// up to numShards ranges buffered in newRanges. On failure, retry with
	// exponential backoff capped at maxBackoff.
	go func() {
		const initBackoff = 10 * time.Millisecond
		const maxBackoff = 5 * time.Second
		backoff := initBackoff
		for {
			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
			assigned, err := xm.zc.AssignUids(ctx, &pb.Num{Val: 1e4})
			glog.V(1).Infof("Assigned Uids: %+v. Err: %v", assigned, err)
			cancel()
			if err == nil {
				backoff = initBackoff
				xm.updateMaxSeen(assigned.EndId)
				xm.newRanges <- assigned
				continue
			}
			glog.Errorf("Error while getting lease: %v\n", err)
			backoff *= 2
			if backoff > maxBackoff {
				backoff = maxBackoff
			}
			time.Sleep(backoff)
		}
	}()
	return xm
}
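
// newFromZeroAddr is an illustrative sketch and not part of the original file:
// it shows the typical wiring a caller does before using the package. The
// address, the insecure dial option, and the nil DB (in-memory only) are
// assumptions made for the example.
func newFromZeroAddr(zeroAddr string) (*XidMap, error) {
	// Dial the Zero server that leases UID ranges to this XidMap.
	conn, err := grpc.Dial(zeroAddr, grpc.WithInsecure())
	if err != nil {
		return nil, err
	}
	// Pass a non-nil *badger.DB instead of nil to load and persist mappings.
	return New(conn, nil), nil
}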

// shardFor picks the shard that owns the given xid, using a farmhash
// fingerprint of the xid.
func (m *XidMap) shardFor(xid string) *shard {
	fp := farm.Fingerprint32([]byte(xid))
	idx := fp % uint32(len(m.shards))
	return m.shards[idx]
}

// AssignUid creates a new or looks up an existing XID to UID mapping.
func (m *XidMap) AssignUid(xid string) uint64 {
	sh := m.shardFor(xid)
	sh.RLock()
	uid := sh.uidMap[xid]
	sh.RUnlock()
	// Fast path: the mapping already exists.
	if uid > 0 {
		return uid
	}

	sh.Lock()
	defer sh.Unlock()

	// Re-check under the write lock, in case another goroutine assigned the xid
	// while we were waiting.
	uid = sh.uidMap[xid]
	if uid > 0 {
		return uid
	}

	newUid := sh.assign(m.newRanges)
	sh.uidMap[xid] = newUid

	if m.writer != nil {
		// Queue the new mapping for persistence; Flush commits the batch.
		var uidBuf [8]byte
		binary.BigEndian.PutUint64(uidBuf[:], newUid)
		if err := m.writer.Set([]byte(xid), uidBuf[:]); err != nil {
			panic(err)
		}
	}
	return newUid
}
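
// assignAllExample is an illustrative sketch and not part of the original
// file: it shows that AssignUid is safe to call from many goroutines at once.
// The xids slice and the result map are assumptions made for the example.
func assignAllExample(xm *XidMap, xids []string) map[string]uint64 {
	var (
		mu  sync.Mutex
		wg  sync.WaitGroup
		out = make(map[string]uint64, len(xids))
	)
	for _, xid := range xids {
		wg.Add(1)
		go func(xid string) {
			defer wg.Done()
			uid := xm.AssignUid(xid) // concurrent callers see a consistent mapping
			mu.Lock()
			out[xid] = uid
			mu.Unlock()
		}(xid)
	}
	wg.Wait()
	return out
}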

// Current returns the next UID that this shard's block would hand out.
func (sh *shard) Current() uint64 {
	sh.RLock()
	defer sh.RUnlock()
	return sh.start
}

// updateMaxSeen raises maxUidSeen to max via a CAS loop; it never lowers it.
func (m *XidMap) updateMaxSeen(max uint64) {
	for {
		prev := atomic.LoadUint64(&m.maxUidSeen)
		if prev >= max {
			return
		}
		atomic.CompareAndSwapUint64(&m.maxUidSeen, prev, max)
	}
}

// BumpTo can be used to make Zero allocate UIDs up to this given number. Attempts are made to
// ensure that all future UID allocations are higher than this one, but results are not guaranteed.
func (m *XidMap) BumpTo(uid uint64) {
	curMax := atomic.LoadUint64(&m.maxUidSeen)
	if uid <= curMax {
		return
	}

	for {
		glog.V(1).Infof("Bumping up to %v", uid)
		num := x.Max(uid-curMax, 1e4)
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		assigned, err := m.zc.AssignUids(ctx, &pb.Num{Val: num})
		cancel()
		if err == nil {
			glog.V(1).Infof("Requested bump: %d. Got assigned: %v", uid, assigned)
			m.updateMaxSeen(assigned.EndId)
			return
		}
		glog.Errorf("While requesting AssignUids(%d): %v", num, err)
	}
}

// AllocateUid returns a single UID without creating an xid to uid mapping.
func (m *XidMap) AllocateUid() uint64 {
	sh := m.shards[rand.Intn(len(m.shards))]
	sh.Lock()
	defer sh.Unlock()
	return sh.assign(m.newRanges)
}
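
// bumpExample is an illustrative sketch and not part of the original file:
// after UIDs have been consumed outside this XidMap (for example, taken from a
// previous export), bumping Zero's lease past the highest used UID helps keep
// later allocations from colliding with it. maxUsedUid is an assumed input.
func bumpExample(xm *XidMap, maxUsedUid uint64) uint64 {
	xm.BumpTo(maxUsedUid)
	// Later allocations tend to come from ranges leased after the bump, so they
	// are intended (though, per BumpTo's contract, not guaranteed) to be higher.
	return xm.AllocateUid()
}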

// Flush must be called if a DB is provided to XidMap, so that the batched
// xid to uid writes are committed.
func (m *XidMap) Flush() error {
	if m.writer == nil {
		return nil
	}
	return m.writer.Flush()
}
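
// persistExample is an illustrative sketch and not part of the original file:
// when a badger.DB is passed to New, Flush must be called before the DB is
// closed so that the batched writes are committed. The zero connection, db and
// xids arguments are assumptions made for the example.
func persistExample(zero *grpc.ClientConn, db *badger.DB, xids []string) error {
	xm := New(zero, db)
	for _, xid := range xids {
		_ = xm.AssignUid(xid) // each new mapping is queued on the WriteBatch
	}
	return xm.Flush() // commit the queued mappings before closing db
}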