github.com/Kartograf/gqlgen@v0.7.2/example/dataloader/itemsliceloader_gen.go (about)

     1  // Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
     2  
     3  package dataloader
     4  
     5  import (
     6  	"sync"
     7  	"time"
     8  )
     9  
// ItemSliceLoader batches and caches requests
type ItemSliceLoader struct {
	// this method provides the data for the loader
	fetch func(keys []int) ([][]Item, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[int][]Item

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *itemSliceBatch

	// mutex to prevent races
	mu sync.Mutex
}
    33  
// itemSliceBatch collects keys for one fetch call. A batch is finalized either
// when the loader's wait timeout elapses or when maxBatch keys have been
// collected, whichever comes first.
type itemSliceBatch struct {
	keys    []int
	data    [][]Item      // fetch results, index-aligned with keys
	error   []error       // fetch errors: one per key, or a single error shared by all keys
	closing bool          // set once the batch is detached from the loader and being fetched
	done    chan struct{} // closed after fetch completes; thunks block on this
}
    41  
    42  // Load a item by key, batching and caching will be applied automatically
    43  func (l *ItemSliceLoader) Load(key int) ([]Item, error) {
    44  	return l.LoadThunk(key)()
    45  }
    46  
// LoadThunk returns a function that when called will block waiting for an item.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *ItemSliceLoader) LoadThunk(key int) func() ([]Item, error) {
	l.mu.Lock()
	// Cache hit: return a thunk that resolves immediately with no error.
	if it, ok := l.cache[key]; ok {
		l.mu.Unlock()
		return func() ([]Item, error) {
			return it, nil
		}
	}
	// Lazily open a new batch; keys accumulate on it until the wait timer
	// fires or maxBatch is reached (see keyIndex/startTimer).
	if l.batch == nil {
		l.batch = &itemSliceBatch{done: make(chan struct{})}
	}
	// Capture the batch locally: l.batch may be replaced by another goroutine
	// after the mutex is released, but this thunk must resolve against the
	// batch its key was queued on.
	batch := l.batch
	pos := batch.keyIndex(l, key)
	l.mu.Unlock()

	return func() ([]Item, error) {
		// Block until the batch has been fetched (done is closed by end()).
		<-batch.done

		var data []Item
		if pos < len(batch.data) {
			data = batch.data[pos]
		}

		var err error
		// its convenient to be able to return a single error for everything
		if len(batch.error) == 1 {
			err = batch.error[0]
		} else if batch.error != nil {
			err = batch.error[pos]
		}

		// Only successful results are written to the cache.
		if err == nil {
			l.mu.Lock()
			l.unsafeSet(key, data)
			l.mu.Unlock()
		}

		return data, err
	}
}
    90  
    91  // LoadAll fetches many keys at once. It will be broken into appropriate sized
    92  // sub batches depending on how the loader is configured
    93  func (l *ItemSliceLoader) LoadAll(keys []int) ([][]Item, []error) {
    94  	results := make([]func() ([]Item, error), len(keys))
    95  
    96  	for i, key := range keys {
    97  		results[i] = l.LoadThunk(key)
    98  	}
    99  
   100  	items := make([][]Item, len(keys))
   101  	errors := make([]error, len(keys))
   102  	for i, thunk := range results {
   103  		items[i], errors[i] = thunk()
   104  	}
   105  	return items, errors
   106  }
   107  
   108  // Prime the cache with the provided key and value. If the key already exists, no change is made
   109  // and false is returned.
   110  // (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
   111  func (l *ItemSliceLoader) Prime(key int, value []Item) bool {
   112  	l.mu.Lock()
   113  	var found bool
   114  	if _, found = l.cache[key]; !found {
   115  		l.unsafeSet(key, value)
   116  	}
   117  	l.mu.Unlock()
   118  	return !found
   119  }
   120  
   121  // Clear the value at key from the cache, if it exists
   122  func (l *ItemSliceLoader) Clear(key int) {
   123  	l.mu.Lock()
   124  	delete(l.cache, key)
   125  	l.mu.Unlock()
   126  }
   127  
   128  func (l *ItemSliceLoader) unsafeSet(key int, value []Item) {
   129  	if l.cache == nil {
   130  		l.cache = map[int][]Item{}
   131  	}
   132  	l.cache[key] = value
   133  }
   134  
   135  // keyIndex will return the location of the key in the batch, if its not found
   136  // it will add the key to the batch
   137  func (b *itemSliceBatch) keyIndex(l *ItemSliceLoader, key int) int {
   138  	for i, existingKey := range b.keys {
   139  		if key == existingKey {
   140  			return i
   141  		}
   142  	}
   143  
   144  	pos := len(b.keys)
   145  	b.keys = append(b.keys, key)
   146  	if pos == 0 {
   147  		go b.startTimer(l)
   148  	}
   149  
   150  	if l.maxBatch != 0 && pos >= l.maxBatch-1 {
   151  		if !b.closing {
   152  			b.closing = true
   153  			l.batch = nil
   154  			go b.end(l)
   155  		}
   156  	}
   157  
   158  	return pos
   159  }
   160  
// startTimer finalizes the batch after the loader's wait duration has
// elapsed, unless keyIndex already finalized it by hitting maxBatch.
// It runs in its own goroutine, started when the batch gets its first key.
func (b *itemSliceBatch) startTimer(l *ItemSliceLoader) {
	time.Sleep(l.wait)
	l.mu.Lock()

	// we must have hit a batch limit and are already finalizing this batch
	if b.closing {
		l.mu.Unlock()
		return
	}

	// Detach the batch so subsequent loads open a new one, then fetch
	// outside the lock.
	l.batch = nil
	l.mu.Unlock()

	b.end(l)
}
   176  
   177  func (b *itemSliceBatch) end(l *ItemSliceLoader) {
   178  	b.data, b.error = l.fetch(b.keys)
   179  	close(b.done)
   180  }