github.com/humans-group/gqlgen@v0.7.2/example/dataloader/addressloader_gen.go (about)

     1  // Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
     2  
     3  package dataloader
     4  
     5  import (
     6  	"sync"
     7  	"time"
     8  )
     9  
// AddressLoader batches and caches requests
type AddressLoader struct {
	// this method provides the data for the loader; given a set of keys it
	// must return values (and/or errors) positionally matching those keys
	fetch func(keys []int) ([]*Address, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[int]*Address

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *addressBatch

	// mutex to prevent races
	mu sync.Mutex
}
    33  
// addressBatch collects keys until the loader flushes it (by timeout or by
// reaching maxBatch), then holds the fetch results for waiting thunks.
type addressBatch struct {
	// keys queued for this batch, in insertion order
	keys []int
	// data[i] is the fetched value for keys[i]; populated by end()
	data []*Address
	// either a single batch-wide error, or one error per key
	error []error
	// set (under the loader mutex) once the batch is being finalized,
	// so it is dispatched at most once
	closing bool
	// closed after fetch returns; readers may then access data/error
	done chan struct{}
}
    41  
    42  // Load a address by key, batching and caching will be applied automatically
    43  func (l *AddressLoader) Load(key int) (*Address, error) {
    44  	return l.LoadThunk(key)()
    45  }
    46  
    47  // LoadThunk returns a function that when called will block waiting for a address.
    48  // This method should be used if you want one goroutine to make requests to many
    49  // different data loaders without blocking until the thunk is called.
    50  func (l *AddressLoader) LoadThunk(key int) func() (*Address, error) {
    51  	l.mu.Lock()
    52  	if it, ok := l.cache[key]; ok {
    53  		l.mu.Unlock()
    54  		return func() (*Address, error) {
    55  			return it, nil
    56  		}
    57  	}
    58  	if l.batch == nil {
    59  		l.batch = &addressBatch{done: make(chan struct{})}
    60  	}
    61  	batch := l.batch
    62  	pos := batch.keyIndex(l, key)
    63  	l.mu.Unlock()
    64  
    65  	return func() (*Address, error) {
    66  		<-batch.done
    67  
    68  		var data *Address
    69  		if pos < len(batch.data) {
    70  			data = batch.data[pos]
    71  		}
    72  
    73  		var err error
    74  		// its convenient to be able to return a single error for everything
    75  		if len(batch.error) == 1 {
    76  			err = batch.error[0]
    77  		} else if batch.error != nil {
    78  			err = batch.error[pos]
    79  		}
    80  
    81  		if err == nil {
    82  			l.mu.Lock()
    83  			l.unsafeSet(key, data)
    84  			l.mu.Unlock()
    85  		}
    86  
    87  		return data, err
    88  	}
    89  }
    90  
    91  // LoadAll fetches many keys at once. It will be broken into appropriate sized
    92  // sub batches depending on how the loader is configured
    93  func (l *AddressLoader) LoadAll(keys []int) ([]*Address, []error) {
    94  	results := make([]func() (*Address, error), len(keys))
    95  
    96  	for i, key := range keys {
    97  		results[i] = l.LoadThunk(key)
    98  	}
    99  
   100  	addresss := make([]*Address, len(keys))
   101  	errors := make([]error, len(keys))
   102  	for i, thunk := range results {
   103  		addresss[i], errors[i] = thunk()
   104  	}
   105  	return addresss, errors
   106  }
   107  
   108  // Prime the cache with the provided key and value. If the key already exists, no change is made
   109  // and false is returned.
   110  // (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
   111  func (l *AddressLoader) Prime(key int, value *Address) bool {
   112  	l.mu.Lock()
   113  	var found bool
   114  	if _, found = l.cache[key]; !found {
   115  		// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
   116  		// and end up with the whole cache pointing to the same value.
   117  		cpy := *value
   118  		l.unsafeSet(key, &cpy)
   119  	}
   120  	l.mu.Unlock()
   121  	return !found
   122  }
   123  
   124  // Clear the value at key from the cache, if it exists
   125  func (l *AddressLoader) Clear(key int) {
   126  	l.mu.Lock()
   127  	delete(l.cache, key)
   128  	l.mu.Unlock()
   129  }
   130  
   131  func (l *AddressLoader) unsafeSet(key int, value *Address) {
   132  	if l.cache == nil {
   133  		l.cache = map[int]*Address{}
   134  	}
   135  	l.cache[key] = value
   136  }
   137  
   138  // keyIndex will return the location of the key in the batch, if its not found
   139  // it will add the key to the batch
   140  func (b *addressBatch) keyIndex(l *AddressLoader, key int) int {
   141  	for i, existingKey := range b.keys {
   142  		if key == existingKey {
   143  			return i
   144  		}
   145  	}
   146  
   147  	pos := len(b.keys)
   148  	b.keys = append(b.keys, key)
   149  	if pos == 0 {
   150  		go b.startTimer(l)
   151  	}
   152  
   153  	if l.maxBatch != 0 && pos >= l.maxBatch-1 {
   154  		if !b.closing {
   155  			b.closing = true
   156  			l.batch = nil
   157  			go b.end(l)
   158  		}
   159  	}
   160  
   161  	return pos
   162  }
   163  
   164  func (b *addressBatch) startTimer(l *AddressLoader) {
   165  	time.Sleep(l.wait)
   166  	l.mu.Lock()
   167  
   168  	// we must have hit a batch limit and are already finalizing this batch
   169  	if b.closing {
   170  		l.mu.Unlock()
   171  		return
   172  	}
   173  
   174  	l.batch = nil
   175  	l.mu.Unlock()
   176  
   177  	b.end(l)
   178  }
   179  
   180  func (b *addressBatch) end(l *AddressLoader) {
   181  	b.data, b.error = l.fetch(b.keys)
   182  	close(b.done)
   183  }