github.com/HaswinVidanage/gqlgen@v0.8.1-0.20220609041233-69528c1bf712/example/dataloader/addressloader_gen.go

// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.

package dataloader

import (
	"sync"
	"time"
)

// AddressLoaderConfig captures the config to create a new AddressLoader
type AddressLoaderConfig struct {
	// Fetch is a method that provides the data for the loader
	Fetch func(keys []int) ([]*Address, []error)

	// Wait is how long to wait before sending a batch
	Wait time.Duration

	// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
	MaxBatch int
}

// NewAddressLoader creates a new AddressLoader given a fetch, wait, and maxBatch
func NewAddressLoader(config AddressLoaderConfig) *AddressLoader {
	return &AddressLoader{
		fetch:    config.Fetch,
		wait:     config.Wait,
		maxBatch: config.MaxBatch,
	}
}

// AddressLoader batches and caches requests
type AddressLoader struct {
	// this method provides the data for the loader
	fetch func(keys []int) ([]*Address, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[int]*Address

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *addressBatch

	// mutex to prevent races
	mu sync.Mutex
}

type addressBatch struct {
	keys    []int
	data    []*Address
	error   []error
	closing bool
	done    chan struct{}
}

// Load an address by key; batching and caching will be applied automatically
func (l *AddressLoader) Load(key int) (*Address, error) {
	return l.LoadThunk(key)()
}

// LoadThunk returns a function that when called will block waiting for an address.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *AddressLoader) LoadThunk(key int) func() (*Address, error) {
	l.mu.Lock()
	if it, ok := l.cache[key]; ok {
		l.mu.Unlock()
		return func() (*Address, error) {
			return it, nil
		}
	}
	if l.batch == nil {
		l.batch = &addressBatch{done: make(chan struct{})}
	}
	batch := l.batch
	pos := batch.keyIndex(l, key)
	l.mu.Unlock()

	return func() (*Address, error) {
		<-batch.done

		var data *Address
		if pos < len(batch.data) {
			data = batch.data[pos]
		}

		var err error
		// it's convenient to be able to return a single error for everything
		if len(batch.error) == 1 {
			err = batch.error[0]
		} else if batch.error != nil {
			err = batch.error[pos]
		}

		if err == nil {
			l.mu.Lock()
			l.unsafeSet(key, data)
			l.mu.Unlock()
		}

		return data, err
	}
}

// LoadAll fetches many keys at once. It will be broken into appropriately sized
// sub-batches depending on how the loader is configured.
func (l *AddressLoader) LoadAll(keys []int) ([]*Address, []error) {
	results := make([]func() (*Address, error), len(keys))

	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}

	addresses := make([]*Address, len(keys))
	errors := make([]error, len(keys))
	for i, thunk := range results {
		addresses[i], errors[i] = thunk()
	}
	return addresses, errors
}

// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.Clear(key), then call Prime again.)
func (l *AddressLoader) Prime(key int, value *Address) bool {
	l.mu.Lock()
	var found bool
	if _, found = l.cache[key]; !found {
		// make a copy when writing to the cache; it's easy to pass a pointer in from a loop var
		// and end up with the whole cache pointing to the same value.
		cpy := *value
		l.unsafeSet(key, &cpy)
	}
	l.mu.Unlock()
	return !found
}

// Clear the value at key from the cache, if it exists
func (l *AddressLoader) Clear(key int) {
	l.mu.Lock()
	delete(l.cache, key)
	l.mu.Unlock()
}

func (l *AddressLoader) unsafeSet(key int, value *Address) {
	if l.cache == nil {
		l.cache = map[int]*Address{}
	}
	l.cache[key] = value
}

// keyIndex will return the location of the key in the batch; if it's not found
// it will add the key to the batch
func (b *addressBatch) keyIndex(l *AddressLoader, key int) int {
	for i, existingKey := range b.keys {
		if key == existingKey {
			return i
		}
	}

	pos := len(b.keys)
	b.keys = append(b.keys, key)
	if pos == 0 {
		go b.startTimer(l)
	}

	if l.maxBatch != 0 && pos >= l.maxBatch-1 {
		if !b.closing {
			b.closing = true
			l.batch = nil
			go b.end(l)
		}
	}

	return pos
}

func (b *addressBatch) startTimer(l *AddressLoader) {
	time.Sleep(l.wait)
	l.mu.Lock()

	// we must have hit a batch limit and are already finalizing this batch
	if b.closing {
		l.mu.Unlock()
		return
	}

	l.batch = nil
	l.mu.Unlock()

	b.end(l)
}

func (b *addressBatch) end(l *AddressLoader) {
	b.data, b.error = l.fetch(b.keys)
	close(b.done)
}
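
The generated loader is only the batching and caching machinery; the application supplies the Fetch function and the Wait/MaxBatch tuning, and typically constructs one loader per request so the cache stays request-scoped. The sketch below is a minimal illustration of that wiring, not part of the generated file: it is assumed to live in the same dataloader package, fetchAddressesByIDs is a hypothetical application-side batch query assumed to return (map[int]*Address, error), and the 1ms / 100 settings are arbitrary example values.

// newAddressLoader is an illustrative sketch only; fetchAddressesByIDs is a
// hypothetical batch query belonging to the application, not to this package.
func newAddressLoader() *AddressLoader {
	return NewAddressLoader(AddressLoaderConfig{
		Wait:     1 * time.Millisecond, // collect keys for up to 1ms before fetching
		MaxBatch: 100,                  // or flush early once 100 keys have been queued
		Fetch: func(keys []int) ([]*Address, []error) {
			byID, err := fetchAddressesByIDs(keys) // hypothetical: one query for the whole batch
			if err != nil {
				// A single-element error slice is applied to every key in the batch
				// (see the len(batch.error) == 1 case in LoadThunk).
				return nil, []error{err}
			}
			// Results must be returned positionally, in the same order as keys.
			addresses := make([]*Address, len(keys))
			for i, key := range keys {
				addresses[i] = byID[key]
			}
			return addresses, nil
		},
	})
}

Callers then resolve individual keys with loader.Load(id); concurrent Load calls made within the Wait window (or until MaxBatch keys accumulate) are collapsed into a single Fetch.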