github.com/kyma-incubator/compass/components/director@v0.0.0-20230623144113-d764f56ff805/internal/dataloaders/fetchrequestapidefloader_gen.go

// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.

package dataloader

import (
	"sync"
	"time"

	"github.com/kyma-incubator/compass/components/director/pkg/graphql"
)

// FetchRequestAPIDefLoaderConfig captures the config to create a new FetchRequestAPIDefLoader
type FetchRequestAPIDefLoaderConfig struct {
	// Fetch is a method that provides the data for the loader
	Fetch func(keys []ParamFetchRequestAPIDef) ([]*graphql.FetchRequest, []error)

	// Wait is how long to wait before sending a batch
	Wait time.Duration

	// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
	MaxBatch int
}

// NewFetchRequestAPIDefLoader creates a new FetchRequestAPIDefLoader given a fetch, wait, and maxBatch
func NewFetchRequestAPIDefLoader(config FetchRequestAPIDefLoaderConfig) *FetchRequestAPIDefLoader {
	return &FetchRequestAPIDefLoader{
		fetch:    config.Fetch,
		wait:     config.Wait,
		maxBatch: config.MaxBatch,
	}
}

// FetchRequestAPIDefLoader batches and caches requests
type FetchRequestAPIDefLoader struct {
	// this method provides the data for the loader
	fetch func(keys []ParamFetchRequestAPIDef) ([]*graphql.FetchRequest, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[ParamFetchRequestAPIDef]*graphql.FetchRequest

	// the current batch. keys will continue to be collected until the timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *fetchRequestAPIDefLoaderBatch

	// mutex to prevent races
	mu sync.Mutex
}

type fetchRequestAPIDefLoaderBatch struct {
	keys    []ParamFetchRequestAPIDef
	data    []*graphql.FetchRequest
	error   []error
	closing bool
	done    chan struct{}
}

// Load a FetchRequest by key, batching and caching will be applied automatically
func (l *FetchRequestAPIDefLoader) Load(key ParamFetchRequestAPIDef) (*graphql.FetchRequest, error) {
	return l.LoadThunk(key)()
}
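// Example (not part of the generated file): a minimal sketch of constructing
// the loader. The resolve callback and the numbers below are hypothetical
// illustrations, not values from the director. Fetch must return one result
// per key, in key order, or a single error covering the whole batch (see
// LoadThunk below for how errors are distributed).
func exampleNewFetchRequestAPIDefLoader(resolve func([]ParamFetchRequestAPIDef) ([]*graphql.FetchRequest, []error)) *FetchRequestAPIDefLoader {
	return NewFetchRequestAPIDefLoader(FetchRequestAPIDefLoaderConfig{
		Fetch:    resolve,              // batched resolver, e.g. one DB query for all collected keys
		Wait:     5 * time.Millisecond, // collect keys for up to 5ms before flushing the batch
		MaxBatch: 100,                  // flush early once 100 distinct keys are queued; 0 = no limit
	})
}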
// LoadThunk returns a function that when called will block waiting for a FetchRequest.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *FetchRequestAPIDefLoader) LoadThunk(key ParamFetchRequestAPIDef) func() (*graphql.FetchRequest, error) {
	l.mu.Lock()
	if it, ok := l.cache[key]; ok {
		l.mu.Unlock()
		return func() (*graphql.FetchRequest, error) {
			return it, nil
		}
	}
	if l.batch == nil {
		l.batch = &fetchRequestAPIDefLoaderBatch{done: make(chan struct{})}
	}
	batch := l.batch
	pos := batch.keyIndex(l, key)
	l.mu.Unlock()

	return func() (*graphql.FetchRequest, error) {
		<-batch.done

		var data *graphql.FetchRequest
		if pos < len(batch.data) {
			data = batch.data[pos]
		}

		var err error
		// it's convenient to be able to return a single error for everything
		if len(batch.error) == 1 {
			err = batch.error[0]
		} else if batch.error != nil {
			err = batch.error[pos]
		}

		if err == nil {
			l.mu.Lock()
			l.unsafeSet(key, data)
			l.mu.Unlock()
		}

		return data, err
	}
}

// LoadAll fetches many keys at once. It will be broken into appropriately sized
// sub-batches depending on how the loader is configured
func (l *FetchRequestAPIDefLoader) LoadAll(keys []ParamFetchRequestAPIDef) ([]*graphql.FetchRequest, []error) {
	results := make([]func() (*graphql.FetchRequest, error), len(keys))

	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}

	fetchRequests := make([]*graphql.FetchRequest, len(keys))
	errors := make([]error, len(keys))
	for i, thunk := range results {
		fetchRequests[i], errors[i] = thunk()
	}
	return fetchRequests, errors
}

// LoadAllThunk returns a function that when called will block waiting for the FetchRequests.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *FetchRequestAPIDefLoader) LoadAllThunk(keys []ParamFetchRequestAPIDef) func() ([]*graphql.FetchRequest, []error) {
	results := make([]func() (*graphql.FetchRequest, error), len(keys))
	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}
	return func() ([]*graphql.FetchRequest, []error) {
		fetchRequests := make([]*graphql.FetchRequest, len(keys))
		errors := make([]error, len(keys))
		for i, thunk := range results {
			fetchRequests[i], errors[i] = thunk()
		}
		return fetchRequests, errors
	}
}
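// Example (not part of the generated file): a sketch of how concurrent
// resolvers share one batch. Every Load issued within the Wait window (and
// under MaxBatch) contributes its key to a single fetch call; duplicate keys
// within a batch share one slot, and previously cached keys return
// immediately without joining a batch. Names here are hypothetical.
func exampleConcurrentLoads(l *FetchRequestAPIDefLoader, keys []ParamFetchRequestAPIDef) {
	var wg sync.WaitGroup
	for _, key := range keys {
		wg.Add(1)
		go func(k ParamFetchRequestAPIDef) {
			defer wg.Done()
			// blocks only until this key's batch has been fetched
			_, _ = l.Load(k)
		}(key)
	}
	wg.Wait()
}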
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *FetchRequestAPIDefLoader) Prime(key ParamFetchRequestAPIDef, value *graphql.FetchRequest) bool {
	l.mu.Lock()
	var found bool
	if _, found = l.cache[key]; !found {
		// make a copy when writing to the cache; it's easy to pass a pointer in from a loop var
		// and end up with the whole cache pointing to the same value.
		cpy := *value
		l.unsafeSet(key, &cpy)
	}
	l.mu.Unlock()
	return !found
}

// Clear the value at key from the cache, if it exists
func (l *FetchRequestAPIDefLoader) Clear(key ParamFetchRequestAPIDef) {
	l.mu.Lock()
	delete(l.cache, key)
	l.mu.Unlock()
}

func (l *FetchRequestAPIDefLoader) unsafeSet(key ParamFetchRequestAPIDef, value *graphql.FetchRequest) {
	if l.cache == nil {
		l.cache = map[ParamFetchRequestAPIDef]*graphql.FetchRequest{}
	}
	l.cache[key] = value
}

// keyIndex will return the location of the key in the batch; if it's not found
// it will add the key to the batch
func (b *fetchRequestAPIDefLoaderBatch) keyIndex(l *FetchRequestAPIDefLoader, key ParamFetchRequestAPIDef) int {
	for i, existingKey := range b.keys {
		if key == existingKey {
			return i
		}
	}

	pos := len(b.keys)
	b.keys = append(b.keys, key)
	if pos == 0 {
		go b.startTimer(l)
	}

	if l.maxBatch != 0 && pos >= l.maxBatch-1 {
		if !b.closing {
			b.closing = true
			l.batch = nil
			go b.end(l)
		}
	}

	return pos
}

func (b *fetchRequestAPIDefLoaderBatch) startTimer(l *FetchRequestAPIDefLoader) {
	time.Sleep(l.wait)
	l.mu.Lock()

	// we must have hit a batch limit and are already finalizing this batch
	if b.closing {
		l.mu.Unlock()
		return
	}

	l.batch = nil
	l.mu.Unlock()

	b.end(l)
}

func (b *fetchRequestAPIDefLoaderBatch) end(l *FetchRequestAPIDefLoader) {
	b.data, b.error = l.fetch(b.keys)
	close(b.done)
}
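// Example (not part of the generated file): a sketch of manual cache control
// with Prime and Clear. A resolver that has just created or updated a
// FetchRequest can seed the per-request cache so later Loads for the same key
// skip the fetch entirely. The function and parameter names are hypothetical.
func examplePrimeAndClear(l *FetchRequestAPIDefLoader, key ParamFetchRequestAPIDef, fr *graphql.FetchRequest) {
	if !l.Prime(key, fr) {
		// Prime never overwrites: if the key was already cached, clear it
		// first and prime again, as the Prime doc comment suggests.
		l.Clear(key)
		l.Prime(key, fr)
	}
}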