// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.

package dataloader

import (
	"sync"
	"time"

	"github.com/kyma-incubator/compass/components/director/pkg/graphql"
)

// RuntimeContextLoaderConfig captures the config to create a new RuntimeContextLoader
type RuntimeContextLoaderConfig struct {
	// Fetch is a method that provides the data for the loader
	Fetch func(keys []ParamRuntimeContext) ([]*graphql.RuntimeContextPage, []error)

	// Wait is how long to wait before sending a batch
	Wait time.Duration

	// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
	MaxBatch int
}

// NewRuntimeContextLoader creates a new RuntimeContextLoader given a fetch, wait, and maxBatch
func NewRuntimeContextLoader(config RuntimeContextLoaderConfig) *RuntimeContextLoader {
	return &RuntimeContextLoader{
		fetch:    config.Fetch,
		wait:     config.Wait,
		maxBatch: config.MaxBatch,
	}
}

// RuntimeContextLoader batches and caches requests
type RuntimeContextLoader struct {
	// this method provides the data for the loader
	fetch func(keys []ParamRuntimeContext) ([]*graphql.RuntimeContextPage, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache; populated on successful loads and via Prime
	cache map[ParamRuntimeContext]*graphql.RuntimeContextPage

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *runtimeContextLoaderBatch

	// mutex to prevent races on cache and batch
	mu sync.Mutex
}

// runtimeContextLoaderBatch accumulates keys until it is dispatched, then holds
// the fetch results for all waiting thunks.
type runtimeContextLoaderBatch struct {
	// keys collected for this batch, deduplicated by keyIndex
	keys []ParamRuntimeContext
	// data holds the fetch results, positionally aligned with keys
	data []*graphql.RuntimeContextPage
	// error holds the fetch errors: either one error for the whole batch
	// or one error per key, positionally aligned with keys
	error []error
	// closing is set once the batch has hit maxBatch and is being finalized,
	// so the wait timer knows not to dispatch it a second time
	closing bool
	// done is closed after fetch completes, releasing all waiting thunks
	done chan struct{}
}

// Load a RuntimeContextPage by key, batching and caching will be applied automatically
func (l *RuntimeContextLoader) Load(key ParamRuntimeContext) (*graphql.RuntimeContextPage, error) {
	return l.LoadThunk(key)()
}

// LoadThunk returns a function that when called will block waiting for a RuntimeContextPage.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *RuntimeContextLoader) LoadThunk(key ParamRuntimeContext) func() (*graphql.RuntimeContextPage, error) {
	l.mu.Lock()
	// cache hit: return the value immediately without touching the batch
	if it, ok := l.cache[key]; ok {
		l.mu.Unlock()
		return func() (*graphql.RuntimeContextPage, error) {
			return it, nil
		}
	}
	if l.batch == nil {
		l.batch = &runtimeContextLoaderBatch{done: make(chan struct{})}
	}
	batch := l.batch
	pos := batch.keyIndex(l, key)
	l.mu.Unlock()

	return func() (*graphql.RuntimeContextPage, error) {
		// block until the batch has been fetched
		<-batch.done

		var data *graphql.RuntimeContextPage
		if pos < len(batch.data) {
			data = batch.data[pos]
		}

		var err error
		// its convenient to be able to return a single error for everything
		if len(batch.error) == 1 {
			err = batch.error[0]
		} else if batch.error != nil {
			err = batch.error[pos]
		}

		// only cache successful results
		if err == nil {
			l.mu.Lock()
			l.unsafeSet(key, data)
			l.mu.Unlock()
		}

		return data, err
	}
}

// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *RuntimeContextLoader) LoadAll(keys []ParamRuntimeContext) ([]*graphql.RuntimeContextPage, []error) {
	results := make([]func() (*graphql.RuntimeContextPage, error), len(keys))

	// enqueue every key first so they can share batches, then resolve
	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}

	runtimeContextPages := make([]*graphql.RuntimeContextPage, len(keys))
	errors := make([]error, len(keys))
	for i, thunk := range results {
		runtimeContextPages[i], errors[i] = thunk()
	}
	return runtimeContextPages, errors
}

// LoadAllThunk returns a function that when called will block waiting for a RuntimeContextPages.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *RuntimeContextLoader) LoadAllThunk(keys []ParamRuntimeContext) func() ([]*graphql.RuntimeContextPage, []error) {
	results := make([]func() (*graphql.RuntimeContextPage, error), len(keys))
	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}
	return func() ([]*graphql.RuntimeContextPage, []error) {
		runtimeContextPages := make([]*graphql.RuntimeContextPage, len(keys))
		errors := make([]error, len(keys))
		for i, thunk := range results {
			runtimeContextPages[i], errors[i] = thunk()
		}
		return runtimeContextPages, errors
	}
}

// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *RuntimeContextLoader) Prime(key ParamRuntimeContext, value *graphql.RuntimeContextPage) bool {
	l.mu.Lock()
	var found bool
	if _, found = l.cache[key]; !found {
		// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
		// and end up with the whole cache pointing to the same value.
		cpy := *value
		l.unsafeSet(key, &cpy)
	}
	l.mu.Unlock()
	return !found
}

// Clear the value at key from the cache, if it exists
func (l *RuntimeContextLoader) Clear(key ParamRuntimeContext) {
	l.mu.Lock()
	delete(l.cache, key)
	l.mu.Unlock()
}

// unsafeSet writes value into the cache without locking; callers must hold l.mu.
func (l *RuntimeContextLoader) unsafeSet(key ParamRuntimeContext, value *graphql.RuntimeContextPage) {
	if l.cache == nil {
		l.cache = map[ParamRuntimeContext]*graphql.RuntimeContextPage{}
	}
	l.cache[key] = value
}

// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *runtimeContextLoaderBatch) keyIndex(l *RuntimeContextLoader, key ParamRuntimeContext) int {
	// duplicate keys within one batch share a single slot
	for i, existingKey := range b.keys {
		if key == existingKey {
			return i
		}
	}

	pos := len(b.keys)
	b.keys = append(b.keys, key)
	// the first key added to a batch starts the wait timer
	if pos == 0 {
		go b.startTimer(l)
	}

	// once the batch is full, detach it from the loader and dispatch it
	// immediately rather than waiting for the timer
	if l.maxBatch != 0 && pos >= l.maxBatch-1 {
		if !b.closing {
			b.closing = true
			l.batch = nil
			go b.end(l)
		}
	}

	return pos
}

// startTimer dispatches the batch after l.wait has elapsed, unless the batch
// was already finalized by hitting the maxBatch limit.
func (b *runtimeContextLoaderBatch) startTimer(l *RuntimeContextLoader) {
	time.Sleep(l.wait)
	l.mu.Lock()

	// we must have hit a batch limit and are already finalizing this batch
	if b.closing {
		l.mu.Unlock()
		return
	}

	l.batch = nil
	l.mu.Unlock()

	b.end(l)
}

// end performs the fetch for the collected keys and releases all waiting thunks.
func (b *runtimeContextLoaderBatch) end(l *RuntimeContextLoader) {
	b.data, b.error = l.fetch(b.keys)
	close(b.done)
}