github.com/hashicorp/terraform-plugin-sdk@v1.17.2/internal/dag/walk.go

package dag

import (
	"errors"
	"log"
	"sync"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
)

// Walker is used to walk every vertex of a graph in parallel.
//
// A vertex will only be walked when the dependencies of that vertex have
// been walked. If two vertices can be walked at the same time, they will be.
//
// Update can be called to update the graph. This can be called even during
// a walk, changing vertices/edges mid-walk. This should be done carefully.
// If a vertex is removed but has already been executed, the result of that
// execution (any error) is still returned by Wait. Changing or re-adding
// a vertex that has already executed has no effect. Changing edges of
// a vertex that has already executed has no effect.
//
// Non-parallelism can be enforced by introducing a lock in your callback
// function. However, the goroutine overhead of a walk will remain.
// Walker will create V*2 goroutines (one walker for each vertex, plus one
// dependency waiter for each vertex). In general this should be of no
// concern unless there are a huge number of vertices.
//
// The walk is depth first by default. This can be changed with the Reverse
// option.
//
// A single walker is only valid for one graph walk. After the walk is complete
// you must construct a new walker to walk again. State for the walk is never
// deleted in case vertices or edges are changed.
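//
// A minimal usage sketch (g is a placeholder *AcyclicGraph built elsewhere;
// the callback body is illustrative only):
//
//	w := &Walker{Callback: func(v Vertex) tfdiags.Diagnostics {
//		log.Printf("visiting %q", VertexName(v))
//		return nil
//	}}
//	w.Update(g)       // populate the walk with vertices and edges
//	diags := w.Wait() // block until all known vertices complete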
type Walker struct {
	// Callback is what is called for each vertex
	Callback WalkFunc

	// Reverse, if true, causes the source of an edge to depend on a target.
	// When false (default), the target depends on the source.
	Reverse bool

	// changeLock must be held to modify any of the fields below. Only Update
	// should modify these fields. Modifying them outside of Update can cause
	// serious problems.
	changeLock sync.Mutex
	vertices   Set
	edges      Set
	vertexMap  map[Vertex]*walkerVertex

	// wait is done when all vertices have executed. It may become "undone"
	// if new vertices are added.
	wait sync.WaitGroup

	// diagsMap contains the diagnostics recorded so far for execution,
	// and upstreamFailed contains all the vertices whose problems were
	// caused by upstream failures, and thus whose diagnostics should be
	// excluded from the final set.
	//
	// Readers and writers of either map must hold diagsLock.
	diagsMap       map[Vertex]tfdiags.Diagnostics
	upstreamFailed map[Vertex]struct{}
	diagsLock      sync.Mutex
}

type walkerVertex struct {
	// These should only be set once on initialization and never written
	// again. They are not protected by a lock because they are write-once.

	// DoneCh is closed when this vertex has completed execution, regardless
	// of success.
	//
	// CancelCh is closed when the vertex should cancel execution. If execution
	// is already complete (DoneCh is closed), this has no effect. Otherwise,
	// execution is cancelled as quickly as possible.
	DoneCh   chan struct{}
	CancelCh chan struct{}

	// Dependency information. Any changes to any of these fields require
	// holding DepsLock.
	//
	// DepsCh is sent a single value that denotes whether the upstream deps
	// were successful (no errors). Any value sent means that the upstream
	// dependencies are complete. No other values will ever be sent again.
	//
	// DepsUpdateCh is closed when there is a new DepsCh set.
	DepsCh       chan bool
	DepsUpdateCh chan struct{}
	DepsLock     sync.Mutex

	// Below is not safe to read/write in parallel. This behavior is
	// enforced by changes only happening in Update. Nothing else should
	// ever modify these.
	deps         map[Vertex]chan struct{}
	depsCancelCh chan struct{}
}
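
// The coordination between Update and walkVertex is channel-based: when a
// vertex's dependency set changes, Update installs a fresh DepsCh and closes
// the previous DepsUpdateCh, which wakes the vertex's goroutine so it can
// reload the new channel. A rough sketch of that step (a simplification of
// the real code in Update below, with newDepsCh standing in for the freshly
// made channel):
//
//	info.DepsLock.Lock()
//	if info.DepsUpdateCh != nil {
//		close(info.DepsUpdateCh) // wake a walker blocked in its select loop
//	}
//	info.DepsCh = newDepsCh // the walker picks this up after waking
//	info.DepsUpdateCh = make(chan struct{})
//	info.DepsLock.Unlock()
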
// Wait waits for the completion of the walk and returns diagnostics describing
// any problems that arose. Update should be called to populate the walk with
// vertices and edges prior to calling this.
//
// Wait will return as soon as all currently known vertices are complete.
// If you plan on calling Update with more vertices in the future, you
// should not call Wait until after this is done.
func (w *Walker) Wait() tfdiags.Diagnostics {
	// Wait for completion
	w.wait.Wait()

	var diags tfdiags.Diagnostics
	w.diagsLock.Lock()
	for v, vDiags := range w.diagsMap {
		if _, upstream := w.upstreamFailed[v]; upstream {
			// Ignore diagnostics for nodes that had failed upstreams, since
			// the downstream diagnostics are likely to be redundant.
			continue
		}
		diags = diags.Append(vDiags)
	}
	w.diagsLock.Unlock()

	return diags
}

// Update updates the currently executing walk with the given graph.
// This will perform a diff of the vertices and edges and update the walker.
// Already completed vertices remain completed (including any errors during
// their execution).
//
// This returns immediately once the walker is updated; it does not wait
// for completion of the walk.
//
// Multiple Updates can be called in parallel. Update can be called at any
// time during a walk.
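//
// A minimal mid-walk update sketch (g1 and g2 are placeholder graphs;
// illustrative only, not an excerpt from a caller):
//
//	w.Update(g1)      // begin walking g1
//	w.Update(g2)      // mid-walk diff: new vertices start, removed ones are cancelled
//	diags := w.Wait() // wait for the resulting vertex set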
func (w *Walker) Update(g *AcyclicGraph) {
	log.Print("[TRACE] dag/walk: updating graph")
	var v, e *Set
	if g != nil {
		v, e = g.vertices, g.edges
	}

	// Grab the change lock so no more updates happen but also so that
	// no new vertices are executed during this time since we may be
	// removing them.
	w.changeLock.Lock()
	defer w.changeLock.Unlock()

	// Initialize fields
	if w.vertexMap == nil {
		w.vertexMap = make(map[Vertex]*walkerVertex)
	}

	// Calculate all our sets
	newEdges := e.Difference(&w.edges)
	oldEdges := w.edges.Difference(e)
	newVerts := v.Difference(&w.vertices)
	oldVerts := w.vertices.Difference(v)

	// Add the new vertices
	for _, raw := range newVerts.List() {
		v := raw.(Vertex)

		// Add to the waitgroup so our walk is not done until everything finishes
		w.wait.Add(1)

		// Add to our own set so we know about it already
		log.Printf("[TRACE] dag/walk: added new vertex: %q", VertexName(v))
		w.vertices.Add(raw)

		// Initialize the vertex info
		info := &walkerVertex{
			DoneCh:   make(chan struct{}),
			CancelCh: make(chan struct{}),
			deps:     make(map[Vertex]chan struct{}),
		}

		// Add it to the map and kick off the walk
		w.vertexMap[v] = info
	}

	// Remove the old vertices
	for _, raw := range oldVerts.List() {
		v := raw.(Vertex)

		// Get the vertex info so we can cancel it
		info, ok := w.vertexMap[v]
		if !ok {
			// This vertex for some reason was never in our map. This
			// shouldn't be possible.
			continue
		}

		// Cancel the vertex
		close(info.CancelCh)

		// Delete it out of the map
		delete(w.vertexMap, v)

		log.Printf("[TRACE] dag/walk: removed vertex: %q", VertexName(v))
		w.vertices.Delete(raw)
	}

	// Add the new edges
	var changedDeps Set
	for _, raw := range newEdges.List() {
		edge := raw.(Edge)
		waiter, dep := w.edgeParts(edge)

		// Get the info for the waiter
		waiterInfo, ok := w.vertexMap[waiter]
		if !ok {
			// Vertex doesn't exist... shouldn't be possible but ignore.
			continue
		}

		// Get the info for the dep
		depInfo, ok := w.vertexMap[dep]
		if !ok {
			// Vertex doesn't exist... shouldn't be possible but ignore.
			continue
		}

		// Add the dependency to our waiter
		waiterInfo.deps[dep] = depInfo.DoneCh

		// Record that the deps changed for this waiter
		changedDeps.Add(waiter)

		log.Printf(
			"[TRACE] dag/walk: added edge: %q waiting on %q",
			VertexName(waiter), VertexName(dep))
		w.edges.Add(raw)
	}

	// Process removed edges
	for _, raw := range oldEdges.List() {
		edge := raw.(Edge)
		waiter, dep := w.edgeParts(edge)

		// Get the info for the waiter
		waiterInfo, ok := w.vertexMap[waiter]
		if !ok {
			// Vertex doesn't exist... shouldn't be possible but ignore.
			continue
		}

		// Delete the dependency from the waiter
		delete(waiterInfo.deps, dep)

		// Record that the deps changed for this waiter
		changedDeps.Add(waiter)

		log.Printf(
			"[TRACE] dag/walk: removed edge: %q waiting on %q",
			VertexName(waiter), VertexName(dep))
		w.edges.Delete(raw)
	}

	// For each vertex with changed dependencies, we need to kick off
	// a new waiter and notify the vertex of the changes.
	for _, raw := range changedDeps.List() {
		v := raw.(Vertex)
		info, ok := w.vertexMap[v]
		if !ok {
			// Vertex doesn't exist... shouldn't be possible but ignore.
			continue
		}

		// Create a new done channel
		doneCh := make(chan bool, 1)

		// Create the channel we close for cancellation
		cancelCh := make(chan struct{})

		// Build a new deps copy
		deps := make(map[Vertex]<-chan struct{})
		for k, v := range info.deps {
			deps[k] = v
		}

		// Update the update channel
		info.DepsLock.Lock()
		if info.DepsUpdateCh != nil {
			close(info.DepsUpdateCh)
		}
		info.DepsCh = doneCh
		info.DepsUpdateCh = make(chan struct{})
		info.DepsLock.Unlock()

		// Cancel the older waiter
		if info.depsCancelCh != nil {
			close(info.depsCancelCh)
		}
		info.depsCancelCh = cancelCh

		log.Printf(
			"[TRACE] dag/walk: dependencies changed for %q, sending new deps",
			VertexName(v))

		// Start the waiter
		go w.waitDeps(v, deps, doneCh, cancelCh)
	}

	// Start all the new vertices. We do this at the end so that all
	// the edge waiters and changes are set up above.
	for _, raw := range newVerts.List() {
		v := raw.(Vertex)
		go w.walkVertex(v, w.vertexMap[v])
	}
}

// edgeParts returns the waiter and the dependency, in that order.
// The waiter is waiting on the dependency.
func (w *Walker) edgeParts(e Edge) (Vertex, Vertex) {
	if w.Reverse {
		return e.Source(), e.Target()
	}

	return e.Target(), e.Source()
}

// walkVertex walks a single vertex, waiting for any dependencies before
// executing the callback.
func (w *Walker) walkVertex(v Vertex, info *walkerVertex) {
	// When we're done executing, lower the waitgroup count
	defer w.wait.Done()

	// When we're done, always close our done channel
	defer close(info.DoneCh)

	// Wait for our dependencies. We create a [closed] deps channel so
	// that we can immediately fall through to load our actual DepsCh.
	var depsSuccess bool
	var depsUpdateCh chan struct{}
	depsCh := make(chan bool, 1)
	depsCh <- true
	close(depsCh)
	for {
		select {
		case <-info.CancelCh:
			// Cancel
			return

		case depsSuccess = <-depsCh:
			// Deps complete! Mark as nil to trigger completion handling.
			depsCh = nil

		case <-depsUpdateCh:
			// New deps, reloop
		}

		// Check if we have updated dependencies. This can happen if the
		// dependencies were satisfied exactly prior to an Update occurring.
		// In that case, we'd like to take into account new dependencies
		// if possible.
		info.DepsLock.Lock()
		if info.DepsCh != nil {
			depsCh = info.DepsCh
			info.DepsCh = nil
		}
		if info.DepsUpdateCh != nil {
			depsUpdateCh = info.DepsUpdateCh
		}
		info.DepsLock.Unlock()

		// If we still have no deps channel set, then we're done!
		if depsCh == nil {
			break
		}
	}

	// If we passed dependencies, we just want to check once more that
	// we're not cancelled, since this can happen just as dependencies pass.
	select {
	case <-info.CancelCh:
		// Cancelled during an update while dependencies completed.
		return
	default:
	}

	// Run our callback or note that our upstream failed
	var diags tfdiags.Diagnostics
	var upstreamFailed bool
	if depsSuccess {
		log.Printf("[TRACE] dag/walk: visiting %q", VertexName(v))
		diags = w.Callback(v)
	} else {
		log.Printf("[TRACE] dag/walk: upstream of %q errored, so skipping", VertexName(v))
		// This won't be displayed to the user because we'll set upstreamFailed,
		// but we need to ensure there's at least one error in here so that
		// the failures will cascade downstream.
		diags = diags.Append(errors.New("upstream dependencies failed"))
		upstreamFailed = true
	}

	// Record the result (we must do this after execution because we mustn't
	// hold diagsLock while visiting a vertex.)
	w.diagsLock.Lock()
	if w.diagsMap == nil {
		w.diagsMap = make(map[Vertex]tfdiags.Diagnostics)
	}
	w.diagsMap[v] = diags
	if w.upstreamFailed == nil {
		w.upstreamFailed = make(map[Vertex]struct{})
	}
	if upstreamFailed {
		w.upstreamFailed[v] = struct{}{}
	}
	w.diagsLock.Unlock()
}
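
// waitDeps waits for the given dependencies of vertex v to complete, then
// sends exactly one value on doneCh: true if every dependency finished
// without errors, false if any dependency failed or the wait was cancelled
// via cancelCh. While a dependency is still pending it logs a trace message
// every five seconds so stalled walks are visible.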
func (w *Walker) waitDeps(
	v Vertex,
	deps map[Vertex]<-chan struct{},
	doneCh chan<- bool,
	cancelCh <-chan struct{}) {

	// For each dependency given to us, wait for it to complete
	for dep, depCh := range deps {
	DepSatisfied:
		for {
			select {
			case <-depCh:
				// Dependency satisfied!
				break DepSatisfied

			case <-cancelCh:
				// Wait cancelled. Note that we didn't satisfy dependencies
				// so that anything waiting on us also doesn't run.
				doneCh <- false
				return

			case <-time.After(time.Second * 5):
				log.Printf("[TRACE] dag/walk: vertex %q is waiting for %q",
					VertexName(v), VertexName(dep))
			}
		}
	}

	// Dependencies satisfied! We need to check if any errored
	w.diagsLock.Lock()
	defer w.diagsLock.Unlock()
	for dep := range deps {
		if w.diagsMap[dep].HasErrors() {
			// One of our dependencies failed, so return false
			doneCh <- false
			return
		}
	}

	// All dependencies satisfied and successful
	doneCh <- true
}