github.com/nektos/act@v0.2.63-0.20240520024548-8acde99bfa9c/pkg/model/planner.go

package model

import (
	"fmt"
	"io"
	"io/fs"
	"math"
	"os"
	"path/filepath"
	"regexp"
	"sort"

	log "github.com/sirupsen/logrus"
)

// WorkflowPlanner contains methods for creating plans
type WorkflowPlanner interface {
	PlanEvent(eventName string) (*Plan, error)
	PlanJob(jobName string) (*Plan, error)
	PlanAll() (*Plan, error)
	GetEvents() []string
}

// Plan contains a list of stages to run in series
type Plan struct {
	Stages []*Stage
}

// Stage contains a list of runs to execute in parallel
type Stage struct {
	Runs []*Run
}

// Run represents a job from a workflow that needs to be run
type Run struct {
	Workflow *Workflow
	JobID    string
}

func (r *Run) String() string {
	jobName := r.Job().Name
	if jobName == "" {
		jobName = r.JobID
	}
	return jobName
}

// Job returns the job for this Run
func (r *Run) Job() *Job {
	return r.Workflow.GetJob(r.JobID)
}

type WorkflowFiles struct {
	workflowDirEntry os.DirEntry
	dirPath          string
}

// NewWorkflowPlanner will load a specific workflow, all workflows from a directory or all workflows from a directory and its subdirectories
//
//nolint:gocyclo
func NewWorkflowPlanner(path string, noWorkflowRecurse bool) (WorkflowPlanner, error) {
	path, err := filepath.Abs(path)
	if err != nil {
		return nil, err
	}

	fi, err := os.Stat(path)
	if err != nil {
		return nil, err
	}

	var workflows []WorkflowFiles

	if fi.IsDir() {
		log.Debugf("Loading workflows from '%s'", path)
		if noWorkflowRecurse {
			files, err := os.ReadDir(path)
			if err != nil {
				return nil, err
			}

			for _, v := range files {
				workflows = append(workflows, WorkflowFiles{
					dirPath:          path,
					workflowDirEntry: v,
				})
			}
		} else {
			log.Debug("Loading workflows recursively")
			if err := filepath.Walk(path,
				func(p string, f os.FileInfo, err error) error {
					if err != nil {
						return err
					}

					if !f.IsDir() {
						log.Debugf("Found workflow '%s' in '%s'", f.Name(), p)
						workflows = append(workflows, WorkflowFiles{
							dirPath:          filepath.Dir(p),
							workflowDirEntry: fs.FileInfoToDirEntry(f),
						})
					}

					return nil
				}); err != nil {
				return nil, err
			}
		}
	} else {
		log.Debugf("Loading workflow '%s'", path)
		dirname := filepath.Dir(path)

		workflows = append(workflows, WorkflowFiles{
			dirPath:          dirname,
			workflowDirEntry: fs.FileInfoToDirEntry(fi),
		})
	}
	if err != nil {
		return nil, err
	}

	wp := new(workflowPlanner)
	for _, wf := range workflows {
		ext := filepath.Ext(wf.workflowDirEntry.Name())
		if ext == ".yml" || ext == ".yaml" {
			f, err := os.Open(filepath.Join(wf.dirPath, wf.workflowDirEntry.Name()))
			if err != nil {
				return nil, err
			}

			log.Debugf("Reading workflow '%s'", f.Name())
			workflow, err := ReadWorkflow(f)
			if err != nil {
				_ = f.Close()
				if err == io.EOF {
					return nil, fmt.Errorf("unable to read workflow '%s': file is empty: %w", wf.workflowDirEntry.Name(), err)
				}
				return nil, fmt.Errorf("workflow is not valid. '%s': %w", wf.workflowDirEntry.Name(), err)
			}
			_, err = f.Seek(0, 0)
			if err != nil {
				_ = f.Close()
				return nil, fmt.Errorf("error occurring when resetting io pointer in '%s': %w", wf.workflowDirEntry.Name(), err)
			}

			workflow.File = wf.workflowDirEntry.Name()
			if workflow.Name == "" {
				workflow.Name = wf.workflowDirEntry.Name()
			}

			err = validateJobName(workflow)
			if err != nil {
				_ = f.Close()
				return nil, err
			}

			wp.workflows = append(wp.workflows, workflow)
			_ = f.Close()
		}
	}

	return wp, nil
}

func NewSingleWorkflowPlanner(name string, f io.Reader) (WorkflowPlanner, error) {
	wp := new(workflowPlanner)

	log.Debugf("Reading workflow %s", name)
	workflow, err := ReadWorkflow(f)
	if err != nil {
		if err == io.EOF {
			return nil, fmt.Errorf("unable to read workflow '%s': file is empty: %w", name, err)
		}
		return nil, fmt.Errorf("workflow is not valid. '%s': %w", name, err)
	}
	workflow.File = name
	if workflow.Name == "" {
		workflow.Name = name
	}

	err = validateJobName(workflow)
	if err != nil {
		return nil, err
	}

	wp.workflows = append(wp.workflows, workflow)

	return wp, nil
}

func validateJobName(workflow *Workflow) error {
	jobNameRegex := regexp.MustCompile(`^([[:alpha:]_][[:alnum:]_\-]*)$`)
	for k := range workflow.Jobs {
		if ok := jobNameRegex.MatchString(k); !ok {
			return fmt.Errorf("workflow is not valid. '%s': Job name '%s' is invalid. Names must start with a letter or '_' and contain only alphanumeric characters, '-', or '_'", workflow.Name, k)
		}
	}
	return nil
}

type workflowPlanner struct {
	workflows []*Workflow
}

// PlanEvent builds a new list of runs to execute in parallel for an event name
func (wp *workflowPlanner) PlanEvent(eventName string) (*Plan, error) {
	plan := new(Plan)
	if len(wp.workflows) == 0 {
		log.Debug("no workflows found by planner")
		return plan, nil
	}
	var lastErr error

	for _, w := range wp.workflows {
		events := w.On()
		if len(events) == 0 {
			log.Debugf("no events found for workflow: %s", w.File)
			continue
		}

		for _, e := range events {
			if e == eventName {
				stages, err := createStages(w, w.GetJobIDs()...)
				if err != nil {
					log.Warn(err)
					lastErr = err
				} else {
					plan.mergeStages(stages)
				}
			}
		}
	}
	return plan, lastErr
}

// PlanJob builds a new list of runs to execute in parallel for a job name
func (wp *workflowPlanner) PlanJob(jobName string) (*Plan, error) {
	plan := new(Plan)
	if len(wp.workflows) == 0 {
		log.Debugf("no jobs found for workflow: %s", jobName)
	}
	var lastErr error

	for _, w := range wp.workflows {
		stages, err := createStages(w, jobName)
		if err != nil {
			log.Warn(err)
			lastErr = err
		} else {
			plan.mergeStages(stages)
		}
	}
	return plan, lastErr
}

// PlanAll builds a new plan to execute all jobs from all workflows in parallel
func (wp *workflowPlanner) PlanAll() (*Plan, error) {
	plan := new(Plan)
	if len(wp.workflows) == 0 {
		log.Debug("no workflows found by planner")
		return plan, nil
	}
	var lastErr error

	for _, w := range wp.workflows {
		stages, err := createStages(w, w.GetJobIDs()...)
		if err != nil {
			log.Warn(err)
			lastErr = err
		} else {
			plan.mergeStages(stages)
		}
	}

	return plan, lastErr
}

// GetEvents gets all the events in the workflow files
func (wp *workflowPlanner) GetEvents() []string {
	events := make([]string, 0)
	for _, w := range wp.workflows {
		found := false
		for _, e := range events {
			for _, we := range w.On() {
				if e == we {
					found = true
					break
				}
			}
			if found {
				break
			}
		}

		if !found {
			events = append(events, w.On()...)
		}
	}

	// sort the list alphabetically
	sort.Slice(events, func(i, j int) bool {
		return events[i] < events[j]
	})

	return events
}

// MaxRunNameLen determines the max name length of all jobs
func (p *Plan) MaxRunNameLen() int {
	maxRunNameLen := 0
	for _, stage := range p.Stages {
		for _, run := range stage.Runs {
			runNameLen := len(run.String())
			if runNameLen > maxRunNameLen {
				maxRunNameLen = runNameLen
			}
		}
	}
	return maxRunNameLen
}

// GetJobIDs will get all the job IDs in the stage
func (s *Stage) GetJobIDs() []string {
	names := make([]string, 0)
	for _, r := range s.Runs {
		names = append(names, r.JobID)
	}
	return names
}

// Merge stages with existing stages in plan
func (p *Plan) mergeStages(stages []*Stage) {
	newStages := make([]*Stage, int(math.Max(float64(len(p.Stages)), float64(len(stages)))))
	for i := 0; i < len(newStages); i++ {
		newStages[i] = new(Stage)
		if i >= len(p.Stages) {
			newStages[i].Runs = append(newStages[i].Runs, stages[i].Runs...)
		} else if i >= len(stages) {
			newStages[i].Runs = append(newStages[i].Runs, p.Stages[i].Runs...)
		} else {
			newStages[i].Runs = append(newStages[i].Runs, p.Stages[i].Runs...)
			newStages[i].Runs = append(newStages[i].Runs, stages[i].Runs...)
		}
	}
	p.Stages = newStages
}

func createStages(w *Workflow, jobIDs ...string) ([]*Stage, error) {
	// first, build a list of all the necessary jobs to run, and their dependencies
	jobDependencies := make(map[string][]string)
	for len(jobIDs) > 0 {
		newJobIDs := make([]string, 0)
		for _, jID := range jobIDs {
			// make sure we haven't visited this job yet
			if _, ok := jobDependencies[jID]; !ok {
				if job := w.GetJob(jID); job != nil {
					jobDependencies[jID] = job.Needs()
					newJobIDs = append(newJobIDs, job.Needs()...)
				}
			}
		}
		jobIDs = newJobIDs
	}

	// next, build an execution graph
	stages := make([]*Stage, 0)
	for len(jobDependencies) > 0 {
		stage := new(Stage)
		for jID, jDeps := range jobDependencies {
			// make sure all deps are in the graph already
			if listInStages(jDeps, stages...) {
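				// Editor's note: every dependency of this job already appears
				// in an earlier stage, so the job can be scheduled in the
				// current stage and removed from the pending set.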
				stage.Runs = append(stage.Runs, &Run{
					Workflow: w,
					JobID:    jID,
				})
				delete(jobDependencies, jID)
			}
		}
		if len(stage.Runs) == 0 {
			return nil, fmt.Errorf("unable to build dependency graph for %s (%s)", w.Name, w.File)
		}
		stages = append(stages, stage)
	}

	return stages, nil
}

// return true iff all strings in srcList exist in at least one of the stages
func listInStages(srcList []string, stages ...*Stage) bool {
	for _, src := range srcList {
		found := false
		for _, stage := range stages {
			for _, search := range stage.GetJobIDs() {
				if src == search {
					found = true
				}
			}
		}
		if !found {
			return false
		}
	}
	return true
}
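// Usage sketch (editor's addition, not part of the upstream file): a minimal,
// hedged example of driving the planner from a caller. It uses only the
// exported API defined above (NewSingleWorkflowPlanner, PlanEvent, and
// Stage.GetJobIDs); the workflow YAML, the file name "ci.yml", and the event
// name "push" are illustrative assumptions, not values from this package. A
// caller would also need to import "fmt" and "strings".
//
//	const exampleYAML = `
//	name: ci
//	on: push
//	jobs:
//	  build:
//	    runs-on: ubuntu-latest
//	    steps:
//	      - run: echo build
//	  test:
//	    needs: build
//	    runs-on: ubuntu-latest
//	    steps:
//	      - run: echo test
//	`
//
//	planner, err := NewSingleWorkflowPlanner("ci.yml", strings.NewReader(exampleYAML))
//	if err != nil {
//		log.Fatal(err)
//	}
//	plan, err := planner.PlanEvent("push")
//	if err != nil {
//		log.Fatal(err)
//	}
//	// Stages run in series; runs within a stage run in parallel. Here "build"
//	// lands in stage 0 and "test" (which needs build) lands in stage 1.
//	for i, stage := range plan.Stages {
//		fmt.Printf("stage %d: %v\n", i, stage.GetJobIDs())
//	}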