github.com/westcoastroms/westcoastroms-build@v0.0.0-20190928114312-2350e5a73030/build/soong/cmd/multiproduct_kati/main.go

// Copyright 2017 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"context"
	"flag"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"syscall"
	"time"

	"android/soong/ui/build"
	"android/soong/ui/logger"
	"android/soong/ui/tracer"
	"android/soong/zip"
)

// We default to the number of CPUs / 4, which seems to be the sweet spot for my
// system. I suspect this is mostly due to memory or disk bandwidth though, and
// may depend on the size of the source tree, so this probably isn't a great
// default.
func detectNumJobs() int {
	if runtime.NumCPU() < 4 {
		return 1
	}
	return runtime.NumCPU() / 4
}

var numJobs = flag.Int("j", detectNumJobs(), "number of parallel kati jobs")

var keepArtifacts = flag.Bool("keep", false, "keep archives of artifacts")

var outDir = flag.String("out", "", "path to store output directories (defaults to tmpdir under $OUT when empty)")
var alternateResultDir = flag.Bool("dist", false, "write select results to $DIST_DIR (or <out>/dist when empty)")

var onlyConfig = flag.Bool("only-config", false, "Only run product config (not Soong or Kati)")
var onlySoong = flag.Bool("only-soong", false, "Only run product config and Soong (not Kati)")

var buildVariant = flag.String("variant", "eng", "build variant to use")

var skipProducts = flag.String("skip-products", "", "comma-separated list of products to skip (known failures, etc)")
var includeProducts = flag.String("products", "", "comma-separated list of products to build")

const errorLeadingLines = 20
const errorTrailingLines = 20

type Product struct {
	ctx     build.Context
	config  build.Config
	logFile string
}

type Status struct {
	cur    int
	total  int
	failed int

	ctx           build.Context
	haveBlankLine bool
	smartTerminal bool

	lock sync.Mutex
}

func NewStatus(ctx build.Context) *Status {
	return &Status{
		ctx:           ctx,
		haveBlankLine: true,
		smartTerminal: ctx.IsTerminal(),
	}
}

func (s *Status) SetTotal(total int) {
	s.total = total
}

func (s *Status) Fail(product string, err error, logFile string) {
	s.Finish(product)

	s.lock.Lock()
	defer s.lock.Unlock()

	if s.smartTerminal && !s.haveBlankLine {
		fmt.Fprintln(s.ctx.Stdout())
		s.haveBlankLine = true
	}

	s.failed++
	fmt.Fprintln(s.ctx.Stderr(), "FAILED:", product)
	s.ctx.Verboseln("FAILED:", product)

	if logFile != "" {
		data, err := ioutil.ReadFile(logFile)
		if err == nil {
			lines := strings.Split(strings.TrimSpace(string(data)), "\n")
			if len(lines) > errorLeadingLines+errorTrailingLines+1 {
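				// Added comment: the log is long enough to truncate, so keep
				// the first errorLeadingLines and last errorTrailingLines
				// lines and collapse the middle into a "skipping" marker.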
				lines[errorLeadingLines] = fmt.Sprintf("... skipping %d lines ...",
					len(lines)-errorLeadingLines-errorTrailingLines)

				lines = append(lines[:errorLeadingLines+1],
					lines[len(lines)-errorTrailingLines:]...)
			}
			for _, line := range lines {
				fmt.Fprintln(s.ctx.Stderr(), "> ", line)
				s.ctx.Verboseln(line)
			}
		}
	}

	s.ctx.Print(err)
}

func (s *Status) Finish(product string) {
	s.lock.Lock()
	defer s.lock.Unlock()

	s.cur++
	line := fmt.Sprintf("[%d/%d] %s", s.cur, s.total, product)

	if s.smartTerminal {
		if max, ok := s.ctx.TermWidth(); ok {
			if len(line) > max {
				line = line[:max]
			}
		}

		fmt.Fprint(s.ctx.Stdout(), "\r", line, "\x1b[K")
		s.haveBlankLine = false
	} else {
		s.ctx.Println(line)
	}
}

func (s *Status) Finished() int {
	s.lock.Lock()
	defer s.lock.Unlock()

	if !s.haveBlankLine {
		fmt.Fprintln(s.ctx.Stdout())
		s.haveBlankLine = true
	}
	return s.failed
}

// TODO(b/70370883): This tool uses a lot of open files -- over the default
// soft limit of 1024 on some systems. So bump up to the hard limit until I fix
// the algorithm.
func setMaxFiles(log logger.Logger) {
	var limits syscall.Rlimit

	err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limits)
	if err != nil {
		log.Println("Failed to get file limit:", err)
		return
	}

	log.Verbosef("Current file limits: %d soft, %d hard", limits.Cur, limits.Max)
	if limits.Cur == limits.Max {
		return
	}

	limits.Cur = limits.Max
	err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limits)
	if err != nil {
		log.Println("Failed to increase file limit:", err)
	}
}

func inList(str string, list []string) bool {
	for _, other := range list {
		if str == other {
			return true
		}
	}
	return false
}

func main() {
	log := logger.New(os.Stderr)
	defer log.Cleanup()

	flag.Parse()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	trace := tracer.New(log)
	defer trace.Close()

	build.SetupSignals(log, cancel, func() {
		trace.Close()
		log.Cleanup()
	})

	buildCtx := build.Context{&build.ContextImpl{
		Context:        ctx,
		Logger:         log,
		Tracer:         trace,
		StdioInterface: build.StdioImpl{},
	}}

	status := NewStatus(buildCtx)

	config := build.NewConfig(buildCtx)
	if *outDir == "" {
		name := "multiproduct-" + time.Now().Format("20060102150405")

		*outDir = filepath.Join(config.OutDir(), name)

		// Ensure the empty files exist in the output directory
		// containing our output directory too. This is mostly for
		// safety, but also triggers the ninja_build file so that our
		// build servers know that they can parse the output as if it
		// was ninja output.
		build.SetupOutDir(buildCtx, config)

		if err := os.MkdirAll(*outDir, 0777); err != nil {
			log.Fatalf("Failed to create tempdir: %v", err)
		}
	}
	config.Environment().Set("OUT_DIR", *outDir)
	log.Println("Output directory:", *outDir)

	logsDir := filepath.Join(config.OutDir(), "logs")
	os.MkdirAll(logsDir, 0777)

	build.SetupOutDir(buildCtx, config)
	if *alternateResultDir {
		distLogsDir := filepath.Join(config.DistDir(), "logs")
		os.MkdirAll(distLogsDir, 0777)
		log.SetOutput(filepath.Join(distLogsDir, "soong.log"))
		trace.SetOutput(filepath.Join(distLogsDir, "build.trace"))
	} else {
		log.SetOutput(filepath.Join(config.OutDir(), "soong.log"))
		trace.SetOutput(filepath.Join(config.OutDir(), "build.trace"))
	}

	setMaxFiles(log)

	vars, err := build.DumpMakeVars(buildCtx, config, nil, []string{"all_named_products"})
	if err != nil {
		log.Fatal(err)
	}
	var productsList []string
	allProducts := strings.Fields(vars["all_named_products"])

	if *includeProducts != "" {
		missingProducts := []string{}
		for _, product := range strings.Split(*includeProducts, ",") {
			if inList(product, allProducts) {
				productsList = append(productsList, product)
			} else {
				missingProducts = append(missingProducts, product)
			}
		}
		if len(missingProducts) > 0 {
			log.Fatalf("Products don't exist: %s\n", missingProducts)
		}
	} else {
		productsList = allProducts
	}

	products := make([]string, 0, len(productsList))
	skipList := strings.Split(*skipProducts, ",")
	skipProduct := func(p string) bool {
		for _, s := range skipList {
			if p == s {
				return true
			}
		}
		return false
	}
	for _, product := range productsList {
		if !skipProduct(product) {
			products = append(products, product)
		} else {
			log.Verbose("Skipping: ", product)
		}
	}

	log.Verbose("Got product list: ", products)

	status.SetTotal(len(products))

	var wg sync.WaitGroup
	productConfigs := make(chan Product, len(products))

	finder := build.NewSourceFinder(buildCtx, config)
	defer finder.Shutdown()

	// Run the product config for every product in parallel
	for _, product := range products {
		wg.Add(1)
		go func(product string) {
			var stdLog string

			defer wg.Done()
			defer logger.Recover(func(err error) {
				status.Fail(product, err, stdLog)
			})

			productOutDir := filepath.Join(config.OutDir(), product)
			productLogDir := filepath.Join(logsDir, product)

			if err := os.MkdirAll(productOutDir, 0777); err != nil {
				log.Fatalf("Error creating out directory: %v", err)
			}
			if err := os.MkdirAll(productLogDir, 0777); err != nil {
				log.Fatalf("Error creating log directory: %v", err)
			}

			stdLog = filepath.Join(productLogDir, "std.log")
			f, err := os.Create(stdLog)
			if err != nil {
				log.Fatalf("Error creating std.log: %v", err)
			}

			productLog := logger.New(f)
			productLog.SetOutput(filepath.Join(productLogDir, "soong.log"))

			productCtx := build.Context{&build.ContextImpl{
				Context:        ctx,
				Logger:         productLog,
				Tracer:         trace,
				StdioInterface: build.NewCustomStdio(nil, f, f),
				Thread:         trace.NewThread(product),
			}}

			productConfig := build.NewConfig(productCtx)
			productConfig.Environment().Set("OUT_DIR", productOutDir)
			build.FindSources(productCtx, productConfig, finder)
			productConfig.Lunch(productCtx, product, *buildVariant)
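			// Added comment: only the product config step runs in this
			// goroutine; Soong and Kati for this product run later on one of
			// the *numJobs worker goroutines below.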

			build.Build(productCtx, productConfig, build.BuildProductConfig)
			productConfigs <- Product{productCtx, productConfig, stdLog}
		}(product)
	}
	go func() {
		defer close(productConfigs)
		wg.Wait()
	}()

	var wg2 sync.WaitGroup
	// Then run up to numJobs worth of Soong and Kati
	for i := 0; i < *numJobs; i++ {
		wg2.Add(1)
		go func() {
			defer wg2.Done()
			for product := range productConfigs {
				func() {
					defer logger.Recover(func(err error) {
						status.Fail(product.config.TargetProduct(), err, product.logFile)
					})

					defer func() {
						if *keepArtifacts {
							args := zip.ZipArgs{
								FileArgs: []zip.FileArg{
									{
										GlobDir:             product.config.OutDir(),
										SourcePrefixToStrip: product.config.OutDir(),
									},
								},
								OutputFilePath:   filepath.Join(config.OutDir(), product.config.TargetProduct()+".zip"),
								NumParallelJobs:  runtime.NumCPU(),
								CompressionLevel: 5,
							}
							if err := zip.Run(args); err != nil {
								log.Fatalf("Error zipping artifacts: %v", err)
							}
						}
						os.RemoveAll(product.config.OutDir())
					}()

					buildWhat := 0
					if !*onlyConfig {
						buildWhat |= build.BuildSoong
						if !*onlySoong {
							buildWhat |= build.BuildKati
						}
					}
					build.Build(product.ctx, product.config, buildWhat)
					status.Finish(product.config.TargetProduct())
				}()
			}
		}()
	}
	wg2.Wait()

	if *alternateResultDir {
		args := zip.ZipArgs{
			FileArgs: []zip.FileArg{
				{GlobDir: logsDir, SourcePrefixToStrip: logsDir},
			},
			OutputFilePath:   filepath.Join(config.DistDir(), "logs.zip"),
			NumParallelJobs:  runtime.NumCPU(),
			CompressionLevel: 5,
		}
		if err := zip.Run(args); err != nil {
			log.Fatalf("Error zipping logs: %v", err)
		}
	}

	if count := status.Finished(); count > 0 {
		log.Fatalln(count, "products failed")
	}
}
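
// Added note: a sketch of a possible invocation, using only the flags defined
// above. The product names are placeholders, not values verified against this
// source tree; omitting -products builds every product reported by the
// all_named_products make variable.
//
//	multiproduct_kati -j 8 -variant userdebug -products aosp_arm,aosp_arm64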