github.com/tiagovtristao/plz@v13.4.0+incompatible/src/core/utils.go

package core

import (
	"bytes"
	"context"
	"crypto/rand"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/thought-machine/please/src/cli"
	"github.com/thought-machine/please/src/fs"
)

// RepoRoot is the root of the Please repository
var RepoRoot string

// initialWorkingDir is the directory we began in. Early on we chdir() to the repo root but for
// some things we need to remember this.
var initialWorkingDir string

// initialPackage is the initial subdir of the working directory, i.e. what package did we start in.
// This is similar but not identical to initialWorkingDir.
var initialPackage string

// usingBazelWorkspace is true if we detected a Bazel WORKSPACE file to find our repo root.
var usingBazelWorkspace bool

// DirPermissions are the default permission bits we apply to directories.
const DirPermissions = os.ModeDir | 0775

// FindRepoRoot returns the root directory of the current repo and sets the initial working dir.
// It returns true if the repo root was found.
func FindRepoRoot() bool {
	initialWorkingDir, _ = os.Getwd()
	RepoRoot, initialPackage = getRepoRoot(ConfigFileName)
	return RepoRoot != ""
}

// MustFindRepoRoot returns the root directory of the current repo and sets the initial working dir.
// It dies on failure, although it will fall back to looking for a Bazel WORKSPACE file first.
func MustFindRepoRoot() string {
	if RepoRoot != "" {
		return RepoRoot
	} else if FindRepoRoot() {
		return RepoRoot
	}
	RepoRoot, initialPackage = getRepoRoot("WORKSPACE")
	if RepoRoot != "" {
		log.Warning("No .plzconfig file found to define the repo root.")
		log.Warning("Falling back to Bazel WORKSPACE at %s", path.Join(RepoRoot, "WORKSPACE"))
		usingBazelWorkspace = true
		return RepoRoot
	}
	// Check the config for a default repo location. Of course, we have to load system-level config
	// in order to do that...
	config, err := ReadConfigFiles([]string{MachineConfigFileName, ExpandHomePath(UserConfigFileName)}, "")
	if err != nil {
		log.Fatalf("Error reading config file: %s", err)
	}
	if config.Please.DefaultRepo != "" {
		log.Warning("Using default repo at %s", config.Please.DefaultRepo)
		RepoRoot = ExpandHomePath(config.Please.DefaultRepo)
		return RepoRoot
	}
	log.Fatalf("Couldn't locate the repo root. Are you sure you're inside a plz repo?")
	return ""
}

// InitialPackage returns a label corresponding to the initial package we started in.
func InitialPackage() []BuildLabel {
	// It's possible to start off in directories that aren't legal package names, because
	// our package naming is stricter than directory naming requirements.
	// In that case move up until we find somewhere we can run from.
	dir := initialPackage
	for dir != "." {
		if label, err := TryNewBuildLabel(dir, "test"); err == nil {
			label.Name = "..."
			return []BuildLabel{label}
		}
		dir = filepath.Dir(dir)
	}
	return WholeGraph
}
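// exampleStartupUsage is an illustrative sketch, not part of the original file: it shows
// roughly how a plz command entry point would use the helpers above at startup. The real
// CLI's call sites differ; this only shows the intended order of operations.
func exampleStartupUsage() {
	root := MustFindRepoRoot() // dies (or falls back to WORKSPACE / the default repo) if we're not in a repo
	labels := InitialPackage() // the package we were invoked from, or WholeGraph as a fallback
	log.Notice("Repo root %s, initial package %v", root, labels)
}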
// getRepoRoot returns the root directory of the current repo and the initial package.
func getRepoRoot(filename string) (string, string) {
	dir, err := os.Getwd()
	if err != nil {
		log.Fatalf("Couldn't determine working directory: %s", err)
	}
	// Walk up directories looking for a .plzconfig file, which we use to identify the root.
	initial := dir
	for dir != "" {
		if PathExists(path.Join(dir, filename)) {
			return dir, strings.TrimLeft(initial[len(dir):], "/")
		}
		dir, _ = path.Split(dir)
		dir = strings.TrimRight(dir, "/")
	}
	return "", ""
}

// StartedAtRepoRoot returns true if the build was initiated from the repo root.
// Used to provide slightly nicer output in some places.
func StartedAtRepoRoot() bool {
	return RepoRoot == initialWorkingDir
}

// safeBuffer is an io.Writer that ensures that only one thread writes to it at a time.
// This is important because we potentially have both stdout and stderr writing to the same
// buffer, and os/exec only guarantees goroutine safety if both are literally the same writer,
// which in our case they're not (although both ultimately write to the same buffer).
type safeBuffer struct {
	sync.Mutex
	buf bytes.Buffer
}

func (sb *safeBuffer) Write(b []byte) (int, error) {
	sb.Lock()
	defer sb.Unlock()
	return sb.buf.Write(b)
}

func (sb *safeBuffer) Bytes() []byte {
	return sb.buf.Bytes()
}

func (sb *safeBuffer) String() string {
	return sb.buf.String()
}
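// exampleSafeBufferUsage is an illustrative sketch, not part of the original file: it shows
// why safeBuffer exists. When stdout and stderr are handed two *different* writers, os/exec
// services them from separate goroutines, so anything they share must be locked; safeBuffer's
// mutex makes the shared combined buffer safe. The shell command here is purely an example.
func exampleSafeBufferUsage() (string, error) {
	var combined safeBuffer
	var stdout bytes.Buffer
	cmd := exec.Command("sh", "-c", "echo to stdout; echo to stderr >&2")
	// Different writers per stream, but both ultimately write into `combined`.
	cmd.Stdout = io.MultiWriter(&stdout, &combined)
	cmd.Stderr = &combined
	err := cmd.Run()
	return combined.String(), err
}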
// LogProgress logs a message once a minute until the given context has expired.
// Used to provide some notion of progress while waiting for external commands.
func LogProgress(ctx context.Context, target *BuildTarget, msg string) {
	t := time.NewTicker(1 * time.Minute)
	defer t.Stop()
	for i := 1; i < 1000000; i++ {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			if i == 1 {
				log.Notice("%s still %s after 1 minute %s", target.Label, msg, progressMessage(target))
			} else {
				log.Notice("%s still %s after %d minutes %s", target.Label, msg, i, progressMessage(target))
			}
		}
	}
}

// progressMessage displays a progress message for a target, if it tracks progress.
func progressMessage(target *BuildTarget) string {
	if target.ShowProgress {
		return fmt.Sprintf("(%0.1f%% done)", target.Progress)
	}
	return ""
}

// TimeoutOrDefault uses the given timeout, or the default if it is not set.
func TimeoutOrDefault(timeout time.Duration, defaultTimeout cli.Duration) time.Duration {
	if timeout != 0 {
		return timeout
	} else if defaultTimeout != 0 {
		return time.Duration(defaultTimeout)
	}
	return 10 * time.Minute // fallback
}

// TargetTimeoutOrDefault is like TimeoutOrDefault but uses the given target, which can be nil.
func TargetTimeoutOrDefault(target *BuildTarget, state *BuildState) time.Duration {
	if target != nil {
		return TimeoutOrDefault(target.BuildTimeout, state.Config.Build.Timeout)
	}
	return TimeoutOrDefault(0, state.Config.Build.Timeout)
}

// ExecWithTimeout runs an external command with a timeout.
// If the command times out it will be killed and the returned error will describe the timeout.
// If showOutput is true then output will be printed to stderr as well as returned.
// It returns stdout only, the combined stdout and stderr, and any error that occurred.
func ExecWithTimeout(target *BuildTarget, dir string, env []string, timeout time.Duration, defaultTimeout cli.Duration, showOutput, attachStdStreams bool, argv []string, msg string) ([]byte, []byte, error) {
	timeout = TimeoutOrDefault(timeout, defaultTimeout)
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	cmd := ExecCommand(argv[0], argv[1:]...)
	cmd.Dir = dir
	cmd.Env = env

	var out bytes.Buffer
	var outerr safeBuffer
	if showOutput {
		cmd.Stdout = io.MultiWriter(os.Stderr, &out, &outerr)
		cmd.Stderr = io.MultiWriter(os.Stderr, &outerr)
	} else {
		cmd.Stdout = io.MultiWriter(&out, &outerr)
		cmd.Stderr = &outerr
	}
	if target != nil && target.ShowProgress {
		cmd.Stdout = newProgressWriter(target, cmd.Stdout)
		cmd.Stderr = newProgressWriter(target, cmd.Stderr)
	}
	if attachStdStreams {
		cmd.Stdin = os.Stdin
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
	}
	if target != nil {
		go LogProgress(ctx, target, msg)
	}
	// Start the command, wait for the timeout & then kill it.
	// We deliberately don't use CommandContext because it will only send SIGKILL which
	// child processes can't handle themselves.
	err := cmd.Start()
	if err != nil {
		return nil, nil, err
	}
	ch := make(chan error)
	go runCommand(cmd, ch)
	select {
	case err = <-ch:
		// Do nothing.
	case <-time.After(timeout):
		KillProcess(cmd)
		err = fmt.Errorf("Timeout exceeded: %s", outerr.String())
	}
	return out.Bytes(), outerr.Bytes(), err
}

// runCommand runs a command and signals on the given channel when it's done.
func runCommand(cmd *exec.Cmd, ch chan error) {
	ch <- cmd.Wait()
}

// ExecWithTimeoutShell runs an external command within a Bash shell.
// Other arguments are as ExecWithTimeout.
// Note that the command is deliberately a single string.
func ExecWithTimeoutShell(state *BuildState, target *BuildTarget, dir string, env []string, timeout time.Duration, defaultTimeout cli.Duration, showOutput bool, cmd string, sandbox bool) ([]byte, []byte, error) {
	return ExecWithTimeoutShellStdStreams(state, target, dir, env, timeout, defaultTimeout, showOutput, cmd, sandbox, false, "building")
}

// ExecWithTimeoutShellStdStreams is as ExecWithTimeoutShell but optionally attaches stdin to the subprocess.
func ExecWithTimeoutShellStdStreams(state *BuildState, target *BuildTarget, dir string, env []string, timeout time.Duration, defaultTimeout cli.Duration, showOutput bool, cmd string, sandbox, attachStdStreams bool, msg string) ([]byte, []byte, error) {
	c := append([]string{"bash", "--noprofile", "--norc", "-u", "-o", "pipefail", "-c"}, cmd)
	if sandbox {
		cmd, err := SandboxCommand(state, c)
		if err != nil {
			return nil, nil, err
		}
		c = cmd
	}
	return ExecWithTimeout(target, dir, env, timeout, defaultTimeout, showOutput, attachStdStreams, c, msg)
}

// SandboxCommand applies a sandbox to the given command.
func SandboxCommand(state *BuildState, cmd []string) ([]string, error) {
	tool, err := LookBuildPath(state.Config.Build.PleaseSandboxTool, state.Config)
	if err != nil {
		return nil, err
	}
	return append([]string{tool}, cmd...), nil
}
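// exampleShellExecUsage is an illustrative sketch, not part of the original file: a typical
// call to ExecWithTimeoutShell for a build action. The command string and the zero timeout
// (meaning "fall back to the configured default") are hypothetical values for illustration.
func exampleShellExecUsage(state *BuildState, target *BuildTarget) error {
	_, combined, err := ExecWithTimeoutShell(state, target, target.TmpDir(), nil, 0,
		state.Config.Build.Timeout, false, "echo compiling && touch output.txt", false)
	if err != nil {
		log.Warning("Command failed: %s\nOutput: %s", err, combined)
	}
	return err
}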
// MustSandboxCommand is like SandboxCommand but dies on errors.
func MustSandboxCommand(state *BuildState, cmd []string) []string {
	c, err := SandboxCommand(state, cmd)
	if err != nil {
		log.Fatalf("%s", err)
	}
	return c
}

// ExecWithTimeoutSimple runs an external command with a timeout.
// It's a simpler version of ExecWithTimeout that gives less control.
func ExecWithTimeoutSimple(timeout cli.Duration, cmd ...string) ([]byte, error) {
	_, out, err := ExecWithTimeout(nil, "", nil, time.Duration(timeout), timeout, false, false, cmd, "")
	return out, err
}

// KillProcess kills a process, attempting to send it a SIGTERM first followed by a SIGKILL
// shortly after if it hasn't exited.
func KillProcess(cmd *exec.Cmd) {
	if !killProcess(cmd, syscall.SIGTERM, 30*time.Millisecond) && !killProcess(cmd, syscall.SIGKILL, time.Second) {
		log.Error("Failed to kill inferior process")
	}
}

// killProcess implements the two-step killing of processes with a SIGTERM and a SIGKILL if
// that's unsuccessful. It returns true if the process exited within the timeout.
func killProcess(cmd *exec.Cmd, sig syscall.Signal, timeout time.Duration) bool {
	// This is a bit of a fiddle. We want to wait for the process to exit but only for just so
	// long (we do not want to get hung up if it ignores our SIGTERM).
	log.Debug("Sending signal %s to %d", sig, cmd.Process.Pid)
	syscall.Kill(cmd.Process.Pid, sig)
	ch := make(chan error)
	go runCommand(cmd, ch)
	select {
	case <-ch:
		return true
	case <-time.After(timeout):
		return false
	}
}

// A SourcePair represents a source file with its source and temporary locations.
// This isn't typically used much by callers; it's just useful to have a single type for channels.
type SourcePair struct{ Src, Tmp string }
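// exampleSimpleExecUsage is an illustrative sketch, not part of the original file:
// ExecWithTimeoutSimple suits quick one-off tool invocations where we only want the
// combined output. The git invocation here is purely an example.
func exampleSimpleExecUsage() {
	out, err := ExecWithTimeoutSimple(cli.Duration(30*time.Second), "git", "rev-parse", "HEAD")
	if err != nil {
		log.Warning("git failed: %s", err)
		return
	}
	log.Notice("HEAD is %s", strings.TrimSpace(string(out)))
}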
// IterSources returns all the sources for a target, allowing for sources that are other rules
// and rules that require transitive dependencies.
// Yielded values are pairs of the original source location and its temporary location for this rule.
func IterSources(graph *BuildGraph, target *BuildTarget) <-chan SourcePair {
	ch := make(chan SourcePair)
	done := map[BuildLabel]bool{}
	donePaths := map[string]bool{}
	tmpDir := target.TmpDir()
	var inner func(dependency *BuildTarget)
	inner = func(dependency *BuildTarget) {
		sources := dependency.AllSources()
		if target == dependency {
			// This is the current build rule, so link its sources.
			for _, source := range sources {
				for _, providedSource := range recursivelyProvideSource(graph, target, source) {
					fullPaths := providedSource.FullPaths(graph)
					for i, sourcePath := range providedSource.Paths(graph) {
						tmpPath := path.Join(tmpDir, sourcePath)
						ch <- SourcePair{fullPaths[i], tmpPath}
						donePaths[tmpPath] = true
					}
				}
			}
		} else {
			// This is a dependency of the rule, so link its outputs.
			outDir := dependency.OutDir()
			for _, dep := range dependency.Outputs() {
				depPath := path.Join(outDir, dep)
				pkgName := dependency.Label.PackageName
				tmpPath := path.Join(tmpDir, pkgName, dep)
				if !donePaths[tmpPath] {
					ch <- SourcePair{depPath, tmpPath}
					donePaths[tmpPath] = true
				}
			}
			// Mark any label-type outputs as done.
			for _, out := range dependency.DeclaredOutputs() {
				if LooksLikeABuildLabel(out) {
					label := ParseBuildLabel(out, target.Label.PackageName)
					done[label] = true
				}
			}
		}
		// All the sources of this rule now count as done.
		for _, source := range sources {
			if label := source.Label(); label != nil && dependency.IsSourceOnlyDep(*label) {
				done[*label] = true
			}
		}

		done[dependency.Label] = true
		if target == dependency || (target.NeedsTransitiveDependencies && !dependency.OutputIsComplete) {
			for _, dep := range dependency.BuildDependencies() {
				for _, dep2 := range recursivelyProvideFor(graph, target, dependency, dep.Label) {
					if !done[dep2] && !dependency.IsTool(dep2) {
						inner(graph.TargetOrDie(dep2))
					}
				}
			}
		} else {
			for _, dep := range dependency.ExportedDependencies() {
				for _, dep2 := range recursivelyProvideFor(graph, target, dependency, dep) {
					if !done[dep2] {
						inner(graph.TargetOrDie(dep2))
					}
				}
			}
		}
	}
	go func() {
		inner(target)
		close(ch)
	}()
	return ch
}

// recursivelyProvideFor recursively applies ProvideFor to a target.
func recursivelyProvideFor(graph *BuildGraph, target, dependency *BuildTarget, dep BuildLabel) []BuildLabel {
	depTarget := graph.TargetOrDie(dep)
	ret := depTarget.ProvideFor(dependency)
	if len(ret) == 1 && ret[0] == dep {
		// Dependency doesn't have a require/provide directly on this guy, up to the top-level
		// target. We have to check the dep first to keep things consistent with what targets
		// have actually been built.
		ret = depTarget.ProvideFor(target)
		if len(ret) == 1 && ret[0] == dep {
			return ret
		}
	}
	ret2 := make([]BuildLabel, 0, len(ret))
	for _, r := range ret {
		if r == dep {
			ret2 = append(ret2, r) // Providing itself, don't recurse
		} else {
			ret2 = append(ret2, recursivelyProvideFor(graph, target, dependency, r)...)
		}
	}
	return ret2
}

// recursivelyProvideSource is similar to recursivelyProvideFor but operates on a BuildInput.
func recursivelyProvideSource(graph *BuildGraph, target *BuildTarget, src BuildInput) []BuildInput {
	if label := src.nonOutputLabel(); label != nil {
		dep := graph.TargetOrDie(*label)
		provided := recursivelyProvideFor(graph, target, target, dep.Label)
		ret := make([]BuildInput, len(provided))
		for i, p := range provided {
			ret[i] = p
		}
		return ret
	}
	return []BuildInput{src}
}
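// examplePrepareAllSources is an illustrative sketch, not part of the original file: the
// usual way to consume IterSources is to drain the channel and link each pair into the
// target's temp dir, much as the build step does via PrepareSourcePair below.
func examplePrepareAllSources(graph *BuildGraph, target *BuildTarget) error {
	for pair := range IterSources(graph, target) {
		if err := PrepareSourcePair(pair); err != nil {
			return err
		}
	}
	return nil
}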
// IterRuntimeFiles yields all the runtime files for a rule (outputs & data files), similar to above.
func IterRuntimeFiles(graph *BuildGraph, target *BuildTarget, absoluteOuts bool) <-chan SourcePair {
	done := map[string]bool{}
	ch := make(chan SourcePair)

	makeOut := func(out string) string {
		if absoluteOuts {
			return path.Join(RepoRoot, target.TestDir(), out)
		}
		return out
	}

	pushOut := func(src, out string) {
		out = makeOut(out)
		if !done[out] {
			ch <- SourcePair{src, out}
			done[out] = true
		}
	}

	var inner func(*BuildTarget)
	inner = func(target *BuildTarget) {
		outDir := target.OutDir()
		for _, out := range target.Outputs() {
			pushOut(path.Join(outDir, out), out)
		}
		for _, data := range target.Data {
			fullPaths := data.FullPaths(graph)
			for i, dataPath := range data.Paths(graph) {
				pushOut(fullPaths[i], dataPath)
			}
			if label := data.Label(); label != nil {
				for _, dep := range graph.TargetOrDie(*label).ExportedDependencies() {
					inner(graph.TargetOrDie(dep))
				}
			}
		}
		for _, dep := range target.ExportedDependencies() {
			inner(graph.TargetOrDie(dep))
		}
	}
	go func() {
		inner(target)
		close(ch)
	}()
	return ch
}

// IterInputPaths yields all the transitive input files for a rule (sources & data files), similar to above (again).
func IterInputPaths(graph *BuildGraph, target *BuildTarget) <-chan string {
	// Use a couple of maps to protect us from dep-graph loops and to stop parsing the same target
	// multiple times. We also only want to push files to the channel that haven't already been seen.
	donePaths := map[string]bool{}
	doneTargets := map[*BuildTarget]bool{}
	ch := make(chan string)
	var inner func(*BuildTarget)
	inner = func(target *BuildTarget) {
		if !doneTargets[target] {
			// First yield all the sources of the target, only ever pushing declared paths to
			// the channel to prevent us outputting any intermediate files.
			for _, source := range target.AllSources() {
				// If the label is nil add any input paths contained here.
				if label := source.nonOutputLabel(); label == nil {
					for _, sourcePath := range source.FullPaths(graph) {
						if !donePaths[sourcePath] {
							ch <- sourcePath
							donePaths[sourcePath] = true
						}
					}
					// Otherwise we should recurse for this build label (and gather its sources)
				} else {
					inner(graph.TargetOrDie(*label))
				}
			}

			// Now yield all the data deps of this rule.
			for _, data := range target.Data {
				// If the label is nil add any input paths contained here.
				if label := data.Label(); label == nil {
					for _, sourcePath := range data.FullPaths(graph) {
						if !donePaths[sourcePath] {
							ch <- sourcePath
							donePaths[sourcePath] = true
						}
					}
					// Otherwise we should recurse for this build label (and gather its sources)
				} else {
					inner(graph.TargetOrDie(*label))
				}
			}

			// Finally recurse for all the deps of this rule.
			for _, dep := range target.Dependencies() {
				inner(dep)
			}
			doneTargets[target] = true
		}
	}
	go func() {
		inner(target)
		close(ch)
	}()
	return ch
}
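// exampleListInputs is an illustrative sketch, not part of the original file: IterInputPaths
// can drive anything that needs the full set of files feeding a target, e.g. dumping them for
// debugging or handing them to a hasher.
func exampleListInputs(graph *BuildGraph, target *BuildTarget) []string {
	inputs := []string{}
	for p := range IterInputPaths(graph, target) {
		inputs = append(inputs, p)
	}
	return inputs
}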
// PrepareSource symlinks a single source file for a build rule.
func PrepareSource(sourcePath string, tmpPath string) error {
	dir := path.Dir(tmpPath)
	if !PathExists(dir) {
		if err := os.MkdirAll(dir, DirPermissions); err != nil {
			return err
		}
	}
	if !PathExists(sourcePath) {
		return fmt.Errorf("Source file %s doesn't exist", sourcePath)
	}
	return fs.RecursiveLink(sourcePath, tmpPath, 0)
}

// PrepareSourcePair prepares a source file for a build.
func PrepareSourcePair(pair SourcePair) error {
	if path.IsAbs(pair.Src) {
		return PrepareSource(pair.Src, pair.Tmp)
	}
	return PrepareSource(path.Join(RepoRoot, pair.Src), pair.Tmp)
}

// CollapseHash combines our usual four-part hash into one by XOR'ing the parts together.
// This helps keep things short in places where we sometimes get complaints about filenames
// being too long (most noticeable on e.g. Ubuntu with an encrypted home directory but not an
// entirely encrypted disk) and where we don't especially care about breaking out the
// individual parts of the hash, which many other parts of the system do need.
func CollapseHash(key []byte) []byte {
	short := [sha1.Size]byte{}
	// We store the rule hash twice; if it's repeated we must make sure not to xor it
	// against itself.
	if bytes.Equal(key[0:sha1.Size], key[sha1.Size:2*sha1.Size]) {
		for i := 0; i < sha1.Size; i++ {
			short[i] = key[i] ^ key[i+2*sha1.Size] ^ key[i+3*sha1.Size]
		}
	} else {
		for i := 0; i < sha1.Size; i++ {
			short[i] = key[i] ^ key[i+sha1.Size] ^ key[i+2*sha1.Size] ^ key[i+3*sha1.Size]
		}
	}
	return short[:]
}

// LookPath does roughly the same as exec.LookPath, i.e. looks for the named file on the path.
// The main difference is that it looks based on our config which isn't necessarily the same
// as the external environment variable.
func LookPath(filename string, paths []string) (string, error) {
	for _, p := range paths {
		for _, p2 := range strings.Split(p, ":") {
			p3 := path.Join(p2, filename)
			if _, err := os.Stat(p3); err == nil {
				return p3, nil
			}
		}
	}
	return "", fmt.Errorf("%s not found in PATH %s", filename, strings.Join(paths, ":"))
}

// LookBuildPath is like LookPath but takes the config's build path into account.
func LookBuildPath(filename string, config *Configuration) (string, error) {
	return LookPath(filename, append([]string{ExpandHomePath(config.Please.Location)}, config.Build.Path...))
}
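// exampleCollapsedHashName is an illustrative sketch, not part of the original file:
// CollapseHash assumes its input is four SHA1-sized parts concatenated (80 bytes). The part
// names here are hypothetical; they just show the expected layout of the key.
func exampleCollapsedHashName(partA, partB, partC, partD []byte) string {
	key := make([]byte, 0, 4*sha1.Size)
	key = append(key, partA...)
	key = append(key, partB...)
	key = append(key, partC...)
	key = append(key, partD...)
	return hex.EncodeToString(CollapseHash(key))
}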
// AsyncDeleteDir deletes a directory asynchronously.
// First it renames the directory to something temporary and then forks to delete it.
// The rename is done synchronously but the actual deletion is async (after fork) so
// you don't have to wait for large directories to be removed.
// Conversely, there is obviously no guarantee about when it will actually cease to
// be on disk.
func AsyncDeleteDir(dir string) error {
	rm, err := exec.LookPath("rm")
	if err != nil {
		return err
	} else if !PathExists(dir) {
		return nil // not an error, just don't need to do anything.
	}
	newDir, err := moveDir(dir)
	if err != nil {
		return err
	}
	// Note that we can't fork() directly and continue running Go code, but ForkExec() works okay.
	// Hence we're using rm rather than fork() + os.RemoveAll.
	_, err = syscall.ForkExec(rm, []string{rm, "-rf", newDir}, nil)
	return err
}

// moveDir moves a directory to a new location and returns that new location.
func moveDir(dir string) (string, error) {
	b := make([]byte, 16)
	rand.Read(b)
	name := path.Join(path.Dir(dir), ".plz_clean_"+hex.EncodeToString(b))
	log.Notice("Moving %s to %s", dir, name)
	return name, os.Rename(dir, name)
}

// PathExists is an alias to fs.PathExists.
// TODO(peterebden): Remove and migrate everything over.
func PathExists(filename string) bool {
	return fs.PathExists(filename)
}
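// exampleAsyncClean is an illustrative sketch, not part of the original file: cleaning up a
// set of stale output directories without blocking on the removal of each one.
func exampleAsyncClean(dirs []string) {
	for _, dir := range dirs {
		if err := AsyncDeleteDir(dir); err != nil {
			log.Warning("Failed to clean %s: %s", dir, err)
		}
	}
}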