// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// The sandbox program is an HTTP server that receives untrusted
// linux/amd64 binaries in a POST request and then executes them in
// a gvisor sandbox using Docker, returning the output as a response
// to the POST.
//
// It's part of the Go playground (https://play.golang.org/).
package main

import (
	"bufio"
	"bytes"
	"context"
	"crypto/rand"
	"encoding/json"
	"errors"
	"flag"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"os/exec"
	"os/signal"
	"runtime"
	"sync"
	"syscall"
	"time"

	"cloud.google.com/go/compute/metadata"
	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/stats"
	"go.opencensus.io/tag"
	"go.opencensus.io/trace"
	"golang.org/x/playground/internal"
	"golang.org/x/playground/internal/metrics"
	"golang.org/x/playground/sandbox/sandboxtypes"
)

var (
	listenAddr = flag.String("listen", ":80", "HTTP server listen address. Only applicable when --mode=server")
	mode       = flag.String("mode", "server", "Whether to run in \"server\" mode or \"contained\" mode. The contained mode is used internally by the server mode.")
	dev        = flag.Bool("dev", false, "run in dev mode (show help messages)")
	numWorkers = flag.Int("workers", runtime.NumCPU(), "number of parallel gvisor containers to pre-spin up & let run concurrently")
	container  = flag.String("untrusted-container", "gcr.io/golang-org/playground-sandbox-gvisor:latest", "container image name that hosts the untrusted binary under gvisor")
)

const (
	maxBinarySize    = 100 << 20
	startTimeout     = 30 * time.Second
	runTimeout       = 5 * time.Second
	maxOutputSize    = 100 << 20
	memoryLimitBytes = 100 << 20
)

var (
	errTooMuchOutput = errors.New("Output too large")
	errRunTimeout    = errors.New("timeout running program")
)

// containedStartMessage is the first thing written to stdout by the
// gvisor-contained process when it starts up. This lets the parent HTTP
// server know that a particular container is ready to run a binary.
const containedStartMessage = "golang-gvisor-process-started\n"

// containedStderrHeader is written to stderr after the gvisor-contained process
// successfully reads the processMeta JSON line + executable binary from stdin,
// but before it's run.
var containedStderrHeader = []byte("golang-gvisor-process-got-input\n")

var (
	readyContainer chan *Container
	runSem         chan struct{}
)
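
// The server and the contained process share a tiny stdin protocol: one
// processMeta JSON line, a '\n', then the raw executable bytes. frameStdin is
// an illustrative sketch of that framing (a hypothetical helper; nothing in
// this file calls it); runHandler below performs the same two writes directly
// on the container's stdin pipe.
func frameStdin(meta processMeta, bin []byte) ([]byte, error) {
	metaJSON, err := json.Marshal(&meta)
	if err != nil {
		return nil, err
	}
	buf := make([]byte, 0, len(metaJSON)+1+len(bin))
	buf = append(buf, metaJSON...) // the JSON meta line...
	buf = append(buf, '\n')        // ...terminated by a newline...
	buf = append(buf, bin...)      // ...followed by the binary itself
	return buf, nil
}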
type Container struct {
	name string

	stdin  io.WriteCloser
	stdout *limitedWriter
	stderr *limitedWriter

	cmd       *exec.Cmd
	cancelCmd context.CancelFunc

	waitErr chan error // 1-buffered; receives error from WaitOrStop(..., cmd, ...)
}

func (c *Container) Close() {
	setContainerWanted(c.name, false)

	c.cancelCmd()
	if err := c.Wait(); err != nil {
		log.Printf("error in c.Wait() for %q: %v", c.name, err)
	}
}

// Wait returns the error (if any) from the container's WaitOrStop call.
// The error is put back on the channel so Wait can be called more than once.
func (c *Container) Wait() error {
	err := <-c.waitErr
	c.waitErr <- err
	return err
}

var httpServer *http.Server

func main() {
	flag.Parse()
	if *mode == "contained" {
		runInGvisor()
		panic("runInGvisor didn't exit")
	}
	if flag.NArg() != 0 {
		flag.Usage()
		os.Exit(1)
	}
	log.Printf("Go playground sandbox starting.")

	readyContainer = make(chan *Container)
	runSem = make(chan struct{}, *numWorkers)
	go handleSignals()

	mux := http.NewServeMux()

	gr, err := metrics.GCEResource("go-playground-sandbox")
	if err != nil && metadata.OnGCE() {
		log.Printf("metrics.GCEResource(%q) = _, %v, wanted no error.", "go-playground-sandbox", err)
	}
	if ms, err := metrics.NewService(gr, views); err != nil {
		log.Printf("Failed to initialize metrics: metrics.NewService() = _, %v, wanted no error", err)
	} else {
		mux.Handle("/statusz", ochttp.WithRouteTag(ms, "/statusz"))
		defer ms.Stop()
	}

	if out, err := exec.Command("docker", "version").CombinedOutput(); err != nil {
		log.Fatalf("failed to connect to docker: %v, %s", err, out)
	}
	if *dev {
		log.Printf("Running in dev mode; container published to host at: http://localhost:8080/")
		log.Printf("Run a binary with: curl -v --data-binary @/home/bradfitz/hello http://localhost:8080/run")
	} else {
		if out, err := exec.Command("docker", "pull", *container).CombinedOutput(); err != nil {
			log.Fatalf("error pulling %s: %v, %s", *container, err, out)
		}
		log.Printf("Listening on %s", *listenAddr)
	}

	mux.Handle("/health", ochttp.WithRouteTag(http.HandlerFunc(healthHandler), "/health"))
	mux.Handle("/healthz", ochttp.WithRouteTag(http.HandlerFunc(healthHandler), "/healthz"))
	mux.Handle("/", ochttp.WithRouteTag(http.HandlerFunc(rootHandler), "/"))
	mux.Handle("/run", ochttp.WithRouteTag(http.HandlerFunc(runHandler), "/run"))

	makeWorkers()
	go internal.PeriodicallyDo(context.Background(), 10*time.Second, func(ctx context.Context, _ time.Time) {
		countDockerContainers(ctx)
	})

	trace.ApplyConfig(trace.Config{DefaultSampler: trace.NeverSample()})
	httpServer = &http.Server{
		Addr:    *listenAddr,
		Handler: &ochttp.Handler{Handler: mux},
	}
	log.Fatal(httpServer.ListenAndServe())
}

// dockerContainer is the structure of each line output from docker ps.
type dockerContainer struct {
	// ID is the docker container ID.
	ID string `json:"ID"`
	// Image is the docker image name.
	Image string `json:"Image"`
	// Names is the docker container name.
	Names string `json:"Names"`
}
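
// parseDockerContainersExample shows, with a single hypothetical
// `docker ps --format '{{json .}}'` line, the input shape that
// parseDockerContainers (below) decodes: one JSON object per line, mapped
// onto dockerContainer. It exists purely as documentation and is never called.
func parseDockerContainersExample() ([]dockerContainer, error) {
	sample := []byte(`{"ID":"93d1ba0cf780","Image":"gcr.io/golang-org/playground-sandbox-gvisor:latest","Names":"play_run_3f7c2a1b"}` + "\n")
	return parseDockerContainers(sample)
}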
// countDockerContainers records the metric for the current number of docker containers.
// It also records the count of any unwanted containers.
func countDockerContainers(ctx context.Context) {
	cs, err := listDockerContainers(ctx)
	if err != nil {
		log.Printf("Error counting docker containers: %v", err)
	}
	stats.Record(ctx, mContainers.M(int64(len(cs))))
	var unwantedCount int64
	for _, c := range cs {
		if c.Names != "" && !isContainerWanted(c.Names) {
			unwantedCount++
		}
	}
	stats.Record(ctx, mUnwantedContainers.M(unwantedCount))
}

// listDockerContainers returns the currently running play_run containers reported by docker.
func listDockerContainers(ctx context.Context) ([]dockerContainer, error) {
	out := new(bytes.Buffer)
	cmd := exec.Command("docker", "ps", "--quiet", "--filter", "name=play_run_", "--format", "{{json .}}")
	cmd.Stdout, cmd.Stderr = out, out
	if err := cmd.Start(); err != nil {
		return nil, fmt.Errorf("listDockerContainers: cmd.Start() failed: %w", err)
	}
	ctx, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	if err := internal.WaitOrStop(ctx, cmd, os.Interrupt, 250*time.Millisecond); err != nil {
		return nil, fmt.Errorf("listDockerContainers: internal.WaitOrStop() failed: %w", err)
	}
	return parseDockerContainers(out.Bytes())
}

// parseDockerContainers parses the JSON-formatted output from docker ps.
//
// If there is an error scanning the input, or non-JSON output is encountered, an error is returned.
func parseDockerContainers(b []byte) ([]dockerContainer, error) {
	// Parse the output to ensure it is well-formatted in the structure we expect.
	var containers []dockerContainer
	// Each output line is its own JSON object, so unmarshal one line at a time.
	scanner := bufio.NewScanner(bytes.NewReader(b))
	for scanner.Scan() {
		var do dockerContainer
		if err := json.Unmarshal(scanner.Bytes(), &do); err != nil {
			return nil, fmt.Errorf("parseDockerContainers: error parsing docker ps output: %w", err)
		}
		containers = append(containers, do)
	}
	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("parseDockerContainers: error reading docker ps output: %w", err)
	}
	return containers, nil
}

func handleSignals() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGINT)
	s := <-c
	log.Fatalf("closing on signal %d: %v", s, s)
}

var healthStatus struct {
	sync.Mutex
	lastCheck time.Time
	lastVal   error
}

// getHealthCached returns the cached health-check result, re-running
// checkHealth when the cached value is more than five seconds old.
func getHealthCached() error {
	healthStatus.Lock()
	defer healthStatus.Unlock()
	const recentEnough = 5 * time.Second
	if healthStatus.lastCheck.After(time.Now().Add(-recentEnough)) {
		return healthStatus.lastVal
	}

	err := checkHealth()
	if healthStatus.lastVal == nil && err != nil {
		// On transition from healthy to unhealthy, close all
		// idle HTTP connections so clients with them open
		// don't reuse them. TODO: remove this if/when we
		// switch away from direct load balancing between
		// frontends and this sandbox backend.
		httpServer.SetKeepAlivesEnabled(false) // side effect of closing all idle ones
		httpServer.SetKeepAlivesEnabled(true)  // and restore it back to normal
	}
	healthStatus.lastVal = err
	healthStatus.lastCheck = time.Now()
	return err
}
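
// A quick manual probe of the cached health check (the host below is
// hypothetical; in dev mode the container publishes to localhost:8080):
//
//	curl -i http://localhost:8080/healthz
//
// The handler replies "OK" with a 200 when getHealthCached returns nil, or a
// 500 carrying the failure message otherwise.
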
// checkHealth does a health check, without any caching. It's called via
// getHealthCached.
func checkHealth() error {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	c, err := getContainer(ctx)
	if err != nil {
		return fmt.Errorf("failed to get a sandbox container: %v", err)
	}
	// TODO: execute something too? for now we just check that sandboxed containers
	// are available.
	closed := make(chan struct{})
	go func() {
		c.Close()
		close(closed)
	}()
	select {
	case <-closed:
		// success.
		return nil
	case <-ctx.Done():
		return fmt.Errorf("timeout closing sandbox container")
	}
}

func healthHandler(w http.ResponseWriter, r *http.Request) {
	// TODO: split into liveness & readiness checks?
	if err := getHealthCached(); err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		fmt.Fprintf(w, "health check failure: %v\n", err)
		return
	}
	io.WriteString(w, "OK\n")
}

func rootHandler(w http.ResponseWriter, r *http.Request) {
	if r.URL.Path != "/" {
		http.NotFound(w, r)
		return
	}
	io.WriteString(w, "Hi from sandbox\n")
}

// processMeta is the JSON sent to the gvisor container before the untrusted binary.
// It currently contains only the arguments to pass to the binary.
// It might contain environment or other things later.
type processMeta struct {
	Args []string `json:"args"`
}
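
// runHandler (below) fills Args from the request's repeated X-Argument
// headers, so a POST sent with hypothetical headers
//
//	X-Argument: -v
//	X-Argument: 2
//
// produces the meta line {"args":["-v","2"]} ahead of the binary on the
// container's stdin.
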
// runInGvisor is run when we're inside gvisor. We have no network
// at this point. We can read our binary in from stdin and then run
// it.
func runInGvisor() {
	const binPath = "/tmpfs/play"
	if _, err := io.WriteString(os.Stdout, containedStartMessage); err != nil {
		log.Fatalf("writing to stdout: %v", err)
	}
	slurp, err := io.ReadAll(os.Stdin)
	if err != nil {
		log.Fatalf("reading stdin in contained mode: %v", err)
	}
	nl := bytes.IndexByte(slurp, '\n')
	if nl == -1 {
		log.Fatalf("no newline found in input")
	}
	metaJSON, bin := slurp[:nl], slurp[nl+1:]

	if err := os.WriteFile(binPath, bin, 0755); err != nil {
		log.Fatalf("writing contained binary: %v", err)
	}
	defer os.Remove(binPath) // not that it matters much, this container will be nuked

	var meta processMeta
	if err := json.NewDecoder(bytes.NewReader(metaJSON)).Decode(&meta); err != nil {
		log.Fatalf("error decoding JSON meta: %v", err)
	}

	if _, err := os.Stderr.Write(containedStderrHeader); err != nil {
		log.Fatalf("writing header to stderr: %v", err)
	}

	cmd := exec.Command(binPath)
	cmd.Args = append(cmd.Args, meta.Args...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Start(); err != nil {
		log.Fatalf("cmd.Start(): %v", err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), runTimeout-500*time.Millisecond)
	defer cancel()
	if err = internal.WaitOrStop(ctx, cmd, os.Interrupt, 250*time.Millisecond); err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			fmt.Fprintln(os.Stderr, "timeout running program")
		}
	}
	os.Exit(errExitCode(err))
}

// makeWorkers starts numWorkers workerLoop goroutines, each keeping one
// pre-started container ready at a time.
func makeWorkers() {
	ctx := context.Background()
	stats.Record(ctx, mMaxContainers.M(int64(*numWorkers)))
	for i := 0; i < *numWorkers; i++ {
		go workerLoop(ctx)
	}
}

// workerLoop repeatedly starts a container and offers it on readyContainer,
// backing off briefly when a container fails to start.
func workerLoop(ctx context.Context) {
	for {
		c, err := startContainer(ctx)
		if err != nil {
			log.Printf("error starting container: %v", err)
			time.Sleep(5 * time.Second)
			continue
		}
		readyContainer <- c
	}
}

// randHex returns a random hex string of length n.
func randHex(n int) string {
	b := make([]byte, n/2)
	_, err := rand.Read(b)
	if err != nil {
		panic(err)
	}
	return fmt.Sprintf("%x", b)
}

var (
	wantedMu        sync.Mutex
	containerWanted = map[string]bool{}
)

// setContainerWanted records whether a named container is wanted or
// not. Any unwanted containers are cleaned up asynchronously as a
// sanity check against leaks.
//
// TODO(bradfitz): add leak checker (background docker ps loop)
func setContainerWanted(name string, wanted bool) {
	wantedMu.Lock()
	defer wantedMu.Unlock()
	if wanted {
		containerWanted[name] = true
	} else {
		delete(containerWanted, name)
	}
}

func isContainerWanted(name string) bool {
	wantedMu.Lock()
	defer wantedMu.Unlock()
	return containerWanted[name]
}

// getContainer returns a ready container from the pool, or ctx's error if
// the context is done first.
func getContainer(ctx context.Context) (*Container, error) {
	select {
	case c := <-readyContainer:
		return c, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
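
// Because readyContainer is unbuffered, each workerLoop goroutine blocks in
// its send until a request (or health check) takes its container, so at most
// *numWorkers idle, pre-started containers exist at once. A caller's side of
// the handshake, as in checkHealth and runHandler, is a bounded receive:
//
//	ctx, cancel := context.WithTimeout(ctx, 3*time.Second)
//	defer cancel()
//	c, err := getContainer(ctx) // receives from readyContainer
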
// startContainer starts a single gvisor-backed docker container and waits
// (up to startTimeout) for it to report containedStartMessage.
func startContainer(ctx context.Context) (c *Container, err error) {
	start := time.Now()
	defer func() {
		status := "success"
		if err != nil {
			status = "error"
		}
		// Ignore error. The only error can be invalid tag key or value length, which we know are safe.
		_ = stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(kContainerCreateSuccess, status)},
			mContainerCreateLatency.M(float64(time.Since(start))/float64(time.Millisecond)))
	}()

	name := "play_run_" + randHex(8)
	setContainerWanted(name, true)
	cmd := exec.Command("docker", "run",
		"--name="+name,
		"--rm",
		"--tmpfs=/tmpfs:exec",
		"-i", // read stdin

		"--runtime=runsc",
		"--network=none",
		"--memory="+fmt.Sprint(memoryLimitBytes),

		*container,
		"--mode=contained")
	stdin, err := cmd.StdinPipe()
	if err != nil {
		return nil, err
	}
	pr, pw := io.Pipe()
	stdout := &limitedWriter{dst: &bytes.Buffer{}, n: maxOutputSize + int64(len(containedStartMessage))}
	stderr := &limitedWriter{dst: &bytes.Buffer{}, n: maxOutputSize}
	cmd.Stdout = &switchWriter{switchAfter: []byte(containedStartMessage), dst1: pw, dst2: stdout}
	cmd.Stderr = stderr
	if err := cmd.Start(); err != nil {
		return nil, err
	}

	ctx, cancel := context.WithCancel(ctx)
	c = &Container{
		name:      name,
		stdin:     stdin,
		stdout:    stdout,
		stderr:    stderr,
		cmd:       cmd,
		cancelCmd: cancel,
		waitErr:   make(chan error, 1),
	}
	go func() {
		c.waitErr <- internal.WaitOrStop(ctx, cmd, os.Interrupt, 250*time.Millisecond)
	}()
	defer func() {
		if err != nil {
			c.Close()
		}
	}()

	startErr := make(chan error, 1)
	go func() {
		buf := make([]byte, len(containedStartMessage))
		_, err := io.ReadFull(pr, buf)
		if err != nil {
			startErr <- fmt.Errorf("error reading header from sandbox container: %v", err)
		} else if string(buf) != containedStartMessage {
			startErr <- fmt.Errorf("sandbox container sent wrong header %q; want %q", buf, containedStartMessage)
		} else {
			startErr <- nil
		}
	}()

	timer := time.NewTimer(startTimeout)
	defer timer.Stop()
	select {
	case <-timer.C:
		err := fmt.Errorf("timeout starting container %q", name)
		cancel()
		<-startErr
		return nil, err

	case err := <-startErr:
		if err != nil {
			return nil, err
		}
	}

	log.Printf("started container %q", name)
	return c, nil
}
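
// Putting the pieces together, the lifecycle of one worker container is:
//
//	startContainer: docker run --runtime=runsc ... --mode=contained
//	contained process: writes containedStartMessage to stdout
//	runHandler: writes processMeta JSON + '\n' + binary to stdin, then closes it
//	contained process: writes containedStderrHeader to stderr, runs the binary
//	runHandler: waits (bounded by runTimeout), then closes the container
//
// Containers are never reused: runHandler's deferred cleanup always closes
// the container, and a workerLoop goroutine starts a fresh one.
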
func runHandler(w http.ResponseWriter, r *http.Request) {
	t0 := time.Now()
	tlast := t0
	var logmu sync.Mutex
	logf := func(format string, args ...interface{}) {
		if !*dev {
			return
		}
		logmu.Lock()
		defer logmu.Unlock()
		t := time.Now()
		d := t.Sub(tlast)
		d0 := t.Sub(t0)
		tlast = t
		log.Print(fmt.Sprintf("+%10v +%10v ", d0, d) + fmt.Sprintf(format, args...))
	}
	logf("/run")

	if r.Method != "POST" {
		http.Error(w, "expected a POST", http.StatusBadRequest)
		return
	}

	// Bound the number of requests being processed at once.
	// (Before we slurp the binary into memory.)
	select {
	case runSem <- struct{}{}:
	case <-r.Context().Done():
		return
	}
	defer func() { <-runSem }()

	bin, err := io.ReadAll(http.MaxBytesReader(w, r.Body, maxBinarySize))
	if err != nil {
		log.Printf("failed to read request body: %v", err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	logf("read %d bytes", len(bin))

	c, err := getContainer(r.Context())
	if err != nil {
		if cerr := r.Context().Err(); cerr != nil {
			log.Printf("getContainer, client side cancellation: %v", cerr)
			return
		}
		http.Error(w, "failed to get container", http.StatusInternalServerError)
		log.Printf("failed to get container: %v", err)
		return
	}
	logf("got container %s", c.name)

	ctx, cancel := context.WithTimeout(context.Background(), runTimeout)
	closed := make(chan struct{})
	defer func() {
		logf("leaving handler; about to close container")
		cancel()
		<-closed
	}()
	go func() {
		<-ctx.Done()
		if ctx.Err() == context.DeadlineExceeded {
			logf("timeout")
		}
		c.Close()
		close(closed)
	}()
	var meta processMeta
	meta.Args = r.Header["X-Argument"]
	metaJSON, _ := json.Marshal(&meta)
	metaJSON = append(metaJSON, '\n')
	if _, err := c.stdin.Write(metaJSON); err != nil {
		log.Printf("failed to write meta to child: %v", err)
		http.Error(w, "unknown error during docker run", http.StatusInternalServerError)
		return
	}
	if _, err := c.stdin.Write(bin); err != nil {
		log.Printf("failed to write binary to child: %v", err)
		http.Error(w, "unknown error during docker run", http.StatusInternalServerError)
		return
	}
	c.stdin.Close()
	logf("wrote+closed")
	err = c.Wait()
	select {
	case <-ctx.Done():
		// Timed out or canceled before or exactly as Wait returned.
		// Either way, treat it as a timeout.
		sendError(w, "timeout running program")
		return
	default:
		logf("finished running; about to close container")
		cancel()
	}
	res := &sandboxtypes.Response{}
	if err != nil {
		if c.stderr.n < 0 || c.stdout.n < 0 {
			// Do not send truncated output, just send the error.
			sendError(w, errTooMuchOutput.Error())
			return
		}
		var ee *exec.ExitError
		if !errors.As(err, &ee) {
			http.Error(w, "unknown error during docker run", http.StatusInternalServerError)
			return
		}
		res.ExitCode = ee.ExitCode()
	}
	res.Stdout = c.stdout.dst.Bytes()
	res.Stderr = cleanStderr(c.stderr.dst.Bytes())
	sendResponse(w, res)
}

// limitedWriter is an io.Writer that returns errTooMuchOutput once its cap (n) is exceeded.
type limitedWriter struct {
	dst *bytes.Buffer
	n   int64 // max bytes remaining
}

// Write implements io.Writer, returning errTooMuchOutput when the cap (n) is hit.
//
// Partial data will be written to dst if p is larger than n, but errTooMuchOutput will be returned.
func (l *limitedWriter) Write(p []byte) (int, error) {
	defer func() { l.n -= int64(len(p)) }()

	if l.n <= 0 {
		return 0, errTooMuchOutput
	}

	if int64(len(p)) > l.n {
		n, err := l.dst.Write(p[:l.n])
		if err != nil {
			return n, err
		}
		return n, errTooMuchOutput
	}

	return l.dst.Write(p)
}
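
// limitedWriterExample is a minimal, hypothetical illustration of
// limitedWriter's contract (nothing calls it): a write that exceeds the cap
// keeps the data up to the cap and reports errTooMuchOutput.
func limitedWriterExample() {
	lw := &limitedWriter{dst: &bytes.Buffer{}, n: 5}
	_, err := lw.Write([]byte("hello, world"))            // only "hello" is kept
	fmt.Println(lw.dst.String(), err == errTooMuchOutput) // prints: hello true
}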
// switchWriter writes to dst1 until switchAfter is written, then it writes to dst2.
type switchWriter struct {
	dst1        io.Writer
	dst2        io.Writer
	switchAfter []byte
	buf         []byte
	found       bool
}

func (s *switchWriter) Write(p []byte) (int, error) {
	if s.found {
		return s.dst2.Write(p)
	}

	s.buf = append(s.buf, p...)
	i := bytes.Index(s.buf, s.switchAfter)
	if i == -1 {
		if len(s.buf) >= len(s.switchAfter) {
			s.buf = s.buf[len(s.buf)-len(s.switchAfter)+1:]
		}
		return s.dst1.Write(p)
	}

	s.found = true
	nAfter := len(s.buf) - (i + len(s.switchAfter))
	s.buf = nil

	n, err := s.dst1.Write(p[:len(p)-nAfter])
	if err != nil {
		return n, err
	}
	n2, err := s.dst2.Write(p[len(p)-nAfter:])
	return n + n2, err
}

func errExitCode(err error) int {
	if err == nil {
		return 0
	}
	var ee *exec.ExitError
	if errors.As(err, &ee) {
		return ee.ExitCode()
	}
	return 1
}

func sendError(w http.ResponseWriter, errMsg string) {
	sendResponse(w, &sandboxtypes.Response{Error: errMsg})
}

func sendResponse(w http.ResponseWriter, r *sandboxtypes.Response) {
	jres, err := json.MarshalIndent(r, "", " ")
	if err != nil {
		http.Error(w, "error encoding JSON", http.StatusInternalServerError)
		log.Printf("json marshal: %v", err)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Content-Length", fmt.Sprint(len(jres)))
	w.Write(jres)
}

// cleanStderr removes spam stderr lines from the beginning of x
// and returns a slice of x.
func cleanStderr(x []byte) []byte {
	i := bytes.Index(x, containedStderrHeader)
	if i == -1 {
		return x
	}
	return x[i+len(containedStderrHeader):]
}
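
// switchWriterExample is a hypothetical illustration (nothing calls it) of
// how startContainer uses switchWriter: everything up to and including the
// start message goes to dst1, the rest to dst2, even when the marker is
// split across Write calls.
func switchWriterExample() {
	var header, body bytes.Buffer
	sw := &switchWriter{switchAfter: []byte(containedStartMessage), dst1: &header, dst2: &body}
	io.WriteString(sw, "golang-gvisor-process-") // marker incomplete; buffered and routed to dst1
	io.WriteString(sw, "started\nprogram output") // completes the marker, then switches to dst2
	fmt.Printf("%q %q\n", header.String(), body.String())
	// Prints: "golang-gvisor-process-started\n" "program output"
}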