github.com/psexton/git-lfs@v2.1.1-0.20170517224304-289a18b2bc53+incompatible/test/cmd/lfstest-gitserver.go

// +build testtools

package main

import (
	"bufio"
	"bytes"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"crypto/tls"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"encoding/pem"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"math"
	"math/big"
	"net/http"
	"net/http/httptest"
	"net/textproto"
	"os"
	"os/exec"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/ThomsonReutersEikon/go-ntlm/ntlm"
)

var (
	repoDir          string
	largeObjects     = newLfsStorage()
	server           *httptest.Server
	serverTLS        *httptest.Server
	serverClientCert *httptest.Server

	// maps OIDs to content strings. Both the LFS and Storage test servers below
	// see OIDs.
	oidHandlers map[string]string

	// These magic strings tell the test LFS server to change its behavior so the
	// integration tests can check those use cases. Tests will create objects with
	// the magic strings as the contents.
	//
	//   printf "status:lfs:404" > 404.dat
	//
	contentHandlers = []string{
		"status-batch-403", "status-batch-404", "status-batch-410", "status-batch-422", "status-batch-500",
		"status-storage-403", "status-storage-404", "status-storage-410", "status-storage-422", "status-storage-500", "status-storage-503",
		"status-batch-resume-206", "batch-resume-fail-fallback", "return-expired-action", "return-expired-action-forever", "return-invalid-size",
		"object-authenticated", "storage-download-retry", "storage-upload-retry", "unknown-oid",
		"send-verify-action", "send-deprecated-links",
	}
)

func main() {
	repoDir = os.Getenv("LFSTEST_DIR")

	mux := http.NewServeMux()
	server = httptest.NewServer(mux)
	serverTLS = httptest.NewTLSServer(mux)
	serverClientCert = httptest.NewUnstartedServer(mux)

	// set up the client-cert server
	rootKey, rootCert := generateCARootCertificates()
	_, clientCertPEM, clientKeyPEM := generateClientCertificates(rootCert, rootKey)

	certPool := x509.NewCertPool()
	certPool.AddCert(rootCert)

	serverClientCert.TLS = &tls.Config{
		Certificates: []tls.Certificate{serverTLS.TLS.Certificates[0]},
		ClientAuth:   tls.RequireAndVerifyClientCert,
		ClientCAs:    certPool,
	}
	serverClientCert.StartTLS()

	ntlmSession, err := ntlm.CreateServerSession(ntlm.Version2, ntlm.ConnectionOrientedMode)
	if err != nil {
		fmt.Println("Error creating ntlm session:", err)
		os.Exit(1)
	}
	ntlmSession.SetUserInfo("ntlmuser", "ntlmpass", "NTLMDOMAIN")

	stopch := make(chan bool)

	mux.HandleFunc("/shutdown", func(w http.ResponseWriter, r *http.Request) {
		stopch <- true
	})

	mux.HandleFunc("/storage/", storageHandler)
	mux.HandleFunc("/verify", verifyHandler)
	mux.HandleFunc("/redirect307/", redirect307Handler)
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		id, ok := reqId(w)
		if !ok {
			return
		}

		if strings.Contains(r.URL.Path, "/info/lfs") {
			if !skipIfBadAuth(w, r, id, ntlmSession) {
				lfsHandler(w, r, id)
			}

			return
		}

		debug(id, "git http-backend %s %s", r.Method, r.URL)
		gitHandler(w, r)
	})

	urlname := writeTestStateFile([]byte(server.URL), "LFSTEST_URL", "lfstest-gitserver")
	defer os.RemoveAll(urlname)

	sslurlname := writeTestStateFile([]byte(serverTLS.URL), "LFSTEST_SSL_URL", "lfstest-gitserver-ssl")
	defer os.RemoveAll(sslurlname)

	clientCertUrlname := writeTestStateFile([]byte(serverClientCert.URL), "LFSTEST_CLIENT_CERT_URL", "lfstest-gitserver-ssl")
	defer os.RemoveAll(clientCertUrlname)

	block := &pem.Block{}
	block.Type = "CERTIFICATE"
	block.Bytes = serverTLS.TLS.Certificates[0].Certificate[0]
	pembytes := pem.EncodeToMemory(block)

	certname := writeTestStateFile(pembytes, "LFSTEST_CERT", "lfstest-gitserver-cert")
	defer os.RemoveAll(certname)

	cccertname := writeTestStateFile(clientCertPEM, "LFSTEST_CLIENT_CERT", "lfstest-gitserver-client-cert")
	defer os.RemoveAll(cccertname)

	ckcertname := writeTestStateFile(clientKeyPEM, "LFSTEST_CLIENT_KEY", "lfstest-gitserver-client-key")
	defer os.RemoveAll(ckcertname)

	debug("init", "server url: %s", server.URL)
	debug("init", "server tls url: %s", serverTLS.URL)
	debug("init", "server client cert url: %s", serverClientCert.URL)

	<-stopch
	debug("init", "git server done")
}

// writeTestStateFile writes contents to either the file referenced by the
// environment variable envVar, or defaultFilename if that's not set. Returns
// the filename that was used.
func writeTestStateFile(contents []byte, envVar, defaultFilename string) string {
	f := os.Getenv(envVar)
	if len(f) == 0 {
		f = defaultFilename
	}
	file, err := os.Create(f)
	if err != nil {
		log.Fatalln(err)
	}
	file.Write(contents)
	file.Close()
	return f
}

type lfsObject struct {
	Oid           string              `json:"oid,omitempty"`
	Size          int64               `json:"size,omitempty"`
	Authenticated bool                `json:"authenticated,omitempty"`
	Actions       map[string]*lfsLink `json:"actions,omitempty"`
	Links         map[string]*lfsLink `json:"_links,omitempty"`
	Err           *lfsError           `json:"error,omitempty"`
}

type lfsLink struct {
	Href      string            `json:"href"`
	Header    map[string]string `json:"header,omitempty"`
	ExpiresAt time.Time         `json:"expires_at,omitempty"`
	ExpiresIn int               `json:"expires_in,omitempty"`
}

type lfsError struct {
	Code    int    `json:"code,omitempty"`
	Message string `json:"message"`
}

func writeLFSError(w http.ResponseWriter, code int, msg string) {
	by, err := json.Marshal(&lfsError{Message: msg})
	if err != nil {
		http.Error(w, "json encoding error: "+err.Error(), 500)
		return
	}

	w.Header().Set("Content-Type", "application/vnd.git-lfs+json")
	w.WriteHeader(code)
	w.Write(by)
}

// handles any requests with "{name}.server.git/info/lfs" in the path
func lfsHandler(w http.ResponseWriter, r *http.Request, id string) {
	repo, err := repoFromLfsUrl(r.URL.Path)
	if err != nil {
		w.WriteHeader(500)
		w.Write([]byte(err.Error()))
		return
	}

	debug(id, "git lfs %s %s repo: %s", r.Method, r.URL, repo)
	w.Header().Set("Content-Type", "application/vnd.git-lfs+json")
	switch r.Method {
	case "POST":
		if strings.HasSuffix(r.URL.String(), "batch") {
			lfsBatchHandler(w, r, id, repo)
		} else {
			locksHandler(w, r, repo)
		}
	case "DELETE":
		lfsDeleteHandler(w, r, id, repo)
	case "GET":
		if strings.Contains(r.URL.String(), "/locks") {
			locksHandler(w, r, repo)
		} else {
			w.WriteHeader(404)
			w.Write([]byte("lock request"))
		}
	default:
		w.WriteHeader(405)
	}
}

func lfsUrl(repo, oid string) string {
	return server.URL + "/storage/" + oid + "?r=" + repo
}

var (
	retries   = make(map[string]uint32)
	retriesMu sync.Mutex
)

func incrementRetriesFor(api, direction, repo, oid string, check bool) (after uint32, ok bool) {
	// fmtStr formats a string like "<api>-<direction>-[check]-<retry>",
	// i.e., "legacy-upload-check-retry", or "storage-download-retry".
	var fmtStr string
	if check {
		fmtStr = "%s-%s-check-retry"
	} else {
		fmtStr = "%s-%s-retry"
	}

	if oidHandlers[oid] != fmt.Sprintf(fmtStr, api, direction) {
		return 0, false
	}

	retriesMu.Lock()
	defer retriesMu.Unlock()

	retryKey := strings.Join([]string{direction, repo, oid}, ":")

	retries[retryKey]++
	retries := retries[retryKey]

	return retries, true
}

func lfsDeleteHandler(w http.ResponseWriter, r *http.Request, id, repo string) {
	parts := strings.Split(r.URL.Path, "/")
	oid := parts[len(parts)-1]

	largeObjects.Delete(repo, oid)
	debug(id, "DELETE: %s", oid)
	w.WriteHeader(200)
}

func lfsBatchHandler(w http.ResponseWriter, r *http.Request, id, repo string) {
	checkingObject := r.Header.Get("X-Check-Object") == "1"
	if !checkingObject && repo == "batchunsupported" {
		w.WriteHeader(404)
		return
	}

	if !checkingObject && repo == "badbatch" {
		w.WriteHeader(203)
		return
	}

	if repo == "netrctest" {
		user, pass, err := extractAuth(r.Header.Get("Authorization"))
		if err != nil || (user != "netrcuser" || pass != "netrcpass") {
			w.WriteHeader(403)
			return
		}
	}

	if missingRequiredCreds(w, r, repo) {
		return
	}

	type batchReq struct {
		Transfers []string    `json:"transfers"`
		Operation string      `json:"operation"`
		Objects   []lfsObject `json:"objects"`
	}
	type batchResp struct {
		Transfer string      `json:"transfer,omitempty"`
		Objects  []lfsObject `json:"objects"`
	}

	buf := &bytes.Buffer{}
	tee := io.TeeReader(r.Body, buf)
	var objs batchReq
	err := json.NewDecoder(tee).Decode(&objs)
	io.Copy(ioutil.Discard, r.Body)
	r.Body.Close()

	debug(id, "REQUEST")
	debug(id, buf.String())

	if err != nil {
		log.Fatal(err)
	}

	res := []lfsObject{}
	testingChunked := testingChunkedTransferEncoding(r)
	testingTus := testingTusUploadInBatchReq(r)
	testingTusInterrupt := testingTusUploadInterruptedInBatchReq(r)
	testingCustomTransfer := testingCustomTransfer(r)
	var transferChoice string
	var searchForTransfer string
	if testingTus {
		searchForTransfer = "tus"
	} else if testingCustomTransfer {
		searchForTransfer = "testcustom"
	}
	if len(searchForTransfer) > 0 {
		for _, t := range objs.Transfers {
			if t == searchForTransfer {
				transferChoice = searchForTransfer
				break
			}
		}
	}
	for _, obj := range objs.Objects {
		handler := oidHandlers[obj.Oid]
		action := objs.Operation

		o := lfsObject{
			Size:    obj.Size,
			Actions: make(map[string]*lfsLink),
		}

		// Clobber the OID if told to do so.
		if handler == "unknown-oid" {
			o.Oid = "unknown-oid"
		} else {
			o.Oid = obj.Oid
		}

		exists := largeObjects.Has(repo, obj.Oid)
		addAction := true
		if action == "download" {
			if !exists {
				o.Err = &lfsError{Code: 404, Message: fmt.Sprintf("Object %v does not exist", obj.Oid)}
				addAction = false
			}
		} else {
			if exists {
				// not an error but don't add an action
				addAction = false
			}
		}

		if handler == "object-authenticated" {
			o.Authenticated = true
		}

		switch handler {
		case "status-batch-403":
			o.Err = &lfsError{Code: 403, Message: "welp"}
		case "status-batch-404":
			o.Err = &lfsError{Code: 404, Message: "welp"}
		case "status-batch-410":
			o.Err = &lfsError{Code: 410, Message: "welp"}
		case "status-batch-422":
			o.Err = &lfsError{Code: 422, Message: "welp"}
		case "status-batch-500":
			o.Err = &lfsError{Code: 500, Message: "welp"}
		default: // regular 200 response
			if handler == "return-invalid-size" {
				o.Size = -1
			}

			if handler == "send-deprecated-links" {
				o.Links = make(map[string]*lfsLink)
			}

			if addAction {
				a := &lfsLink{
					Href:   lfsUrl(repo, obj.Oid),
					Header: map[string]string{},
				}
				a = serveExpired(a, repo, handler)

				if handler == "send-deprecated-links" {
					o.Links[action] = a
				} else {
					o.Actions[action] = a
				}
			}

			if handler == "send-verify-action" {
				o.Actions["verify"] = &lfsLink{
					Href: server.URL + "/verify",
					Header: map[string]string{
						"repo": repo,
					},
				}
			}
		}

		if testingChunked && addAction {
			if handler == "send-deprecated-links" {
				o.Links[action].Header["Transfer-Encoding"] = "chunked"
			} else {
				o.Actions[action].Header["Transfer-Encoding"] = "chunked"
			}
		}
		if testingTusInterrupt && addAction {
			if handler == "send-deprecated-links" {
				o.Links[action].Header["Lfs-Tus-Interrupt"] = "true"
			} else {
				o.Actions[action].Header["Lfs-Tus-Interrupt"] = "true"
			}
		}

		res = append(res, o)
	}

	ores := batchResp{Transfer: transferChoice, Objects: res}

	by, err := json.Marshal(ores)
	if err != nil {
		log.Fatal(err)
	}

	debug(id, "RESPONSE: 200")
	debug(id, string(by))

	w.WriteHeader(200)
	w.Write(by)
}

// emu guards expiredRepos
var emu sync.Mutex

// expiredRepos is a map keyed by repository name, mapping to whether or not
// that repository has already served an expired object.
var expiredRepos = map[string]bool{}

// serveExpired marks the given repo as having served an expired object, so
// that the same repository will not serve an expired object again in the
// future (unless the handler forces it to).
func serveExpired(a *lfsLink, repo, handler string) *lfsLink {
	var (
		dur = -5 * time.Minute
		at  = time.Now().Add(dur)
	)

	if handler == "return-expired-action-forever" ||
		(handler == "return-expired-action" && canServeExpired(repo)) {

		emu.Lock()
		expiredRepos[repo] = true
		emu.Unlock()

		a.ExpiresAt = at
		return a
	}

	switch repo {
	case "expired-absolute":
		a.ExpiresAt = at
	case "expired-relative":
		a.ExpiresIn = -5
	case "expired-both":
		a.ExpiresAt = at
		a.ExpiresIn = -5
	}

	return a
}

// canServeExpired returns whether or not a repository is capable of serving an
// expired object.
// In other words, canServeExpired returns true only if the given repo has not
// yet served an expired object.
func canServeExpired(repo string) bool {
	emu.Lock()
	defer emu.Unlock()

	return !expiredRepos[repo]
}

// Persistent state across requests
var batchResumeFailFallbackStorageAttempts = 0
var tusStorageAttempts = 0

var (
	vmu           sync.Mutex
	verifyCounts  = make(map[string]int)
	verifyRetryRe = regexp.MustCompile(`verify-fail-(\d+)-times?$`)
)

func verifyHandler(w http.ResponseWriter, r *http.Request) {
	repo := r.Header.Get("repo")
	var payload struct {
		Oid  string `json:"oid"`
		Size int64  `json:"size"`
	}

	if err := json.NewDecoder(r.Body).Decode(&payload); err != nil {
		writeLFSError(w, http.StatusUnprocessableEntity, err.Error())
		return
	}

	var max int
	if matches := verifyRetryRe.FindStringSubmatch(repo); len(matches) < 2 {
		return
	} else {
		max, _ = strconv.Atoi(matches[1])
	}

	key := strings.Join([]string{repo, payload.Oid}, ":")

	vmu.Lock()
	verifyCounts[key] = verifyCounts[key] + 1
	count := verifyCounts[key]
	vmu.Unlock()

	if count < max {
		writeLFSError(w, http.StatusServiceUnavailable, fmt.Sprintf(
			"intentionally failing verify request %d (out of %d)", count, max,
		))
		return
	}
}

// handles any /storage/{oid} requests
func storageHandler(w http.ResponseWriter, r *http.Request) {
	id, ok := reqId(w)
	if !ok {
		return
	}

	repo := r.URL.Query().Get("r")
	parts := strings.Split(r.URL.Path, "/")
	oid := parts[len(parts)-1]
	if missingRequiredCreds(w, r, repo) {
		return
	}

	debug(id, "storage %s %s repo: %s", r.Method, oid, repo)
	switch r.Method {
	case "PUT":
		switch oidHandlers[oid] {
		case "status-storage-403":
			w.WriteHeader(403)
			return
		case "status-storage-404":
			w.WriteHeader(404)
			return
		case "status-storage-410":
			w.WriteHeader(410)
			return
		case "status-storage-422":
			w.WriteHeader(422)
			return
		case "status-storage-500":
			w.WriteHeader(500)
			return
		case "status-storage-503":
			writeLFSError(w, 503, "LFS is temporarily unavailable")
			return
		case "object-authenticated":
			if len(r.Header.Get("Authorization")) > 0 {
				w.WriteHeader(400)
				w.Write([]byte("Should not send authentication"))
			}
			return
		case "storage-upload-retry":
			if retries, ok := incrementRetriesFor("storage", "upload", repo, oid, false); ok && retries < 3 {
				w.WriteHeader(500)
				w.Write([]byte("malformed content"))

				return
			}
		}

		if testingChunkedTransferEncoding(r) {
			valid := false
			for _, value := range r.TransferEncoding {
				if value == "chunked" {
					valid = true
					break
				}
			}
			if !valid {
				debug(id, "Chunked transfer encoding expected")
			}
		}

		hash := sha256.New()
		buf := &bytes.Buffer{}

		io.Copy(io.MultiWriter(hash, buf), r.Body)
		oid := hex.EncodeToString(hash.Sum(nil))
		if !strings.HasSuffix(r.URL.Path, "/"+oid) {
			w.WriteHeader(403)
			return
		}

		largeObjects.Set(repo, oid, buf.Bytes())

	case "GET":
		parts := strings.Split(r.URL.Path, "/")
		oid := parts[len(parts)-1]
		statusCode := 200
		byteLimit := 0
		resumeAt := int64(0)

		if by, ok := largeObjects.Get(repo, oid); ok {
			if len(by) == len("storage-download-retry") && string(by) == "storage-download-retry" {
				if retries, ok := incrementRetriesFor("storage", "download", repo, oid, false); ok && retries < 3 {
					statusCode = 500
					by = []byte("malformed content")
				}
			} else if len(by) == len("status-batch-resume-206") && string(by) == "status-batch-resume-206" {
				// Resume if header includes range, otherwise deliberately interrupt
				if rangeHdr := r.Header.Get("Range"); rangeHdr != "" {
					regex := regexp.MustCompile(`bytes=(\d+)\-.*`)
					match := regex.FindStringSubmatch(rangeHdr)
					if match != nil && len(match) > 1 {
						statusCode = 206
						resumeAt, _ = strconv.ParseInt(match[1], 10, 32)
						w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", resumeAt, len(by), resumeAt-int64(len(by))))
					}
				} else {
					byteLimit = 10
				}
			} else if len(by) == len("batch-resume-fail-fallback") && string(by) == "batch-resume-fail-fallback" {
				// Fail any Range: request even though we said we supported it,
				// to make sure the client can fall back
				if rangeHdr := r.Header.Get("Range"); rangeHdr != "" {
					w.WriteHeader(416)
					return
				}
				if batchResumeFailFallbackStorageAttempts == 0 {
					// Truncate output on FIRST attempt to cause resume.
					// Second attempt (without range header) is the fallback; complete successfully.
					byteLimit = 8
					batchResumeFailFallbackStorageAttempts++
				}
			}
			w.WriteHeader(statusCode)
			if byteLimit > 0 {
				w.Write(by[0:byteLimit])
			} else if resumeAt > 0 {
				w.Write(by[resumeAt:])
			} else {
				w.Write(by)
			}
			return
		}

		w.WriteHeader(404)
	case "HEAD":
		// tus.io
		if !validateTusHeaders(r, id) {
			w.WriteHeader(400)
			return
		}
		parts := strings.Split(r.URL.Path, "/")
		oid := parts[len(parts)-1]
		var offset int64
		if by, ok := largeObjects.GetIncomplete(repo, oid); ok {
			offset = int64(len(by))
		}
		w.Header().Set("Upload-Offset", strconv.FormatInt(offset, 10))
		w.WriteHeader(200)
	case "PATCH":
		// tus.io
		if !validateTusHeaders(r, id) {
			w.WriteHeader(400)
			return
		}
		parts := strings.Split(r.URL.Path, "/")
		oid := parts[len(parts)-1]

		offsetHdr := r.Header.Get("Upload-Offset")
		offset, err := strconv.ParseInt(offsetHdr, 10, 64)
		if err != nil {
			log.Fatal("Unable to parse Upload-Offset header in request: ", err)
			w.WriteHeader(400)
			return
		}
		hash := sha256.New()
		buf := &bytes.Buffer{}
		out := io.MultiWriter(hash, buf)

		if by, ok := largeObjects.GetIncomplete(repo, oid); ok {
			if offset != int64(len(by)) {
				log.Fatal(fmt.Sprintf("Incorrect offset in request, got %d expected %d", offset, len(by)))
				w.WriteHeader(400)
				return
			}
			_, err := out.Write(by)
			if err != nil {
				log.Fatal("Error reading incomplete bytes from store: ", err)
				w.WriteHeader(500)
				return
			}
			largeObjects.DeleteIncomplete(repo, oid)
			debug(id, "Resuming upload of %v at byte %d", oid, offset)
		}

		// As a test, we intentionally break the upload from byte 0 by only
		// reading some bytes, then quitting with an error; this forces a resume.
		// Any offset > 0 will work OK.
		var copyErr error
		if r.Header.Get("Lfs-Tus-Interrupt") == "true" && offset == 0 {
			chdr := r.Header.Get("Content-Length")
			contentLen, err := strconv.ParseInt(chdr, 10, 64)
			if err != nil {
				log.Fatal(fmt.Sprintf("Invalid Content-Length %q", chdr))
				w.WriteHeader(400)
				return
			}
			truncated := contentLen / 3
			_, _ = io.CopyN(out, r.Body, truncated)
			r.Body.Close()
			copyErr = fmt.Errorf("Simulated copy error")
		} else {
			_, copyErr = io.Copy(out, r.Body)
		}
		if copyErr != nil {
			b := buf.Bytes()
			if len(b) > 0 {
				debug(id, "Incomplete upload of %v, %d bytes", oid, len(b))
				largeObjects.SetIncomplete(repo, oid, b)
			}
			w.WriteHeader(500)
		} else {
			checkoid := hex.EncodeToString(hash.Sum(nil))
			if checkoid != oid {
				log.Fatal(fmt.Sprintf("Incorrect oid after calculation, got %q expected %q", checkoid, oid))
				w.WriteHeader(403)
				return
			}

			b := buf.Bytes()
			largeObjects.Set(repo, oid, b)
			w.Header().Set("Upload-Offset", strconv.FormatInt(int64(len(b)), 10))
			w.WriteHeader(204)
		}

	default:
		w.WriteHeader(405)
	}
}

func validateTusHeaders(r *http.Request, id string) bool {
	if len(r.Header.Get("Tus-Resumable")) == 0 {
		debug(id, "Missing Tus-Resumable header in request")
		return false
	}
	return true
}

func gitHandler(w http.ResponseWriter, r *http.Request) {
	defer func() {
		io.Copy(ioutil.Discard, r.Body)
		r.Body.Close()
	}()

	cmd := exec.Command("git", "http-backend")
	cmd.Env = []string{
		fmt.Sprintf("GIT_PROJECT_ROOT=%s", repoDir),
		fmt.Sprintf("GIT_HTTP_EXPORT_ALL="),
		fmt.Sprintf("PATH_INFO=%s", r.URL.Path),
		fmt.Sprintf("QUERY_STRING=%s", r.URL.RawQuery),
		fmt.Sprintf("REQUEST_METHOD=%s", r.Method),
		fmt.Sprintf("CONTENT_TYPE=%s", r.Header.Get("Content-Type")),
	}

	buffer := &bytes.Buffer{}
	cmd.Stdin = r.Body
	cmd.Stdout = buffer
	cmd.Stderr = os.Stderr

	if err := cmd.Run(); err != nil {
		log.Fatal(err)
	}

	text := textproto.NewReader(bufio.NewReader(buffer))

	code, _, _ := text.ReadCodeLine(-1)

	if code != 0 {
		w.WriteHeader(code)
	}

	headers, _ := text.ReadMIMEHeader()
	head := w.Header()
	for key, values := range headers {
		for _, value := range values {
			head.Add(key, value)
		}
	}

	io.Copy(w, text.R)
}

func redirect307Handler(w http.ResponseWriter, r *http.Request) {
	id, ok := reqId(w)
	if !ok {
		return
	}

	// Send a redirect to info/lfs
	// Make it either absolute or relative depending on subpath
	parts := strings.Split(r.URL.Path, "/")
	// first element is always blank since rooted
	var redirectTo string
	if parts[2] == "rel" {
		redirectTo = "/" + strings.Join(parts[3:], "/")
	} else if parts[2] == "abs" {
		redirectTo = server.URL + "/" + strings.Join(parts[3:], "/")
	} else {
		debug(id, "Invalid URL for redirect: %v", r.URL)
		w.WriteHeader(404)
		return
	}
	w.Header().Set("Location", redirectTo)
	w.WriteHeader(307)
}

type User struct {
	Name string `json:"name"`
}

type Lock struct {
	Id       string    `json:"id"`
	Path     string    `json:"path"`
	Owner    User      `json:"owner"`
	LockedAt time.Time `json:"locked_at"`
}

type LockRequest struct {
	Path string `json:"path"`
}

type LockResponse struct {
	Lock    *Lock  `json:"lock"`
	Message string `json:"message,omitempty"`
}

type UnlockRequest struct {
	Force bool `json:"force"`
}

type UnlockResponse struct {
	Lock    *Lock  `json:"lock"`
	Message string `json:"message,omitempty"`
}

type LockList struct {
	Locks      []Lock `json:"locks"`
	NextCursor string `json:"next_cursor,omitempty"`
	Message    string `json:"message,omitempty"`
}

type VerifiableLockRequest struct {
	Cursor string `json:"cursor,omitempty"`
	Limit  int    `json:"limit,omitempty"`
}

type VerifiableLockList struct {
	Ours       []Lock `json:"ours"`
	Theirs     []Lock `json:"theirs"`
	NextCursor string `json:"next_cursor,omitempty"`
	Message    string `json:"message,omitempty"`
}

var (
	lmu       sync.RWMutex
	repoLocks = map[string][]Lock{}
)

func addLocks(repo string, l ...Lock) {
	lmu.Lock()
	defer lmu.Unlock()
	repoLocks[repo] = append(repoLocks[repo], l...)
	sort.Sort(LocksByCreatedAt(repoLocks[repo]))
}

func getLocks(repo string) []Lock {
	lmu.RLock()
	defer lmu.RUnlock()

	locks := repoLocks[repo]
	cp := make([]Lock, len(locks))
	for i, l := range locks {
		cp[i] = l
	}

	return cp
}

func getFilteredLocks(repo, path, cursor, limit string) ([]Lock, string, error) {
	locks := getLocks(repo)
	if cursor != "" {
		lastSeen := -1
		for i, l := range locks {
			if l.Id == cursor {
				lastSeen = i
				break
			}
		}

		if lastSeen > -1 {
			locks = locks[lastSeen:]
		} else {
			return nil, "", fmt.Errorf("cursor (%s) not found", cursor)
		}
	}

	if path != "" {
		var filtered []Lock
		for _, l := range locks {
			if l.Path == path {
				filtered = append(filtered, l)
			}
		}

		locks = filtered
	}

	if limit != "" {
		size, err := strconv.Atoi(limit)
		if err != nil {
			return nil, "", errors.New("unable to parse limit amount")
		}

		size = int(math.Min(float64(len(locks)), 3))
		if size < 0 {
			return nil, "", nil
		}

		if size+1 < len(locks) {
			return locks[:size], locks[size+1].Id, nil
		}
	}

	return locks, "", nil
}

func delLock(repo string, id string) *Lock {
	// This mutates repoLocks, so take the write lock, not the read lock.
	lmu.Lock()
	defer lmu.Unlock()

	var deleted *Lock
	locks := make([]Lock, 0, len(repoLocks[repo]))
	for _, l := range repoLocks[repo] {
		if l.Id == id {
			// copy so the returned pointer does not track the loop variable
			l := l
			deleted = &l
			continue
		}
		locks = append(locks, l)
	}
	repoLocks[repo] = locks
	return deleted
}

type LocksByCreatedAt []Lock

func (c LocksByCreatedAt) Len() int           { return len(c) }
func (c LocksByCreatedAt) Less(i, j int) bool { return c[i].LockedAt.Before(c[j].LockedAt) }
func (c LocksByCreatedAt) Swap(i, j int)      { c[i], c[j] = c[j], c[i] }

var (
	lockRe   = regexp.MustCompile(`/locks/?$`)
	unlockRe = regexp.MustCompile(`locks/([^/]+)/unlock\z`)
)

func locksHandler(w http.ResponseWriter, r *http.Request, repo string) {
	dec := json.NewDecoder(r.Body)
	enc := json.NewEncoder(w)

	switch r.Method {
	case "GET":
		if !lockRe.MatchString(r.URL.Path) {
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(http.StatusNotFound)
			w.Write([]byte(`{"message":"unknown path: ` + r.URL.Path + `"}`))
			return
		}

		if err := r.ParseForm(); err != nil {
			http.Error(w, "could not parse form values", http.StatusInternalServerError)
			return
		}

		ll := &LockList{}
		w.Header().Set("Content-Type", "application/json")
		locks, nextCursor, err := getFilteredLocks(repo,
			r.FormValue("path"),
			r.FormValue("cursor"),
			r.FormValue("limit"))

		if err != nil {
			ll.Message = err.Error()
		} else {
			ll.Locks = locks
			ll.NextCursor = nextCursor
		}

		enc.Encode(ll)
		return
	case "POST":
		w.Header().Set("Content-Type", "application/json")
		if strings.HasSuffix(r.URL.Path, "unlock") {
			var unlockRequest UnlockRequest

			var lockId string
			if matches := unlockRe.FindStringSubmatch(r.URL.Path); len(matches) > 1 {
				lockId = matches[1]
			}

			if len(lockId) == 0 {
				enc.Encode(&UnlockResponse{Message: "Invalid lock"})
			}

			if err := dec.Decode(&unlockRequest); err != nil {
				enc.Encode(&UnlockResponse{Message: err.Error()})
				return
			}

			if l := delLock(repo, lockId); l != nil {
				enc.Encode(&UnlockResponse{Lock: l})
			} else {
				enc.Encode(&UnlockResponse{Message: "unable to find lock"})
			}
			return
		}

		if strings.HasSuffix(r.URL.Path, "/locks/verify") {
			if strings.HasSuffix(repo, "verify-5xx") {
				w.WriteHeader(500)
				return
			}
			if strings.HasSuffix(repo, "verify-501") {
				w.WriteHeader(501)
				return
			}
			if strings.HasSuffix(repo, "verify-403") {
				w.WriteHeader(403)
				return
			}

			switch repo {
			case "pre_push_locks_verify_404":
				w.WriteHeader(http.StatusNotFound)
				w.Write([]byte(`{"message":"pre_push_locks_verify_404"}`))
				return
			case "pre_push_locks_verify_410":
				w.WriteHeader(http.StatusGone)
				w.Write([]byte(`{"message":"pre_push_locks_verify_410"}`))
				return
			}

			reqBody := &VerifiableLockRequest{}
			if err := dec.Decode(reqBody); err != nil {
				w.WriteHeader(http.StatusBadRequest)
				enc.Encode(struct {
					Message string `json:"message"`
				}{"json decode error: " + err.Error()})
				return
			}

			ll := &VerifiableLockList{}
			locks, nextCursor, err := getFilteredLocks(repo, "",
				reqBody.Cursor,
				strconv.Itoa(reqBody.Limit))
			if err != nil {
				ll.Message = err.Error()
			} else {
				ll.NextCursor = nextCursor

				for _, l := range locks {
					if strings.Contains(l.Path, "theirs") {
						ll.Theirs = append(ll.Theirs, l)
					} else {
						ll.Ours = append(ll.Ours, l)
					}
				}
			}

			enc.Encode(ll)
			return
		}

		if strings.HasSuffix(r.URL.Path, "/locks") {
			var lockRequest LockRequest
			if err := dec.Decode(&lockRequest); err != nil {
				enc.Encode(&LockResponse{Message: err.Error()})
			}

			for _, l := range getLocks(repo) {
				if l.Path == lockRequest.Path {
					enc.Encode(&LockResponse{Message: "lock already created"})
					return
				}
			}

			var id [20]byte
			rand.Read(id[:])

			lock := &Lock{
				Id:       fmt.Sprintf("%x", id[:]),
				Path:     lockRequest.Path,
				Owner:    User{Name: "Git LFS Tests"},
				LockedAt: time.Now(),
			}

			addLocks(repo, *lock)

			// TODO(taylor): commit_needed case
			// TODO(taylor): err case

			enc.Encode(&LockResponse{
				Lock: lock,
			})
			return
		}
	}

	http.NotFound(w, r)
}

func missingRequiredCreds(w http.ResponseWriter, r *http.Request, repo string) bool {
	if repo != "requirecreds" {
		return false
	}

	auth := r.Header.Get("Authorization")
	user, pass, err := extractAuth(auth)
	if err != nil {
		writeLFSError(w, 403, err.Error())
		return true
	}

	if user != "requirecreds" || pass != "pass" {
		writeLFSError(w, 403, fmt.Sprintf("Got: '%s' => '%s' : '%s'", auth, user, pass))
		return true
	}

	return false
}

func testingChunkedTransferEncoding(r *http.Request) bool {
	return strings.HasPrefix(r.URL.String(), "/test-chunked-transfer-encoding")
}

func testingTusUploadInBatchReq(r *http.Request) bool {
	return strings.HasPrefix(r.URL.String(), "/test-tus-upload")
}

func testingTusUploadInterruptedInBatchReq(r *http.Request) bool {
	return strings.HasPrefix(r.URL.String(), "/test-tus-upload-interrupt")
}

func testingCustomTransfer(r *http.Request) bool {
	return strings.HasPrefix(r.URL.String(), "/test-custom-transfer")
}

var lfsUrlRE = regexp.MustCompile(`\A/?([^/]+)/info/lfs`)

func repoFromLfsUrl(urlpath string) (string, error) {
	matches := lfsUrlRE.FindStringSubmatch(urlpath)
	if len(matches) != 2 {
		return "", fmt.Errorf("LFS url '%s' does not match %v", urlpath, lfsUrlRE)
	}

	repo := matches[1]
	if strings.HasSuffix(repo, ".git") {
		return repo[0 : len(repo)-4], nil
	}
	return repo, nil
}

type lfsStorage struct {
	objects    map[string]map[string][]byte
	incomplete map[string]map[string][]byte
	mutex      *sync.Mutex
}

func (s *lfsStorage) Get(repo, oid string) ([]byte, bool) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	repoObjects, ok := s.objects[repo]
	if !ok {
		return nil, ok
	}

	by, ok := repoObjects[oid]
	return by, ok
}

func (s *lfsStorage) Has(repo, oid string) bool {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	repoObjects, ok := s.objects[repo]
	if !ok {
		return false
	}

	_, ok = repoObjects[oid]
	return ok
}

func (s *lfsStorage) Set(repo, oid string, by []byte) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	repoObjects, ok := s.objects[repo]
	if !ok {
		repoObjects = make(map[string][]byte)
		s.objects[repo] = repoObjects
	}
	repoObjects[oid] = by
}

func (s *lfsStorage) Delete(repo, oid string) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	repoObjects, ok := s.objects[repo]
	if ok {
		delete(repoObjects, oid)
	}
}

func (s *lfsStorage) GetIncomplete(repo, oid string) ([]byte, bool) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	repoObjects, ok := s.incomplete[repo]
	if !ok {
		return nil, ok
	}

	by, ok := repoObjects[oid]
	return by, ok
}

func (s *lfsStorage) SetIncomplete(repo, oid string, by []byte) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	repoObjects, ok := s.incomplete[repo]
	if !ok {
		repoObjects = make(map[string][]byte)
		s.incomplete[repo] = repoObjects
	}
	repoObjects[oid] = by
}

func (s *lfsStorage) DeleteIncomplete(repo, oid string) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	repoObjects, ok := s.incomplete[repo]
	if ok {
		delete(repoObjects, oid)
	}
}

func newLfsStorage() *lfsStorage {
	return &lfsStorage{
		objects:    make(map[string]map[string][]byte),
		incomplete: make(map[string]map[string][]byte),
		mutex:      &sync.Mutex{},
	}
}

func extractAuth(auth string) (string, string, error) {
	if strings.HasPrefix(auth, "Basic ") {
		decodeBy, err := base64.StdEncoding.DecodeString(auth[6:])
		decoded := string(decodeBy)

		if err != nil {
			return "", "", err
		}

		parts := strings.SplitN(decoded, ":", 2)
		if len(parts) == 2 {
			return parts[0], parts[1], nil
		}
		return "", "", nil
	}

	return "", "", nil
}

func skipIfBadAuth(w http.ResponseWriter, r *http.Request, id string, ntlmSession ntlm.ServerSession) bool {
	auth := r.Header.Get("Authorization")
	if strings.Contains(r.URL.Path, "ntlm") {
		return false
	}

	if auth == "" {
		w.WriteHeader(401)
		return true
	}

	user, pass, err := extractAuth(auth)
	if err != nil {
		w.WriteHeader(403)
		debug(id, "Error decoding auth: %s", err)
		return true
	}

	switch user {
	case "user":
		if pass == "pass" {
			return false
		}
	case "netrcuser", "requirecreds":
		return false
	case "path":
		if strings.HasPrefix(r.URL.Path, "/"+pass) {
			return false
		}
		debug(id, "auth attempt against: %q", r.URL.Path)
	}

	w.WriteHeader(403)
	debug(id, "Bad auth: %q", auth)
	return true
}

func handleNTLM(w http.ResponseWriter, r *http.Request, authHeader string, session ntlm.ServerSession) {
	if strings.HasPrefix(strings.ToUpper(authHeader), "BASIC ") {
		authHeader = ""
	}

	switch authHeader {
	case "":
		w.Header().Set("Www-Authenticate", "ntlm")
		w.WriteHeader(401)

	// ntlmNegotiateMessage from httputil pkg
	case "NTLM TlRMTVNTUAABAAAAB7IIogwADAAzAAAACwALACgAAAAKAAAoAAAAD1dJTExISS1NQUlOTk9SVEhBTUVSSUNB":
		ch, err := session.GenerateChallengeMessage()
		if err != nil {
			writeLFSError(w, 500, err.Error())
			return
		}

		chMsg := base64.StdEncoding.EncodeToString(ch.Bytes())
		w.Header().Set("Www-Authenticate", "ntlm "+chMsg)
		w.WriteHeader(401)

	default:
		if !strings.HasPrefix(strings.ToUpper(authHeader), "NTLM ") {
			writeLFSError(w, 500, "bad authorization header: "+authHeader)
			return
		}

		auth := authHeader[5:] // strip "ntlm " prefix
		val, err := base64.StdEncoding.DecodeString(auth)
		if err != nil {
			writeLFSError(w, 500, "base64 decode error: "+err.Error())
			return
		}

		_, err = ntlm.ParseAuthenticateMessage(val, 2)
		if err != nil {
			writeLFSError(w, 500, "auth parse error: "+err.Error())
			return
		}
	}
}

func init() {
	oidHandlers = make(map[string]string)
	for _, content := range contentHandlers {
		h := sha256.New()
		h.Write([]byte(content))
		oidHandlers[hex.EncodeToString(h.Sum(nil))] = content
	}
}

func debug(reqid, msg string, args ...interface{}) {
	fullargs := make([]interface{}, len(args)+1)
	fullargs[0] = reqid
	for i, a := range args {
		fullargs[i+1] = a
	}
	log.Printf("[%s] "+msg+"\n", fullargs...)
}

func reqId(w http.ResponseWriter) (string, bool) {
	b := make([]byte, 16)
	_, err := rand.Read(b)
	if err != nil {
		http.Error(w, "error generating id: "+err.Error(), 500)
		return "", false
	}
	return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]), true
}

// https://ericchiang.github.io/post/go-tls/
func generateCARootCertificates() (rootKey *rsa.PrivateKey, rootCert *x509.Certificate) {

	// generate a new key-pair
	rootKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatalf("generating random key: %v", err)
	}

	rootCertTmpl, err := CertTemplate()
	if err != nil {
		log.Fatalf("creating cert template: %v", err)
	}
	// describe what the certificate will be used for
	rootCertTmpl.IsCA = true
	rootCertTmpl.KeyUsage = x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature
	rootCertTmpl.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}
	// rootCertTmpl.IPAddresses = []net.IP{net.ParseIP("127.0.0.1")}

	rootCert, _, err = CreateCert(rootCertTmpl, rootCertTmpl, &rootKey.PublicKey, rootKey)

	return
}

func generateClientCertificates(rootCert *x509.Certificate, rootKey interface{}) (clientKey *rsa.PrivateKey, clientCertPEM []byte, clientKeyPEM []byte) {

	// create a key-pair for the client
	clientKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatalf("generating random key: %v", err)
	}

	// create a template for the client
	clientCertTmpl, err1 := CertTemplate()
	if err1 != nil {
		log.Fatalf("creating cert template: %v", err1)
	}
	clientCertTmpl.KeyUsage = x509.KeyUsageDigitalSignature
	clientCertTmpl.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}

	// the root cert signs the cert by again providing its private key
	_, clientCertPEM, err2 := CreateCert(clientCertTmpl, rootCert, &clientKey.PublicKey, rootKey)
	if err2 != nil {
		log.Fatalf("error creating cert: %v", err2)
	}

	// encode and load the cert and private key for the client
	clientKeyPEM = pem.EncodeToMemory(&pem.Block{
		Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(clientKey),
	})

	return
}

// helper function to create a cert template with a serial number and other required fields
func CertTemplate() (*x509.Certificate, error) {
	// generate a random serial number (a real cert authority would have some logic behind this)
	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
	serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
	if err != nil {
		return nil, errors.New("failed to generate serial number: " + err.Error())
	}

	tmpl := x509.Certificate{
		SerialNumber:          serialNumber,
		Subject:               pkix.Name{Organization: []string{"Yhat, Inc."}},
		SignatureAlgorithm:    x509.SHA256WithRSA,
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(time.Hour), // valid for an hour
		BasicConstraintsValid: true,
	}
	return &tmpl, nil
}

func CreateCert(template, parent *x509.Certificate, pub interface{}, parentPriv interface{}) (
	cert *x509.Certificate, certPEM []byte, err error) {

	certDER, err := x509.CreateCertificate(rand.Reader, template, parent, pub, parentPriv)
	if err != nil {
		return
	}
	// parse the resulting certificate so we can use it again
	cert, err = x509.ParseCertificate(certDER)
	if err != nil {
		return
	}
	// PEM encode the certificate (this is a standard TLS encoding)
	b := pem.Block{Type: "CERTIFICATE", Bytes: certDER}
	certPEM = pem.EncodeToMemory(&b)
	return
}
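
// Example (illustrative sketch only, not used by the test suite): how an
// integration test would typically talk to this server. It reads the URL that
// writeTestStateFile wrote (the default file name "lfstest-gitserver" and the
// repo name "testrepo" are assumptions for this sketch, not part of the
// server's contract) and issues an LFS batch request, which is routed through
// lfsHandler and lfsBatchHandler above using the "user"/"pass" credentials
// accepted by skipIfBadAuth.
//
//	urlBytes, _ := ioutil.ReadFile("lfstest-gitserver")
//	payload := `{"operation":"download","transfers":["basic"],"objects":[{"oid":"abc","size":3}]}`
//	req, _ := http.NewRequest("POST", string(urlBytes)+"/testrepo.git/info/lfs/objects/batch",
//		strings.NewReader(payload))
//	req.SetBasicAuth("user", "pass")
//	req.Header.Set("Content-Type", "application/vnd.git-lfs+json")
//	res, _ := http.DefaultClient.Do(req)
//	// res.Body now contains the batch response built by lfsBatchHandler.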