github.com/minio/minio@v0.0.0-20240328213742-3f72439b8a27/cmd/test-utils_test.go

// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"archive/zip"
	"bufio"
	"bytes"
	"context"
	"crypto/ecdsa"
	"crypto/hmac"
	crand "crypto/rand"
	"crypto/rsa"
	"crypto/sha1"
	"crypto/tls"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/base64"
	"encoding/hex"
	"encoding/pem"
	"encoding/xml"
	"errors"
	"flag"
	"fmt"
	"io"
	"math/big"
	"math/rand"
	"net"
	"net/http"
	"net/http/httptest"
	"net/url"
	"os"
	"path"
	"path/filepath"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"
	"unsafe"

	"github.com/fatih/color"

	"github.com/minio/minio-go/v7/pkg/s3utils"
	"github.com/minio/minio-go/v7/pkg/signer"
	"github.com/minio/minio/internal/auth"
	"github.com/minio/minio/internal/config"
	"github.com/minio/minio/internal/crypto"
	"github.com/minio/minio/internal/hash"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/mux"
	"github.com/minio/pkg/v2/policy"
)

// TestMain to set up global env.
func TestMain(m *testing.M) {
	flag.Parse()

	// set to 'true' when testing is invoked
	globalIsTesting = true

	globalIsCICD = globalIsTesting

	globalActiveCred = auth.Credentials{
		AccessKey: auth.DefaultAccessKey,
		SecretKey: auth.DefaultSecretKey,
	}

	// disable ENVs which interfere with tests.
	for _, env := range []string{
		crypto.EnvKMSAutoEncryption,
		config.EnvAccessKey,
		config.EnvSecretKey,
		config.EnvRootUser,
		config.EnvRootPassword,
	} {
		os.Unsetenv(env)
	}

	// Set as non-distributed.
	globalIsDistErasure = false

	// Disable printing console messages during tests.
	color.Output = io.Discard
	// Disable Error logging in testing.
	logger.DisableErrorLog = true

	// Uncomment the following line to see trace logs during unit tests.
	// logger.AddTarget(console.New())

	// Set system resources to maximum.
	setMaxResources()

	// Initialize globalConsoleSys system
	globalConsoleSys = NewConsoleLogger(context.Background())

	globalInternodeTransport = NewInternodeHTTPTransport(0)()

	initHelp()

	resetTestGlobals()

	globalIsCICD = true

	os.Exit(m.Run())
}

// concurrency level for certain parallel tests.
const testConcurrencyLevel = 10

const iso8601TimeFormat = "2006-01-02T15:04:05.000Z"

// Excerpts from @lsegal - https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258
//
// User-Agent:
//
// This is ignored from signing because signing this causes problems with generating pre-signed URLs
// (that are executed by other agents) or when customers pass requests through proxies, which may
// modify the user-agent.
//
// Authorization:
//
// Is skipped for obvious reasons
var ignoredHeaders = map[string]bool{
	"Authorization": true,
	"User-Agent":    true,
}

// Headers to ignore in streaming v4
var ignoredStreamingHeaders = map[string]bool{
	"Authorization": true,
	"Content-Type":  true,
	"Content-Md5":   true,
	"User-Agent":    true,
}

// calculateSignedChunkLength - calculates the length of chunk metadata
func calculateSignedChunkLength(chunkDataSize int64) int64 {
	return int64(len(fmt.Sprintf("%x", chunkDataSize))) +
		17 + // ";chunk-signature="
		64 + // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2"
		2 + // CRLF
		chunkDataSize +
		2 // CRLF
}

func mustGetPutObjReader(t TestErrHandler, data io.Reader, size int64, md5hex, sha256hex string) *PutObjReader {
	hr, err := hash.NewReader(context.Background(), data, size, md5hex, sha256hex, size)
	if err != nil {
		t.Fatal(err)
	}
	return NewPutObjReader(hr)
}

// calculateStreamContentLength - calculates the length of the overall stream (data + metadata)
func calculateStreamContentLength(dataLen, chunkSize int64) int64 {
	if dataLen <= 0 {
		return 0
	}
	chunksCount := dataLen / chunkSize
	remainingBytes := dataLen % chunkSize
	var streamLen int64
	streamLen += chunksCount * calculateSignedChunkLength(chunkSize)
	if remainingBytes > 0 {
		streamLen += calculateSignedChunkLength(remainingBytes)
	}
	streamLen += calculateSignedChunkLength(0)
	return streamLen
}

func prepareFS(ctx context.Context) (ObjectLayer, string, error) {
	nDisks := 1
	fsDirs, err := getRandomDisks(nDisks)
	if err != nil {
		return nil, "", err
	}
	obj, _, err := initObjectLayer(context.Background(), mustGetPoolEndpoints(0, fsDirs...))
	if err != nil {
		return nil, "", err
	}

	initAllSubsystems(ctx)

	globalIAMSys.Init(ctx, obj, globalEtcdClient, 2*time.Second)
	return obj, fsDirs[0], nil
}

func prepareErasureSets32(ctx context.Context) (ObjectLayer, []string, error) {
	return prepareErasure(ctx, 32)
}

func prepareErasure(ctx context.Context, nDisks int) (ObjectLayer, []string, error) {
	fsDirs, err := getRandomDisks(nDisks)
	if err != nil {
		return nil, nil, err
	}
	obj, _, err := initObjectLayer(ctx, mustGetPoolEndpoints(0, fsDirs...))
	if err != nil {
		removeRoots(fsDirs)
		return nil, nil, err
	}

	return obj, fsDirs, nil
}

func prepareErasure16(ctx context.Context) (ObjectLayer, []string, error) {
	return prepareErasure(ctx, 16)
}
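
// The helpers above encode the aws-chunked framing arithmetic used throughout the
// streaming-signature tests. The function below is a small illustrative sketch
// (hypothetical name, not referenced by any test in this file) showing how the
// overall content-length of a streamed upload is derived from the payload size
// and the chosen chunk size.
func exampleStreamContentLength() {
	payload := int64(65 * 1024)   // hypothetical decoded payload size
	chunkSize := int64(64 * 1024) // hypothetical chunk size
	// Every chunk carries "<hex-size>;chunk-signature=<64 hex chars>\r\n<data>\r\n",
	// and the stream is terminated by a zero-sized chunk with the same framing.
	total := calculateStreamContentLength(payload, chunkSize)
	fmt.Printf("content-length for a %d byte payload in %d byte chunks: %d\n", payload, chunkSize, total)
}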
// TestErrHandler - Go testing.T satisfies this interface.
// This makes it easy to run the TestServer from any of the tests.
// Using this interface, functionalities to be used in tests can be
// made generalized, and can be integrated in benchmarks/unit tests/go check suite tests.
type TestErrHandler interface {
	testing.TB
}

const (
	// ErasureSDStr is the string which is used as notation for Single node ObjectLayer in the unit tests.
	ErasureSDStr string = "ErasureSD"

	// ErasureTestStr is the string which is used as notation for Erasure ObjectLayer in the unit tests.
	ErasureTestStr string = "Erasure"

	// ErasureSetsTestStr is the string which is used as notation for Erasure sets object layer in the unit tests.
	ErasureSetsTestStr string = "ErasureSet"
)

const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
const (
	letterIdxBits = 6                    // 6 bits to represent a letter index
	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
)

// Random number state.
// We generate random temporary file names so that there's a good
// chance the file doesn't exist yet.
var (
	randN  uint32
	randmu sync.Mutex
)

// Temp files created in default Tmp dir
var globalTestTmpDir = os.TempDir()

// reseed - returns a new seed every time the function is called.
func reseed() uint32 {
	return uint32(time.Now().UnixNano() + int64(os.Getpid()))
}

// nextSuffix - provides a new unique suffix every time the function is called.
func nextSuffix() string {
	randmu.Lock()
	r := randN
	// Initial seed required, generate one.
	if r == 0 {
		r = reseed()
	}
	// constants from Numerical Recipes
	r = r*1664525 + 1013904223
	randN = r
	randmu.Unlock()
	return strconv.Itoa(int(1e9 + r%1e9))[1:]
}

// isSameType - compares two object types via reflect.TypeOf
func isSameType(obj1, obj2 interface{}) bool {
	return reflect.TypeOf(obj1) == reflect.TypeOf(obj2)
}

// TestServer encapsulates an instantiation of a MinIO instance with a temporary backend.
// Example usage:
//
//	s := StartTestServer(t,"Erasure")
//	defer s.Stop()
type TestServer struct {
	Root         string
	Disks        EndpointServerPools
	AccessKey    string
	SecretKey    string
	Server       *httptest.Server
	Obj          ObjectLayer
	cancel       context.CancelFunc
	rawDiskPaths []string
}

// UnstartedTestServer - Configures a temp FS/Erasure backend,
// initializes the endpoints and configures the test server.
// The server should be started using the Start() method.
func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer {
	ctx, cancel := context.WithCancel(context.Background())
	// create an instance of TestServer.
	testServer := TestServer{cancel: cancel}
	// return FS/Erasure object layer and temp backend.
	objLayer, disks, err := prepareTestBackend(ctx, instanceType)
	if err != nil {
		t.Fatal(err)
	}

	// set new server configuration.
	if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
		t.Fatalf("%s", err)
	}

	return initTestServerWithBackend(ctx, t, testServer, objLayer, disks)
}

// initializes a test server with the given object layer and disks.
func initTestServerWithBackend(ctx context.Context, t TestErrHandler, testServer TestServer, objLayer ObjectLayer, disks []string) TestServer {
	// Test Server needs to start before formatting of disks.
	// Get credential.
	credentials := globalActiveCred

	testServer.Obj = objLayer
	testServer.rawDiskPaths = disks
	testServer.Disks = mustGetPoolEndpoints(0, disks...)
	testServer.AccessKey = credentials.AccessKey
	testServer.SecretKey = credentials.SecretKey

	httpHandler, err := configureServerHandler(testServer.Disks)
	if err != nil {
		t.Fatalf("Failed to configure one of the RPC services <ERROR> %s", err)
	}

	// Run TestServer.
	testServer.Server = httptest.NewUnstartedServer(setCriticalErrorHandler(corsHandler(httpHandler)))

	globalObjLayerMutex.Lock()
	globalObjectAPI = objLayer
	globalObjLayerMutex.Unlock()

	// initialize peer rpc
	host, port := mustSplitHostPort(testServer.Server.Listener.Addr().String())
	globalMinioHost = host
	globalMinioPort = port
	globalMinioAddr = getEndpointsLocalAddr(testServer.Disks)

	initAllSubsystems(ctx)

	globalEtcdClient = nil

	initConfigSubsystem(ctx, objLayer)

	globalIAMSys.Init(ctx, objLayer, globalEtcdClient, 2*time.Second)

	globalEventNotifier.InitBucketTargets(ctx, objLayer)

	return testServer
}

// testServerCertPEM and testServerKeyPEM are generated by
// https://golang.org/src/crypto/tls/generate_cert.go
// $ go run generate_cert.go -ca --host 127.0.0.1
// The generated certificate contains IP SAN, that way we don't need
// to enable InsecureSkipVerify in TLS config

// Starts the test server and returns the TestServer with TLS configured instance.
func StartTestTLSServer(t TestErrHandler, instanceType string, cert, key []byte) TestServer {
	// Fetch TLS key and pem files from test-data/ directory.
	//	dir, _ := os.Getwd()
	//	testDataDir := filepath.Join(filepath.Dir(dir), "test-data")
	//
	//	pemFile := filepath.Join(testDataDir, "server.pem")
	//	keyFile := filepath.Join(testDataDir, "server.key")
	cer, err := tls.X509KeyPair(cert, key)
	if err != nil {
		t.Fatalf("Failed to load certificate: %v", err)
	}
	config := &tls.Config{Certificates: []tls.Certificate{cer}}

	testServer := UnstartedTestServer(t, instanceType)
	testServer.Server.TLS = config
	testServer.Server.StartTLS()
	return testServer
}

// Starts the test server and returns the TestServer instance.
func StartTestServer(t TestErrHandler, instanceType string) TestServer {
	// create an instance of TestServer.
	testServer := UnstartedTestServer(t, instanceType)
	testServer.Server.Start()
	return testServer
}

// Sets the global config path to empty string.
func resetGlobalConfigPath() {
	globalConfigDir = &ConfigDir{path: ""}
}

// sets globalObjectAPI to `nil`.
func resetGlobalObjectAPI() {
	globalObjLayerMutex.Lock()
	globalObjectAPI = nil
	globalObjLayerMutex.Unlock()
}

// reset the value of the Global server config.
// set it to `nil`.
func resetGlobalConfig() {
	// hold the mutex lock before a new config is assigned.
	globalServerConfigMu.Lock()
	// Save the loaded config globally.
	globalServerConfig = nil
	globalServerConfigMu.Unlock()
}

func resetGlobalEndpoints() {
	globalEndpoints = EndpointServerPools{}
}

func resetGlobalIsErasure() {
	globalIsErasure = false
}

// reset global heal state
func resetGlobalHealState() {
	// Init global heal state
	if globalAllHealState == nil {
		globalAllHealState = newHealState(GlobalContext, false)
	} else {
		globalAllHealState.Lock()
		for _, v := range globalAllHealState.healSeqMap {
			if !v.hasEnded() {
				v.stop()
			}
		}
		globalAllHealState.Unlock()
	}

	// Init background heal state
	if globalBackgroundHealState == nil {
		globalBackgroundHealState = newHealState(GlobalContext, false)
	} else {
		globalBackgroundHealState.Lock()
		for _, v := range globalBackgroundHealState.healSeqMap {
			if !v.hasEnded() {
				v.stop()
			}
		}
		globalBackgroundHealState.Unlock()
	}
}

// sets globalIAMSys to `nil`.
func resetGlobalIAMSys() {
	globalIAMSys = nil
}

// Resets all the globals modified in tests.
// Resetting ensures that the changes made to globals by one test don't affect others.
func resetTestGlobals() {
	// set globalObjectAPI to `nil`.
	resetGlobalObjectAPI()
	// Reset config path set.
	resetGlobalConfigPath()
	// Reset Global server config.
	resetGlobalConfig()
	// Reset global endpoints.
	resetGlobalEndpoints()
	// Reset global isErasure flag.
	resetGlobalIsErasure()
	// Reset global heal state
	resetGlobalHealState()
	// Reset globalIAMSys to `nil`
	resetGlobalIAMSys()
}

// Configure the server for the test run.
func newTestConfig(bucketLocation string, obj ObjectLayer) (err error) {
	// Initialize server config.
	if err = newSrvConfig(obj); err != nil {
		return err
	}

	// Set a default region.
	config.SetRegion(globalServerConfig, bucketLocation)

	applyDynamicConfigForSubSys(context.Background(), obj, globalServerConfig, config.StorageClassSubSys)

	// Save config.
	return saveServerConfig(context.Background(), obj, globalServerConfig)
}

// Deleting the temporary backend and stopping the server.
func (testServer TestServer) Stop() {
	testServer.cancel()
	testServer.Server.Close()
	testServer.Obj.Shutdown(context.Background())
	os.RemoveAll(testServer.Root)
	for _, ep := range testServer.Disks {
		for _, disk := range ep.Endpoints {
			os.RemoveAll(disk.Path)
		}
	}
}

// Truncate request to simulate unexpected EOF for a request signed using streaming signature v4.
func truncateChunkByHalfSigv4(req *http.Request) (*http.Request, error) {
	bufReader := bufio.NewReader(req.Body)
	hexChunkSize, chunkSignature, err := readChunkLine(bufReader)
	if err != nil {
		return nil, err
	}

	newChunkHdr := []byte(fmt.Sprintf("%s"+s3ChunkSignatureStr+"%s\r\n",
		hexChunkSize, chunkSignature))
	newChunk, err := io.ReadAll(bufReader)
	if err != nil {
		return nil, err
	}
	newReq := req
	newReq.Body = io.NopCloser(
		bytes.NewReader(bytes.Join([][]byte{newChunkHdr, newChunk[:len(newChunk)/2]},
			[]byte(""))),
	)
	return newReq, nil
}
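
// Illustrative sketch (hypothetical helper, not used by the tests in this file):
// how truncateChunkByHalfSigv4 above is intended to be combined with a
// streaming-signed request (newTestStreamingSignedRequest is defined further
// below) to simulate an unexpected EOF mid-chunk.
func exampleTruncatedStreamingPut(t TestErrHandler, urlStr, accessKey, secretKey string) *http.Request {
	payload := bytes.Repeat([]byte("a"), 1024)
	req, err := newTestStreamingSignedRequest(http.MethodPut, urlStr, int64(len(payload)), 64, bytes.NewReader(payload), accessKey, secretKey)
	if err != nil {
		t.Fatal(err)
	}
	// Drop the second half of the first chunk so the server sees a short read.
	req, err = truncateChunkByHalfSigv4(req)
	if err != nil {
		t.Fatal(err)
	}
	return req
}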
// Malform data given a request signed using streaming signature V4.
func malformDataSigV4(req *http.Request, newByte byte) (*http.Request, error) {
	bufReader := bufio.NewReader(req.Body)
	hexChunkSize, chunkSignature, err := readChunkLine(bufReader)
	if err != nil {
		return nil, err
	}

	newChunkHdr := []byte(fmt.Sprintf("%s"+s3ChunkSignatureStr+"%s\r\n",
		hexChunkSize, chunkSignature))
	newChunk, err := io.ReadAll(bufReader)
	if err != nil {
		return nil, err
	}

	newChunk[0] = newByte
	newReq := req
	newReq.Body = io.NopCloser(
		bytes.NewReader(bytes.Join([][]byte{newChunkHdr, newChunk},
			[]byte(""))),
	)

	return newReq, nil
}

// Malform chunk size given a request signed using streaming signature V4.
func malformChunkSizeSigV4(req *http.Request, badSize int64) (*http.Request, error) {
	bufReader := bufio.NewReader(req.Body)
	_, chunkSignature, err := readChunkLine(bufReader)
	if err != nil {
		return nil, err
	}

	n := badSize
	newHexChunkSize := []byte(fmt.Sprintf("%x", n))
	newChunkHdr := []byte(fmt.Sprintf("%s"+s3ChunkSignatureStr+"%s\r\n",
		newHexChunkSize, chunkSignature))
	newChunk, err := io.ReadAll(bufReader)
	if err != nil {
		return nil, err
	}

	newReq := req
	newReq.Body = io.NopCloser(
		bytes.NewReader(bytes.Join([][]byte{newChunkHdr, newChunk},
			[]byte(""))),
	)

	return newReq, nil
}

// Sign given request using Signature V4.
func signStreamingRequest(req *http.Request, accessKey, secretKey string, currTime time.Time) (string, error) {
	// Get hashed payload.
	hashedPayload := req.Header.Get("x-amz-content-sha256")
	if hashedPayload == "" {
		return "", fmt.Errorf("Invalid hashed payload")
	}

	// Set x-amz-date.
	req.Header.Set("x-amz-date", currTime.Format(iso8601Format))

	// Get header map.
	headerMap := make(map[string][]string)
	for k, vv := range req.Header {
		// If request header key is not in ignored headers, then add it.
		if _, ok := ignoredStreamingHeaders[http.CanonicalHeaderKey(k)]; !ok {
			headerMap[strings.ToLower(k)] = vv
		}
	}

	// Get header keys.
	headers := []string{"host"}
	for k := range headerMap {
		headers = append(headers, k)
	}
	sort.Strings(headers)

	// Get canonical headers.
	var buf bytes.Buffer
	for _, k := range headers {
		buf.WriteString(k)
		buf.WriteByte(':')
		switch {
		case k == "host":
			buf.WriteString(req.URL.Host)
			fallthrough
		default:
			for idx, v := range headerMap[k] {
				if idx > 0 {
					buf.WriteByte(',')
				}
				buf.WriteString(v)
			}
			buf.WriteByte('\n')
		}
	}
	canonicalHeaders := buf.String()

	// Get signed headers.
	signedHeaders := strings.Join(headers, ";")

	// Get canonical query string.
	req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20")

	// Get canonical URI.
	canonicalURI := s3utils.EncodePath(req.URL.Path)

	// Get canonical request.
	// canonicalRequest =
	//	<HTTPMethod>\n
	//	<CanonicalURI>\n
	//	<CanonicalQueryString>\n
	//	<CanonicalHeaders>\n
	//	<SignedHeaders>\n
	//	<HashedPayload>
	//
	canonicalRequest := strings.Join([]string{
		req.Method,
		canonicalURI,
		req.URL.RawQuery,
		canonicalHeaders,
		signedHeaders,
		hashedPayload,
	}, "\n")

	// Get scope.
	scope := strings.Join([]string{
		currTime.Format(yyyymmdd),
		globalMinioDefaultRegion,
		string(serviceS3),
		"aws4_request",
	}, SlashSeparator)

	stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n"
	stringToSign += scope + "\n"
	stringToSign += getSHA256Hash([]byte(canonicalRequest))

	date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd)))
	region := sumHMAC(date, []byte(globalMinioDefaultRegion))
	service := sumHMAC(region, []byte(string(serviceS3)))
	signingKey := sumHMAC(service, []byte("aws4_request"))

	signature := hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))

	// final Authorization header
	parts := []string{
		"AWS4-HMAC-SHA256" + " Credential=" + accessKey + SlashSeparator + scope,
		"SignedHeaders=" + signedHeaders,
		"Signature=" + signature,
	}
	auth := strings.Join(parts, ", ")
	req.Header.Set("Authorization", auth)

	return signature, nil
}

// Returns new HTTP request object.
func newTestStreamingRequest(method, urlStr string, dataLength, chunkSize int64, body io.ReadSeeker) (*http.Request, error) {
	if method == "" {
		method = http.MethodPost
	}

	req, err := http.NewRequest(method, urlStr, nil)
	if err != nil {
		return nil, err
	}

	if body == nil {
		// this is added to avoid panic during io.ReadAll(req.Body).
		// the stack trace can be found here https://github.com/minio/minio/pull/2074 .
		// This is very similar to https://github.com/golang/go/issues/7527.
		req.Body = io.NopCloser(bytes.NewReader([]byte("")))
	}

	contentLength := calculateStreamContentLength(dataLength, chunkSize)

	req.Header.Set("x-amz-content-sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
	req.Header.Set("content-encoding", "aws-chunked")
	req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLength, 10))
	req.Header.Set("content-length", strconv.FormatInt(contentLength, 10))

	// Seek back to beginning.
	body.Seek(0, 0)

	// Add body
	req.Body = io.NopCloser(body)
	req.ContentLength = contentLength

	return req, nil
}

func assembleStreamingChunks(req *http.Request, body io.ReadSeeker, chunkSize int64,
	secretKey, signature string, currTime time.Time) (*http.Request, error,
) {
	regionStr := globalSite.Region
	var stream []byte
	var buffer []byte
	body.Seek(0, 0)
	for {
		buffer = make([]byte, chunkSize)
		n, err := body.Read(buffer)
		if err != nil && err != io.EOF {
			return nil, err
		}

		// Get scope.
		scope := strings.Join([]string{
			currTime.Format(yyyymmdd),
			regionStr,
			string(serviceS3),
			"aws4_request",
		}, SlashSeparator)

		stringToSign := "AWS4-HMAC-SHA256-PAYLOAD" + "\n"
		stringToSign = stringToSign + currTime.Format(iso8601Format) + "\n"
		stringToSign = stringToSign + scope + "\n"
		stringToSign = stringToSign + signature + "\n"
		stringToSign = stringToSign + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + "\n" // hex(sum256(""))
		stringToSign += getSHA256Hash(buffer[:n])

		date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd)))
		region := sumHMAC(date, []byte(regionStr))
		service := sumHMAC(region, []byte(serviceS3))
		signingKey := sumHMAC(service, []byte("aws4_request"))

		signature = hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))

		stream = append(stream, []byte(fmt.Sprintf("%x", n)+";chunk-signature="+signature+"\r\n")...)
		stream = append(stream, buffer[:n]...)
		stream = append(stream, []byte("\r\n")...)

		if n <= 0 {
			break
		}

	}
	req.Body = io.NopCloser(bytes.NewReader(stream))
	return req, nil
}

func newTestStreamingSignedBadChunkDateRequest(method, urlStr string, contentLength, chunkSize int64, body io.ReadSeeker, accessKey, secretKey string) (*http.Request, error) {
	req, err := newTestStreamingRequest(method, urlStr, contentLength, chunkSize, body)
	if err != nil {
		return nil, err
	}

	currTime := UTCNow()
	signature, err := signStreamingRequest(req, accessKey, secretKey, currTime)
	if err != nil {
		return nil, err
	}

	// skew the time between the chunk signature calculation and seed signature.
	currTime = currTime.Add(1 * time.Second)
	req, err = assembleStreamingChunks(req, body, chunkSize, secretKey, signature, currTime)
	return req, err
}

func newTestStreamingSignedCustomEncodingRequest(method, urlStr string, contentLength, chunkSize int64, body io.ReadSeeker, accessKey, secretKey, contentEncoding string) (*http.Request, error) {
	req, err := newTestStreamingRequest(method, urlStr, contentLength, chunkSize, body)
	if err != nil {
		return nil, err
	}

	// Set custom encoding.
	req.Header.Set("content-encoding", contentEncoding)

	currTime := UTCNow()
	signature, err := signStreamingRequest(req, accessKey, secretKey, currTime)
	if err != nil {
		return nil, err
	}

	req, err = assembleStreamingChunks(req, body, chunkSize, secretKey, signature, currTime)
	return req, err
}

// Returns new HTTP request object signed with streaming signature v4.
func newTestStreamingSignedRequest(method, urlStr string, contentLength, chunkSize int64, body io.ReadSeeker, accessKey, secretKey string) (*http.Request, error) {
	req, err := newTestStreamingRequest(method, urlStr, contentLength, chunkSize, body)
	if err != nil {
		return nil, err
	}

	currTime := UTCNow()
	signature, err := signStreamingRequest(req, accessKey, secretKey, currTime)
	if err != nil {
		return nil, err
	}

	req, err = assembleStreamingChunks(req, body, chunkSize, secretKey, signature, currTime)
	return req, err
}
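
// The constant "e3b0c442..." hard-coded into the per-chunk string-to-sign in
// assembleStreamingChunks above is simply the SHA-256 of the empty string, as the
// inline `hex(sum256(""))` comment notes. A minimal illustrative check
// (hypothetical name, not part of the test suite):
func exampleEmptyPayloadHash() bool {
	return getSHA256Hash([]byte{}) == "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
}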
// preSignV4 presign the request, in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
func preSignV4(req *http.Request, accessKeyID, secretAccessKey string, expires int64) error {
	// Presign is not needed for anonymous credentials.
	if accessKeyID == "" || secretAccessKey == "" {
		return errors.New("Presign cannot be generated without access and secret keys")
	}

	region := globalSite.Region
	date := UTCNow()
	scope := getScope(date, region)
	credential := fmt.Sprintf("%s/%s", accessKeyID, scope)

	// Set URL query.
	query := req.URL.Query()
	query.Set("X-Amz-Algorithm", signV4Algorithm)
	query.Set("X-Amz-Date", date.Format(iso8601Format))
	query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
	query.Set("X-Amz-SignedHeaders", "host")
	query.Set("X-Amz-Credential", credential)
	query.Set("X-Amz-Content-Sha256", unsignedPayload)

	// "host" is the only header required to be signed for Presigned URLs.
	extractedSignedHeaders := make(http.Header)
	extractedSignedHeaders.Set("host", req.Host)

	queryStr := strings.ReplaceAll(query.Encode(), "+", "%20")
	canonicalRequest := getCanonicalRequest(extractedSignedHeaders, unsignedPayload, queryStr, req.URL.Path, req.Method)
	stringToSign := getStringToSign(canonicalRequest, date, scope)
	signingKey := getSigningKey(secretAccessKey, date, region, serviceS3)
	signature := getSignature(signingKey, stringToSign)

	req.URL.RawQuery = query.Encode()

	// Add signature header to RawQuery.
	req.URL.RawQuery += "&X-Amz-Signature=" + url.QueryEscape(signature)

	// Construct the final presigned URL.
	return nil
}

// preSignV2 - presign the request in the following style.
// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}.
func preSignV2(req *http.Request, accessKeyID, secretAccessKey string, expires int64) error {
	// Presign is not needed for anonymous credentials.
	if accessKeyID == "" || secretAccessKey == "" {
		return errors.New("Presign cannot be generated without access and secret keys")
	}

	// FIXME: Remove following portion of code after fixing a bug in minio-go preSignV2.

	d := UTCNow()
	// Find epoch expires when the request will expire.
	epochExpires := d.Unix() + expires

	// Add expires header if not present.
	expiresStr := req.Header.Get("Expires")
	if expiresStr == "" {
		expiresStr = strconv.FormatInt(epochExpires, 10)
		req.Header.Set("Expires", expiresStr)
	}

	// url.RawPath will be valid if path has any encoded characters, if not it will
	// be empty - in which case we need to consider url.Path (bug in net/http?)
	encodedResource := req.URL.RawPath
	encodedQuery := req.URL.RawQuery
	if encodedResource == "" {
		splits := strings.SplitN(req.URL.Path, "?", 2)
		encodedResource = splits[0]
		if len(splits) == 2 {
			encodedQuery = splits[1]
		}
	}

	unescapedQueries, err := unescapeQueries(encodedQuery)
	if err != nil {
		return err
	}

	// Get presigned string to sign.
	stringToSign := getStringToSignV2(req.Method, encodedResource, strings.Join(unescapedQueries, "&"), req.Header, expiresStr)
	hm := hmac.New(sha1.New, []byte(secretAccessKey))
	hm.Write([]byte(stringToSign))

	// Calculate signature.
	signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))

	query := req.URL.Query()
	// Handle specially for Google Cloud Storage.
921 query.Set("AWSAccessKeyId", accessKeyID) 922 // Fill in Expires for presigned query. 923 query.Set("Expires", strconv.FormatInt(epochExpires, 10)) 924 925 // Encode query and save. 926 req.URL.RawQuery = query.Encode() 927 928 // Save signature finally. 929 req.URL.RawQuery += "&Signature=" + url.QueryEscape(signature) 930 return nil 931 } 932 933 // Sign given request using Signature V2. 934 func signRequestV2(req *http.Request, accessKey, secretKey string) error { 935 signer.SignV2(*req, accessKey, secretKey, false) 936 return nil 937 } 938 939 // Sign given request using Signature V4. 940 func signRequestV4(req *http.Request, accessKey, secretKey string) error { 941 // Get hashed payload. 942 hashedPayload := req.Header.Get("x-amz-content-sha256") 943 if hashedPayload == "" { 944 return fmt.Errorf("Invalid hashed payload") 945 } 946 947 currTime := UTCNow() 948 949 // Set x-amz-date. 950 req.Header.Set("x-amz-date", currTime.Format(iso8601Format)) 951 952 // Get header map. 953 headerMap := make(map[string][]string) 954 for k, vv := range req.Header { 955 // If request header key is not in ignored headers, then add it. 956 if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; !ok { 957 headerMap[strings.ToLower(k)] = vv 958 } 959 } 960 961 // Get header keys. 962 headers := []string{"host"} 963 for k := range headerMap { 964 headers = append(headers, k) 965 } 966 sort.Strings(headers) 967 968 region := globalSite.Region 969 970 // Get canonical headers. 971 var buf bytes.Buffer 972 for _, k := range headers { 973 buf.WriteString(k) 974 buf.WriteByte(':') 975 switch { 976 case k == "host": 977 buf.WriteString(req.URL.Host) 978 fallthrough 979 default: 980 for idx, v := range headerMap[k] { 981 if idx > 0 { 982 buf.WriteByte(',') 983 } 984 buf.WriteString(v) 985 } 986 buf.WriteByte('\n') 987 } 988 } 989 canonicalHeaders := buf.String() 990 991 // Get signed headers. 992 signedHeaders := strings.Join(headers, ";") 993 994 // Get canonical query string. 995 req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20") 996 997 // Get canonical URI. 998 canonicalURI := s3utils.EncodePath(req.URL.Path) 999 1000 // Get canonical request. 1001 // canonicalRequest = 1002 // <HTTPMethod>\n 1003 // <CanonicalURI>\n 1004 // <CanonicalQueryString>\n 1005 // <CanonicalHeaders>\n 1006 // <SignedHeaders>\n 1007 // <HashedPayload> 1008 // 1009 canonicalRequest := strings.Join([]string{ 1010 req.Method, 1011 canonicalURI, 1012 req.URL.RawQuery, 1013 canonicalHeaders, 1014 signedHeaders, 1015 hashedPayload, 1016 }, "\n") 1017 1018 // Get scope. 
	scope := strings.Join([]string{
		currTime.Format(yyyymmdd),
		region,
		string(serviceS3),
		"aws4_request",
	}, SlashSeparator)

	stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n"
	stringToSign = stringToSign + scope + "\n"
	stringToSign += getSHA256Hash([]byte(canonicalRequest))

	date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd)))
	regionHMAC := sumHMAC(date, []byte(region))
	service := sumHMAC(regionHMAC, []byte(serviceS3))
	signingKey := sumHMAC(service, []byte("aws4_request"))

	signature := hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))

	// final Authorization header
	parts := []string{
		"AWS4-HMAC-SHA256" + " Credential=" + accessKey + SlashSeparator + scope,
		"SignedHeaders=" + signedHeaders,
		"Signature=" + signature,
	}
	auth := strings.Join(parts, ", ")
	req.Header.Set("Authorization", auth)

	return nil
}

// getCredentialString generates a credential string.
func getCredentialString(accessKeyID, location string, t time.Time) string {
	return accessKeyID + SlashSeparator + getScope(t, location)
}

// getMD5HashBase64 returns MD5 hash in base64 encoding of given data.
func getMD5HashBase64(data []byte) string {
	return base64.StdEncoding.EncodeToString(getMD5Sum(data))
}

// Returns new HTTP request object.
func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeeker) (*http.Request, error) {
	if method == "" {
		method = http.MethodPost
	}

	// Save for subsequent use
	var hashedPayload string
	var md5Base64 string
	switch {
	case body == nil:
		hashedPayload = getSHA256Hash([]byte{})
	default:
		payloadBytes, err := io.ReadAll(body)
		if err != nil {
			return nil, err
		}
		hashedPayload = getSHA256Hash(payloadBytes)
		md5Base64 = getMD5HashBase64(payloadBytes)
	}
	// Seek back to beginning.
	if body != nil {
		body.Seek(0, 0)
	} else {
		body = bytes.NewReader([]byte(""))
	}
	req, err := http.NewRequest(method, urlStr, body)
	if err != nil {
		return nil, err
	}
	if md5Base64 != "" {
		req.Header.Set("Content-Md5", md5Base64)
	}
	req.Header.Set("x-amz-content-sha256", hashedPayload)

	// Add Content-Length
	req.ContentLength = contentLength

	return req, nil
}

// Various signature types we are supporting, currently
// two main signature types.
type signerType int

const (
	signerV2 signerType = iota
	signerV4
)

func newTestSignedRequest(method, urlStr string, contentLength int64, body io.ReadSeeker, accessKey, secretKey string, signer signerType) (*http.Request, error) {
	if signer == signerV2 {
		return newTestSignedRequestV2(method, urlStr, contentLength, body, accessKey, secretKey, nil)
	}
	return newTestSignedRequestV4(method, urlStr, contentLength, body, accessKey, secretKey, nil)
}

// Returns request with correct signature but with incorrect SHA256.
func newTestSignedBadSHARequest(method, urlStr string, contentLength int64, body io.ReadSeeker, accessKey, secretKey string, signer signerType) (*http.Request, error) {
	req, err := newTestRequest(method, urlStr, contentLength, body)
	if err != nil {
		return nil, err
	}

	// Anonymous requests return early.
	if accessKey == "" || secretKey == "" {
		return req, nil
	}

	if signer == signerV2 {
		err = signRequestV2(req, accessKey, secretKey)
		req.Header.Del("x-amz-content-sha256")
	} else {
		req.Header.Set("x-amz-content-sha256", "92b165232fbd011da355eca0b033db22b934ba9af0145a437a832d27310b89f9")
		err = signRequestV4(req, accessKey, secretKey)
	}

	return req, err
}

// Returns new HTTP request object signed with signature v2.
func newTestSignedRequestV2(method, urlStr string, contentLength int64, body io.ReadSeeker, accessKey, secretKey string, headers map[string]string) (*http.Request, error) {
	req, err := newTestRequest(method, urlStr, contentLength, body)
	if err != nil {
		return nil, err
	}
	req.Header.Del("x-amz-content-sha256")

	// Anonymous requests return quickly.
	if accessKey == "" || secretKey == "" {
		return req, nil
	}

	for k, v := range headers {
		req.Header.Set(k, v)
	}

	err = signRequestV2(req, accessKey, secretKey)
	if err != nil {
		return nil, err
	}

	return req, nil
}

// Returns new HTTP request object signed with signature v4.
func newTestSignedRequestV4(method, urlStr string, contentLength int64, body io.ReadSeeker, accessKey, secretKey string, headers map[string]string) (*http.Request, error) {
	req, err := newTestRequest(method, urlStr, contentLength, body)
	if err != nil {
		return nil, err
	}

	// Anonymous requests return quickly.
	if accessKey == "" || secretKey == "" {
		return req, nil
	}

	for k, v := range headers {
		req.Header.Set(k, v)
	}

	err = signRequestV4(req, accessKey, secretKey)
	if err != nil {
		return nil, err
	}

	return req, nil
}

var src = rand.NewSource(time.Now().UnixNano())

func randString(n int) string {
	b := make([]byte, n)
	// A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
	for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
		if remain == 0 {
			cache, remain = src.Int63(), letterIdxMax
		}
		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
			b[i] = letterBytes[idx]
			i--
		}
		cache >>= letterIdxBits
		remain--
	}

	return *(*string)(unsafe.Pointer(&b))
}

// generate random object name.
func getRandomObjectName() string {
	return randString(16)
}

// generate random bucket name.
func getRandomBucketName() string {
	return randString(60)
}

// construct URL for http requests for bucket operations.
func makeTestTargetURL(endPoint, bucketName, objectName string, queryValues url.Values) string {
	urlStr := endPoint + SlashSeparator
	if bucketName != "" {
		urlStr = urlStr + bucketName + SlashSeparator
	}
	if objectName != "" {
		urlStr += s3utils.EncodePath(objectName)
	}
	if len(queryValues) > 0 {
		urlStr = urlStr + "?" + queryValues.Encode()
	}
	return urlStr
}
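
// makeTestTargetURL plus the signed-request constructors above are typically all
// that is needed to drive a handler in a test. A small illustrative sketch
// (hypothetical helper, not referenced elsewhere in this file) building a
// SigV4-signed GET for an object against a running TestServer endpoint:
func exampleSignedGetObjectRequest(t TestErrHandler, endPoint, bucketName, objectName string, creds auth.Credentials) *http.Request {
	urlStr := makeTestTargetURL(endPoint, bucketName, objectName, url.Values{})
	req, err := newTestSignedRequestV4(http.MethodGet, urlStr, 0, nil, creds.AccessKey, creds.SecretKey, nil)
	if err != nil {
		t.Fatal(err)
	}
	return req
}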
// return URL for uploading object into the bucket.
func getPutObjectURL(endPoint, bucketName, objectName string) string {
	return makeTestTargetURL(endPoint, bucketName, objectName, url.Values{})
}

func getPutObjectPartURL(endPoint, bucketName, objectName, uploadID, partNumber string) string {
	queryValues := url.Values{}
	queryValues.Set("uploadId", uploadID)
	queryValues.Set("partNumber", partNumber)
	return makeTestTargetURL(endPoint, bucketName, objectName, queryValues)
}

func getCopyObjectPartURL(endPoint, bucketName, objectName, uploadID, partNumber string) string {
	queryValues := url.Values{}
	queryValues.Set("uploadId", uploadID)
	queryValues.Set("partNumber", partNumber)
	return makeTestTargetURL(endPoint, bucketName, objectName, queryValues)
}

// return URL for fetching object from the bucket.
func getGetObjectURL(endPoint, bucketName, objectName string) string {
	return makeTestTargetURL(endPoint, bucketName, objectName, url.Values{})
}

// return URL for deleting the object from the bucket.
func getDeleteObjectURL(endPoint, bucketName, objectName string) string {
	return makeTestTargetURL(endPoint, bucketName, objectName, url.Values{})
}

// return URL for deleting multiple objects from a bucket.
func getMultiDeleteObjectURL(endPoint, bucketName string) string {
	queryValue := url.Values{}
	queryValue.Set("delete", "")
	return makeTestTargetURL(endPoint, bucketName, "", queryValue)
}

// return URL for HEAD on the object.
func getHeadObjectURL(endPoint, bucketName, objectName string) string {
	return makeTestTargetURL(endPoint, bucketName, objectName, url.Values{})
}

// return url to be used while copying the object.
func getCopyObjectURL(endPoint, bucketName, objectName string) string {
	return makeTestTargetURL(endPoint, bucketName, objectName, url.Values{})
}

// return URL for inserting bucket notification.
func getPutNotificationURL(endPoint, bucketName string) string {
	queryValue := url.Values{}
	queryValue.Set("notification", "")
	return makeTestTargetURL(endPoint, bucketName, "", queryValue)
}

// return URL for inserting bucket policy.
func getPutPolicyURL(endPoint, bucketName string) string {
	queryValue := url.Values{}
	queryValue.Set("policy", "")
	return makeTestTargetURL(endPoint, bucketName, "", queryValue)
}

// return URL for fetching bucket policy.
func getGetPolicyURL(endPoint, bucketName string) string {
	queryValue := url.Values{}
	queryValue.Set("policy", "")
	return makeTestTargetURL(endPoint, bucketName, "", queryValue)
}

// return URL for deleting bucket policy.
func getDeletePolicyURL(endPoint, bucketName string) string {
	queryValue := url.Values{}
	queryValue.Set("policy", "")
	return makeTestTargetURL(endPoint, bucketName, "", queryValue)
}

// return URL for creating the bucket.
func getMakeBucketURL(endPoint, bucketName string) string {
	return makeTestTargetURL(endPoint, bucketName, "", url.Values{})
}

// return URL for fetching/updating the bucket versioning configuration.
func getBucketVersioningConfigURL(endPoint, bucketName string) string {
	vals := make(url.Values)
	vals.Set("versioning", "")
	return makeTestTargetURL(endPoint, bucketName, "", vals)
}

// return URL for listing buckets.
func getListBucketURL(endPoint string) string {
	return makeTestTargetURL(endPoint, "", "", url.Values{})
}

// return URL for HEAD on the bucket.
func getHEADBucketURL(endPoint, bucketName string) string {
	return makeTestTargetURL(endPoint, bucketName, "", url.Values{})
}

// return URL for deleting the bucket.
func getDeleteBucketURL(endPoint, bucketName string) string {
	return makeTestTargetURL(endPoint, bucketName, "", url.Values{})
}

// return URL for deleting multiple objects from the bucket.
func getDeleteMultipleObjectsURL(endPoint, bucketName string) string {
	queryValue := url.Values{}
	queryValue.Set("delete", "")
	return makeTestTargetURL(endPoint, bucketName, "", queryValue)
}

// return URL For fetching location of the bucket.
func getBucketLocationURL(endPoint, bucketName string) string {
	queryValue := url.Values{}
	queryValue.Set("location", "")
	return makeTestTargetURL(endPoint, bucketName, "", queryValue)
}

// return URL For set/get lifecycle of the bucket.
func getBucketLifecycleURL(endPoint, bucketName string) (ret string) {
	queryValue := url.Values{}
	queryValue.Set("lifecycle", "")
	return makeTestTargetURL(endPoint, bucketName, "", queryValue)
}

// return URL for listing objects in the bucket with V1 legacy API.
func getListObjectsV1URL(endPoint, bucketName, prefix, maxKeys, encodingType string) string {
	queryValue := url.Values{}
	if maxKeys != "" {
		queryValue.Set("max-keys", maxKeys)
	}
	if encodingType != "" {
		queryValue.Set("encoding-type", encodingType)
	}
	return makeTestTargetURL(endPoint, bucketName, prefix, queryValue)
}

// return URL for listing object versions in the bucket.
func getListObjectVersionsURL(endPoint, bucketName, prefix, maxKeys, encodingType string) string {
	queryValue := url.Values{}
	if maxKeys != "" {
		queryValue.Set("max-keys", maxKeys)
	}
	if encodingType != "" {
		queryValue.Set("encoding-type", encodingType)
	}
	queryValue.Set("versions", "")
	return makeTestTargetURL(endPoint, bucketName, prefix, queryValue)
}

// return URL for listing objects in the bucket with V2 API.
func getListObjectsV2URL(endPoint, bucketName, prefix, maxKeys, fetchOwner, encodingType string) string {
	queryValue := url.Values{}
	queryValue.Set("list-type", "2") // Enables list objects V2 URL.
	if maxKeys != "" {
		queryValue.Set("max-keys", maxKeys)
	}
	if fetchOwner != "" {
		queryValue.Set("fetch-owner", fetchOwner)
	}
	if encodingType != "" {
		queryValue.Set("encoding-type", encodingType)
	}
	return makeTestTargetURL(endPoint, bucketName, prefix, queryValue)
}

// return URL for initiating a new multipart upload.
func getNewMultipartURL(endPoint, bucketName, objectName string) string {
	queryValue := url.Values{}
	queryValue.Set("uploads", "")
	return makeTestTargetURL(endPoint, bucketName, objectName, queryValue)
}
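
// The multipart helpers below mirror the S3 call sequence. An illustrative
// sketch (hypothetical helper, not used by the tests here) of the URLs a
// multipart test walks through for a given upload ID, using getPartUploadURL and
// getCompleteMultipartUploadURL defined just below:
func exampleMultipartURLSequence(endPoint, bucketName, objectName, uploadID string) []string {
	return []string{
		getNewMultipartURL(endPoint, bucketName, objectName),                      // initiate
		getPartUploadURL(endPoint, bucketName, objectName, uploadID, "1"),         // upload part 1
		getCompleteMultipartUploadURL(endPoint, bucketName, objectName, uploadID), // complete
	}
}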
// return URL for uploading a part of a multipart upload.
func getPartUploadURL(endPoint, bucketName, objectName, uploadID, partNumber string) string {
	queryValues := url.Values{}
	queryValues.Set("uploadId", uploadID)
	queryValues.Set("partNumber", partNumber)
	return makeTestTargetURL(endPoint, bucketName, objectName, queryValues)
}

// return URL for aborting multipart upload.
func getAbortMultipartUploadURL(endPoint, bucketName, objectName, uploadID string) string {
	queryValue := url.Values{}
	queryValue.Set("uploadId", uploadID)
	return makeTestTargetURL(endPoint, bucketName, objectName, queryValue)
}

// return URL for listing pending multipart uploads.
func getListMultipartURL(endPoint, bucketName string) string {
	queryValue := url.Values{}
	queryValue.Set("uploads", "")
	return makeTestTargetURL(endPoint, bucketName, "", queryValue)
}

// return URL for listing pending multipart uploads with parameters.
func getListMultipartUploadsURLWithParams(endPoint, bucketName, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads string) string {
	queryValue := url.Values{}
	queryValue.Set("uploads", "")
	queryValue.Set("prefix", prefix)
	queryValue.Set("delimiter", delimiter)
	queryValue.Set("key-marker", keyMarker)
	queryValue.Set("upload-id-marker", uploadIDMarker)
	queryValue.Set("max-uploads", maxUploads)
	return makeTestTargetURL(endPoint, bucketName, "", queryValue)
}

// return URL for listing parts of a given upload id.
func getListMultipartURLWithParams(endPoint, bucketName, objectName, uploadID, maxParts, partNumberMarker, encoding string) string {
	queryValues := url.Values{}
	queryValues.Set("uploadId", uploadID)
	queryValues.Set("max-parts", maxParts)
	if partNumberMarker != "" {
		queryValues.Set("part-number-marker", partNumberMarker)
	}
	return makeTestTargetURL(endPoint, bucketName, objectName, queryValues)
}

// return URL for completing multipart upload.
// complete multipart upload request is sent after all parts are uploaded.
func getCompleteMultipartUploadURL(endPoint, bucketName, objectName, uploadID string) string {
	queryValue := url.Values{}
	queryValue.Set("uploadId", uploadID)
	return makeTestTargetURL(endPoint, bucketName, objectName, queryValue)
}

// return URL for listen bucket notification.
func getListenNotificationURL(endPoint, bucketName string, prefixes, suffixes, events []string) string {
	queryValue := url.Values{}

	queryValue["prefix"] = prefixes
	queryValue["suffix"] = suffixes
	queryValue["events"] = events
	return makeTestTargetURL(endPoint, bucketName, "", queryValue)
}

// getRandomDisks - Creates a slice of N random disks, each of the form - minio-XXX
func getRandomDisks(n int) ([]string, error) {
	var erasureDisks []string
	for i := 0; i < n; i++ {
		path, err := os.MkdirTemp(globalTestTmpDir, "minio-")
		if err != nil {
			// Remove directories created so far.
			removeRoots(erasureDisks)
			return nil, err
		}
		erasureDisks = append(erasureDisks, path)
	}
	return erasureDisks, nil
}
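
// getRandomDisks pairs naturally with removeRoots (defined just below) for
// set-up/tear-down of throwaway backends. An illustrative sketch (hypothetical
// helper, not referenced by the tests in this file):
func exampleTempErasureBackend(t TestErrHandler) (ObjectLayer, func()) {
	ctx, cancel := context.WithCancel(context.Background())
	obj, dirs, err := prepareErasure(ctx, 4)
	if err != nil {
		cancel()
		t.Fatal(err)
	}
	cleanup := func() {
		obj.Shutdown(context.Background())
		cancel()
		removeRoots(dirs)
	}
	return obj, cleanup
}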
// Initialize object layer with the supplied disks, objectLayer is nil upon any error.
func newTestObjectLayer(ctx context.Context, endpointServerPools EndpointServerPools) (newObject ObjectLayer, err error) {
	initAllSubsystems(ctx)

	return newErasureServerPools(ctx, endpointServerPools)
}

// initObjectLayer - Instantiates object layer and returns it.
func initObjectLayer(ctx context.Context, endpointServerPools EndpointServerPools) (ObjectLayer, []StorageAPI, error) {
	objLayer, err := newTestObjectLayer(ctx, endpointServerPools)
	if err != nil {
		return nil, nil, err
	}

	var formattedDisks []StorageAPI
	// Should use the object layer tests for validating cache.
	if z, ok := objLayer.(*erasureServerPools); ok {
		formattedDisks = z.serverPools[0].GetDisks(0)()
	}

	// Success.
	return objLayer, formattedDisks, nil
}

// removeRoots - Cleans up initialized directories during tests.
func removeRoots(roots []string) {
	for _, root := range roots {
		os.RemoveAll(root)
	}
}

// creates a bucket for the tests and returns the bucket name.
// initializes the specified API endpoints for the tests.
// initializes the root and returns its path.
// return credentials.
func initAPIHandlerTest(ctx context.Context, obj ObjectLayer, endpoints []string) (string, http.Handler, error) {
	initAllSubsystems(ctx)

	initConfigSubsystem(ctx, obj)

	globalIAMSys.Init(ctx, obj, globalEtcdClient, 2*time.Second)

	// get random bucket name.
	bucketName := getRandomBucketName()

	// Create bucket.
	err := obj.MakeBucket(context.Background(), bucketName, MakeBucketOptions{})
	if err != nil {
		// failed to create new bucket, return err.
		return "", nil, err
	}
	// Register the API end points with Erasure object layer.
	// Registering only the GetObject handler.
	apiRouter := initTestAPIEndPoints(obj, endpoints)
	f := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		r.RequestURI = r.URL.RequestURI()
		apiRouter.ServeHTTP(w, r)
	})
	return bucketName, f, nil
}

// prepare test backend.
// create FS/Erasure/ErasureSet backend.
// return object layer, backend disks.
func prepareTestBackend(ctx context.Context, instanceType string) (ObjectLayer, []string, error) {
	switch instanceType {
	// Total number of disks for Erasure sets backend is set to 32.
	case ErasureSetsTestStr:
		return prepareErasureSets32(ctx)
	// Total number of disks for Erasure backend is set to 16.
	case ErasureTestStr:
		return prepareErasure16(ctx)
	default:
		// return FS backend by default.
		obj, disk, err := prepareFS(ctx)
		if err != nil {
			return nil, nil, err
		}
		return obj, []string{disk}, nil
	}
}

// ExecObjectLayerAPIAnonTest - Helper function to validate object Layer API handler
// response for anonymous/unsigned and unknown signature type HTTP request.

// Here is the brief description of some of the arguments to the function below.
//
//	apiRouter  - http.Handler with the relevant API endPoint (API endPoint under test) registered.
//	anonReq    - unsigned *http.Request to invoke the handler's response for anonymous requests.
//	policyFunc - function to return bucketPolicy statement which would permit the anonymous request to be served.
//
// The test works in 2 steps, here is the description of the steps.
//
// STEP 1: Call the handler with the unsigned HTTP request (anonReq), assert for the `ErrAccessDenied` error response.
func ExecObjectLayerAPIAnonTest(t *testing.T, obj ObjectLayer, testName, bucketName, objectName, instanceType string, apiRouter http.Handler,
	anonReq *http.Request, bucketPolicy *policy.BucketPolicy,
) {
	anonTestStr := "Anonymous HTTP request test"
	unknownSignTestStr := "Unknown HTTP signature test"

	// simple function which returns a message which gives the context of the test
	// and then followed by the actual error message.
	failTestStr := func(testType, failMsg string) string {
		return fmt.Sprintf("MinIO %s: %s fail for \"%s\": \n<Error> %s", instanceType, testType, testName, failMsg)
	}

	// httptest Recorder to capture all the response by the http handler.
	rec := httptest.NewRecorder()
	// reading the body to preserve it so that it can be used again for the second attempt of sending an unsigned HTTP request.
	// If the body is read in the handler the same request cannot be made use of.
	buf, err := io.ReadAll(anonReq.Body)
	if err != nil {
		t.Fatal(failTestStr(anonTestStr, err.Error()))
	}

	// creating 2 read closers (to set as request body) from the body content.
	readerOne := io.NopCloser(bytes.NewBuffer(buf))
	readerTwo := io.NopCloser(bytes.NewBuffer(buf))

	anonReq.Body = readerOne

	// call the HTTP handler.
	apiRouter.ServeHTTP(rec, anonReq)

	// expected error response when the unsigned HTTP request is not permitted.
	accessDenied := getAPIError(ErrAccessDenied).HTTPStatusCode
	if rec.Code != accessDenied {
		t.Fatal(failTestStr(anonTestStr, fmt.Sprintf("Object API Nil Test expected to fail with %d, but failed with %d", accessDenied, rec.Code)))
	}

	// HEAD HTTP request doesn't contain a response body.
	if anonReq.Method != http.MethodHead {
		// read the response body.
		var actualContent []byte
		actualContent, err = io.ReadAll(rec.Body)
		if err != nil {
			t.Fatal(failTestStr(anonTestStr, fmt.Sprintf("Failed parsing response body: <ERROR> %v", err)))
		}

		actualError := &APIErrorResponse{}
		if err = xml.Unmarshal(actualContent, actualError); err != nil {
			t.Fatal(failTestStr(anonTestStr, "error response failed to parse error XML"))
		}

		if actualError.BucketName != bucketName {
			t.Fatal(failTestStr(anonTestStr, "error response bucket name differs from expected value"))
		}

		if actualError.Key != objectName {
			t.Fatal(failTestStr(anonTestStr, "error response object name differs from expected value"))
		}
	}

	// test for unknown auth case.
	anonReq.Body = readerTwo
	// Setting the `Authorization` header to a random value so that the signature falls into the unknown auth case.
	anonReq.Header.Set("Authorization", "nothingElse")
	// initialize new response recorder.
	rec = httptest.NewRecorder()
	// call the handler using the HTTP Request.
	apiRouter.ServeHTTP(rec, anonReq)
	// verify the response body for the `ErrAccessDenied` message.
	if anonReq.Method != http.MethodHead {
		// read the response body.
		actualContent, err := io.ReadAll(rec.Body)
		if err != nil {
			t.Fatal(failTestStr(unknownSignTestStr, fmt.Sprintf("Failed parsing response body: <ERROR> %v", err)))
		}

		actualError := &APIErrorResponse{}
		if err = xml.Unmarshal(actualContent, actualError); err != nil {
			t.Fatal(failTestStr(unknownSignTestStr, "error response failed to parse error XML"))
		}

		if path.Clean(actualError.Resource) != pathJoin(SlashSeparator, bucketName, SlashSeparator, objectName) {
			t.Fatal(failTestStr(unknownSignTestStr, "error response resource differs from expected value"))
		}
	}

	// expected error response when the unsigned HTTP request is not permitted.
	unsupportedSignature := getAPIError(ErrSignatureVersionNotSupported).HTTPStatusCode
	if rec.Code != unsupportedSignature {
		t.Fatal(failTestStr(unknownSignTestStr, fmt.Sprintf("Object API Unknown auth test for \"%s\", expected to fail with %d, but failed with %d", testName, unsupportedSignature, rec.Code)))
	}
}

// ExecObjectLayerAPINilTest - Sets the object layer to `nil`, calls the registered object layer API endpoint,
// and asserts the error response. The purpose is to validate the API handlers' response when the object layer is uninitialized.
// Usage hint: Should be used at the end of the API end point tests (ex: check the last few lines of `testAPIListObjectPartsHandler`).
// A sample HTTP request needs to be sent as an argument so that the relevant handler is called; handler registration is expected
// to have been done already, since this is called from within the API handler tests, and the reference to the registered HTTP
// handler has to be sent as an argument.
func ExecObjectLayerAPINilTest(t TestErrHandler, bucketName, objectName, instanceType string, apiRouter http.Handler, req *http.Request) {
	// httptest Recorder to capture all the response by the http handler.
	rec := httptest.NewRecorder()

	// The API handler gets the reference to the object layer via the global object Layer,
	// setting it to `nil` in order to test the handler's response for an uninitialized object layer.
	globalObjLayerMutex.Lock()
	globalObjectAPI = nil
	globalObjLayerMutex.Unlock()

	// call the HTTP handler.
	apiRouter.ServeHTTP(rec, req)

	// expected error response when the API handler is called before the object layer is initialized,
	// or when objectLayer is `nil`.
	serverNotInitializedErr := getAPIError(ErrServerNotInitialized).HTTPStatusCode
	if rec.Code != serverNotInitializedErr {
		t.Errorf("Object API Nil Test expected to fail with %d, but failed with %d", serverNotInitializedErr, rec.Code)
	}

	// HEAD HTTP Request doesn't contain body in its response,
	// for other type of HTTP requests compare the response body content with the expected one.
	if req.Method != http.MethodHead {
		// read the response body.
1697 		actualContent, err := io.ReadAll(rec.Body)
1698 		if err != nil {
1699 			t.Fatalf("MinIO %s: Failed parsing response body: <ERROR> %v", instanceType, err)
1700 		}
1701 
1702 		actualError := &APIErrorResponse{}
1703 		if err = xml.Unmarshal(actualContent, actualError); err != nil {
1704 			t.Errorf("MinIO %s: error response failed to parse error XML", instanceType)
1705 		}
1706 
1707 		if actualError.BucketName != bucketName {
1708 			t.Errorf("MinIO %s: error response bucket name differs from expected value", instanceType)
1709 		}
1710 
1711 		if actualError.Key != objectName {
1712 			t.Errorf("MinIO %s: error response object name differs from expected value", instanceType)
1713 		}
1714 	}
1715 }
1716 
1717 // ExecObjectLayerAPITest - executes object layer API tests.
1718 // Creates single node and Erasure ObjectLayer instances, registers the specified API endpoints and runs the tests for both layers.
1719 func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endpoints []string) {
1720 	ctx, cancel := context.WithCancel(context.Background())
1721 	defer cancel()
1722 
1723 	// reset globals.
1724 	// this is to make sure that the tests are not affected by modified values.
1725 	resetTestGlobals()
1726 
1727 	objLayer, fsDir, err := prepareFS(ctx)
1728 	if err != nil {
1729 		t.Fatalf("Initialization of object layer failed for single node setup: %s", err)
1730 	}
1731 
1732 	bucketFS, fsAPIRouter, err := initAPIHandlerTest(ctx, objLayer, endpoints)
1733 	if err != nil {
1734 		t.Fatalf("Initialization of API handler tests failed: <ERROR> %s", err)
1735 	}
1736 
1737 	// initialize the server and obtain the credentials and root.
1738 	// credentials are necessary to sign the HTTP request.
1739 	if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
1740 		t.Fatalf("Unable to initialize server config. %s", err)
1741 	}
1742 
1743 	credentials := globalActiveCred
1744 
1745 	// Executing the object layer tests for single node setup.
1746 	objAPITest(objLayer, ErasureSDStr, bucketFS, fsAPIRouter, credentials, t)
1747 
1748 	// reset globals.
1749 	// this is to make sure that the tests are not affected by modified values.
1750 	resetTestGlobals()
1751 
1752 	objLayer, erasureDisks, err := prepareErasure16(ctx)
1753 	if err != nil {
1754 		t.Fatalf("Initialization of object layer failed for Erasure setup: %s", err)
1755 	}
1756 	defer objLayer.Shutdown(ctx)
1757 
1758 	bucketErasure, erAPIRouter, err := initAPIHandlerTest(ctx, objLayer, endpoints)
1759 	if err != nil {
1760 		t.Fatalf("Initialization of API handler tests failed: <ERROR> %s", err)
1761 	}
1762 
1763 	// initialize the server and obtain the credentials and root.
1764 	// credentials are necessary to sign the HTTP request.
1765 	if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
1766 		t.Fatalf("Unable to initialize server config. %s", err)
1767 	}
1768 
1769 	// Executing the object layer tests for Erasure.
1770 	objAPITest(objLayer, ErasureTestStr, bucketErasure, erAPIRouter, credentials, t)
1771 
1772 	// clean up the temporary test backend.
1773 	removeRoots(append(erasureDisks, fsDir))
1774 }
1775 
1776 // ExecExtendedObjectLayerAPITest will execute the tests with combinations of encrypted & compressed.
1777 // This can be used to test functionality when reading and writing data.
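// It simply wraps ExecObjectLayerAPITest in execExtended, so the same handler test is
// repeated for each of the encrypted/compressed configurations that execExtended enables.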
1778 func ExecExtendedObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endpoints []string) {
1779 	execExtended(t, func(t *testing.T) {
1780 		ExecObjectLayerAPITest(t, objAPITest, endpoints)
1781 	})
1782 }
1783 
1784 // function to be passed to ExecObjectLayerAPITest, for executing object layer API handler tests.
1785 type objAPITestType func(obj ObjectLayer, instanceType string, bucketName string,
1786 	apiRouter http.Handler, credentials auth.Credentials, t *testing.T)
1787 
1788 // Regular object test type.
1789 type objTestType func(obj ObjectLayer, instanceType string, t TestErrHandler)
1790 
1791 // Special test type for tests with directories.
1792 type objTestTypeWithDirs func(obj ObjectLayer, instanceType string, dirs []string, t TestErrHandler)
1793 
1794 // Special object test type for disk not found situations.
1795 type objTestDiskNotFoundType func(obj ObjectLayer, instanceType string, dirs []string, t *testing.T)
1796 
1797 // ExecObjectLayerTest - executes object layer tests.
1798 // Creates single node and Erasure ObjectLayer instances and runs the tests for both layers.
1799 func ExecObjectLayerTest(t TestErrHandler, objTest objTestType) {
1800 	{
1801 		ctx, cancel := context.WithCancel(context.Background())
1802 		if localMetacacheMgr != nil {
1803 			localMetacacheMgr.deleteAll()
1804 		}
1805 
1806 		objLayer, fsDir, err := prepareFS(ctx)
1807 		if err != nil {
1808 			t.Fatalf("Initialization of object layer failed for single node setup: %s", err)
1809 		}
1810 		setObjectLayer(objLayer)
1811 		initAllSubsystems(ctx)
1812 
1813 		// initialize the server and obtain the credentials and root.
1814 		// credentials are necessary to sign the HTTP request.
1815 		if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
1816 			t.Fatal("Unexpected error", err)
1817 		}
1818 		initConfigSubsystem(ctx, objLayer)
1819 		globalIAMSys.Init(ctx, objLayer, globalEtcdClient, 2*time.Second)
1820 
1821 		// Executing the object layer tests for single node setup.
1822 		objTest(objLayer, ErasureSDStr, t)
1823 
1824 		// Call clean up functions
1825 		cancel()
1826 		setObjectLayer(newObjectLayerFn())
1827 		removeRoots([]string{fsDir})
1828 	}
1829 
1830 	{
1831 		ctx, cancel := context.WithCancel(context.Background())
1832 
1833 		if localMetacacheMgr != nil {
1834 			localMetacacheMgr.deleteAll()
1835 		}
1836 
1837 		initAllSubsystems(ctx)
1838 		objLayer, fsDirs, err := prepareErasureSets32(ctx)
1839 		if err != nil {
1840 			t.Fatalf("Initialization of object layer failed for Erasure setup: %s", err)
1841 		}
1842 		setObjectLayer(objLayer)
1843 		initConfigSubsystem(ctx, objLayer)
1844 		globalIAMSys.Init(ctx, objLayer, globalEtcdClient, 2*time.Second)
1845 
1846 		// Executing the object layer tests for Erasure.
1847 		objTest(objLayer, ErasureTestStr, t)
1848 
1849 		objLayer.Shutdown(context.Background())
1850 		if localMetacacheMgr != nil {
1851 			localMetacacheMgr.deleteAll()
1852 		}
1853 		setObjectLayer(newObjectLayerFn())
1854 		cancel()
1855 		removeRoots(fsDirs)
1856 	}
1857 }
1858 
1859 // ExecObjectLayerTestWithDirs - executes object layer tests.
1860 // Creates an Erasure ObjectLayer instance and runs the test against it.
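// The temporary drive paths backing the Erasure setup are passed to the test body
// as `dirs`, so the test can inspect or alter the on-disk backend layout directly.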
1861 func ExecObjectLayerTestWithDirs(t TestErrHandler, objTest objTestTypeWithDirs) {
1862 	ctx, cancel := context.WithCancel(context.Background())
1863 	defer cancel()
1864 
1865 	if localMetacacheMgr != nil {
1866 		localMetacacheMgr.deleteAll()
1867 	}
1868 
1869 	initAllSubsystems(ctx)
1870 	objLayer, fsDirs, err := prepareErasure16(ctx)
1871 	if err != nil {
1872 		t.Fatalf("Initialization of object layer failed for Erasure setup: %s", err)
1873 	}
1874 	setObjectLayer(objLayer)
1875 	initConfigSubsystem(ctx, objLayer)
1876 	globalIAMSys.Init(ctx, objLayer, globalEtcdClient, 2*time.Second)
1877 
1878 	// Executing the object layer tests for Erasure.
1879 	objTest(objLayer, ErasureTestStr, fsDirs, t)
1880 
1881 	objLayer.Shutdown(context.Background())
1882 	if localMetacacheMgr != nil {
1883 		localMetacacheMgr.deleteAll()
1884 	}
1885 	setObjectLayer(newObjectLayerFn())
1886 	cancel()
1887 	removeRoots(fsDirs)
1888 }
1889 
1890 // ExecObjectLayerDiskAlteredTest - executes object layer tests while altering
1891 // disks in between tests. Creates an Erasure ObjectLayer instance and runs the test for the Erasure layer.
1892 func ExecObjectLayerDiskAlteredTest(t *testing.T, objTest objTestDiskNotFoundType) {
1893 	ctx, cancel := context.WithCancel(context.Background())
1894 	defer cancel()
1895 
1896 	objLayer, fsDirs, err := prepareErasure16(ctx)
1897 	if err != nil {
1898 		t.Fatalf("Initialization of object layer failed for Erasure setup: %s", err)
1899 	}
1900 	defer objLayer.Shutdown(ctx)
1901 
1902 	if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
1903 		t.Fatal("Failed to create config directory", err)
1904 	}
1905 
1906 	// Executing the object layer tests for Erasure.
1907 	objTest(objLayer, ErasureTestStr, fsDirs, t)
1908 	defer removeRoots(fsDirs)
1909 }
1910 
1911 // Special object test type for stale files situations.
1912 type objTestStaleFilesType func(obj ObjectLayer, instanceType string, dirs []string, t *testing.T)
1913 
1914 // ExecObjectLayerStaleFilesTest - executes object layer tests that leave stale
1915 // files/directories under .minio/tmp. Creates an Erasure ObjectLayer instance and runs the test for the Erasure layer.
1916 func ExecObjectLayerStaleFilesTest(t *testing.T, objTest objTestStaleFilesType) {
1917 	ctx, cancel := context.WithCancel(context.Background())
1918 	defer cancel()
1919 
1920 	nDisks := 16
1921 	erasureDisks, err := getRandomDisks(nDisks)
1922 	if err != nil {
1923 		t.Fatalf("Initialization of drives failed for Erasure setup: %s", err)
1924 	}
1925 	objLayer, _, err := initObjectLayer(ctx, mustGetPoolEndpoints(0, erasureDisks...))
1926 	if err != nil {
1927 		t.Fatalf("Initialization of object layer failed for Erasure setup: %s", err)
1928 	}
1929 	if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
1930 		t.Fatal("Failed to create config directory", err)
1931 	}
1932 
1933 	// Executing the object layer tests for Erasure.
1934 	objTest(objLayer, ErasureTestStr, erasureDisks, t)
1935 	defer removeRoots(erasureDisks)
1936 }
1937 
1938 func registerBucketLevelFunc(bucket *mux.Router, api objectAPIHandlers, apiFunctions ...string) {
1939 	for _, apiFunction := range apiFunctions {
1940 		switch apiFunction {
1941 		case "PostPolicy":
1942 			// Register PostPolicy handler.
1943 			bucket.Methods(http.MethodPost).HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(api.PostPolicyBucketHandler)
1944 		case "HeadObject":
1945 			// Register HeadObject handler.
1946 			bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(api.HeadObjectHandler)
1947 		case "GetObject":
1948 			// Register GetObject handler.
1949 bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(api.GetObjectHandler) 1950 case "PutObject": 1951 // Register PutObject handler. 1952 bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(api.PutObjectHandler) 1953 case "DeleteObject": 1954 // Register Delete Object handler. 1955 bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(api.DeleteObjectHandler) 1956 case "CopyObject": 1957 // Register Copy Object handler. 1958 bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(api.CopyObjectHandler) 1959 case "PutBucketPolicy": 1960 // Register PutBucket Policy handler. 1961 bucket.Methods(http.MethodPut).HandlerFunc(api.PutBucketPolicyHandler).Queries("policy", "") 1962 case "DeleteBucketPolicy": 1963 // Register Delete bucket HTTP policy handler. 1964 bucket.Methods(http.MethodDelete).HandlerFunc(api.DeleteBucketPolicyHandler).Queries("policy", "") 1965 case "GetBucketPolicy": 1966 // Register Get Bucket policy HTTP Handler. 1967 bucket.Methods(http.MethodGet).HandlerFunc(api.GetBucketPolicyHandler).Queries("policy", "") 1968 case "GetBucketLifecycle": 1969 bucket.Methods(http.MethodGet).HandlerFunc(api.GetBucketLifecycleHandler).Queries("lifecycle", "") 1970 case "PutBucketLifecycle": 1971 bucket.Methods(http.MethodPut).HandlerFunc(api.PutBucketLifecycleHandler).Queries("lifecycle", "") 1972 case "DeleteBucketLifecycle": 1973 bucket.Methods(http.MethodDelete).HandlerFunc(api.DeleteBucketLifecycleHandler).Queries("lifecycle", "") 1974 case "GetBucketLocation": 1975 // Register GetBucketLocation handler. 1976 bucket.Methods(http.MethodGet).HandlerFunc(api.GetBucketLocationHandler).Queries("location", "") 1977 case "HeadBucket": 1978 // Register HeadBucket handler. 1979 bucket.Methods(http.MethodHead).HandlerFunc(api.HeadBucketHandler) 1980 case "DeleteMultipleObjects": 1981 // Register DeleteMultipleObjects handler. 1982 bucket.Methods(http.MethodPost).HandlerFunc(api.DeleteMultipleObjectsHandler).Queries("delete", "") 1983 case "NewMultipart": 1984 // Register New Multipart upload handler. 1985 bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(api.NewMultipartUploadHandler).Queries("uploads", "") 1986 case "CopyObjectPart": 1987 // Register CopyObjectPart handler. 1988 bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(api.CopyObjectPartHandler).Queries("partNumber", "{partNumber:.*}", "uploadId", "{uploadId:.*}") 1989 case "PutObjectPart": 1990 // Register PutObjectPart handler. 1991 bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(api.PutObjectPartHandler).Queries("partNumber", "{partNumber:.*}", "uploadId", "{uploadId:.*}") 1992 case "ListObjectParts": 1993 // Register ListObjectParts handler. 1994 bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(api.ListObjectPartsHandler).Queries("uploadId", "{uploadId:.*}") 1995 case "ListMultipartUploads": 1996 // Register ListMultipartUploads handler. 1997 bucket.Methods(http.MethodGet).HandlerFunc(api.ListMultipartUploadsHandler).Queries("uploads", "") 1998 case "CompleteMultipart": 1999 // Register Complete Multipart Upload handler. 2000 bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(api.CompleteMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") 2001 case "AbortMultipart": 2002 // Register AbortMultipart Handler. 
2003 			bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(api.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}")
2004 		case "GetBucketNotification":
2005 			// Register GetBucketNotification Handler.
2006 			bucket.Methods(http.MethodGet).HandlerFunc(api.GetBucketNotificationHandler).Queries("notification", "")
2007 		case "PutBucketNotification":
2008 			// Register PutBucketNotification Handler.
2009 			bucket.Methods(http.MethodPut).HandlerFunc(api.PutBucketNotificationHandler).Queries("notification", "")
2010 		case "ListenNotification":
2011 			// Register ListenNotification Handler.
2012 			bucket.Methods(http.MethodGet).HandlerFunc(api.ListenNotificationHandler).Queries("events", "{events:.*}")
2013 		}
2014 	}
2015 }
2016 
2017 // registerAPIFunctions helper function to add API functions identified by name to the routers.
2018 func registerAPIFunctions(muxRouter *mux.Router, objLayer ObjectLayer, apiFunctions ...string) {
2019 	if len(apiFunctions) == 0 {
2020 		// Register all api endpoints by default.
2021 		registerAPIRouter(muxRouter)
2022 		return
2023 	}
2024 	// API Router.
2025 	apiRouter := muxRouter.PathPrefix(SlashSeparator).Subrouter()
2026 	// Bucket router.
2027 	bucketRouter := apiRouter.PathPrefix("/{bucket}").Subrouter()
2028 
2029 	// All object storage operations are registered as HTTP handlers on `objectAPIHandlers`.
2030 	// When the handlers get an HTTP request they use the underlying ObjectLayer to perform operations.
2031 	globalObjLayerMutex.Lock()
2032 	globalObjectAPI = objLayer
2033 	globalObjLayerMutex.Unlock()
2034 
2035 	// When cache is enabled, Put and Get operations are passed
2036 	// to the underlying cache layer to manage object layer and disk caching
2037 	// operations.
2038 	api := objectAPIHandlers{
2039 		ObjectAPI: func() ObjectLayer {
2040 			return globalObjectAPI
2041 		},
2042 	}
2043 
2044 	// Register ListBuckets handler.
2045 	apiRouter.Methods(http.MethodGet).HandlerFunc(api.ListBucketsHandler)
2046 	// Register all bucket level handlers.
2047 	registerBucketLevelFunc(bucketRouter, api, apiFunctions...)
2048 }
2049 
2050 // Takes in the Erasure object layer and the list of API endpoints to be tested/required, registers those API endpoints and returns the HTTP handler.
2051 // Isolated registration of API endpoints is needed while writing unit tests for individual endpoints.
2052 // All the API endpoints are registered only for the default case.
2053 func initTestAPIEndPoints(objLayer ObjectLayer, apiFunctions []string) http.Handler {
2054 	// initialize a new mux router.
2055 	// gorilla/mux is the library used to register all the routes and handle them.
2056 	muxRouter := mux.NewRouter().SkipClean(true).UseEncodedPath()
2057 	if len(apiFunctions) > 0 {
2058 		// Iterate over the list of requested API functions and register them in the mux HTTP handler.
2059 		registerAPIFunctions(muxRouter, objLayer, apiFunctions...)
2060 		muxRouter.Use(globalMiddlewares...)
2061 		return muxRouter
2062 	}
2063 	registerAPIRouter(muxRouter)
2064 	muxRouter.Use(globalMiddlewares...)
2065 	return muxRouter
2066 }
2067 
2068 // generateTLSCertKey creates valid key/cert with registered DNS or IP address
2069 // depending on the passed parameter. That way, we can use tls config without
2070 // passing InsecureSkipVerify flag.
This code is a simplified version of 2071 // https://golang.org/src/crypto/tls/generate_cert.go 2072 func generateTLSCertKey(host string) ([]byte, []byte, error) { 2073 validFor := 365 * 24 * time.Hour 2074 rsaBits := 2048 2075 2076 if len(host) == 0 { 2077 return nil, nil, fmt.Errorf("Missing host parameter") 2078 } 2079 2080 publicKey := func(priv interface{}) interface{} { 2081 switch k := priv.(type) { 2082 case *rsa.PrivateKey: 2083 return &k.PublicKey 2084 case *ecdsa.PrivateKey: 2085 return &k.PublicKey 2086 default: 2087 return nil 2088 } 2089 } 2090 2091 pemBlockForKey := func(priv interface{}) *pem.Block { 2092 switch k := priv.(type) { 2093 case *rsa.PrivateKey: 2094 return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} 2095 case *ecdsa.PrivateKey: 2096 b, err := x509.MarshalECPrivateKey(k) 2097 if err != nil { 2098 fmt.Fprintf(os.Stderr, "Unable to marshal ECDSA private key: %v", err) 2099 os.Exit(2) 2100 } 2101 return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} 2102 default: 2103 return nil 2104 } 2105 } 2106 2107 var priv interface{} 2108 var err error 2109 priv, err = rsa.GenerateKey(crand.Reader, rsaBits) 2110 if err != nil { 2111 return nil, nil, fmt.Errorf("failed to generate private key: %w", err) 2112 } 2113 2114 notBefore := time.Now() 2115 notAfter := notBefore.Add(validFor) 2116 2117 serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) 2118 serialNumber, err := crand.Int(crand.Reader, serialNumberLimit) 2119 if err != nil { 2120 return nil, nil, fmt.Errorf("failed to generate serial number: %w", err) 2121 } 2122 2123 template := x509.Certificate{ 2124 SerialNumber: serialNumber, 2125 Subject: pkix.Name{ 2126 Organization: []string{"Acme Co"}, 2127 }, 2128 NotBefore: notBefore, 2129 NotAfter: notAfter, 2130 2131 KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, 2132 ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, 2133 BasicConstraintsValid: true, 2134 } 2135 2136 hosts := strings.Split(host, ",") 2137 for _, h := range hosts { 2138 if ip := net.ParseIP(h); ip != nil { 2139 template.IPAddresses = append(template.IPAddresses, ip) 2140 } else { 2141 template.DNSNames = append(template.DNSNames, h) 2142 } 2143 } 2144 2145 template.IsCA = true 2146 template.KeyUsage |= x509.KeyUsageCertSign 2147 2148 derBytes, err := x509.CreateCertificate(crand.Reader, &template, &template, publicKey(priv), priv) 2149 if err != nil { 2150 return nil, nil, fmt.Errorf("Failed to create certificate: %w", err) 2151 } 2152 2153 certOut := bytes.NewBuffer([]byte{}) 2154 pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) 2155 2156 keyOut := bytes.NewBuffer([]byte{}) 2157 pem.Encode(keyOut, pemBlockForKey(priv)) 2158 2159 return certOut.Bytes(), keyOut.Bytes(), nil 2160 } 2161 2162 func mustGetPoolEndpoints(poolIdx int, args ...string) EndpointServerPools { 2163 drivesPerSet := len(args) 2164 setCount := 1 2165 if len(args) >= 16 { 2166 drivesPerSet = 16 2167 setCount = len(args) / 16 2168 } 2169 endpoints := mustGetNewEndpoints(poolIdx, drivesPerSet, args...) 2170 return []PoolEndpoints{{ 2171 SetCount: setCount, 2172 DrivesPerSet: drivesPerSet, 2173 Endpoints: endpoints, 2174 CmdLine: strings.Join(args, " "), 2175 }} 2176 } 2177 2178 func mustGetNewEndpoints(poolIdx int, drivesPerSet int, args ...string) (endpoints Endpoints) { 2179 endpoints, err := NewEndpoints(args...) 
2180 	if err != nil {
2181 		panic(err)
2182 	}
2183 	for i := range endpoints {
2184 		endpoints[i].SetPoolIndex(poolIdx)
2185 		endpoints[i].SetSetIndex(i / drivesPerSet)
2186 		endpoints[i].SetDiskIndex(i % drivesPerSet)
2187 	}
2188 	return endpoints
2189 }
2190 
2191 func getEndpointsLocalAddr(endpointServerPools EndpointServerPools) string {
2192 	for _, endpoints := range endpointServerPools {
2193 		for _, endpoint := range endpoints.Endpoints {
2194 			if endpoint.IsLocal && endpoint.Type() == URLEndpointType {
2195 				return endpoint.Host
2196 			}
2197 		}
2198 	}
2199 
2200 	return net.JoinHostPort(globalMinioHost, globalMinioPort)
2201 }
2202 
2203 // getRandomRange - returns a random number in the half-open range [min, max).
2204 func getRandomRange(min, max int, seed int64) int {
2205 	// special value -1 means no explicit seeding.
2206 	if seed != -1 {
2207 		rand.Seed(seed)
2208 	}
2209 	return rand.Intn(max-min) + min
2210 }
2211 
2212 // Randomizes the order of bytes in the byte slice
2213 // using the Fisher-Yates (Knuth) shuffle algorithm.
2214 func randomizeBytes(s []byte, seed int64) []byte {
2215 	// special value -1 means no explicit seeding.
2216 	if seed != -1 {
2217 		rand.Seed(seed)
2218 	}
2219 	n := len(s)
2220 	var j int
2221 	for i := 0; i < n-1; i++ {
2222 		j = i + rand.Intn(n-i)
2223 		s[i], s[j] = s[j], s[i]
2224 	}
2225 	return s
2226 }
2227 
2228 func TestToErrIsNil(t *testing.T) {
2229 	if toObjectErr(nil) != nil {
2230 		t.Errorf("Test expected to return nil, instead got a non-nil value %s", toObjectErr(nil))
2231 	}
2232 	if toStorageErr(nil) != nil {
2233 		t.Errorf("Test expected to return nil, instead got a non-nil value %s", toStorageErr(nil))
2234 	}
2235 	ctx := context.Background()
2236 	if toAPIError(ctx, nil) != noError {
2237 		t.Errorf("Test expected error code to be ErrNone, instead got %s", toAPIError(ctx, nil).Code)
2238 	}
2239 }
2240 
2241 // Uploads an object using DummyDataGen directly via the http
2242 // handler. Each part in a multipart object is a new DummyDataGen
2243 // instance (so the part sizes are needed to reconstruct the whole
2244 // object). When `len(partSizes) == 1`, asMultipart decides whether to upload
2245 // the object as a multipart upload with one part or as a regular single object.
2246 //
2247 // All upload failures are considered test errors - this function is
2248 // intended as a helper for other tests.
2249 func uploadTestObject(t *testing.T, apiRouter http.Handler, creds auth.Credentials, bucketName, objectName string,
2250 	partSizes []int64, metadata map[string]string, asMultipart bool,
2251 ) {
2252 	if len(partSizes) == 0 {
2253 		t.Fatalf("Cannot upload an object without part sizes")
2254 	}
2255 	if len(partSizes) > 1 {
2256 		asMultipart = true
2257 	}
2258 
2259 	checkRespErr := func(rec *httptest.ResponseRecorder, exp int) {
2260 		t.Helper()
2261 		if rec.Code != exp {
2262 			b, err := io.ReadAll(rec.Body)
2263 			t.Fatalf("Expected: %v, Got: %v, Body: %s, err: %v", exp, rec.Code, string(b), err)
2264 		}
2265 	}
2266 
2267 	if !asMultipart {
2268 		srcData := NewDummyDataGen(partSizes[0], 0)
2269 		req, err := newTestSignedRequestV4(http.MethodPut, getPutObjectURL("", bucketName, objectName),
2270 			partSizes[0], srcData, creds.AccessKey, creds.SecretKey, metadata)
2271 		if err != nil {
2272 			t.Fatalf("Unexpected err: %#v", err)
2273 		}
2274 		rec := httptest.NewRecorder()
2275 		apiRouter.ServeHTTP(rec, req)
2276 		checkRespErr(rec, http.StatusOK)
2277 	} else {
2278 		// Multipart upload - each part is a new DummyDataGen
2279 		// (so the part lengths are required to verify the
2280 		// object when reading).
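		//
		// The multipart path below follows the usual S3 sequence:
		//   1. POST ?uploads to initiate the upload and obtain an upload ID,
		//   2. PUT ?partNumber=N&uploadId=... for each part, collecting the returned ETags,
		//   3. POST ?uploadId=... with a CompleteMultipartUpload body listing the parts.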
2281 2282 // Initiate mp upload 2283 reqI, err := newTestSignedRequestV4(http.MethodPost, getNewMultipartURL("", bucketName, objectName), 2284 0, nil, creds.AccessKey, creds.SecretKey, metadata) 2285 if err != nil { 2286 t.Fatalf("Unexpected err: %#v", err) 2287 } 2288 rec := httptest.NewRecorder() 2289 apiRouter.ServeHTTP(rec, reqI) 2290 checkRespErr(rec, http.StatusOK) 2291 decoder := xml.NewDecoder(rec.Body) 2292 multipartResponse := &InitiateMultipartUploadResponse{} 2293 err = decoder.Decode(multipartResponse) 2294 if err != nil { 2295 t.Fatalf("Error decoding the recorded response Body") 2296 } 2297 upID := multipartResponse.UploadID 2298 2299 // Upload each part 2300 var cp []CompletePart 2301 cumulativeSum := int64(0) 2302 for i, partLen := range partSizes { 2303 partID := i + 1 2304 partSrc := NewDummyDataGen(partLen, cumulativeSum) 2305 cumulativeSum += partLen 2306 req, errP := newTestSignedRequestV4(http.MethodPut, 2307 getPutObjectPartURL("", bucketName, objectName, upID, fmt.Sprintf("%d", partID)), 2308 partLen, partSrc, creds.AccessKey, creds.SecretKey, metadata) 2309 if errP != nil { 2310 t.Fatalf("Unexpected err: %#v", errP) 2311 } 2312 rec = httptest.NewRecorder() 2313 apiRouter.ServeHTTP(rec, req) 2314 checkRespErr(rec, http.StatusOK) 2315 header := rec.Header() 2316 if v, ok := header["ETag"]; ok { 2317 etag := v[0] 2318 if etag == "" { 2319 t.Fatalf("Unexpected empty etag") 2320 } 2321 cp = append(cp, CompletePart{PartNumber: partID, ETag: etag[1 : len(etag)-1]}) 2322 } else { 2323 t.Fatalf("Missing etag header") 2324 } 2325 } 2326 2327 // Call CompleteMultipart API 2328 compMpBody, err := xml.Marshal(CompleteMultipartUpload{Parts: cp}) 2329 if err != nil { 2330 t.Fatalf("Unexpected err: %#v", err) 2331 } 2332 reqC, errP := newTestSignedRequestV4(http.MethodPost, 2333 getCompleteMultipartUploadURL("", bucketName, objectName, upID), 2334 int64(len(compMpBody)), bytes.NewReader(compMpBody), 2335 creds.AccessKey, creds.SecretKey, metadata) 2336 if errP != nil { 2337 t.Fatalf("Unexpected err: %#v", errP) 2338 } 2339 rec = httptest.NewRecorder() 2340 apiRouter.ServeHTTP(rec, reqC) 2341 checkRespErr(rec, http.StatusOK) 2342 } 2343 } 2344 2345 // unzip a file into a specific target dir - used to unzip sample data in cmd/testdata/ 2346 func unzipArchive(zipFilePath, targetDir string) error { 2347 zipReader, err := zip.OpenReader(zipFilePath) 2348 if err != nil { 2349 return err 2350 } 2351 for _, file := range zipReader.Reader.File { 2352 zippedFile, err := file.Open() 2353 if err != nil { 2354 return err 2355 } 2356 err = func() (err error) { 2357 defer zippedFile.Close() 2358 extractedFilePath := filepath.Join(targetDir, file.Name) 2359 if file.FileInfo().IsDir() { 2360 return os.MkdirAll(extractedFilePath, file.Mode()) 2361 } 2362 outputFile, err := os.OpenFile(extractedFilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.Mode()) 2363 if err != nil { 2364 return err 2365 } 2366 defer outputFile.Close() 2367 _, err = io.Copy(outputFile, zippedFile) 2368 return err 2369 }() 2370 if err != nil { 2371 return err 2372 } 2373 } 2374 return nil 2375 }
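
// The following is an illustrative sketch (added for documentation, not part of the
// original test suite) showing how the helpers above are typically composed: a
// handler-level test is passed to ExecObjectLayerAPITest, which runs it once against
// the single-drive backend and once against the Erasure backend, while
// uploadTestObject drives the registered router with signed requests. The names
// TestExampleUploadSketch and testExampleUploadSketch are hypothetical.
func TestExampleUploadSketch(t *testing.T) {
	// Only the PutObject endpoint is needed for this sketch, so handler
	// registration is restricted to it.
	ExecObjectLayerAPITest(t, testExampleUploadSketch, []string{"PutObject"})
}

func testExampleUploadSketch(obj ObjectLayer, instanceType string, bucketName string,
	apiRouter http.Handler, credentials auth.Credentials, t *testing.T,
) {
	// Upload a 1 KiB object through the registered PutObject handler using a
	// signed V4 request; uploadTestObject fails the test on any upload error.
	uploadTestObject(t, apiRouter, credentials, bucketName, "sketch-object",
		[]int64{1024}, nil, false)
}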