storj.io/minio@v0.0.0-20230509071714-0cbc90f649b1/cmd/server-main.go

/*
 * MinIO Cloud Storage, (C) 2015-2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
    "context"
    "crypto/tls"
    "errors"
    "fmt"
    "math/rand"
    "net"
    "os"
    "os/signal"
    "strings"
    "sync"
    "syscall"
    "time"

    "github.com/minio/cli"

    "storj.io/minio/cmd/config"
    xhttp "storj.io/minio/cmd/http"
    "storj.io/minio/cmd/logger"
    "storj.io/minio/cmd/rest"
    "storj.io/minio/pkg/auth"
    "storj.io/minio/pkg/bucket/bandwidth"
    "storj.io/minio/pkg/certs"
    "storj.io/minio/pkg/color"
    "storj.io/minio/pkg/env"
    "storj.io/minio/pkg/fips"
    "storj.io/minio/pkg/madmin"
    "storj.io/minio/pkg/sync/errgroup"
)

// ServerFlags - server command specific flags
var ServerFlags = []cli.Flag{
    cli.StringFlag{
        Name:  "address",
        Value: ":" + GlobalMinioDefaultPort,
        Usage: "bind to a specific ADDRESS:PORT, ADDRESS can be an IP or hostname",
    },
}

var serverCmd = cli.Command{
    Name:   "server",
    Usage:  "start object storage server",
    Flags:  append(ServerFlags, GlobalFlags...),
    Action: serverMain,
    CustomHelpTemplate: `NAME:
  {{.HelpName}} - {{.Usage}}

USAGE:
  {{.HelpName}} {{if .VisibleFlags}}[FLAGS] {{end}}DIR1 [DIR2..]
  {{.HelpName}} {{if .VisibleFlags}}[FLAGS] {{end}}DIR{1...64}
  {{.HelpName}} {{if .VisibleFlags}}[FLAGS] {{end}}DIR{1...64} DIR{65...128}

DIR:
  DIR points to a directory on a filesystem. When you want to combine
  multiple drives into a single large system, pass one directory per
  filesystem separated by space. You may also use a '...' convention
  to abbreviate the directory arguments. Remote directories in a
  distributed setup are encoded as HTTP(s) URIs.
{{if .VisibleFlags}}
FLAGS:
  {{range .VisibleFlags}}{{.}}
  {{end}}{{end}}
EXAMPLES:
  1. Start minio server on "/home/shared" directory.
     {{.Prompt}} {{.HelpName}} /home/shared

  2. Start single node server with 64 local drives "/mnt/data1" to "/mnt/data64".
     {{.Prompt}} {{.HelpName}} /mnt/data{1...64}

  3. Start distributed minio server on a 32 node setup with 32 drives each, run the following command on all the nodes
     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_USER{{.AssignmentOperator}}minio
     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_PASSWORD{{.AssignmentOperator}}miniostorage
     {{.Prompt}} {{.HelpName}} http://node{1...32}.example.com/mnt/export{1...32}

  4. Start distributed minio server in an expanded setup, run the following command on all the nodes
     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_USER{{.AssignmentOperator}}minio
     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_PASSWORD{{.AssignmentOperator}}miniostorage
     {{.Prompt}} {{.HelpName}} http://node{1...16}.example.com/mnt/export{1...32} \
                 http://node{17...64}.example.com/mnt/export{1...64}
`,
}

func serverCmdArgs(ctx *cli.Context) []string {
    v := env.Get(config.EnvArgs, "")
    if v == "" {
        // Fall back to older ENV MINIO_ENDPOINTS
        v = env.Get(config.EnvEndpoints, "")
    }
    if v == "" {
        if !ctx.Args().Present() || ctx.Args().First() == "help" {
            cli.ShowCommandHelpAndExit(ctx, ctx.Command.Name, 1)
        }
        return ctx.Args()
    }
    return strings.Fields(v)
}

func serverHandleCmdArgs(ctx *cli.Context) {
    // Handle common command args.
    handleCommonCmdArgs(ctx)

    logger.FatalIf(CheckLocalServerAddr(GlobalCLIContext.Addr), "Unable to validate passed arguments")

    var err error
    var setupType SetupType

    // Check and load TLS certificates.
    globalPublicCerts, globalTLSCerts, GlobalIsTLS, err = getTLSConfig()
    logger.FatalIf(err, "Unable to load the TLS configuration")

    // Check and load Root CAs.
    globalRootCAs, err = certs.GetRootCAs(globalCertsCADir.Get())
    logger.FatalIf(err, "Failed to read root CAs (%v)", err)

    // Add the global public crts as part of global root CAs
    for _, publicCrt := range globalPublicCerts {
        globalRootCAs.AddCert(publicCrt)
    }

    // Register root CAs for remote ENVs
    env.RegisterGlobalCAs(globalRootCAs)

    globalMinioAddr = GlobalCLIContext.Addr

    globalMinioHost, globalMinioPort = mustSplitHostPort(globalMinioAddr)
    globalEndpoints, setupType, err = createServerEndpoints(GlobalCLIContext.Addr, serverCmdArgs(ctx)...)
    logger.FatalIf(err, "Invalid command line arguments")

    globalLocalNodeName = GetLocalPeer(globalEndpoints, globalMinioHost, globalMinioPort)

    globalRemoteEndpoints = make(map[string]Endpoint)
    for _, z := range globalEndpoints {
        for _, ep := range z.Endpoints {
            if ep.IsLocal {
                globalRemoteEndpoints[globalLocalNodeName] = ep
            } else {
                globalRemoteEndpoints[ep.Host] = ep
            }
        }
    }

    // allow transport to be HTTP/1.1 for proxying.
    globalProxyTransport = newCustomHTTPProxyTransport(&tls.Config{
        RootCAs:          globalRootCAs,
        CipherSuites:     fips.CipherSuitesTLS(),
        CurvePreferences: fips.EllipticCurvesTLS(),
    }, rest.DefaultTimeout)()
    globalProxyEndpoints = GetProxyEndpoints(globalEndpoints)
    globalInternodeTransport = newInternodeHTTPTransport(&tls.Config{
        RootCAs:          globalRootCAs,
        CipherSuites:     fips.CipherSuitesTLS(),
        CurvePreferences: fips.EllipticCurvesTLS(),
    }, rest.DefaultTimeout)()

    // On macOS, if a process already listens on LOCALIPADDR:PORT, net.Listen() falls back
    // to IPv6 address ie minio will start listening on IPv6 address whereas another
    // (non-)minio process is listening on IPv4 of given port.
    // To avoid this error situation we check for port availability.
    logger.FatalIf(checkPortAvailability(globalMinioHost, globalMinioPort), "Unable to start the server")

    globalIsErasure = (setupType == ErasureSetupType)
    globalIsDistErasure = (setupType == DistErasureSetupType)
    if globalIsDistErasure {
        globalIsErasure = true
    }
}

func serverHandleEnvVars() {
    // Handle common environment variables.
    HandleCommonEnvVars()
}

var globalHealStateLK sync.RWMutex

func newAllSubsystems() {
    if globalIsErasure {
        globalHealStateLK.Lock()
        // New global heal state
        globalAllHealState = newHealState(true)
        globalBackgroundHealState = newHealState(false)
        globalHealStateLK.Unlock()
    }

    // Create new notification system and initialize notification targets
    GlobalNotificationSys = NewNotificationSys(globalEndpoints)

    // Create new bucket metadata system.
    if globalBucketMetadataSys == nil {
        globalBucketMetadataSys = NewBucketMetadataSys()
    } else {
        // Reinitialize safely when testing.
        globalBucketMetadataSys.Reset()
    }

    // Create the bucket bandwidth monitor
    globalBucketMonitor = bandwidth.NewMonitor(GlobalServiceDoneCh)

    // Create a new config system.
    globalConfigSys = NewConfigSys()

    // Create new IAM system.
    GlobalIAMSys = NewIAMSys()

    // Create new policy system.
    globalPolicySys = NewPolicySys()

    // Create new lifecycle system.
    globalLifecycleSys = NewLifecycleSys()

    // Create new bucket encryption subsystem
    globalBucketSSEConfigSys = NewBucketSSEConfigSys()

    // Create new bucket object lock subsystem
    globalBucketObjectLockSys = NewBucketObjectLockSys()

    // Create new bucket quota subsystem
    GlobalBucketQuotaSys = NewBucketQuotaSys()

    // Create new bucket versioning subsystem
    if globalBucketVersioningSys == nil {
        globalBucketVersioningSys = NewBucketVersioningSys()
    } else {
        globalBucketVersioningSys.Reset()
    }

    // Create new bucket replication subsystem
    globalBucketTargetSys = NewBucketTargetSys()
}

func configRetriableErrors(err error) bool {
    // Initializing sub-systems needs a retry mechanism for
    // the following reasons:
    //  - Read quorum is lost just after the initialization
    //    of the object layer.
    //  - Write quorum is not met when upgrading the configuration
    //    version, a migration is needed, etc.
    rquorum := InsufficientReadQuorum{}
    wquorum := InsufficientWriteQuorum{}

    // One of these retriable errors shall be retried.
    return errors.Is(err, errDiskNotFound) ||
        errors.Is(err, errConfigNotFound) ||
        errors.Is(err, context.DeadlineExceeded) ||
        errors.Is(err, errErasureWriteQuorum) ||
        errors.Is(err, errErasureReadQuorum) ||
        errors.As(err, &rquorum) ||
        errors.As(err, &wquorum) ||
        isErrBucketNotFound(err) ||
        errors.Is(err, os.ErrDeadlineExceeded)
}

func initServer(ctx context.Context, newObject ObjectLayer) error {
    // Once the config is fully loaded, initialize the new object layer.
    SetObjectLayer(newObject)

    // Hold the lock for the entire migration so that only one server
    // migrates the entire config at a given time; this big transaction
    // lock ensures that. The same applies to rotation of encrypted
    // content.
    txnLk := newObject.NewNSLock(minioMetaBucket, minioConfigPrefix+"/transaction.lock")

    // **** WARNING ****
    // Migrating to encrypted backend should happen before initialization of any
    // sub-systems, make sure that we do not move the above codeblock elsewhere.

    r := rand.New(rand.NewSource(time.Now().UnixNano()))

    lockTimeout := newDynamicTimeout(5*time.Second, 3*time.Second)

    var err error
    for {
        select {
        case <-ctx.Done():
            // Retry was canceled successfully.
            return fmt.Errorf("Initializing sub-systems stopped gracefully %w", ctx.Err())
        default:
        }

        // Let one of the servers acquire the lock; if not, let it time out,
        // which shall be retried again by this loop.
        if _, err = txnLk.GetLock(ctx, lockTimeout); err != nil {
            logger.Info("Waiting for all MinIO sub-systems to be initialized.. trying to acquire lock")

            time.Sleep(time.Duration(r.Float64() * float64(5*time.Second)))
            continue
        }

        // These messages are primarily meant for a distributed setup, so only log during distributed setup.
        if globalIsDistErasure {
            logger.Info("Waiting for all MinIO sub-systems to be initialized.. lock acquired")
        }

        // Migrate all backend configs to encrypted backend configs and optionally
        // handle rotating keys for encryption; any retriable failure is
        // retried by this loop.
        if err = handleEncryptedConfigBackend(newObject); err == nil {
            // Upon successfully migrating the config, initialize all sub-systems;
            // if all sub-systems initialize successfully, return right away.
            if err = initAllSubsystems(ctx, newObject); err == nil {
                txnLk.Unlock()
                // All successful, return.
                if globalIsDistErasure {
                    // These messages are primarily meant for a distributed setup, so only log during distributed setup.
                    logger.Info("All MinIO sub-systems initialized successfully")
                }
                return nil
            }
        }

        txnLk.Unlock() // Unlock the transaction lock and allow other nodes to acquire the lock if possible.

        if configRetriableErrors(err) {
            logger.Info("Waiting for all MinIO sub-systems to be initialized.. possible cause (%v)", err)
            time.Sleep(time.Duration(r.Float64() * float64(5*time.Second)))
            continue
        }

        // Any other unhandled error is returned right here.
        return fmt.Errorf("Unable to initialize sub-systems: %w", err)
    }
}

func initAllSubsystems(ctx context.Context, newObject ObjectLayer) (err error) {
    // %w is used by all error returns here to make sure we wrap the
    // underlying error; when modifying this code, keep doing so whenever
    // you add extra context to an error. This ensures the top-level
    // retry works accordingly.
    // List buckets to heal, to be re-used for loading configs.

    buckets, err := newObject.ListBuckets(ctx)
    if err != nil {
        return fmt.Errorf("Unable to list buckets to heal: %w", err)
    }

    if globalIsErasure {
        if len(buckets) > 0 {
            if len(buckets) == 1 {
                logger.Info(fmt.Sprintf("Verifying if %d bucket is consistent across drives...", len(buckets)))
            } else {
                logger.Info(fmt.Sprintf("Verifying if %d buckets are consistent across drives...", len(buckets)))
            }
        }

        // Limit to no more than 50 concurrent buckets.
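        // Each bucket is healed in its own goroutine via the errgroup below:
        // WithConcurrency(50) caps the number of in-flight HealBucket calls,
        // and WithCancelOnError cancels the shared context once any heal
        // fails, so the remaining heals stop early.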
        g := errgroup.WithNErrs(len(buckets)).WithConcurrency(50)
        ctx, cancel := g.WithCancelOnError(ctx)
        defer cancel()
        for index := range buckets {
            index := index
            g.Go(func() error {
                _, berr := newObject.HealBucket(ctx, buckets[index].Name, madmin.HealOpts{Recreate: true})
                return berr
            }, index)
        }
        if err := g.WaitErr(); err != nil {
            return fmt.Errorf("Unable to list buckets to heal: %w", err)
        }
    }

    // Initialize bucket metadata sub-system.
    globalBucketMetadataSys.Init(ctx, buckets, newObject)

    // Initialize notification system.
    GlobalNotificationSys.Init(ctx, buckets, newObject)

    // Initialize bucket targets sub-system.
    globalBucketTargetSys.Init(ctx, buckets, newObject)

    return nil
}

// serverMain handler called for 'minio server' command.
func serverMain(ctx *cli.Context) {
    defer globalDNSCache.Stop()

    signal.Notify(globalOSSignalCh, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)

    go handleSignals()

    setDefaultProfilerRates()

    // Initialize globalConsoleSys system
    globalConsoleSys = NewConsoleLogger(GlobalContext)
    logger.AddTarget(globalConsoleSys)

    // Perform any self-tests
    bitrotSelfTest()
    erasureSelfTest()
    compressSelfTest()

    // Handle all server command args.
    serverHandleCmdArgs(ctx)

    // Handle all server environment vars.
    serverHandleEnvVars()

    // Set node name, only set for distributed setup.
    globalConsoleSys.SetNodeName(globalLocalNodeName)

    // Initialize all help
    initHelp()

    // Initialize all sub-systems
    newAllSubsystems()

    globalMinioEndpoint = func() string {
        host := globalMinioHost
        if host == "" {
            host = sortIPs(localIP4.ToSlice())[0]
        }
        return fmt.Sprintf("%s://%s", getURLScheme(GlobalIsTLS), net.JoinHostPort(host, globalMinioPort))
    }()

    // In a distributed setup, error out if no certificates are found for HTTPS endpoints.
    if globalIsDistErasure {
        if globalEndpoints.HTTPS() && !GlobalIsTLS {
            logger.Fatal(config.ErrNoCertsAndHTTPSEndpoints(nil), "Unable to start the server")
        }
        if !globalEndpoints.HTTPS() && GlobalIsTLS {
            logger.Fatal(config.ErrCertsAndHTTPEndpoints(nil), "Unable to start the server")
        }
    }

    if !GlobalCLIContext.Quiet && !globalInplaceUpdateDisabled {
        // Check for new updates from dl.min.io.
        checkUpdate(getMinioMode())
    }

    if !globalActiveCred.IsValid() && globalIsDistErasure {
        logger.Fatal(config.ErrEnvCredentialsMissingDistributed(nil),
            "Unable to initialize the server in distributed mode")
    }

    // Set system resources to maximum.
    setMaxResources()

    // Configure server.
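    // configureServerHandler builds the top-level HTTP handler (S3 API,
    // admin, and internode RPC routes) for the configured endpoints; the
    // handler is wrapped below with CORS and critical-error handling before
    // being served.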
    handler, err := configureServerHandler(globalEndpoints)
    if err != nil {
        logger.Fatal(config.ErrUnexpectedError(err), "Unable to configure one of server's RPC services")
    }

    var getCert certs.GetCertificateFunc
    if globalTLSCerts != nil {
        getCert = globalTLSCerts.GetCertificate
    }

    httpServer := xhttp.NewServer([]string{globalMinioAddr}, criticalErrorHandler{corsHandler(handler)}, getCert)
    httpServer.BaseContext = func(listener net.Listener) context.Context {
        return GlobalContext
    }
    go func() {
        globalHTTPServerErrorCh <- httpServer.Start()
    }()

    setHTTPServer(httpServer)

    if globalIsDistErasure && globalEndpoints.FirstLocal() {
        for {
            // Additionally in distributed setup, validate the setup and configuration.
            err := verifyServerSystemConfig(GlobalContext, globalEndpoints)
            if err == nil || errors.Is(err, context.Canceled) {
                break
            }
            logger.LogIf(GlobalContext, err, "Unable to initialize distributed setup, retrying.. after 5 seconds")
            select {
            case <-GlobalContext.Done():
                return
            case <-time.After(500 * time.Millisecond):
            }
        }
    }

    newObject, err := newObjectLayer(GlobalContext, globalEndpoints)
    if err != nil {
        logFatalErrs(err, Endpoint{}, true)
    }

    logger.SetDeploymentID(globalDeploymentID)

    // Enable background operations for erasure coding
    if globalIsErasure {
        initAutoHeal(GlobalContext, newObject)
        initBackgroundTransition(GlobalContext, newObject)
    }

    initBackgroundExpiry(GlobalContext, newObject)
    initDataScanner(GlobalContext, newObject)

    if err = initServer(GlobalContext, newObject); err != nil {
        var cerr config.Err
        // For any config error, we don't need to drop into safe-mode;
        // instead it's a user error and should be fixed by the user.
        if errors.As(err, &cerr) {
            logger.FatalIf(err, "Unable to initialize the server")
        }

        // If context was canceled
        if errors.Is(err, context.Canceled) {
            logger.FatalIf(err, "Server startup canceled upon user request")
        }
    }

    if globalIsErasure { // to be done after config init
        initBackgroundReplication(GlobalContext, newObject)
    }
    if globalCacheConfig.Enabled {
        // initialize the new disk cache objects.
        var cacheAPI CacheObjectLayer
        cacheAPI, err = newServerCacheObjects(GlobalContext, globalCacheConfig)
        logger.FatalIf(err, "Unable to initialize disk caching")

        setCacheObjectLayer(cacheAPI)
    }

    // Initialize users credentials and policies in background right after config has initialized.
    go GlobalIAMSys.Init(GlobalContext, newObject)

    // Prints the formatted startup message, if err is not nil then it prints additional information as well.
    printStartupMessage(getAPIEndpoints(), err)

    if globalActiveCred.Equal(auth.DefaultCredentials) {
        msg := fmt.Sprintf("Detected default credentials '%s', please change the credentials immediately using 'MINIO_ROOT_USER' and 'MINIO_ROOT_PASSWORD'", globalActiveCred)
        logger.StartupMessage(color.RedBold(msg))
    }

    <-globalOSSignalCh
}

// Initialize object layer with the supplied disks, objectLayer is nil upon any error.
func newObjectLayer(ctx context.Context, endpointServerPools EndpointServerPools) (newObject ObjectLayer, err error) {
    // For FS only, directly use the disk.
    if endpointServerPools.NEndpoints() == 1 {
        // Initialize new FS object layer.
        return NewFSObjectLayer(endpointServerPools[0].Endpoints[0].Path)
    }

    return newErasureServerPools(ctx, endpointServerPools)
}
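
// Example invocations, mirroring the EXAMPLES section of the serverCmd help
// template above (host names and paths are illustrative):
//
//	minio server /home/shared
//	minio server /mnt/data{1...64}
//	minio server http://node{1...32}.example.com/mnt/export{1...32}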