// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"path/filepath"
	"sort"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/spf13/cobra"
	"golang.org/x/net/context"
	"google.golang.org/api/compute/v1"
	gs "google.golang.org/api/storage/v1"

	"github.com/coreos/mantle/platform/api/aws"
	"github.com/coreos/mantle/platform/api/azure"
	"github.com/coreos/mantle/platform/api/gcloud"
	"github.com/coreos/mantle/storage"
	"github.com/coreos/mantle/storage/index"
)

var (
	// releaseDryRun, when set via --dry-run, makes every step report what it
	// would do without mutating any cloud resources.
	releaseDryRun bool
	cmdRelease    = &cobra.Command{
		Use:   "release [options]",
		Short: "Publish a new CoreOS release.",
		Run:   runRelease,
		Long:  `Publish a new CoreOS release.`,
	}
)

func init() {
	// Flag targets (awsCredentialsFile, selectedDistro, azureProfile) and the
	// AddSpecFlags helpers are declared elsewhere in this package.
	cmdRelease.Flags().StringVar(&awsCredentialsFile, "aws-credentials", "", "AWS credentials file")
	cmdRelease.Flags().StringVar(&selectedDistro, "distro", "cl", "system to release")
	cmdRelease.Flags().StringVar(&azureProfile, "azure-profile", "", "Azure Profile json file")
	cmdRelease.Flags().BoolVarP(&releaseDryRun, "dry-run", "n", false,
		"perform a trial run, do not make changes")
	AddSpecFlags(cmdRelease.Flags())
	AddFedoraSpecFlags(cmdRelease.Flags())
	AddFcosSpecFlags(cmdRelease.Flags())
	root.AddCommand(cmdRelease)
}

// runRelease dispatches to the distro-specific release routine selected by
// the --distro flag. Any failure terminates the process via plog.Fatal.
// NOTE(review): the "Unknown distro" format string ends with a stray colon,
// and runFedoraRelease below has no case in this switch — confirm whether
// a "fedora" case was intended.
func runRelease(cmd *cobra.Command, args []string) {
	switch selectedDistro {
	case "cl":
		if err := runCLRelease(cmd, args); err != nil {
			plog.Fatal(err)
		}
	case "fcos":
		if err := runFcosRelease(cmd, args); err != nil {
			plog.Fatal(err)
		}
	default:
		plog.Fatalf("Unknown distro %q:", selectedDistro)
	}
}

// runFcosRelease publishes a Fedora CoreOS release: it makes the build's S3
// objects public (doS3) and then inserts the build into the channel's
// releases.json index (modifyReleaseMetadataIndex). Positional arguments are
// rejected; all inputs come from flags/spec globals.
func runFcosRelease(cmd *cobra.Command, args []string) error {
	if len(args) > 0 {
		plog.Fatal("No args accepted")
	}

	spec := FcosChannelSpec()
	FcosValidateArguments()

	doS3(&spec)

	modifyReleaseMetadataIndex(&spec, specCommitId)

	return nil
}

// runFedoraRelease makes the Fedora AWS images public for the configured
// channel spec. Note that it passes a nil source bucket to doAWS, so only
// steps that don't read from GS storage may run for this path.
func runFedoraRelease(cmd *cobra.Command, args []string) error {
	if len(args) > 0 {
		plog.Fatal("No args accepted")
	}

	spec, err := ChannelFedoraSpec()
	if err != nil {
		return err
	}
	ctx := context.Background()
	client := &http.Client{}

	// Make AWS images public.
	doAWS(ctx, client, nil, &spec)

	return nil
}

// runCLRelease publishes a Container Linux release: it registers/publishes
// the GCE, Azure, and AWS images for the channel, then syncs and re-indexes
// every destination GS bucket listed in the channel spec. All fatal errors
// abort the process directly rather than being returned.
func runCLRelease(cmd *cobra.Command, args []string) error {
	if len(args) > 0 {
		plog.Fatal("No args accepted")
	}

	spec := ChannelSpec()
	ctx := context.Background()
	client, err := getGoogleClient()
	if err != nil {
		plog.Fatalf("Authentication failed: %v", err)
	}

	src, err := storage.NewBucket(client, spec.SourceURL())
	if err != nil {
		plog.Fatal(err)
	}
	src.WriteDryRun(releaseDryRun)

	if err := src.Fetch(ctx); err != nil {
		plog.Fatal(err)
	}

	// Sanity check!
	// version.txt must exist in the source prefix or the build is incomplete.
	if vertxt := src.Object(src.Prefix() + "version.txt"); vertxt == nil {
		verurl := src.URL().String() + "version.txt"
		plog.Fatalf("File not found: %s", verurl)
	}

	// Register GCE image if needed.
	doGCE(ctx, client, src, &spec)

	// Make Azure images public.
	doAzure(ctx, client, src, &spec)

	// Make AWS images public.
	doAWS(ctx, client, src, &spec)

	for _, dSpec := range spec.Destinations {
		dst, err := storage.NewBucket(client, dSpec.BaseURL)
		if err != nil {
			plog.Fatal(err)
		}
		dst.WriteDryRun(releaseDryRun)

		// Fetch parent directories non-recursively to re-index it later.
		for _, prefix := range dSpec.ParentPrefixes() {
			if err := dst.FetchPrefix(ctx, prefix, false); err != nil {
				plog.Fatal(err)
			}
		}

		// Fetch and sync each destination directory.
		for _, prefix := range dSpec.FinalPrefixes() {
			if err := dst.FetchPrefix(ctx, prefix, true); err != nil {
				plog.Fatal(err)
			}

			sync := index.NewSyncIndexJob(src, dst)
			sync.DestinationPrefix(prefix)
			sync.DirectoryHTML(dSpec.DirectoryHTML)
			sync.IndexHTML(dSpec.IndexHTML)
			sync.Delete(true)
			if dSpec.Title != "" {
				sync.Name(dSpec.Title)
			}
			if err := sync.Do(ctx); err != nil {
				plog.Fatal(err)
			}
		}

		// Now refresh the parent directory indexes.
		for _, prefix := range dSpec.ParentPrefixes() {
			parent := index.NewIndexJob(dst)
			parent.Prefix(prefix)
			parent.DirectoryHTML(dSpec.DirectoryHTML)
			parent.IndexHTML(dSpec.IndexHTML)
			parent.Recursive(false)
			parent.Delete(true)
			if dSpec.Title != "" {
				parent.Name(dSpec.Title)
			}
			if err := parent.Do(ctx); err != nil {
				plog.Fatal(err)
			}
		}
	}

	return nil
}

// sanitizeVersion rewrites specVersion into a GCE-image-name-safe form by
// replacing every '.' and '+' with '-'.
func sanitizeVersion() string {
	v := strings.Replace(specVersion, ".", "-", -1)
	return strings.Replace(v, "+", "-", -1)
}

// gceWaitForImage blocks until the pending GCE image operation completes,
// logging progress every 3 seconds. Failure is fatal.
func gceWaitForImage(pending *gcloud.Pending) {
	plog.Infof("Waiting for image creation to finish...")
	pending.Interval = 3 * time.Second
	pending.Progress = func(_ string, _ time.Duration, op *compute.Operation) error {
		status := strings.ToLower(op.Status)
		if op.Progress != 0 {
			plog.Infof("Image creation is %s: %s % 2d%%", status, op.StatusMessage, op.Progress)
		} else {
			plog.Infof("Image creation is %s. %s", status, op.StatusMessage)
		}
		return nil
	}
	if err := pending.Wait(); err != nil {
		plog.Fatal(err)
	}
	plog.Info("Success!")
}

// gceUploadImage creates a GCE image named name from the GS object obj,
// waits for the creation to finish, and returns the operation's target link
// (the image's self link).
func gceUploadImage(spec *channelSpec, api *gcloud.API, obj *gs.Object, name, desc string) string {
	plog.Noticef("Creating GCE image %s", name)
	op, pending, err := api.CreateImage(&gcloud.ImageSpec{
		SourceImage: obj.MediaLink,
		Family:      spec.GCE.Family,
		Name:        name,
		Description: desc,
		Licenses:    spec.GCE.Licenses,
	}, false)
	if err != nil {
		plog.Fatalf("GCE image creation failed: %v", err)
	}

	gceWaitForImage(pending)

	return op.TargetLink
}

// doGCE registers the release's GCE image (if not already present), publishes
// its name to the configured GS object, deprecates older images in the family
// in favor of the new one, and prunes images beyond spec.GCE.Limit. A no-op
// when the spec disables GCE; honors releaseDryRun before any mutation.
func doGCE(ctx context.Context, client *http.Client, src *storage.Bucket, spec *channelSpec) {
	if spec.GCE.Project == "" || spec.GCE.Image == "" {
		plog.Notice("GCE image creation disabled.")
		return
	}

	api, err := gcloud.New(&gcloud.Options{
		Project:     spec.GCE.Project,
		JSONKeyFile: gceJSONKeyFile,
	})
	if err != nil {
		plog.Fatalf("GCE client failed: %v", err)
	}

	// Image names are <family>-<sanitized version>-v<YYYYMMDD>; nameVer is the
	// date-less prefix used to detect an existing image for this version.
	nameVer := fmt.Sprintf("%s-%s-v", spec.GCE.Family, sanitizeVersion())
	date := time.Now().UTC()
	name := nameVer + date.Format("20060102")
	desc := fmt.Sprintf("%s, %s, %s published on %s", spec.GCE.Description,
		specVersion, specBoard, date.Format("2006-01-02"))

	images, err := api.ListImages(ctx, spec.GCE.Family+"-")
	if err != nil {
		plog.Fatal(err)
	}

	// Split family images into ones matching this version (conflicting) and
	// everything else (oldImages), the latter sorted newest-first.
	var conflicting, oldImages []*compute.Image
	for _, image := range images {
		if strings.HasPrefix(image.Name, nameVer) {
			conflicting = append(conflicting, image)
		} else {
			oldImages = append(oldImages, image)
		}
	}
	sort.Slice(oldImages, func(i, j int) bool {
		getCreation := func(image *compute.Image) time.Time {
			stamp, err := time.Parse(time.RFC3339, image.CreationTimestamp)
			if err != nil {
				plog.Fatalf("Couldn't parse timestamp %q: %v", image.CreationTimestamp, err)
			}
			return stamp
		}
		return getCreation(oldImages[i]).After(getCreation(oldImages[j]))
	})

	// Check for any with the same version but possibly different dates.
	var imageLink string
	if len(conflicting) > 1 {
		plog.Fatalf("Duplicate GCE images found: %v", conflicting)
	} else if len(conflicting) == 1 {
		// Reuse the existing image for this version instead of creating one.
		image := conflicting[0]
		name = image.Name
		imageLink = image.SelfLink

		if image.Status == "FAILED" {
			plog.Fatalf("Found existing GCE image %q in state %q", name, image.Status)
		}

		plog.Noticef("GCE image already exists: %s", name)

		if releaseDryRun {
			return
		}

		// If a previous run left the creation in flight, wait for it here.
		if image.Status == "PENDING" {
			pending, err := api.GetPendingForImage(image)
			if err != nil {
				plog.Fatalf("Couldn't wait for image creation: %v", err)
			}
			gceWaitForImage(pending)
		}
	} else {
		obj := src.Object(src.Prefix() + spec.GCE.Image)
		if obj == nil {
			plog.Fatalf("GCE image not found %s%s", src.URL(), spec.GCE.Image)
		}

		if releaseDryRun {
			plog.Noticef("Would create GCE image %s", name)
			return
		}

		imageLink = gceUploadImage(spec, api, obj, name, desc)
	}

	// Publish the image path (projects/<project>/global/images/<name>) as a
	// plain-text GS object for downstream consumers, if configured.
	if spec.GCE.Publish != "" {
		obj := gs.Object{
			Name:        src.Prefix() + spec.GCE.Publish,
			ContentType: "text/plain",
		}
		media := strings.NewReader(
			fmt.Sprintf("projects/%s/global/images/%s\n",
				spec.GCE.Project, name))
		if err := src.Upload(ctx, &obj, media); err != nil {
			plog.Fatal(err)
		}
	} else {
		plog.Notice("GCE image name publishing disabled.")
	}

	// Deprecate every older image that isn't already deprecated, pointing
	// users at the new image. Operations are collected and awaited together.
	var pendings []*gcloud.Pending
	for _, old := range oldImages {
		if old.Deprecated != nil && old.Deprecated.State != "" {
			continue
		}
		plog.Noticef("Deprecating old image %s", old.Name)
		pending, err := api.DeprecateImage(old.Name, gcloud.DeprecationStateDeprecated, imageLink)
		if err != nil {
			plog.Fatal(err)
		}
		pending.Interval = 1 * time.Second
		pending.Timeout = 0
		pendings = append(pendings, pending)
	}

	// Prune images beyond the configured retention limit (oldImages is sorted
	// newest-first, so the tail holds the oldest images).
	if spec.GCE.Limit > 0 && len(oldImages) > spec.GCE.Limit {
		plog.Noticef("Pruning %d GCE images.", len(oldImages)-spec.GCE.Limit)
		for _, old := range oldImages[spec.GCE.Limit:] {
			if old.Name == "coreos-alpha-1122-0-0-v20160727" {
				plog.Noticef("%v: not deleting: hardcoded solution to hardcoded problem", old.Name)
				continue
			}
			plog.Noticef("Deleting old image %s", old.Name)
			pending, err := api.DeleteImage(old.Name)
			if err != nil {
				plog.Fatal(err)
			}
			pending.Interval = 1 * time.Second
			pending.Timeout = 0
			pendings = append(pendings, pending)
		}
	}

	plog.Infof("Waiting on %d operations.", len(pendings))
	for _, pending := range pendings {
		err := pending.Wait()
		if err != nil {
			plog.Fatal(err)
		}
	}
}

// doAzure shares the channel's Azure image publicly in every configured Azure
// environment. A no-op when the spec has no storage account; dry-run only
// logs what would be shared.
func doAzure(ctx context.Context, client *http.Client, src *storage.Bucket, spec *channelSpec) {
	if spec.Azure.StorageAccount == "" {
		plog.Notice("Azure image creation disabled.")
		return
	}

	// channel name should be caps for azure image
	imageName := fmt.Sprintf("%s-%s-%s", spec.Azure.Offer, strings.Title(specChannel), specVersion)

	for _, environment := range spec.Azure.Environments {
		api, err := azure.New(&azure.Options{
			AzureProfile:      azureProfile,
			AzureSubscription: environment.SubscriptionName,
		})
		if err != nil {
			plog.Fatalf("failed to create Azure API: %v", err)
		}

		if releaseDryRun {
			// TODO(bgilbert): check that the image exists
			plog.Printf("Would share %q on %v", imageName, environment.SubscriptionName)
			continue
		} else {
			plog.Printf("Sharing %q on %v...", imageName, environment.SubscriptionName)
		}

		if err := api.ShareImage(imageName, "public"); err != nil {
			plog.Fatalf("failed to share image %q: %v", imageName, err)
		}
	}
}

// doAWS makes the release's AMIs public in every region of every configured
// AWS partition. Both the PV image (where the region supports it) and the
// "-hvm" variant are published. A no-op when the spec disables AWS; dry-run
// only verifies the images can be found.
// NOTE(review): src is unused here apart from the signature (Fedora passes
// nil), and a failure in getSpecAWSImageMetadata is silently ignored via the
// bare return — confirm that is intentional.
func doAWS(ctx context.Context, client *http.Client, src *storage.Bucket, spec *channelSpec) {
	if spec.AWS.Image == "" {
		plog.Notice("AWS image creation disabled.")
		return
	}

	awsImageMetadata, err := getSpecAWSImageMetadata(spec)
	if err != nil {
		return
	}

	imageName := awsImageMetadata["imageName"]

	for _, part := range spec.AWS.Partitions {
		for _, region := range part.Regions {
			if releaseDryRun {
				plog.Printf("Checking for images in %v %v...", part.Name, region)
			} else {
				plog.Printf("Publishing images in %v %v...", part.Name, region)
			}

			api, err := aws.New(&aws.Options{
				CredentialsFile: awsCredentialsFile,
				Profile:         part.Profile,
				Region:          region,
			})
			if err != nil {
				plog.Fatalf("creating client for %v %v: %v", part.Name, region, err)
			}

			// publish looks up the named AMI and, outside of dry-run, makes
			// it public in the current region.
			publish := func(imageName string) {
				imageID, err := api.FindImage(imageName)
				if err != nil {
					plog.Fatalf("couldn't find image %q in %v %v: %v", imageName, part.Name, region, err)
				}

				if !releaseDryRun {
					err := api.PublishImage(imageID)
					if err != nil {
						plog.Fatalf("couldn't publish image in %v %v: %v", part.Name, region, err)
					}
				}
			}
			if aws.RegionSupportsPV(region) {
				publish(imageName)
			}
			publish(imageName + "-hvm")
		}
	}
}

// doS3 applies the channel's object ACL policy to every S3 object under the
// build's prod/streams/<channel>/builds/<version> prefix.
func doS3(spec *fcosChannelSpec) {
	api, err := aws.New(&aws.Options{
		CredentialsFile: awsCredentialsFile,
		Profile:         spec.Profile,
		Region:          spec.Region,
	})
	if err != nil {
		plog.Fatalf("creating aws client: %v", err)
	}

	// Assumes the bucket layout defined inside of
	// https://github.com/coreos/fedora-coreos-tracker/issues/189
	err = api.UpdateBucketObjectsACL(spec.Bucket, filepath.Join("prod", "streams", specChannel, "builds", specVersion, specPolicy)
	if err != nil {
		plog.Fatalf("updating object ACLs: %v", err)
	}
}

// modifyReleaseMetadataIndex appends the current build to the channel's
// releases.json index in S3, publishing the build's per-region AWS AMIs along
// the way. If the build is already the latest entry with identical commits
// the function is a no-op; if its commits supersede the latest entry, that
// entry is replaced. Any other conflict is fatal.
// NOTE(review): the commitId parameter is never used in this body — confirm
// whether it is vestigial. The local variable `url` shadows the net/url
// package after url.Parse is called.
func modifyReleaseMetadataIndex(spec *fcosChannelSpec, commitId string) {
	api, err := aws.New(&aws.Options{
		CredentialsFile: awsCredentialsFile,
		Profile:         spec.Profile,
		Region:          spec.Region,
	})
	if err != nil {
		plog.Fatalf("creating aws client: %v", err)
	}

	// Download the existing index; a missing key means no index exists yet,
	// so start from an empty JSON object instead of failing.
	path := filepath.Join("prod", "streams", specChannel, "releases.json")
	data, err := func() ([]byte, error) {
		f, err := api.DownloadFile(spec.Bucket, path)
		if err != nil {
			if awsErr, ok := err.(awserr.Error); ok {
				if awsErr.Code() == "NoSuchKey" {
					return []byte("{}"), nil
				}
			}
			return []byte{}, fmt.Errorf("downloading release metadata index: %v", err)
		}
		defer f.Close()
		d, err := ioutil.ReadAll(f)
		if err != nil {
			return []byte{}, fmt.Errorf("reading release metadata index: %v", err)
		}
		return d, nil
	}()
	if err != nil {
		plog.Fatal(err)
	}

	var m ReleaseMetadata
	err = json.Unmarshal(data, &m)
	if err != nil {
		plog.Fatalf("unmarshaling release metadata json: %v", err)
	}

	releasePath := filepath.Join("prod", "streams", specChannel, "builds", specVersion, "release.json")
	url, err := url.Parse(fmt.Sprintf("https://builds.coreos.fedoraproject.org/%s", releasePath))
	if err != nil {
		plog.Fatalf("creating metadata url: %v", err)
	}

	// Fetch this build's own release.json to learn its per-arch commits.
	releaseFile, err := api.DownloadFile(spec.Bucket, releasePath)
	if err != nil {
		plog.Fatalf("downloading release metadata: %v", err)
	}
	defer releaseFile.Close()

	releaseData, err := ioutil.ReadAll(releaseFile)
	if err != nil {
		plog.Fatalf("reading release metadata: %v", err)
	}

	var im IndividualReleaseMetadata
	err = json.Unmarshal(releaseData, &im)
	if err != nil {
		plog.Fatalf("unmarshaling release metadata: %v", err)
	}

	var commits []Commit
	for arch, vals := range im.Architectures {
		commits = append(commits, Commit{
			Architecture: arch,
			Checksum:     vals.Commit,
		})
	}

	newRel := BuildMetadata{
		CommitHash: commits,
		Version:    specVersion,
		Endpoint:   url.String(),
	}

	// Reconcile with any existing entry for this version/endpoint.
	for i, rel := range m.Releases {
		if compareStaticReleaseInfo(rel, newRel) {
			if i != (len(m.Releases) - 1) {
				plog.Fatalf("build is already present and is not the latest release")
			}

			comp := compareCommits(rel.CommitHash, newRel.CommitHash)
			if comp == 0 {
				// the build is already the latest release, exit
				return
			} else if comp == -1 {
				// the build is present and contains a subset of the new release data,
				// pop the old entry and add the new version
				m.Releases = m.Releases[:len(m.Releases)-1]
				break
			} else {
				// the commit hash of the new build is not a superset of the current release
				plog.Fatalf("build is present but commit hashes are not a superset of latest release")
			}
		}
	}

	// Make every AWS AMI referenced by this build public, region by region.
	for _, archs := range im.Architectures {
		for name, media := range archs.Media {
			if name == "aws" {
				for region, ami := range media.Images {
					aws_api, err := aws.New(&aws.Options{
						CredentialsFile: awsCredentialsFile,
						Profile:         specProfile,
						Region:          region,
					})
					if err != nil {
						plog.Fatalf("creating AWS API for modifying launch permissions: %v", err)
					}

					err = aws_api.PublishImage(ami.Image)
					if err != nil {
						plog.Fatalf("couldn't publish image in %v: %v", region, err)
					}
				}
			}
		}
	}

	m.Releases = append(m.Releases, newRel)

	m.Metadata.LastModified = time.Now().UTC().Format("2006-01-02T15:04:05Z")
	m.Note = "For use only by Fedora CoreOS internal tooling. All other applications should obtain release info from stream metadata endpoints."
	m.Stream = specChannel

	out, err := json.Marshal(m)
	if err != nil {
		plog.Fatalf("marshalling release metadata json: %v", err)
	}

	err = api.UploadObject(bytes.NewReader(out), spec.Bucket, path, true, specPolicy, aws.ContentTypeJSON)
	if err != nil {
		plog.Fatalf("uploading release metadata json: %v", err)
	}
}

// compareStaticReleaseInfo reports whether two builds refer to the same
// release, judged only by version and endpoint (commits are compared
// separately by compareCommits).
func compareStaticReleaseInfo(a, b BuildMetadata) bool {
	if a.Version != b.Version || a.Endpoint != b.Endpoint {
		return false
	}
	return true
}

// returns -1 if a is a subset of b, 0 if equal, 1 if a is not a subset of b
func compareCommits(a, b []Commit) int {
	if len(a) > len(b) {
		return 1
	}
	sameLength := len(a) == len(b)
	for _, aHash := range a {
		found := false
		for _, bHash := range b {
			if aHash.Architecture == bHash.Architecture && aHash.Checksum == bHash.Checksum {
				found = true
				break
			}
		}
		if !found {
			return 1
		}
	}
	if sameLength {
		return 0
	}
	return -1
}