github.com/recobe182/terraform@v0.8.5-0.20170117231232-49ab22a935b7/builtin/providers/aws/resource_aws_elasticache_cluster.go

package aws

import (
	"fmt"
	"log"
	"sort"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/elasticache"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)

func resourceAwsElastiCacheCommonSchema() map[string]*schema.Schema {

	return map[string]*schema.Schema{
		"availability_zones": &schema.Schema{
			Type:     schema.TypeSet,
			Optional: true,
			ForceNew: true,
			Elem:     &schema.Schema{Type: schema.TypeString},
			Set:      schema.HashString,
		},
		"node_type": &schema.Schema{
			Type:     schema.TypeString,
			Required: true,
		},
		"engine": &schema.Schema{
			Type:     schema.TypeString,
			Required: true,
		},
		"engine_version": &schema.Schema{
			Type:     schema.TypeString,
			Optional: true,
			Computed: true,
		},
		"parameter_group_name": &schema.Schema{
			Type:     schema.TypeString,
			Optional: true,
			Computed: true,
		},
		"subnet_group_name": &schema.Schema{
			Type:     schema.TypeString,
			Optional: true,
			Computed: true,
			ForceNew: true,
		},
		"security_group_names": &schema.Schema{
			Type:     schema.TypeSet,
			Optional: true,
			Computed: true,
			ForceNew: true,
			Elem:     &schema.Schema{Type: schema.TypeString},
			Set:      schema.HashString,
		},
		"security_group_ids": &schema.Schema{
			Type:     schema.TypeSet,
			Optional: true,
			Computed: true,
			Elem:     &schema.Schema{Type: schema.TypeString},
			Set:      schema.HashString,
		},
		// A single-element string list containing an Amazon Resource Name (ARN) that
		// uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot
		// file will be used to populate the node group.
		//
		// See also:
		// https://github.com/aws/aws-sdk-go/blob/4862a174f7fc92fb523fc39e68f00b87d91d2c3d/service/elasticache/api.go#L2079
		"snapshot_arns": &schema.Schema{
			Type:     schema.TypeSet,
			Optional: true,
			ForceNew: true,
			Elem:     &schema.Schema{Type: schema.TypeString},
			Set:      schema.HashString,
		},
		"snapshot_window": &schema.Schema{
			Type:         schema.TypeString,
			Optional:     true,
			Computed:     true,
			ValidateFunc: validateOnceADayWindowFormat,
		},
		"snapshot_name": &schema.Schema{
			Type:     schema.TypeString,
			Optional: true,
			ForceNew: true,
		},

		"maintenance_window": &schema.Schema{
			Type:     schema.TypeString,
			Optional: true,
			Computed: true,
			StateFunc: func(val interface{}) string {
				// ElastiCache always changes the maintenance window
				// to lowercase
				return strings.ToLower(val.(string))
			},
			ValidateFunc: validateOnceAWeekWindowFormat,
		},
		"port": &schema.Schema{
			Type:     schema.TypeInt,
			Required: true,
			ForceNew: true,
		},
		"notification_topic_arn": &schema.Schema{
			Type:     schema.TypeString,
			Optional: true,
		},

		"snapshot_retention_limit": &schema.Schema{
			Type:     schema.TypeInt,
			Optional: true,
			ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
				value := v.(int)
				if value > 35 {
					es = append(es, fmt.Errorf(
						"snapshot retention limit cannot be more than 35 days"))
				}
				return
			},
		},

		"apply_immediately": &schema.Schema{
			Type:     schema.TypeBool,
			Optional: true,
			Computed: true,
		},

		"tags": tagsSchema(),
	}
}

func resourceAwsElasticacheCluster() *schema.Resource {
	resourceSchema := resourceAwsElastiCacheCommonSchema()

	resourceSchema["cluster_id"] = &schema.Schema{
		Type:     schema.TypeString,
		Required: true,
		ForceNew: true,
		StateFunc: func(val interface{}) string {
			// ElastiCache normalizes cluster ids to lowercase,
			// so we have to do this too or else we can end up
			// with non-converging diffs.
			return strings.ToLower(val.(string))
		},
		ValidateFunc: validateElastiCacheClusterId,
	}

	resourceSchema["num_cache_nodes"] = &schema.Schema{
		Type:     schema.TypeInt,
		Required: true,
	}

	resourceSchema["az_mode"] = &schema.Schema{
		Type:     schema.TypeString,
		Optional: true,
		Computed: true,
		ForceNew: true,
	}

	resourceSchema["availability_zone"] = &schema.Schema{
		Type:     schema.TypeString,
		Optional: true,
		Computed: true,
		ForceNew: true,
	}

	resourceSchema["configuration_endpoint"] = &schema.Schema{
		Type:     schema.TypeString,
		Computed: true,
	}

	resourceSchema["cluster_address"] = &schema.Schema{
		Type:     schema.TypeString,
		Computed: true,
	}

	resourceSchema["replication_group_id"] = &schema.Schema{
		Type:     schema.TypeString,
		Computed: true,
	}

	resourceSchema["cache_nodes"] = &schema.Schema{
		Type:     schema.TypeList,
		Computed: true,
		Elem: &schema.Resource{
			Schema: map[string]*schema.Schema{
				"id": &schema.Schema{
					Type:     schema.TypeString,
					Computed: true,
				},
				"address": &schema.Schema{
					Type:     schema.TypeString,
					Computed: true,
				},
				"port": &schema.Schema{
					Type:     schema.TypeInt,
					Computed: true,
				},
				"availability_zone": &schema.Schema{
					Type:     schema.TypeString,
					Computed: true,
				},
			},
		},
	}

	return &schema.Resource{
		Create: resourceAwsElasticacheClusterCreate,
		Read:   resourceAwsElasticacheClusterRead,
		Update: resourceAwsElasticacheClusterUpdate,
		Delete: resourceAwsElasticacheClusterDelete,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},

		Schema: resourceSchema,
	}
}

func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).elasticacheconn

	clusterId := d.Get("cluster_id").(string)
	nodeType := d.Get("node_type").(string)           // e.g. cache.m1.small
	numNodes := int64(d.Get("num_cache_nodes").(int)) // e.g. 2
	engine := d.Get("engine").(string)                // e.g. memcached
	engineVersion := d.Get("engine_version").(string) // e.g. 1.4.14
	port := int64(d.Get("port").(int))                // e.g. 11211
	subnetGroupName := d.Get("subnet_group_name").(string)
	securityNameSet := d.Get("security_group_names").(*schema.Set)
	securityIdSet := d.Get("security_group_ids").(*schema.Set)

	securityNames := expandStringList(securityNameSet.List())
	securityIds := expandStringList(securityIdSet.List())
	tags := tagsFromMapEC(d.Get("tags").(map[string]interface{}))

	req := &elasticache.CreateCacheClusterInput{
		CacheClusterId:          aws.String(clusterId),
		CacheNodeType:           aws.String(nodeType),
		NumCacheNodes:           aws.Int64(numNodes),
		Engine:                  aws.String(engine),
		EngineVersion:           aws.String(engineVersion),
		Port:                    aws.Int64(port),
		CacheSubnetGroupName:    aws.String(subnetGroupName),
		CacheSecurityGroupNames: securityNames,
		SecurityGroupIds:        securityIds,
		Tags:                    tags,
	}

	// parameter groups are optional and can be defaulted by AWS
	if v, ok := d.GetOk("parameter_group_name"); ok {
		req.CacheParameterGroupName = aws.String(v.(string))
	}

	if v, ok := d.GetOk("snapshot_retention_limit"); ok {
		req.SnapshotRetentionLimit = aws.Int64(int64(v.(int)))
	}

	if v, ok := d.GetOk("snapshot_window"); ok {
		req.SnapshotWindow = aws.String(v.(string))
	}

	if v, ok := d.GetOk("maintenance_window"); ok {
		req.PreferredMaintenanceWindow = aws.String(v.(string))
	}

	if v, ok := d.GetOk("notification_topic_arn"); ok {
		req.NotificationTopicArn = aws.String(v.(string))
	}

	snaps := d.Get("snapshot_arns").(*schema.Set).List()
	if len(snaps) > 0 {
		s := expandStringList(snaps)
		req.SnapshotArns = s
		log.Printf("[DEBUG] Restoring Redis cluster from S3 snapshot: %#v", s)
	}

	if v, ok := d.GetOk("snapshot_name"); ok {
		req.SnapshotName = aws.String(v.(string))
	}

	if v, ok := d.GetOk("az_mode"); ok {
		req.AZMode = aws.String(v.(string))
	}

	if v, ok := d.GetOk("availability_zone"); ok {
		req.PreferredAvailabilityZone = aws.String(v.(string))
	}

	preferred_azs := d.Get("availability_zones").(*schema.Set).List()
	if len(preferred_azs) > 0 {
		azs := expandStringList(preferred_azs)
		req.PreferredAvailabilityZones = azs
	}

	if v, ok := d.GetOk("replication_group_id"); ok {
		req.ReplicationGroupId = aws.String(v.(string))
	}

	resp, err := conn.CreateCacheCluster(req)
	if err != nil {
		return fmt.Errorf("Error creating Elasticache: %s", err)
	}

	// Assign the cluster id as the resource ID
	// Elasticache always retains the id in lower case, so we have to
	// mimic that or else we won't be able to refresh a resource whose
	// name contained uppercase characters.
	d.SetId(strings.ToLower(*resp.CacheCluster.CacheClusterId))

	pending := []string{"creating", "modifying", "restoring"}
	stateConf := &resource.StateChangeConf{
		Pending:    pending,
		Target:     []string{"available"},
		Refresh:    cacheClusterStateRefreshFunc(conn, d.Id(), "available", pending),
		Timeout:    40 * time.Minute,
		MinTimeout: 10 * time.Second,
		Delay:      30 * time.Second,
	}

	log.Printf("[DEBUG] Waiting for state to become available: %v", d.Id())
	_, sterr := stateConf.WaitForState()
	if sterr != nil {
		return fmt.Errorf("Error waiting for elasticache (%s) to be created: %s", d.Id(), sterr)
	}

	return resourceAwsElasticacheClusterRead(d, meta)
}

func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).elasticacheconn
	req := &elasticache.DescribeCacheClustersInput{
		CacheClusterId:    aws.String(d.Id()),
		ShowCacheNodeInfo: aws.Bool(true),
	}

	res, err := conn.DescribeCacheClusters(req)
	if err != nil {
		if eccErr, ok := err.(awserr.Error); ok && eccErr.Code() == "CacheClusterNotFound" {
			log.Printf("[WARN] ElastiCache Cluster (%s) not found", d.Id())
			d.SetId("")
			return nil
		}

		return err
	}

	if len(res.CacheClusters) == 1 {
		c := res.CacheClusters[0]
		d.Set("cluster_id", c.CacheClusterId)
		d.Set("node_type", c.CacheNodeType)
		d.Set("num_cache_nodes", c.NumCacheNodes)
		d.Set("engine", c.Engine)
		d.Set("engine_version", c.EngineVersion)
		if c.ConfigurationEndpoint != nil {
			d.Set("port", c.ConfigurationEndpoint.Port)
			d.Set("configuration_endpoint", aws.String(fmt.Sprintf("%s:%d", *c.ConfigurationEndpoint.Address, *c.ConfigurationEndpoint.Port)))
			d.Set("cluster_address", aws.String(fmt.Sprintf("%s", *c.ConfigurationEndpoint.Address)))
		}

		if c.ReplicationGroupId != nil {
			d.Set("replication_group_id", c.ReplicationGroupId)
		}

		d.Set("subnet_group_name", c.CacheSubnetGroupName)
		d.Set("security_group_names", flattenElastiCacheSecurityGroupNames(c.CacheSecurityGroups))
		d.Set("security_group_ids", flattenElastiCacheSecurityGroupIds(c.SecurityGroups))
		if c.CacheParameterGroup != nil {
			d.Set("parameter_group_name", c.CacheParameterGroup.CacheParameterGroupName)
		}
		d.Set("maintenance_window", c.PreferredMaintenanceWindow)
		d.Set("snapshot_window", c.SnapshotWindow)
		d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit)
		if c.NotificationConfiguration != nil {
			if *c.NotificationConfiguration.TopicStatus == "active" {
				d.Set("notification_topic_arn", c.NotificationConfiguration.TopicArn)
			}
		}
		d.Set("availability_zone", c.PreferredAvailabilityZone)

		if err := setCacheNodeData(d, c); err != nil {
			return err
		}
		// list tags for resource
		// set tags
		arn, err := buildECARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region)
		if err != nil {
			log.Printf("[DEBUG] Error building ARN for ElastiCache Cluster, not setting Tags for cluster %s", *c.CacheClusterId)
		} else {
			resp, err := conn.ListTagsForResource(&elasticache.ListTagsForResourceInput{
				ResourceName: aws.String(arn),
			})

			if err != nil {
				log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn)
			} else {
				// only read the tag list when the call succeeded; resp is nil
				// on error and dereferencing it would panic
				var et []*elasticache.Tag
				if len(resp.TagList) > 0 {
					et = resp.TagList
				}
				d.Set("tags", tagsToMapEC(et))
			}
		}
	}

	return nil
}

func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).elasticacheconn
	arn, err := buildECARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region)
	if err != nil {
		log.Printf("[DEBUG] Error building ARN for ElastiCache Cluster, not updating Tags for cluster %s", d.Id())
	} else {
		if err := setTagsEC(conn, d, arn); err != nil {
			return err
		}
	}

	req := &elasticache.ModifyCacheClusterInput{
		CacheClusterId:   aws.String(d.Id()),
		ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)),
	}

	requestUpdate := false
	if d.HasChange("security_group_ids") {
		if attr := d.Get("security_group_ids").(*schema.Set); attr.Len() > 0 {
			req.SecurityGroupIds = expandStringList(attr.List())
			requestUpdate = true
		}
	}

	if d.HasChange("parameter_group_name") {
		req.CacheParameterGroupName = aws.String(d.Get("parameter_group_name").(string))
		requestUpdate = true
	}

	if d.HasChange("maintenance_window") {
		req.PreferredMaintenanceWindow = aws.String(d.Get("maintenance_window").(string))
		requestUpdate = true
	}

	if d.HasChange("notification_topic_arn") {
		v := d.Get("notification_topic_arn").(string)
		req.NotificationTopicArn = aws.String(v)
		if v == "" {
			inactive := "inactive"
			req.NotificationTopicStatus = &inactive
		}
		requestUpdate = true
	}

	if d.HasChange("engine_version") {
		req.EngineVersion = aws.String(d.Get("engine_version").(string))
		requestUpdate = true
	}

	if d.HasChange("snapshot_window") {
		req.SnapshotWindow = aws.String(d.Get("snapshot_window").(string))
		requestUpdate = true
	}

	if d.HasChange("node_type") {
		req.CacheNodeType = aws.String(d.Get("node_type").(string))
		requestUpdate = true
	}

	if d.HasChange("snapshot_retention_limit") {
		req.SnapshotRetentionLimit = aws.Int64(int64(d.Get("snapshot_retention_limit").(int)))
		requestUpdate = true
	}

	if d.HasChange("num_cache_nodes") {
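		// When the node count shrinks, the provider passes an explicit list of
		// cache node ids to remove; getCacheNodesToRemove (below) selects the
		// highest-numbered ids, counting down from the old node count
		// (node ids are zero-padded and 1-based: "0001", "0002", ...).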
		oraw, nraw := d.GetChange("num_cache_nodes")
		o := oraw.(int)
		n := nraw.(int)
		if v, ok := d.GetOk("az_mode"); ok && v.(string) == "cross-az" && n == 1 {
			return fmt.Errorf("[WARN] Error updating Elasticache cluster (%s), error: Cross-AZ mode is not supported in a single cache node.", d.Id())
		}
		if n < o {
			log.Printf("[INFO] Cluster %s is marked for decreasing cache nodes from %d to %d", d.Id(), o, n)
			nodesToRemove := getCacheNodesToRemove(d, o, o-n)
			req.CacheNodeIdsToRemove = nodesToRemove
		}

		req.NumCacheNodes = aws.Int64(int64(d.Get("num_cache_nodes").(int)))
		requestUpdate = true

	}

	if requestUpdate {
		log.Printf("[DEBUG] Modifying ElastiCache Cluster (%s), opts:\n%s", d.Id(), req)
		_, err := conn.ModifyCacheCluster(req)
		if err != nil {
			return fmt.Errorf("[WARN] Error updating ElastiCache cluster (%s), error: %s", d.Id(), err)
		}

		log.Printf("[DEBUG] Waiting for update: %s", d.Id())
		pending := []string{"modifying", "rebooting cache cluster nodes", "snapshotting"}
		stateConf := &resource.StateChangeConf{
			Pending:    pending,
			Target:     []string{"available"},
			Refresh:    cacheClusterStateRefreshFunc(conn, d.Id(), "available", pending),
			Timeout:    80 * time.Minute,
			MinTimeout: 10 * time.Second,
			Delay:      30 * time.Second,
		}

		_, sterr := stateConf.WaitForState()
		if sterr != nil {
			return fmt.Errorf("Error waiting for elasticache (%s) to update: %s", d.Id(), sterr)
		}
	}

	return resourceAwsElasticacheClusterRead(d, meta)
}

func getCacheNodesToRemove(d *schema.ResourceData, oldNumberOfNodes int, cacheNodesToRemove int) []*string {
	nodesIdsToRemove := []*string{}
	for i := oldNumberOfNodes; i > oldNumberOfNodes-cacheNodesToRemove && i > 0; i-- {
		s := fmt.Sprintf("%04d", i)
		nodesIdsToRemove = append(nodesIdsToRemove, &s)
	}

	return nodesIdsToRemove
}

func setCacheNodeData(d *schema.ResourceData, c *elasticache.CacheCluster) error {
	sortedCacheNodes := make([]*elasticache.CacheNode, len(c.CacheNodes))
	copy(sortedCacheNodes, c.CacheNodes)
	sort.Sort(byCacheNodeId(sortedCacheNodes))

	cacheNodeData := make([]map[string]interface{}, 0, len(sortedCacheNodes))

	for _, node := range sortedCacheNodes {
		if node.CacheNodeId == nil || node.Endpoint == nil || node.Endpoint.Address == nil || node.Endpoint.Port == nil || node.CustomerAvailabilityZone == nil {
			return fmt.Errorf("Unexpected nil pointer in: %s", node)
		}
		cacheNodeData = append(cacheNodeData, map[string]interface{}{
			"id":                *node.CacheNodeId,
			"address":           *node.Endpoint.Address,
			"port":              int(*node.Endpoint.Port),
			"availability_zone": *node.CustomerAvailabilityZone,
		})
	}

	return d.Set("cache_nodes", cacheNodeData)
}

type byCacheNodeId []*elasticache.CacheNode

func (b byCacheNodeId) Len() int      { return len(b) }
func (b byCacheNodeId) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byCacheNodeId) Less(i, j int) bool {
	return b[i].CacheNodeId != nil && b[j].CacheNodeId != nil &&
		*b[i].CacheNodeId < *b[j].CacheNodeId
}

func resourceAwsElasticacheClusterDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).elasticacheconn

	req := &elasticache.DeleteCacheClusterInput{
		CacheClusterId: aws.String(d.Id()),
	}
	_, err := conn.DeleteCacheCluster(req)
	if err != nil {
		return err
	}

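	// The delete call above only initiates removal; poll via
	// cacheClusterStateRefreshFunc until the cluster no longer appears in
	// DescribeCacheClusters (the refresh func returns an empty state once the
	// API reports the cluster as not found), which satisfies the empty Target.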
	log.Printf("[DEBUG] Waiting for deletion: %v", d.Id())
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"creating", "available", "deleting", "incompatible-parameters", "incompatible-network", "restore-failed"},
		Target:     []string{},
		Refresh:    cacheClusterStateRefreshFunc(conn, d.Id(), "", []string{}),
		Timeout:    40 * time.Minute,
		MinTimeout: 10 * time.Second,
		Delay:      30 * time.Second,
	}

	_, sterr := stateConf.WaitForState()
	if sterr != nil {
		return fmt.Errorf("Error waiting for elasticache (%s) to delete: %s", d.Id(), sterr)
	}

	d.SetId("")

	return nil
}

func cacheClusterStateRefreshFunc(conn *elasticache.ElastiCache, clusterID, givenState string, pending []string) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		resp, err := conn.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{
			CacheClusterId:    aws.String(clusterID),
			ShowCacheNodeInfo: aws.Bool(true),
		})
		if err != nil {
			apierr := err.(awserr.Error)
			log.Printf("[DEBUG] message: %v, code: %v", apierr.Message(), apierr.Code())
			if apierr.Message() == fmt.Sprintf("CacheCluster not found: %v", clusterID) {
				log.Printf("[DEBUG] Detected deletion")
				return nil, "", nil
			}

			log.Printf("[ERROR] CacheClusterStateRefreshFunc: %s", err)
			return nil, "", err
		}

		if len(resp.CacheClusters) == 0 {
			return nil, "", fmt.Errorf("[WARN] Error: no Cache Clusters found for id (%s)", clusterID)
		}

		var c *elasticache.CacheCluster
		for _, cluster := range resp.CacheClusters {
			if *cluster.CacheClusterId == clusterID {
				log.Printf("[DEBUG] Found matching ElastiCache cluster: %s", *cluster.CacheClusterId)
				c = cluster
			}
		}

		if c == nil {
			return nil, "", fmt.Errorf("[WARN] Error: no matching ElastiCache cluster for id (%s)", clusterID)
		}

		log.Printf("[DEBUG] ElastiCache Cluster (%s) status: %v", clusterID, *c.CacheClusterStatus)

		// return the current state if it's in the pending array
		for _, p := range pending {
			log.Printf("[DEBUG] ElastiCache: checking pending state (%s) for cluster (%s), cluster status: %s", pending, clusterID, *c.CacheClusterStatus)
			s := *c.CacheClusterStatus
			if p == s {
				log.Printf("[DEBUG] Return with status: %v", *c.CacheClusterStatus)
				return c, p, nil
			}
		}

		// return given state if it's not in pending
		if givenState != "" {
			log.Printf("[DEBUG] ElastiCache: checking given state (%s) of cluster (%s) against cluster status (%s)", givenState, clusterID, *c.CacheClusterStatus)
			// check to make sure we have the node count we're expecting
			if int64(len(c.CacheNodes)) != *c.NumCacheNodes {
				log.Printf("[DEBUG] Node count is not what is expected: %d found, %d expected", len(c.CacheNodes), *c.NumCacheNodes)
				return nil, "creating", nil
			}

			log.Printf("[DEBUG] Node count matched (%d)", len(c.CacheNodes))
			// loop the nodes and check their status as well
			for _, n := range c.CacheNodes {
				log.Printf("[DEBUG] Checking cache node for status: %s", n)
				if n.CacheNodeStatus != nil && *n.CacheNodeStatus != "available" {
					log.Printf("[DEBUG] Node (%s) is not yet available, status: %s", *n.CacheNodeId, *n.CacheNodeStatus)
					return nil, "creating", nil
				}
				log.Printf("[DEBUG] Cache node not in expected state")
			}
			log.Printf("[DEBUG] ElastiCache returning given state (%s), cluster: %s", givenState, c)
			return c, givenState, nil
		}
		log.Printf("[DEBUG] current status: %v", *c.CacheClusterStatus)
		return c, *c.CacheClusterStatus, nil
	}
}

func buildECARN(identifier, partition, accountid, region string) (string, error) {
	if partition == "" {
		return "", fmt.Errorf("Unable to construct ElastiCache ARN because of missing AWS partition")
	}
	if accountid == "" {
		return "", fmt.Errorf("Unable to construct ElastiCache ARN because of missing AWS Account ID")
	}
	arn := fmt.Sprintf("arn:%s:elasticache:%s:%s:cluster:%s", partition, region, accountid, identifier)
	return arn, nil

}
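
// Example (hypothetical values): buildECARN("my-cluster", "aws", "123456789012", "us-west-2")
// returns "arn:aws:elasticache:us-west-2:123456789012:cluster:my-cluster".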