github.com/keshavdv/terraform@v0.7.0-rc2.0.20160711232630-d69256dcb425/builtin/providers/aws/resource_aws_elasticache_cluster.go (about) 1 package aws 2 3 import ( 4 "fmt" 5 "log" 6 "sort" 7 "strings" 8 "time" 9 10 "github.com/aws/aws-sdk-go/aws" 11 "github.com/aws/aws-sdk-go/aws/awserr" 12 "github.com/aws/aws-sdk-go/service/elasticache" 13 "github.com/aws/aws-sdk-go/service/iam" 14 "github.com/hashicorp/terraform/helper/resource" 15 "github.com/hashicorp/terraform/helper/schema" 16 ) 17 18 func resourceAwsElasticacheCluster() *schema.Resource { 19 return &schema.Resource{ 20 Create: resourceAwsElasticacheClusterCreate, 21 Read: resourceAwsElasticacheClusterRead, 22 Update: resourceAwsElasticacheClusterUpdate, 23 Delete: resourceAwsElasticacheClusterDelete, 24 25 Schema: map[string]*schema.Schema{ 26 "cluster_id": &schema.Schema{ 27 Type: schema.TypeString, 28 Required: true, 29 ForceNew: true, 30 StateFunc: func(val interface{}) string { 31 // Elasticache normalizes cluster ids to lowercase, 32 // so we have to do this too or else we can end up 33 // with non-converging diffs. 34 return strings.ToLower(val.(string)) 35 }, 36 ValidateFunc: validateElastiCacheClusterId, 37 }, 38 "configuration_endpoint": &schema.Schema{ 39 Type: schema.TypeString, 40 Computed: true, 41 }, 42 "engine": &schema.Schema{ 43 Type: schema.TypeString, 44 Required: true, 45 }, 46 "node_type": &schema.Schema{ 47 Type: schema.TypeString, 48 Required: true, 49 ForceNew: true, 50 }, 51 "num_cache_nodes": &schema.Schema{ 52 Type: schema.TypeInt, 53 Required: true, 54 }, 55 "parameter_group_name": &schema.Schema{ 56 Type: schema.TypeString, 57 Optional: true, 58 Computed: true, 59 }, 60 "port": &schema.Schema{ 61 Type: schema.TypeInt, 62 Required: true, 63 ForceNew: true, 64 }, 65 "engine_version": &schema.Schema{ 66 Type: schema.TypeString, 67 Optional: true, 68 Computed: true, 69 }, 70 "maintenance_window": &schema.Schema{ 71 Type: schema.TypeString, 72 Optional: true, 73 Computed: true, 74 StateFunc: func(val interface{}) string { 75 // Elasticache always changes the maintenance 76 // to lowercase 77 return strings.ToLower(val.(string)) 78 }, 79 }, 80 "subnet_group_name": &schema.Schema{ 81 Type: schema.TypeString, 82 Optional: true, 83 Computed: true, 84 ForceNew: true, 85 }, 86 "security_group_names": &schema.Schema{ 87 Type: schema.TypeSet, 88 Optional: true, 89 Computed: true, 90 ForceNew: true, 91 Elem: &schema.Schema{Type: schema.TypeString}, 92 Set: schema.HashString, 93 }, 94 "security_group_ids": &schema.Schema{ 95 Type: schema.TypeSet, 96 Optional: true, 97 Computed: true, 98 Elem: &schema.Schema{Type: schema.TypeString}, 99 Set: schema.HashString, 100 }, 101 // Exported Attributes 102 "cache_nodes": &schema.Schema{ 103 Type: schema.TypeList, 104 Computed: true, 105 Elem: &schema.Resource{ 106 Schema: map[string]*schema.Schema{ 107 "id": &schema.Schema{ 108 Type: schema.TypeString, 109 Computed: true, 110 }, 111 "address": &schema.Schema{ 112 Type: schema.TypeString, 113 Computed: true, 114 }, 115 "port": &schema.Schema{ 116 Type: schema.TypeInt, 117 Computed: true, 118 }, 119 "availability_zone": &schema.Schema{ 120 Type: schema.TypeString, 121 Computed: true, 122 }, 123 }, 124 }, 125 }, 126 "notification_topic_arn": &schema.Schema{ 127 Type: schema.TypeString, 128 Optional: true, 129 }, 130 // A single-element string list containing an Amazon Resource Name (ARN) that 131 // uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot 132 // file will be used to populate the node group. 133 // 134 // See also: 135 // https://github.com/aws/aws-sdk-go/blob/4862a174f7fc92fb523fc39e68f00b87d91d2c3d/service/elasticache/api.go#L2079 136 "snapshot_arns": &schema.Schema{ 137 Type: schema.TypeSet, 138 Optional: true, 139 ForceNew: true, 140 Elem: &schema.Schema{Type: schema.TypeString}, 141 Set: schema.HashString, 142 }, 143 144 "snapshot_window": &schema.Schema{ 145 Type: schema.TypeString, 146 Optional: true, 147 Computed: true, 148 }, 149 150 "snapshot_retention_limit": &schema.Schema{ 151 Type: schema.TypeInt, 152 Optional: true, 153 ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { 154 value := v.(int) 155 if value > 35 { 156 es = append(es, fmt.Errorf( 157 "snapshot retention limit cannot be more than 35 days")) 158 } 159 return 160 }, 161 }, 162 163 "az_mode": &schema.Schema{ 164 Type: schema.TypeString, 165 Optional: true, 166 Computed: true, 167 ForceNew: true, 168 }, 169 170 "availability_zone": &schema.Schema{ 171 Type: schema.TypeString, 172 Optional: true, 173 Computed: true, 174 ForceNew: true, 175 }, 176 177 "availability_zones": &schema.Schema{ 178 Type: schema.TypeSet, 179 Optional: true, 180 ForceNew: true, 181 Elem: &schema.Schema{Type: schema.TypeString}, 182 Set: schema.HashString, 183 }, 184 185 "tags": tagsSchema(), 186 187 // apply_immediately is used to determine when the update modifications 188 // take place. 189 // See http://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_ModifyCacheCluster.html 190 "apply_immediately": &schema.Schema{ 191 Type: schema.TypeBool, 192 Optional: true, 193 Computed: true, 194 }, 195 }, 196 } 197 } 198 199 func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{}) error { 200 conn := meta.(*AWSClient).elasticacheconn 201 202 clusterId := d.Get("cluster_id").(string) 203 nodeType := d.Get("node_type").(string) // e.g) cache.m1.small 204 numNodes := int64(d.Get("num_cache_nodes").(int)) // 2 205 engine := d.Get("engine").(string) // memcached 206 engineVersion := d.Get("engine_version").(string) // 1.4.14 207 port := int64(d.Get("port").(int)) // e.g) 11211 208 subnetGroupName := d.Get("subnet_group_name").(string) 209 securityNameSet := d.Get("security_group_names").(*schema.Set) 210 securityIdSet := d.Get("security_group_ids").(*schema.Set) 211 212 securityNames := expandStringList(securityNameSet.List()) 213 securityIds := expandStringList(securityIdSet.List()) 214 215 tags := tagsFromMapEC(d.Get("tags").(map[string]interface{})) 216 req := &elasticache.CreateCacheClusterInput{ 217 CacheClusterId: aws.String(clusterId), 218 CacheNodeType: aws.String(nodeType), 219 NumCacheNodes: aws.Int64(numNodes), 220 Engine: aws.String(engine), 221 EngineVersion: aws.String(engineVersion), 222 Port: aws.Int64(port), 223 CacheSubnetGroupName: aws.String(subnetGroupName), 224 CacheSecurityGroupNames: securityNames, 225 SecurityGroupIds: securityIds, 226 Tags: tags, 227 } 228 229 // parameter groups are optional and can be defaulted by AWS 230 if v, ok := d.GetOk("parameter_group_name"); ok { 231 req.CacheParameterGroupName = aws.String(v.(string)) 232 } 233 234 if v, ok := d.GetOk("snapshot_retention_limit"); ok { 235 req.SnapshotRetentionLimit = aws.Int64(int64(v.(int))) 236 } 237 238 if v, ok := d.GetOk("snapshot_window"); ok { 239 req.SnapshotWindow = aws.String(v.(string)) 240 } 241 242 if v, ok := d.GetOk("maintenance_window"); ok { 243 req.PreferredMaintenanceWindow = aws.String(v.(string)) 244 } 245 246 if v, ok := d.GetOk("notification_topic_arn"); ok { 247 req.NotificationTopicArn = aws.String(v.(string)) 248 } 249 250 snaps := d.Get("snapshot_arns").(*schema.Set).List() 251 if len(snaps) > 0 { 252 s := expandStringList(snaps) 253 req.SnapshotArns = s 254 log.Printf("[DEBUG] Restoring Redis cluster from S3 snapshot: %#v", s) 255 } 256 257 if v, ok := d.GetOk("az_mode"); ok { 258 req.AZMode = aws.String(v.(string)) 259 } 260 261 if v, ok := d.GetOk("availability_zone"); ok { 262 req.PreferredAvailabilityZone = aws.String(v.(string)) 263 } 264 265 preferred_azs := d.Get("availability_zones").(*schema.Set).List() 266 if len(preferred_azs) > 0 { 267 azs := expandStringList(preferred_azs) 268 req.PreferredAvailabilityZones = azs 269 } 270 271 resp, err := conn.CreateCacheCluster(req) 272 if err != nil { 273 return fmt.Errorf("Error creating Elasticache: %s", err) 274 } 275 276 // Assign the cluster id as the resource ID 277 // Elasticache always retains the id in lower case, so we have to 278 // mimic that or else we won't be able to refresh a resource whose 279 // name contained uppercase characters. 280 d.SetId(strings.ToLower(*resp.CacheCluster.CacheClusterId)) 281 282 pending := []string{"creating"} 283 stateConf := &resource.StateChangeConf{ 284 Pending: pending, 285 Target: []string{"available"}, 286 Refresh: cacheClusterStateRefreshFunc(conn, d.Id(), "available", pending), 287 Timeout: 10 * time.Minute, 288 Delay: 10 * time.Second, 289 MinTimeout: 3 * time.Second, 290 } 291 292 log.Printf("[DEBUG] Waiting for state to become available: %v", d.Id()) 293 _, sterr := stateConf.WaitForState() 294 if sterr != nil { 295 return fmt.Errorf("Error waiting for elasticache (%s) to be created: %s", d.Id(), sterr) 296 } 297 298 return resourceAwsElasticacheClusterRead(d, meta) 299 } 300 301 func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) error { 302 conn := meta.(*AWSClient).elasticacheconn 303 req := &elasticache.DescribeCacheClustersInput{ 304 CacheClusterId: aws.String(d.Id()), 305 ShowCacheNodeInfo: aws.Bool(true), 306 } 307 308 res, err := conn.DescribeCacheClusters(req) 309 if err != nil { 310 if eccErr, ok := err.(awserr.Error); ok && eccErr.Code() == "CacheClusterNotFound" { 311 log.Printf("[WARN] ElastiCache Cluster (%s) not found", d.Id()) 312 d.SetId("") 313 return nil 314 } 315 316 return err 317 } 318 319 if len(res.CacheClusters) == 1 { 320 c := res.CacheClusters[0] 321 d.Set("cluster_id", c.CacheClusterId) 322 d.Set("node_type", c.CacheNodeType) 323 d.Set("num_cache_nodes", c.NumCacheNodes) 324 d.Set("engine", c.Engine) 325 d.Set("engine_version", c.EngineVersion) 326 if c.ConfigurationEndpoint != nil { 327 d.Set("port", c.ConfigurationEndpoint.Port) 328 d.Set("configuration_endpoint", aws.String(fmt.Sprintf("%s:%d", *c.ConfigurationEndpoint.Address, *c.ConfigurationEndpoint.Port))) 329 } 330 331 d.Set("subnet_group_name", c.CacheSubnetGroupName) 332 d.Set("security_group_names", c.CacheSecurityGroups) 333 d.Set("security_group_ids", c.SecurityGroups) 334 d.Set("parameter_group_name", c.CacheParameterGroup) 335 d.Set("maintenance_window", c.PreferredMaintenanceWindow) 336 d.Set("snapshot_window", c.SnapshotWindow) 337 d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit) 338 if c.NotificationConfiguration != nil { 339 if *c.NotificationConfiguration.TopicStatus == "active" { 340 d.Set("notification_topic_arn", c.NotificationConfiguration.TopicArn) 341 } 342 } 343 d.Set("availability_zone", c.PreferredAvailabilityZone) 344 345 if err := setCacheNodeData(d, c); err != nil { 346 return err 347 } 348 // list tags for resource 349 // set tags 350 arn, err := buildECARN(d, meta) 351 if err != nil { 352 log.Printf("[DEBUG] Error building ARN for ElastiCache Cluster, not setting Tags for cluster %s", *c.CacheClusterId) 353 } else { 354 resp, err := conn.ListTagsForResource(&elasticache.ListTagsForResourceInput{ 355 ResourceName: aws.String(arn), 356 }) 357 358 if err != nil { 359 log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn) 360 } 361 362 var et []*elasticache.Tag 363 if len(resp.TagList) > 0 { 364 et = resp.TagList 365 } 366 d.Set("tags", tagsToMapEC(et)) 367 } 368 } 369 370 return nil 371 } 372 373 func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{}) error { 374 conn := meta.(*AWSClient).elasticacheconn 375 arn, err := buildECARN(d, meta) 376 if err != nil { 377 log.Printf("[DEBUG] Error building ARN for ElastiCache Cluster, not updating Tags for cluster %s", d.Id()) 378 } else { 379 if err := setTagsEC(conn, d, arn); err != nil { 380 return err 381 } 382 } 383 384 req := &elasticache.ModifyCacheClusterInput{ 385 CacheClusterId: aws.String(d.Id()), 386 ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)), 387 } 388 389 requestUpdate := false 390 if d.HasChange("security_group_ids") { 391 if attr := d.Get("security_group_ids").(*schema.Set); attr.Len() > 0 { 392 req.SecurityGroupIds = expandStringList(attr.List()) 393 requestUpdate = true 394 } 395 } 396 397 if d.HasChange("parameter_group_name") { 398 req.CacheParameterGroupName = aws.String(d.Get("parameter_group_name").(string)) 399 requestUpdate = true 400 } 401 402 if d.HasChange("maintenance_window") { 403 req.PreferredMaintenanceWindow = aws.String(d.Get("maintenance_window").(string)) 404 requestUpdate = true 405 } 406 407 if d.HasChange("notification_topic_arn") { 408 v := d.Get("notification_topic_arn").(string) 409 req.NotificationTopicArn = aws.String(v) 410 if v == "" { 411 inactive := "inactive" 412 req.NotificationTopicStatus = &inactive 413 } 414 requestUpdate = true 415 } 416 417 if d.HasChange("engine_version") { 418 req.EngineVersion = aws.String(d.Get("engine_version").(string)) 419 requestUpdate = true 420 } 421 422 if d.HasChange("snapshot_window") { 423 req.SnapshotWindow = aws.String(d.Get("snapshot_window").(string)) 424 requestUpdate = true 425 } 426 427 if d.HasChange("snapshot_retention_limit") { 428 req.SnapshotRetentionLimit = aws.Int64(int64(d.Get("snapshot_retention_limit").(int))) 429 requestUpdate = true 430 } 431 432 if d.HasChange("num_cache_nodes") { 433 oraw, nraw := d.GetChange("num_cache_nodes") 434 o := oraw.(int) 435 n := nraw.(int) 436 if v, ok := d.GetOk("az_mode"); ok && v.(string) == "cross-az" && n == 1 { 437 return fmt.Errorf("[WARN] Error updateing Elasticache cluster (%s), error: Cross-AZ mode is not supported in a single cache node.", d.Id()) 438 } 439 if n < o { 440 log.Printf("[INFO] Cluster %s is marked for Decreasing cache nodes from %d to %d", d.Id(), o, n) 441 nodesToRemove := getCacheNodesToRemove(d, o, o-n) 442 req.CacheNodeIdsToRemove = nodesToRemove 443 } 444 445 req.NumCacheNodes = aws.Int64(int64(d.Get("num_cache_nodes").(int))) 446 requestUpdate = true 447 448 } 449 450 if requestUpdate { 451 log.Printf("[DEBUG] Modifying ElastiCache Cluster (%s), opts:\n%s", d.Id(), req) 452 _, err := conn.ModifyCacheCluster(req) 453 if err != nil { 454 return fmt.Errorf("[WARN] Error updating ElastiCache cluster (%s), error: %s", d.Id(), err) 455 } 456 457 log.Printf("[DEBUG] Waiting for update: %s", d.Id()) 458 pending := []string{"modifying", "rebooting cache cluster nodes", "snapshotting"} 459 stateConf := &resource.StateChangeConf{ 460 Pending: pending, 461 Target: []string{"available"}, 462 Refresh: cacheClusterStateRefreshFunc(conn, d.Id(), "available", pending), 463 Timeout: 5 * time.Minute, 464 Delay: 5 * time.Second, 465 MinTimeout: 3 * time.Second, 466 } 467 468 _, sterr := stateConf.WaitForState() 469 if sterr != nil { 470 return fmt.Errorf("Error waiting for elasticache (%s) to update: %s", d.Id(), sterr) 471 } 472 } 473 474 return resourceAwsElasticacheClusterRead(d, meta) 475 } 476 477 func getCacheNodesToRemove(d *schema.ResourceData, oldNumberOfNodes int, cacheNodesToRemove int) []*string { 478 nodesIdsToRemove := []*string{} 479 for i := oldNumberOfNodes; i > oldNumberOfNodes-cacheNodesToRemove && i > 0; i-- { 480 s := fmt.Sprintf("%04d", i) 481 nodesIdsToRemove = append(nodesIdsToRemove, &s) 482 } 483 484 return nodesIdsToRemove 485 } 486 487 func setCacheNodeData(d *schema.ResourceData, c *elasticache.CacheCluster) error { 488 sortedCacheNodes := make([]*elasticache.CacheNode, len(c.CacheNodes)) 489 copy(sortedCacheNodes, c.CacheNodes) 490 sort.Sort(byCacheNodeId(sortedCacheNodes)) 491 492 cacheNodeData := make([]map[string]interface{}, 0, len(sortedCacheNodes)) 493 494 for _, node := range sortedCacheNodes { 495 if node.CacheNodeId == nil || node.Endpoint == nil || node.Endpoint.Address == nil || node.Endpoint.Port == nil || node.CustomerAvailabilityZone == nil { 496 return fmt.Errorf("Unexpected nil pointer in: %s", node) 497 } 498 cacheNodeData = append(cacheNodeData, map[string]interface{}{ 499 "id": *node.CacheNodeId, 500 "address": *node.Endpoint.Address, 501 "port": int(*node.Endpoint.Port), 502 "availability_zone": *node.CustomerAvailabilityZone, 503 }) 504 } 505 506 return d.Set("cache_nodes", cacheNodeData) 507 } 508 509 type byCacheNodeId []*elasticache.CacheNode 510 511 func (b byCacheNodeId) Len() int { return len(b) } 512 func (b byCacheNodeId) Swap(i, j int) { b[i], b[j] = b[j], b[i] } 513 func (b byCacheNodeId) Less(i, j int) bool { 514 return b[i].CacheNodeId != nil && b[j].CacheNodeId != nil && 515 *b[i].CacheNodeId < *b[j].CacheNodeId 516 } 517 518 func resourceAwsElasticacheClusterDelete(d *schema.ResourceData, meta interface{}) error { 519 conn := meta.(*AWSClient).elasticacheconn 520 521 req := &elasticache.DeleteCacheClusterInput{ 522 CacheClusterId: aws.String(d.Id()), 523 } 524 _, err := conn.DeleteCacheCluster(req) 525 if err != nil { 526 return err 527 } 528 529 log.Printf("[DEBUG] Waiting for deletion: %v", d.Id()) 530 stateConf := &resource.StateChangeConf{ 531 Pending: []string{"creating", "available", "deleting", "incompatible-parameters", "incompatible-network", "restore-failed"}, 532 Target: []string{}, 533 Refresh: cacheClusterStateRefreshFunc(conn, d.Id(), "", []string{}), 534 Timeout: 20 * time.Minute, 535 Delay: 10 * time.Second, 536 MinTimeout: 3 * time.Second, 537 } 538 539 _, sterr := stateConf.WaitForState() 540 if sterr != nil { 541 return fmt.Errorf("Error waiting for elasticache (%s) to delete: %s", d.Id(), sterr) 542 } 543 544 d.SetId("") 545 546 return nil 547 } 548 549 func cacheClusterStateRefreshFunc(conn *elasticache.ElastiCache, clusterID, givenState string, pending []string) resource.StateRefreshFunc { 550 return func() (interface{}, string, error) { 551 resp, err := conn.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{ 552 CacheClusterId: aws.String(clusterID), 553 ShowCacheNodeInfo: aws.Bool(true), 554 }) 555 if err != nil { 556 apierr := err.(awserr.Error) 557 log.Printf("[DEBUG] message: %v, code: %v", apierr.Message(), apierr.Code()) 558 if apierr.Message() == fmt.Sprintf("CacheCluster not found: %v", clusterID) { 559 log.Printf("[DEBUG] Detect deletion") 560 return nil, "", nil 561 } 562 563 log.Printf("[ERROR] CacheClusterStateRefreshFunc: %s", err) 564 return nil, "", err 565 } 566 567 if len(resp.CacheClusters) == 0 { 568 return nil, "", fmt.Errorf("[WARN] Error: no Cache Clusters found for id (%s)", clusterID) 569 } 570 571 var c *elasticache.CacheCluster 572 for _, cluster := range resp.CacheClusters { 573 if *cluster.CacheClusterId == clusterID { 574 log.Printf("[DEBUG] Found matching ElastiCache cluster: %s", *cluster.CacheClusterId) 575 c = cluster 576 } 577 } 578 579 if c == nil { 580 return nil, "", fmt.Errorf("[WARN] Error: no matching Elastic Cache cluster for id (%s)", clusterID) 581 } 582 583 log.Printf("[DEBUG] ElastiCache Cluster (%s) status: %v", clusterID, *c.CacheClusterStatus) 584 585 // return the current state if it's in the pending array 586 for _, p := range pending { 587 log.Printf("[DEBUG] ElastiCache: checking pending state (%s) for cluster (%s), cluster status: %s", pending, clusterID, *c.CacheClusterStatus) 588 s := *c.CacheClusterStatus 589 if p == s { 590 log.Printf("[DEBUG] Return with status: %v", *c.CacheClusterStatus) 591 return c, p, nil 592 } 593 } 594 595 // return given state if it's not in pending 596 if givenState != "" { 597 log.Printf("[DEBUG] ElastiCache: checking given state (%s) of cluster (%s) against cluster status (%s)", givenState, clusterID, *c.CacheClusterStatus) 598 // check to make sure we have the node count we're expecting 599 if int64(len(c.CacheNodes)) != *c.NumCacheNodes { 600 log.Printf("[DEBUG] Node count is not what is expected: %d found, %d expected", len(c.CacheNodes), *c.NumCacheNodes) 601 return nil, "creating", nil 602 } 603 604 log.Printf("[DEBUG] Node count matched (%d)", len(c.CacheNodes)) 605 // loop the nodes and check their status as well 606 for _, n := range c.CacheNodes { 607 log.Printf("[DEBUG] Checking cache node for status: %s", n) 608 if n.CacheNodeStatus != nil && *n.CacheNodeStatus != "available" { 609 log.Printf("[DEBUG] Node (%s) is not yet available, status: %s", *n.CacheNodeId, *n.CacheNodeStatus) 610 return nil, "creating", nil 611 } 612 log.Printf("[DEBUG] Cache node not in expected state") 613 } 614 log.Printf("[DEBUG] ElastiCache returning given state (%s), cluster: %s", givenState, c) 615 return c, givenState, nil 616 } 617 log.Printf("[DEBUG] current status: %v", *c.CacheClusterStatus) 618 return c, *c.CacheClusterStatus, nil 619 } 620 } 621 622 func buildECARN(d *schema.ResourceData, meta interface{}) (string, error) { 623 iamconn := meta.(*AWSClient).iamconn 624 region := meta.(*AWSClient).region 625 // An zero value GetUserInput{} defers to the currently logged in user 626 resp, err := iamconn.GetUser(&iam.GetUserInput{}) 627 if err != nil { 628 return "", err 629 } 630 userARN := *resp.User.Arn 631 accountID := strings.Split(userARN, ":")[4] 632 arn := fmt.Sprintf("arn:aws:elasticache:%s:%s:cluster:%s", region, accountID, d.Id()) 633 return arn, nil 634 }