github.com/wangzhucn/terraform@v0.6.7-0.20151109233120-4eea011b56b3/builtin/providers/aws/resource_aws_elasticache_cluster.go

package aws

import (
	"fmt"
	"log"
	"sort"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/elasticache"
	"github.com/aws/aws-sdk-go/service/iam"
	"github.com/hashicorp/terraform/helper/hashcode"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)

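// resourceAwsElasticacheCluster returns the schema and CRUD handlers for the
// aws_elasticache_cluster resource.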
func resourceAwsElasticacheCluster() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsElasticacheClusterCreate,
		Read:   resourceAwsElasticacheClusterRead,
		Update: resourceAwsElasticacheClusterUpdate,
		Delete: resourceAwsElasticacheClusterDelete,

		Schema: map[string]*schema.Schema{
			"cluster_id": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
				StateFunc: func(val interface{}) string {
					// Elasticache normalizes cluster ids to lowercase,
					// so we have to do this too or else we can end up
					// with non-converging diffs.
					return strings.ToLower(val.(string))
				},
			},
			"configuration_endpoint": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"engine": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"node_type": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"num_cache_nodes": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
			},
			"parameter_group_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"port": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
				ForceNew: true,
			},
			"engine_version": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"maintenance_window": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"subnet_group_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},
			"security_group_names": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				Computed: true,
				ForceNew: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set: func(v interface{}) int {
					return hashcode.String(v.(string))
				},
			},
			"security_group_ids": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				Computed: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set: func(v interface{}) int {
					return hashcode.String(v.(string))
				},
			},
			// Exported Attributes
			"cache_nodes": &schema.Schema{
				Type:     schema.TypeList,
				Computed: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"id": &schema.Schema{
							Type:     schema.TypeString,
							Computed: true,
						},
						"address": &schema.Schema{
							Type:     schema.TypeString,
							Computed: true,
						},
						"port": &schema.Schema{
							Type:     schema.TypeInt,
							Computed: true,
						},
					},
				},
			},
			"notification_topic_arn": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},
			// A single-element string list containing an Amazon Resource Name (ARN) that
			// uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot
			// file will be used to populate the node group.
			//
			// See also:
			// https://github.com/aws/aws-sdk-go/blob/4862a174f7fc92fb523fc39e68f00b87d91d2c3d/service/elasticache/api.go#L2079
			"snapshot_arns": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				ForceNew: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set: func(v interface{}) int {
					return hashcode.String(v.(string))
				},
			},

			"snapshot_window": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			"snapshot_retention_limit": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
					value := v.(int)
					if value > 35 {
						es = append(es, fmt.Errorf(
							"snapshot retention limit cannot be more than 35 days"))
					}
					return
				},
			},

			"tags": tagsSchema(),

			// apply_immediately is used to determine when the update modifications
			// take place.
			// See http://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_ModifyCacheCluster.html
			"apply_immediately": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Computed: true,
			},
		},
	}
}

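// resourceAwsElasticacheClusterCreate issues the CreateCacheCluster request and
// waits for the new cluster to reach the "available" state before reading it
// back into state.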
func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).elasticacheconn

	clusterId := d.Get("cluster_id").(string)
	nodeType := d.Get("node_type").(string)           // e.g. cache.m1.small
	numNodes := int64(d.Get("num_cache_nodes").(int)) // e.g. 2
	engine := d.Get("engine").(string)                // e.g. memcached
	engineVersion := d.Get("engine_version").(string) // e.g. 1.4.14
	port := int64(d.Get("port").(int))                // e.g. 11211
	subnetGroupName := d.Get("subnet_group_name").(string)
	securityNameSet := d.Get("security_group_names").(*schema.Set)
	securityIdSet := d.Get("security_group_ids").(*schema.Set)

	securityNames := expandStringList(securityNameSet.List())
	securityIds := expandStringList(securityIdSet.List())

	tags := tagsFromMapEC(d.Get("tags").(map[string]interface{}))
	req := &elasticache.CreateCacheClusterInput{
		CacheClusterId:          aws.String(clusterId),
		CacheNodeType:           aws.String(nodeType),
		NumCacheNodes:           aws.Int64(numNodes),
		Engine:                  aws.String(engine),
		EngineVersion:           aws.String(engineVersion),
		Port:                    aws.Int64(port),
		CacheSubnetGroupName:    aws.String(subnetGroupName),
		CacheSecurityGroupNames: securityNames,
		SecurityGroupIds:        securityIds,
		Tags:                    tags,
	}

	// parameter groups are optional and can be defaulted by AWS
	if v, ok := d.GetOk("parameter_group_name"); ok {
		req.CacheParameterGroupName = aws.String(v.(string))
	}

	if v, ok := d.GetOk("snapshot_retention_limit"); ok {
		req.SnapshotRetentionLimit = aws.Int64(int64(v.(int)))
	}

	if v, ok := d.GetOk("snapshot_window"); ok {
		req.SnapshotWindow = aws.String(v.(string))
	}

	if v, ok := d.GetOk("maintenance_window"); ok {
		req.PreferredMaintenanceWindow = aws.String(v.(string))
	}

	if v, ok := d.GetOk("notification_topic_arn"); ok {
		req.NotificationTopicArn = aws.String(v.(string))
	}

	snaps := d.Get("snapshot_arns").(*schema.Set).List()
	if len(snaps) > 0 {
		s := expandStringList(snaps)
		req.SnapshotArns = s
		log.Printf("[DEBUG] Restoring Redis cluster from S3 snapshot: %#v", s)
	}

	resp, err := conn.CreateCacheCluster(req)
	if err != nil {
		return fmt.Errorf("Error creating Elasticache: %s", err)
	}

	// Assign the cluster id as the resource ID
	// Elasticache always retains the id in lower case, so we have to
	// mimic that or else we won't be able to refresh a resource whose
	// name contained uppercase characters.
	d.SetId(strings.ToLower(*resp.CacheCluster.CacheClusterId))

	pending := []string{"creating"}
	stateConf := &resource.StateChangeConf{
		Pending:    pending,
		Target:     "available",
		Refresh:    cacheClusterStateRefreshFunc(conn, d.Id(), "available", pending),
		Timeout:    10 * time.Minute,
		Delay:      10 * time.Second,
		MinTimeout: 3 * time.Second,
	}

	log.Printf("[DEBUG] Waiting for state to become available: %v", d.Id())
	_, sterr := stateConf.WaitForState()
	if sterr != nil {
		return fmt.Errorf("Error waiting for elasticache (%s) to be created: %s", d.Id(), sterr)
	}

	return resourceAwsElasticacheClusterRead(d, meta)
}

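// resourceAwsElasticacheClusterRead refreshes state from the DescribeCacheClusters
// API, including the cluster's nodes, configuration endpoint, and tags.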
func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).elasticacheconn
	req := &elasticache.DescribeCacheClustersInput{
		CacheClusterId:    aws.String(d.Id()),
		ShowCacheNodeInfo: aws.Bool(true),
	}

	res, err := conn.DescribeCacheClusters(req)
	if err != nil {
		if eccErr, ok := err.(awserr.Error); ok && eccErr.Code() == "CacheClusterNotFound" {
			log.Printf("[WARN] ElastiCache Cluster (%s) not found", d.Id())
			d.SetId("")
			return nil
		}

		return err
	}

	if len(res.CacheClusters) == 1 {
		c := res.CacheClusters[0]
		d.Set("cluster_id", c.CacheClusterId)
		d.Set("node_type", c.CacheNodeType)
		d.Set("num_cache_nodes", c.NumCacheNodes)
		d.Set("engine", c.Engine)
		d.Set("engine_version", c.EngineVersion)
		if c.ConfigurationEndpoint != nil {
			d.Set("port", c.ConfigurationEndpoint.Port)
			d.Set("configuration_endpoint", aws.String(fmt.Sprintf("%s:%d", *c.ConfigurationEndpoint.Address, *c.ConfigurationEndpoint.Port)))
		}

		d.Set("subnet_group_name", c.CacheSubnetGroupName)
		d.Set("security_group_names", c.CacheSecurityGroups)
		d.Set("security_group_ids", c.SecurityGroups)
		d.Set("parameter_group_name", c.CacheParameterGroup)
		d.Set("maintenance_window", c.PreferredMaintenanceWindow)
		d.Set("snapshot_window", c.SnapshotWindow)
		d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit)
		if c.NotificationConfiguration != nil {
			if *c.NotificationConfiguration.TopicStatus == "active" {
				d.Set("notification_topic_arn", c.NotificationConfiguration.TopicArn)
			}
		}

		if err := setCacheNodeData(d, c); err != nil {
			return err
		}
		// list tags for resource
		// set tags
		arn, err := buildECARN(d, meta)
		if err != nil {
			log.Printf("[DEBUG] Error building ARN for ElastiCache Cluster, not setting Tags for cluster %s", *c.CacheClusterId)
		} else {
			resp, err := conn.ListTagsForResource(&elasticache.ListTagsForResourceInput{
				ResourceName: aws.String(arn),
			})

			if err != nil {
				log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn)
			} else {
				var et []*elasticache.Tag
				if len(resp.TagList) > 0 {
					et = resp.TagList
				}
				d.Set("tags", tagsToMapEC(et))
			}
		}
	}

	return nil
}

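// resourceAwsElasticacheClusterUpdate applies tag changes and any modified
// cluster settings, then waits for the cluster to return to "available".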
func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).elasticacheconn
	arn, err := buildECARN(d, meta)
	if err != nil {
		log.Printf("[DEBUG] Error building ARN for ElastiCache Cluster, not updating Tags for cluster %s", d.Id())
	} else {
		if err := setTagsEC(conn, d, arn); err != nil {
			return err
		}
	}

	req := &elasticache.ModifyCacheClusterInput{
		CacheClusterId:   aws.String(d.Id()),
		ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)),
	}

	requestUpdate := false
	if d.HasChange("security_group_ids") {
		if attr := d.Get("security_group_ids").(*schema.Set); attr.Len() > 0 {
			req.SecurityGroupIds = expandStringList(attr.List())
			requestUpdate = true
		}
	}

	if d.HasChange("parameter_group_name") {
		req.CacheParameterGroupName = aws.String(d.Get("parameter_group_name").(string))
		requestUpdate = true
	}

	if d.HasChange("maintenance_window") {
		req.PreferredMaintenanceWindow = aws.String(d.Get("maintenance_window").(string))
		requestUpdate = true
	}

	if d.HasChange("notification_topic_arn") {
		v := d.Get("notification_topic_arn").(string)
		req.NotificationTopicArn = aws.String(v)
		if v == "" {
			inactive := "inactive"
			req.NotificationTopicStatus = &inactive
		}
		requestUpdate = true
	}

	if d.HasChange("engine_version") {
		req.EngineVersion = aws.String(d.Get("engine_version").(string))
		requestUpdate = true
	}

	if d.HasChange("snapshot_window") {
		req.SnapshotWindow = aws.String(d.Get("snapshot_window").(string))
		requestUpdate = true
	}

	if d.HasChange("snapshot_retention_limit") {
		req.SnapshotRetentionLimit = aws.Int64(int64(d.Get("snapshot_retention_limit").(int)))
		requestUpdate = true
	}

	if d.HasChange("num_cache_nodes") {
		req.NumCacheNodes = aws.Int64(int64(d.Get("num_cache_nodes").(int)))
		requestUpdate = true
	}

	if requestUpdate {
		log.Printf("[DEBUG] Modifying ElastiCache Cluster (%s), opts:\n%s", d.Id(), req)
		_, err := conn.ModifyCacheCluster(req)
		if err != nil {
			return fmt.Errorf("[WARN] Error updating ElastiCache cluster (%s), error: %s", d.Id(), err)
		}

		log.Printf("[DEBUG] Waiting for update: %s", d.Id())
		pending := []string{"modifying", "rebooting cache cluster nodes", "snapshotting"}
		stateConf := &resource.StateChangeConf{
			Pending:    pending,
			Target:     "available",
			Refresh:    cacheClusterStateRefreshFunc(conn, d.Id(), "available", pending),
			Timeout:    5 * time.Minute,
			Delay:      5 * time.Second,
			MinTimeout: 3 * time.Second,
		}

		_, sterr := stateConf.WaitForState()
		if sterr != nil {
			return fmt.Errorf("Error waiting for elasticache (%s) to update: %s", d.Id(), sterr)
		}
	}

	return resourceAwsElasticacheClusterRead(d, meta)
}

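// setCacheNodeData flattens the cluster's cache nodes into the cache_nodes
// attribute, sorted by node ID so the list order is deterministic.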
func setCacheNodeData(d *schema.ResourceData, c *elasticache.CacheCluster) error {
	sortedCacheNodes := make([]*elasticache.CacheNode, len(c.CacheNodes))
	copy(sortedCacheNodes, c.CacheNodes)
	sort.Sort(byCacheNodeId(sortedCacheNodes))

	cacheNodeData := make([]map[string]interface{}, 0, len(sortedCacheNodes))

	for _, node := range sortedCacheNodes {
		if node.CacheNodeId == nil || node.Endpoint == nil || node.Endpoint.Address == nil || node.Endpoint.Port == nil {
			return fmt.Errorf("Unexpected nil pointer in: %s", node)
		}
		cacheNodeData = append(cacheNodeData, map[string]interface{}{
			"id":      *node.CacheNodeId,
			"address": *node.Endpoint.Address,
			"port":    int(*node.Endpoint.Port),
		})
	}

	return d.Set("cache_nodes", cacheNodeData)
}

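// byCacheNodeId implements sort.Interface to order cache nodes by node ID.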
type byCacheNodeId []*elasticache.CacheNode

func (b byCacheNodeId) Len() int      { return len(b) }
func (b byCacheNodeId) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byCacheNodeId) Less(i, j int) bool {
	return b[i].CacheNodeId != nil && b[j].CacheNodeId != nil &&
		*b[i].CacheNodeId < *b[j].CacheNodeId
}

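// resourceAwsElasticacheClusterDelete deletes the cache cluster and waits for
// it to disappear before removing it from state.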
func resourceAwsElasticacheClusterDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).elasticacheconn

	req := &elasticache.DeleteCacheClusterInput{
		CacheClusterId: aws.String(d.Id()),
	}
	_, err := conn.DeleteCacheCluster(req)
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Waiting for deletion: %v", d.Id())
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"creating", "available", "deleting", "incompatible-parameters", "incompatible-network", "restore-failed"},
		Target:     "",
		Refresh:    cacheClusterStateRefreshFunc(conn, d.Id(), "", []string{}),
		Timeout:    10 * time.Minute,
		Delay:      10 * time.Second,
		MinTimeout: 3 * time.Second,
	}

	_, sterr := stateConf.WaitForState()
	if sterr != nil {
		return fmt.Errorf("Error waiting for elasticache (%s) to delete: %s", d.Id(), sterr)
	}

	d.SetId("")

	return nil
}

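// cacheClusterStateRefreshFunc returns a resource.StateRefreshFunc that reports
// the cluster's status, treating a missing cluster as deleted and only
// returning the given state once all cache nodes are available.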
func cacheClusterStateRefreshFunc(conn *elasticache.ElastiCache, clusterID, givenState string, pending []string) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		resp, err := conn.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{
			CacheClusterId:    aws.String(clusterID),
			ShowCacheNodeInfo: aws.Bool(true),
		})
		if err != nil {
			// Guard the type assertion so unexpected (non-AWS) errors do not panic.
			if apierr, ok := err.(awserr.Error); ok {
				log.Printf("[DEBUG] message: %v, code: %v", apierr.Message(), apierr.Code())
				if apierr.Message() == fmt.Sprintf("CacheCluster not found: %v", clusterID) {
					log.Printf("[DEBUG] Detected deletion of ElastiCache cluster %s", clusterID)
					return nil, "", nil
				}
			}

			log.Printf("[ERROR] CacheClusterStateRefreshFunc: %s", err)
			return nil, "", err
		}

		if len(resp.CacheClusters) == 0 {
			return nil, "", fmt.Errorf("[WARN] Error: no Cache Clusters found for id (%s)", clusterID)
		}

		var c *elasticache.CacheCluster
		for _, cluster := range resp.CacheClusters {
			if *cluster.CacheClusterId == clusterID {
				log.Printf("[DEBUG] Found matching ElastiCache cluster: %s", *cluster.CacheClusterId)
				c = cluster
			}
		}

		if c == nil {
			return nil, "", fmt.Errorf("[WARN] Error: no matching ElastiCache cluster for id (%s)", clusterID)
		}

		log.Printf("[DEBUG] ElastiCache Cluster (%s) status: %v", clusterID, *c.CacheClusterStatus)

		// return the current state if it's in the pending array
		for _, p := range pending {
			log.Printf("[DEBUG] ElastiCache: checking pending state (%s) for cluster (%s), cluster status: %s", pending, clusterID, *c.CacheClusterStatus)
			s := *c.CacheClusterStatus
			if p == s {
				log.Printf("[DEBUG] Return with status: %v", *c.CacheClusterStatus)
				return c, p, nil
			}
		}

		// return given state if it's not in pending
		if givenState != "" {
			log.Printf("[DEBUG] ElastiCache: checking given state (%s) of cluster (%s) against cluster status (%s)", givenState, clusterID, *c.CacheClusterStatus)
			// check to make sure we have the node count we're expecting
			if int64(len(c.CacheNodes)) != *c.NumCacheNodes {
				log.Printf("[DEBUG] Node count is not what is expected: %d found, %d expected", len(c.CacheNodes), *c.NumCacheNodes)
				return nil, "creating", nil
			}

			log.Printf("[DEBUG] Node count matched (%d)", len(c.CacheNodes))
			// loop the nodes and check their status as well
			for _, n := range c.CacheNodes {
				log.Printf("[DEBUG] Checking cache node for status: %s", n)
				if n.CacheNodeStatus != nil && *n.CacheNodeStatus != "available" {
					log.Printf("[DEBUG] Node (%s) is not yet available, status: %s", *n.CacheNodeId, *n.CacheNodeStatus)
					return nil, "creating", nil
				}
				log.Printf("[DEBUG] Cache node is available")
			}
			log.Printf("[DEBUG] ElastiCache returning given state (%s), cluster: %s", givenState, c)
			return c, givenState, nil
		}
		log.Printf("[DEBUG] current status: %v", *c.CacheClusterStatus)
		return c, *c.CacheClusterStatus, nil
	}
}

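// buildECARN constructs the ARN of the cache cluster, deriving the AWS account
// ID from the calling IAM user.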
func buildECARN(d *schema.ResourceData, meta interface{}) (string, error) {
	iamconn := meta.(*AWSClient).iamconn
	region := meta.(*AWSClient).region
	// A zero value GetUserInput{} defaults to the currently authenticated user,
	// whose ARN contains the account ID.
	resp, err := iamconn.GetUser(&iam.GetUserInput{})
	if err != nil {
		return "", err
	}
	userARN := *resp.User.Arn
	accountID := strings.Split(userARN, ":")[4]
	arn := fmt.Sprintf("arn:aws:elasticache:%s:%s:cluster:%s", region, accountID, d.Id())
	return arn, nil
}