github.com/koding/terraform@v0.6.4-0.20170608090606-5d7e0339779d/builtin/providers/google/resource_container_cluster.go (about)

     1  package google
     2  
     3  import (
     4  	"fmt"
     5  	"log"
     6  	"net"
     7  	"regexp"
     8  
     9  	"github.com/hashicorp/terraform/helper/resource"
    10  	"github.com/hashicorp/terraform/helper/schema"
    11  	"google.golang.org/api/container/v1"
    12  )
    13  
var (
	// instanceGroupManagerURL matches a fully-qualified compute instance
	// group manager URL, capturing the project ID (group 1), the zone
	// (group 2), and the manager name (group 3). It is used by
	// getInstanceGroupUrlsFromManagerUrls to detect and repair API
	// responses that return manager URLs where instance group URLs are
	// expected.
	instanceGroupManagerURL = regexp.MustCompile("^https://www.googleapis.com/compute/v1/projects/([a-z][a-z0-9-]{5}(?:[-a-z0-9]{0,23}[a-z0-9])?)/zones/([a-z0-9-]*)/instanceGroupManagers/([^/]*)")
)
    17  
// resourceContainerCluster returns the schema and CRUD handlers for the
// google_container_cluster resource. Nearly every attribute is ForceNew:
// the only in-place update supported here is the node version (see
// resourceContainerClusterUpdate).
func resourceContainerCluster() *schema.Resource {
	return &schema.Resource{
		Create: resourceContainerClusterCreate,
		Read:   resourceContainerClusterRead,
		Update: resourceContainerClusterUpdate,
		Delete: resourceContainerClusterDelete,

		Schema: map[string]*schema.Schema{
			// Basic-auth credentials for the cluster master. username and
			// password are supplied by the user; the certificate/key fields
			// are outputs populated from the API response.
			"master_auth": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: true,
				MaxItems: 1,
				Computed: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"client_certificate": &schema.Schema{
							Type:     schema.TypeString,
							Computed: true,
						},
						"client_key": &schema.Schema{
							Type:      schema.TypeString,
							Computed:  true,
							Sensitive: true,
						},
						"cluster_ca_certificate": &schema.Schema{
							Type:     schema.TypeString,
							Computed: true,
						},
						"password": &schema.Schema{
							Type:      schema.TypeString,
							Required:  true,
							ForceNew:  true,
							Sensitive: true,
						},
						"username": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
							ForceNew: true,
						},
					},
				},
			},

			// Cluster name. The validator enforces: at most 40 characters,
			// lowercase letters/digits/hyphens only, starting with a letter
			// and ending with a letter or digit.
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
					value := v.(string)

					if len(value) > 40 {
						errors = append(errors, fmt.Errorf(
							"%q cannot be longer than 40 characters", k))
					}
					if !regexp.MustCompile("^[a-z0-9-]+$").MatchString(value) {
						errors = append(errors, fmt.Errorf(
							"%q can only contain lowercase letters, numbers and hyphens", k))
					}
					if !regexp.MustCompile("^[a-z]").MatchString(value) {
						errors = append(errors, fmt.Errorf(
							"%q must start with a letter", k))
					}
					if !regexp.MustCompile("[a-z0-9]$").MatchString(value) {
						errors = append(errors, fmt.Errorf(
							"%q must end with a number or a letter", k))
					}
					return
				},
			},

			// Primary zone hosting the cluster master.
			"zone": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"initial_node_count": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				ForceNew: true,
			},

			// Extra zones for multi-zone clusters. Must NOT include the
			// primary "zone" (enforced at create time).
			"additional_zones": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Computed: true,
				ForceNew: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},

			// CIDR for pod IPs; validated as a canonical CIDR string
			// (the value must equal its own parsed network form).
			"cluster_ipv4_cidr": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
					value := v.(string)
					_, ipnet, err := net.ParseCIDR(value)

					if err != nil || ipnet == nil || value != ipnet.String() {
						errors = append(errors, fmt.Errorf(
							"%q must contain a valid CIDR", k))
					}
					return
				},
			},

			"description": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			// IP address of the cluster master (output only).
			"endpoint": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			// URLs of the instance groups backing the cluster (output only;
			// see getInstanceGroupUrlsFromManagerUrls for the API shim).
			"instance_group_urls": &schema.Schema{
				Type:     schema.TypeList,
				Computed: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},

			"logging_service": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			"monitoring_service": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			"network": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Default:  "default",
				ForceNew: true,
			},
			"subnetwork": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
			// Cluster addon toggles. Each addon is a single-element list
			// holding only a "disabled" flag, mirroring the API structure.
			"addons_config": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"http_load_balancing": &schema.Schema{
							Type:     schema.TypeList,
							Optional: true,
							ForceNew: true,
							MaxItems: 1,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"disabled": &schema.Schema{
										Type:     schema.TypeBool,
										Optional: true,
										ForceNew: true,
									},
								},
							},
						},
						"horizontal_pod_autoscaling": &schema.Schema{
							Type:     schema.TypeList,
							Optional: true,
							ForceNew: true,
							MaxItems: 1,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"disabled": &schema.Schema{
										Type:     schema.TypeBool,
										Optional: true,
										ForceNew: true,
									},
								},
							},
						},
					},
				},
			},
			// Configuration for the default node pool's instances.
			"node_config": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Computed: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"machine_type": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},

						// Boot disk size; GKE's minimum is 10 GB.
						"disk_size_gb": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
							Computed: true,
							ForceNew: true,
							ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
								value := v.(int)

								if value < 10 {
									errors = append(errors, fmt.Errorf(
										"%q cannot be less than 10", k))
								}
								return
							},
						},

						"local_ssd_count": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
							Computed: true,
							ForceNew: true,
							ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
								value := v.(int)

								if value < 0 {
									errors = append(errors, fmt.Errorf(
										"%q cannot be negative", k))
								}
								return
							},
						},

						// OAuth scopes are normalized through
						// canonicalizeServiceScope so aliases and full URLs
						// compare equal in state.
						"oauth_scopes": &schema.Schema{
							Type:     schema.TypeList,
							Optional: true,
							Computed: true,
							ForceNew: true,
							Elem: &schema.Schema{
								Type: schema.TypeString,
								StateFunc: func(v interface{}) string {
									return canonicalizeServiceScope(v.(string))
								},
							},
						},

						"service_account": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},

						"metadata": &schema.Schema{
							Type:     schema.TypeMap,
							Optional: true,
							ForceNew: true,
							Elem:     schema.TypeString,
						},

						"image_type": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},
					},
				},
			},

			"node_version": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			// Additional node pools. name and name_prefix are mutually
			// exclusive; when neither is given a unique name is generated.
			"node_pool": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Computed: true,
				ForceNew: true, // TODO(danawillow): Add ability to add/remove nodePools
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"initial_node_count": &schema.Schema{
							Type:     schema.TypeInt,
							Required: true,
							ForceNew: true,
						},

						"name": &schema.Schema{
							Type:          schema.TypeString,
							Optional:      true,
							Computed:      true,
							ConflictsWith: []string{"node_pool.name_prefix"},
							ForceNew:      true,
						},

						"name_prefix": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},
					},
				},
			},

			"project": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
		},
	}
}
   335  
// resourceContainerClusterCreate assembles a container.Cluster from the
// resource configuration, issues the Create call, waits for the zonal
// operation to complete, and finally re-reads the cluster into state.
// The resource ID is the cluster name.
func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	zoneName := d.Get("zone").(string)
	clusterName := d.Get("name").(string)

	cluster := &container.Cluster{
		Name:             clusterName,
		InitialNodeCount: int64(d.Get("initial_node_count").(int)),
	}

	if v, ok := d.GetOk("master_auth"); ok {
		// master_auth has MaxItems: 1, so only the first element is used.
		masterAuths := v.([]interface{})
		masterAuth := masterAuths[0].(map[string]interface{})
		cluster.MasterAuth = &container.MasterAuth{
			Password: masterAuth["password"].(string),
			Username: masterAuth["username"].(string),
		}
	}

	if v, ok := d.GetOk("node_version"); ok {
		cluster.InitialClusterVersion = v.(string)
	}

	if v, ok := d.GetOk("additional_zones"); ok {
		// The API wants the complete location set including the primary
		// zone, while the config must list only the extra zones; the
		// primary zone is appended after validating it isn't duplicated.
		locationsList := v.([]interface{})
		locations := []string{}
		for _, v := range locationsList {
			location := v.(string)
			locations = append(locations, location)
			if location == zoneName {
				return fmt.Errorf("additional_zones should not contain the original 'zone'.")
			}
		}
		locations = append(locations, zoneName)
		cluster.Locations = locations
	}

	if v, ok := d.GetOk("cluster_ipv4_cidr"); ok {
		cluster.ClusterIpv4Cidr = v.(string)
	}

	if v, ok := d.GetOk("description"); ok {
		cluster.Description = v.(string)
	}

	if v, ok := d.GetOk("logging_service"); ok {
		cluster.LoggingService = v.(string)
	}

	if v, ok := d.GetOk("monitoring_service"); ok {
		cluster.MonitoringService = v.(string)
	}

	if _, ok := d.GetOk("network"); ok {
		// Resolve the configured value (name or self link) to the network
		// name the container API expects.
		network, err := getNetworkName(d, "network")
		if err != nil {
			return err
		}
		cluster.Network = network
	}

	if v, ok := d.GetOk("subnetwork"); ok {
		cluster.Subnetwork = v.(string)
	}

	if v, ok := d.GetOk("addons_config"); ok {
		addonsConfig := v.([]interface{})[0].(map[string]interface{})
		cluster.AddonsConfig = &container.AddonsConfig{}

		// Each addon is a single-element list; an empty list means the
		// addon block was omitted and the API default applies.
		if v, ok := addonsConfig["http_load_balancing"]; ok && len(v.([]interface{})) > 0 {
			addon := v.([]interface{})[0].(map[string]interface{})
			cluster.AddonsConfig.HttpLoadBalancing = &container.HttpLoadBalancing{
				Disabled: addon["disabled"].(bool),
			}
		}

		if v, ok := addonsConfig["horizontal_pod_autoscaling"]; ok && len(v.([]interface{})) > 0 {
			addon := v.([]interface{})[0].(map[string]interface{})
			cluster.AddonsConfig.HorizontalPodAutoscaling = &container.HorizontalPodAutoscaling{
				Disabled: addon["disabled"].(bool),
			}
		}
	}
	if v, ok := d.GetOk("node_config"); ok {
		nodeConfigs := v.([]interface{})
		if len(nodeConfigs) > 1 {
			return fmt.Errorf("Cannot specify more than one node_config.")
		}
		nodeConfig := nodeConfigs[0].(map[string]interface{})

		cluster.NodeConfig = &container.NodeConfig{}

		// The `v, ok =` assignments below deliberately reuse the v/ok
		// declared by the enclosing GetOk — these are plain map lookups,
		// not GetOk calls, so zero values pass through unchanged.
		if v, ok = nodeConfig["machine_type"]; ok {
			cluster.NodeConfig.MachineType = v.(string)
		}

		if v, ok = nodeConfig["disk_size_gb"]; ok {
			cluster.NodeConfig.DiskSizeGb = int64(v.(int))
		}

		if v, ok = nodeConfig["local_ssd_count"]; ok {
			cluster.NodeConfig.LocalSsdCount = int64(v.(int))
		}

		if v, ok := nodeConfig["oauth_scopes"]; ok {
			// Canonicalize scope aliases (e.g. "compute-rw") to full URLs
			// before sending them to the API.
			scopesList := v.([]interface{})
			scopes := []string{}
			for _, v := range scopesList {
				scopes = append(scopes, canonicalizeServiceScope(v.(string)))
			}

			cluster.NodeConfig.OauthScopes = scopes
		}

		if v, ok = nodeConfig["service_account"]; ok {
			cluster.NodeConfig.ServiceAccount = v.(string)
		}

		if v, ok = nodeConfig["metadata"]; ok {
			m := make(map[string]string)
			for k, val := range v.(map[string]interface{}) {
				m[k] = val.(string)
			}
			cluster.NodeConfig.Metadata = m
		}

		if v, ok = nodeConfig["image_type"]; ok {
			cluster.NodeConfig.ImageType = v.(string)
		}
	}

	nodePoolsCount := d.Get("node_pool.#").(int)
	if nodePoolsCount > 0 {
		nodePools := make([]*container.NodePool, 0, nodePoolsCount)
		for i := 0; i < nodePoolsCount; i++ {
			prefix := fmt.Sprintf("node_pool.%d", i)

			nodeCount := d.Get(prefix + ".initial_node_count").(int)

			// Name precedence: explicit name, then name_prefix with a
			// generated suffix, then a fully generated unique ID.
			var name string
			if v, ok := d.GetOk(prefix + ".name"); ok {
				name = v.(string)
			} else if v, ok := d.GetOk(prefix + ".name_prefix"); ok {
				name = resource.PrefixedUniqueId(v.(string))
			} else {
				name = resource.UniqueId()
			}

			nodePool := &container.NodePool{
				Name:             name,
				InitialNodeCount: int64(nodeCount),
			}

			nodePools = append(nodePools, nodePool)
		}
		cluster.NodePools = nodePools
	}

	req := &container.CreateClusterRequest{
		Cluster: cluster,
	}

	op, err := config.clientContainer.Projects.Zones.Clusters.Create(
		project, zoneName, req).Do()
	if err != nil {
		return err
	}

	// Wait until it's created
	waitErr := containerOperationWait(config, op, project, zoneName, "creating GKE cluster", 30, 3)
	if waitErr != nil {
		// The resource didn't actually create
		d.SetId("")
		return waitErr
	}

	log.Printf("[INFO] GKE cluster %s has been created", clusterName)

	d.SetId(clusterName)

	return resourceContainerClusterRead(d, meta)
}
   524  
// resourceContainerClusterRead fetches the cluster from the API and copies
// its attributes into Terraform state. A 404 clears the resource from state
// via handleNotFoundError.
func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	zoneName := d.Get("zone").(string)

	cluster, err := config.clientContainer.Projects.Zones.Clusters.Get(
		project, zoneName, d.Get("name").(string)).Do()
	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("Container Cluster %q", d.Get("name").(string)))
	}

	d.Set("name", cluster.Name)
	d.Set("zone", cluster.Zone)

	// additional_zones is the location set minus the primary zone; a
	// single-zone cluster (len == 1, just the primary) stores an empty list.
	locations := []string{}
	if len(cluster.Locations) > 1 {
		for _, location := range cluster.Locations {
			if location != cluster.Zone {
				locations = append(locations, location)
			}
		}
	}
	d.Set("additional_zones", locations)

	d.Set("endpoint", cluster.Endpoint)

	masterAuth := []map[string]interface{}{
		map[string]interface{}{
			"username":               cluster.MasterAuth.Username,
			"password":               cluster.MasterAuth.Password,
			"client_certificate":     cluster.MasterAuth.ClientCertificate,
			"client_key":             cluster.MasterAuth.ClientKey,
			"cluster_ca_certificate": cluster.MasterAuth.ClusterCaCertificate,
		},
	}
	d.Set("master_auth", masterAuth)

	d.Set("initial_node_count", cluster.InitialNodeCount)
	d.Set("node_version", cluster.CurrentNodeVersion)
	d.Set("cluster_ipv4_cidr", cluster.ClusterIpv4Cidr)
	d.Set("description", cluster.Description)
	d.Set("logging_service", cluster.LoggingService)
	d.Set("monitoring_service", cluster.MonitoringService)
	// NOTE(review): network is echoed back from config rather than taken
	// from cluster.Network — presumably because the API normalizes the
	// value (name vs. self link) and would otherwise cause a perma-diff;
	// confirm against getNetworkName in Create.
	d.Set("network", d.Get("network").(string))
	d.Set("subnetwork", cluster.Subnetwork)
	d.Set("node_config", flattenClusterNodeConfig(cluster.NodeConfig))
	d.Set("node_pool", flattenClusterNodePools(d, cluster.NodePools))

	// Shim: the API may return instance group *manager* URLs here; resolve
	// them to instance group URLs before storing.
	if igUrls, err := getInstanceGroupUrlsFromManagerUrls(config, cluster.InstanceGroupUrls); err != nil {
		return err
	} else {
		d.Set("instance_group_urls", igUrls)
	}

	return nil
}
   586  
   587  func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) error {
   588  	config := meta.(*Config)
   589  
   590  	project, err := getProject(d, config)
   591  	if err != nil {
   592  		return err
   593  	}
   594  
   595  	zoneName := d.Get("zone").(string)
   596  	clusterName := d.Get("name").(string)
   597  	desiredNodeVersion := d.Get("node_version").(string)
   598  
   599  	req := &container.UpdateClusterRequest{
   600  		Update: &container.ClusterUpdate{
   601  			DesiredNodeVersion: desiredNodeVersion,
   602  		},
   603  	}
   604  	op, err := config.clientContainer.Projects.Zones.Clusters.Update(
   605  		project, zoneName, clusterName, req).Do()
   606  	if err != nil {
   607  		return err
   608  	}
   609  
   610  	// Wait until it's updated
   611  	waitErr := containerOperationWait(config, op, project, zoneName, "updating GKE cluster", 10, 2)
   612  	if waitErr != nil {
   613  		return waitErr
   614  	}
   615  
   616  	log.Printf("[INFO] GKE cluster %s has been updated to %s", d.Id(),
   617  		desiredNodeVersion)
   618  
   619  	return resourceContainerClusterRead(d, meta)
   620  }
   621  
   622  func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) error {
   623  	config := meta.(*Config)
   624  
   625  	project, err := getProject(d, config)
   626  	if err != nil {
   627  		return err
   628  	}
   629  
   630  	zoneName := d.Get("zone").(string)
   631  	clusterName := d.Get("name").(string)
   632  
   633  	log.Printf("[DEBUG] Deleting GKE cluster %s", d.Get("name").(string))
   634  	op, err := config.clientContainer.Projects.Zones.Clusters.Delete(
   635  		project, zoneName, clusterName).Do()
   636  	if err != nil {
   637  		return err
   638  	}
   639  
   640  	// Wait until it's deleted
   641  	waitErr := containerOperationWait(config, op, project, zoneName, "deleting GKE cluster", 10, 3)
   642  	if waitErr != nil {
   643  		return waitErr
   644  	}
   645  
   646  	log.Printf("[INFO] GKE cluster %s has been deleted", d.Id())
   647  
   648  	d.SetId("")
   649  
   650  	return nil
   651  }
   652  
   653  // container engine's API currently mistakenly returns the instance group manager's
   654  // URL instead of the instance group's URL in its responses. This shim detects that
   655  // error, and corrects it, by fetching the instance group manager URL and retrieving
   656  // the instance group manager, then using that to look up the instance group URL, which
   657  // is then substituted.
   658  //
   659  // This should be removed when the API response is fixed.
   660  func getInstanceGroupUrlsFromManagerUrls(config *Config, igmUrls []string) ([]string, error) {
   661  	instanceGroupURLs := make([]string, 0, len(igmUrls))
   662  	for _, u := range igmUrls {
   663  		if !instanceGroupManagerURL.MatchString(u) {
   664  			instanceGroupURLs = append(instanceGroupURLs, u)
   665  			continue
   666  		}
   667  		matches := instanceGroupManagerURL.FindStringSubmatch(u)
   668  		instanceGroupManager, err := config.clientCompute.InstanceGroupManagers.Get(matches[1], matches[2], matches[3]).Do()
   669  		if err != nil {
   670  			return nil, fmt.Errorf("Error reading instance group manager returned as an instance group URL: %s", err)
   671  		}
   672  		instanceGroupURLs = append(instanceGroupURLs, instanceGroupManager.InstanceGroup)
   673  	}
   674  	return instanceGroupURLs, nil
   675  }
   676  
   677  func flattenClusterNodeConfig(c *container.NodeConfig) []map[string]interface{} {
   678  	config := []map[string]interface{}{
   679  		map[string]interface{}{
   680  			"machine_type":    c.MachineType,
   681  			"disk_size_gb":    c.DiskSizeGb,
   682  			"local_ssd_count": c.LocalSsdCount,
   683  			"service_account": c.ServiceAccount,
   684  			"metadata":        c.Metadata,
   685  			"image_type":      c.ImageType,
   686  		},
   687  	}
   688  
   689  	if len(c.OauthScopes) > 0 {
   690  		config[0]["oauth_scopes"] = c.OauthScopes
   691  	}
   692  
   693  	return config
   694  }
   695  
   696  func flattenClusterNodePools(d *schema.ResourceData, c []*container.NodePool) []map[string]interface{} {
   697  	count := len(c)
   698  
   699  	nodePools := make([]map[string]interface{}, 0, count)
   700  
   701  	for i, np := range c {
   702  		nodePool := map[string]interface{}{
   703  			"name":               np.Name,
   704  			"name_prefix":        d.Get(fmt.Sprintf("node_pool.%d.name_prefix", i)),
   705  			"initial_node_count": np.InitialNodeCount,
   706  		}
   707  		nodePools = append(nodePools, nodePool)
   708  	}
   709  
   710  	return nodePools
   711  }