github.com/koding/terraform@v0.6.4-0.20170608090606-5d7e0339779d/builtin/providers/google/resource_bigquery_table.go (about) 1 package google 2 3 import ( 4 "encoding/json" 5 "fmt" 6 "log" 7 "strings" 8 9 "github.com/hashicorp/terraform/helper/schema" 10 "github.com/hashicorp/terraform/helper/structure" 11 "github.com/hashicorp/terraform/helper/validation" 12 "google.golang.org/api/bigquery/v2" 13 ) 14 15 func resourceBigQueryTable() *schema.Resource { 16 return &schema.Resource{ 17 Create: resourceBigQueryTableCreate, 18 Read: resourceBigQueryTableRead, 19 Delete: resourceBigQueryTableDelete, 20 Update: resourceBigQueryTableUpdate, 21 Importer: &schema.ResourceImporter{ 22 State: schema.ImportStatePassthrough, 23 }, 24 Schema: map[string]*schema.Schema{ 25 // TableId: [Required] The ID of the table. The ID must contain only 26 // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum 27 // length is 1,024 characters. 28 "table_id": { 29 Type: schema.TypeString, 30 Required: true, 31 ForceNew: true, 32 }, 33 34 // DatasetId: [Required] The ID of the dataset containing this table. 35 "dataset_id": { 36 Type: schema.TypeString, 37 Required: true, 38 ForceNew: true, 39 }, 40 41 // ProjectId: [Required] The ID of the project containing this table. 42 "project": { 43 Type: schema.TypeString, 44 Optional: true, 45 ForceNew: true, 46 }, 47 48 // Description: [Optional] A user-friendly description of this table. 49 "description": { 50 Type: schema.TypeString, 51 Optional: true, 52 }, 53 54 // ExpirationTime: [Optional] The time when this table expires, in 55 // milliseconds since the epoch. If not present, the table will persist 56 // indefinitely. Expired tables will be deleted and their storage 57 // reclaimed. 58 "expiration_time": { 59 Type: schema.TypeInt, 60 Optional: true, 61 Computed: true, 62 }, 63 64 // FriendlyName: [Optional] A descriptive name for this table. 65 "friendly_name": { 66 Type: schema.TypeString, 67 Optional: true, 68 }, 69 70 // Labels: [Experimental] The labels associated with this table. You can 71 // use these to organize and group your tables. Label keys and values 72 // can be no longer than 63 characters, can only contain lowercase 73 // letters, numeric characters, underscores and dashes. International 74 // characters are allowed. Label values are optional. Label keys must 75 // start with a letter and each label in the list must have a different 76 // key. 77 "labels": &schema.Schema{ 78 Type: schema.TypeMap, 79 Optional: true, 80 Elem: schema.TypeString, 81 }, 82 83 // Schema: [Optional] Describes the schema of this table. 84 "schema": { 85 Type: schema.TypeString, 86 Optional: true, 87 Computed: true, 88 ValidateFunc: validation.ValidateJsonString, 89 StateFunc: func(v interface{}) string { 90 json, _ := structure.NormalizeJsonString(v) 91 return json 92 }, 93 }, 94 95 // TimePartitioning: [Experimental] If specified, configures time-based 96 // partitioning for this table. 97 "time_partitioning": &schema.Schema{ 98 Type: schema.TypeList, 99 Optional: true, 100 MaxItems: 1, 101 Elem: &schema.Resource{ 102 Schema: map[string]*schema.Schema{ 103 // ExpirationMs: [Optional] Number of milliseconds for which to keep the 104 // storage for a partition. 105 "expiration_ms": { 106 Type: schema.TypeInt, 107 Optional: true, 108 }, 109 110 // Type: [Required] The only type supported is DAY, which will generate 111 // one partition per day based on data loading time. 112 "type": { 113 Type: schema.TypeString, 114 Required: true, 115 ValidateFunc: validation.StringInSlice([]string{"DAY"}, false), 116 }, 117 }, 118 }, 119 }, 120 121 // CreationTime: [Output-only] The time when this table was created, in 122 // milliseconds since the epoch. 123 "creation_time": { 124 Type: schema.TypeInt, 125 Computed: true, 126 }, 127 128 // Etag: [Output-only] A hash of this resource. 129 "etag": { 130 Type: schema.TypeString, 131 Computed: true, 132 }, 133 134 // LastModifiedTime: [Output-only] The time when this table was last 135 // modified, in milliseconds since the epoch. 136 "last_modified_time": { 137 Type: schema.TypeInt, 138 Computed: true, 139 }, 140 141 // Location: [Output-only] The geographic location where the table 142 // resides. This value is inherited from the dataset. 143 "location": { 144 Type: schema.TypeString, 145 Computed: true, 146 }, 147 148 // NumBytes: [Output-only] The size of this table in bytes, excluding 149 // any data in the streaming buffer. 150 "num_bytes": { 151 Type: schema.TypeInt, 152 Computed: true, 153 }, 154 155 // NumLongTermBytes: [Output-only] The number of bytes in the table that 156 // are considered "long-term storage". 157 "num_long_term_bytes": { 158 Type: schema.TypeInt, 159 Computed: true, 160 }, 161 162 // NumRows: [Output-only] The number of rows of data in this table, 163 // excluding any data in the streaming buffer. 164 "num_rows": { 165 Type: schema.TypeInt, 166 Computed: true, 167 }, 168 169 // SelfLink: [Output-only] A URL that can be used to access this 170 // resource again. 171 "self_link": { 172 Type: schema.TypeString, 173 Computed: true, 174 }, 175 176 // Type: [Output-only] Describes the table type. The following values 177 // are supported: TABLE: A normal BigQuery table. VIEW: A virtual table 178 // defined by a SQL query. EXTERNAL: A table that references data stored 179 // in an external storage system, such as Google Cloud Storage. The 180 // default value is TABLE. 181 "type": { 182 Type: schema.TypeString, 183 Computed: true, 184 }, 185 }, 186 } 187 } 188 189 func resourceTable(d *schema.ResourceData, meta interface{}) (*bigquery.Table, error) { 190 config := meta.(*Config) 191 192 project, err := getProject(d, config) 193 if err != nil { 194 return nil, err 195 } 196 197 table := &bigquery.Table{ 198 TableReference: &bigquery.TableReference{ 199 DatasetId: d.Get("dataset_id").(string), 200 TableId: d.Get("table_id").(string), 201 ProjectId: project, 202 }, 203 } 204 205 if v, ok := d.GetOk("description"); ok { 206 table.Description = v.(string) 207 } 208 209 if v, ok := d.GetOk("expiration_time"); ok { 210 table.ExpirationTime = v.(int64) 211 } 212 213 if v, ok := d.GetOk("friendly_name"); ok { 214 table.FriendlyName = v.(string) 215 } 216 217 if v, ok := d.GetOk("labels"); ok { 218 labels := map[string]string{} 219 220 for k, v := range v.(map[string]interface{}) { 221 labels[k] = v.(string) 222 } 223 224 table.Labels = labels 225 } 226 227 if v, ok := d.GetOk("schema"); ok { 228 schema, err := expandSchema(v) 229 if err != nil { 230 return nil, err 231 } 232 233 table.Schema = schema 234 } 235 236 if v, ok := d.GetOk("time_partitioning"); ok { 237 table.TimePartitioning = expandTimePartitioning(v) 238 } 239 240 return table, nil 241 } 242 243 func resourceBigQueryTableCreate(d *schema.ResourceData, meta interface{}) error { 244 config := meta.(*Config) 245 246 project, err := getProject(d, config) 247 if err != nil { 248 return err 249 } 250 251 table, err := resourceTable(d, meta) 252 if err != nil { 253 return err 254 } 255 256 datasetID := d.Get("dataset_id").(string) 257 258 log.Printf("[INFO] Creating BigQuery table: %s", table.TableReference.TableId) 259 260 res, err := config.clientBigQuery.Tables.Insert(project, datasetID, table).Do() 261 if err != nil { 262 return err 263 } 264 265 log.Printf("[INFO] BigQuery table %s has been created", res.Id) 266 267 d.SetId(fmt.Sprintf("%s:%s.%s", res.TableReference.ProjectId, res.TableReference.DatasetId, res.TableReference.TableId)) 268 269 return resourceBigQueryTableRead(d, meta) 270 } 271 272 func resourceBigQueryTableParseID(id string) (string, string, string) { 273 parts := strings.FieldsFunc(id, func(r rune) bool { return r == ':' || r == '.' }) 274 return parts[0], parts[1], parts[2] // projectID, datasetID, tableID 275 } 276 277 func resourceBigQueryTableRead(d *schema.ResourceData, meta interface{}) error { 278 config := meta.(*Config) 279 280 log.Printf("[INFO] Reading BigQuery table: %s", d.Id()) 281 282 projectID, datasetID, tableID := resourceBigQueryTableParseID(d.Id()) 283 284 res, err := config.clientBigQuery.Tables.Get(projectID, datasetID, tableID).Do() 285 if err != nil { 286 return handleNotFoundError(err, d, fmt.Sprintf("BigQuery table %q", tableID)) 287 } 288 289 d.Set("description", res.Description) 290 d.Set("expiration_time", res.ExpirationTime) 291 d.Set("friendly_name", res.FriendlyName) 292 d.Set("labels", res.Labels) 293 d.Set("creation_time", res.CreationTime) 294 d.Set("etag", res.Etag) 295 d.Set("last_modified_time", res.LastModifiedTime) 296 d.Set("location", res.Location) 297 d.Set("num_bytes", res.NumBytes) 298 d.Set("table_id", res.TableReference.TableId) 299 d.Set("dataset_id", res.TableReference.DatasetId) 300 d.Set("num_long_term_bytes", res.NumLongTermBytes) 301 d.Set("num_rows", res.NumRows) 302 d.Set("self_link", res.SelfLink) 303 d.Set("type", res.Type) 304 305 if res.TimePartitioning != nil { 306 if err := d.Set("time_partitioning", flattenTimePartitioning(res.TimePartitioning)); err != nil { 307 return err 308 } 309 } 310 311 if res.Schema != nil { 312 schema, err := flattenSchema(res.Schema) 313 if err != nil { 314 return err 315 } 316 317 d.Set("schema", schema) 318 } 319 320 return nil 321 } 322 323 func resourceBigQueryTableUpdate(d *schema.ResourceData, meta interface{}) error { 324 config := meta.(*Config) 325 326 table, err := resourceTable(d, meta) 327 if err != nil { 328 return err 329 } 330 331 log.Printf("[INFO] Updating BigQuery table: %s", d.Id()) 332 333 projectID, datasetID, tableID := resourceBigQueryTableParseID(d.Id()) 334 335 if _, err = config.clientBigQuery.Tables.Update(projectID, datasetID, tableID, table).Do(); err != nil { 336 return err 337 } 338 339 return resourceBigQueryTableRead(d, meta) 340 } 341 342 func resourceBigQueryTableDelete(d *schema.ResourceData, meta interface{}) error { 343 config := meta.(*Config) 344 345 log.Printf("[INFO] Deleting BigQuery table: %s", d.Id()) 346 347 projectID, datasetID, tableID := resourceBigQueryTableParseID(d.Id()) 348 349 if err := config.clientBigQuery.Tables.Delete(projectID, datasetID, tableID).Do(); err != nil { 350 return err 351 } 352 353 d.SetId("") 354 355 return nil 356 } 357 358 func expandSchema(raw interface{}) (*bigquery.TableSchema, error) { 359 var fields []*bigquery.TableFieldSchema 360 361 if err := json.Unmarshal([]byte(raw.(string)), &fields); err != nil { 362 return nil, err 363 } 364 365 return &bigquery.TableSchema{Fields: fields}, nil 366 } 367 368 func flattenSchema(tableSchema *bigquery.TableSchema) (string, error) { 369 schema, err := json.Marshal(tableSchema.Fields) 370 if err != nil { 371 return "", err 372 } 373 374 return string(schema), nil 375 } 376 377 func expandTimePartitioning(configured interface{}) *bigquery.TimePartitioning { 378 raw := configured.([]interface{})[0].(map[string]interface{}) 379 tp := &bigquery.TimePartitioning{Type: raw["type"].(string)} 380 381 if v, ok := raw["expiration_ms"]; ok { 382 tp.ExpirationMs = int64(v.(int)) 383 } 384 385 return tp 386 } 387 388 func flattenTimePartitioning(tp *bigquery.TimePartitioning) []map[string]interface{} { 389 result := map[string]interface{}{"type": tp.Type} 390 391 if tp.ExpirationMs != 0 { 392 result["expiration_ms"] = tp.ExpirationMs 393 } 394 395 return []map[string]interface{}{result} 396 }