// Source: github.com/koding/terraform@v0.6.4-0.20170608090606-5d7e0339779d/builtin/providers/azurerm/resource_arm_storage_blob.go

package azurerm

import (
	"bytes"
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"io"
	"log"
	"os"
	"runtime"
	"strings"
	"sync"

	"github.com/Azure/azure-sdk-for-go/storage"
	"github.com/hashicorp/terraform/helper/schema"
)

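// resourceArmStorageBlob defines the schema and the CRUD handlers for the
// storage blob resource. Illustrative usage (assuming the provider registers
// this resource as "azurerm_storage_blob"; all names and values below are
// placeholders):
//
//	resource "azurerm_storage_blob" "example" {
//	  name                   = "example.vhd"
//	  resource_group_name    = "example-resources"
//	  storage_account_name   = "examplestorageacct"
//	  storage_container_name = "vhds"
//	  type                   = "page"
//	  size                   = 5120
//	}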
func resourceArmStorageBlob() *schema.Resource {
	return &schema.Resource{
		Create: resourceArmStorageBlobCreate,
		Read:   resourceArmStorageBlobRead,
		Exists: resourceArmStorageBlobExists,
		Delete: resourceArmStorageBlobDelete,

		Schema: map[string]*schema.Schema{
			"name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"resource_group_name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"storage_account_name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"storage_container_name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"type": {
				Type:         schema.TypeString,
				Optional:     true,
				ForceNew:     true,
				ValidateFunc: validateArmStorageBlobType,
			},
			"size": {
				Type:         schema.TypeInt,
				Optional:     true,
				ForceNew:     true,
				Default:      0,
				ValidateFunc: validateArmStorageBlobSize,
			},
			"source": {
				Type:          schema.TypeString,
				Optional:      true,
				ForceNew:      true,
				ConflictsWith: []string{"source_uri"},
			},
			"source_uri": {
				Type:          schema.TypeString,
				Optional:      true,
				ForceNew:      true,
				ConflictsWith: []string{"source"},
			},
			"url": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"parallelism": {
				Type:         schema.TypeInt,
				Optional:     true,
				Default:      8,
				ForceNew:     true,
				ValidateFunc: validateArmStorageBlobParallelism,
			},
			"attempts": {
				Type:         schema.TypeInt,
				Optional:     true,
				Default:      1,
				ForceNew:     true,
				ValidateFunc: validateArmStorageBlobAttempts,
			},
		},
	}
}

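// validateArmStorageBlobParallelism ensures the configured parallelism is a
// positive integer.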
func validateArmStorageBlobParallelism(v interface{}, k string) (ws []string, errors []error) {
	value := v.(int)

	if value <= 0 {
		errors = append(errors, fmt.Errorf("Blob Parallelism %d is invalid, must be greater than 0", value))
	}

	return
}

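// validateArmStorageBlobAttempts ensures the configured number of upload
// attempts is a positive integer.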
func validateArmStorageBlobAttempts(v interface{}, k string) (ws []string, errors []error) {
	value := v.(int)

	if value <= 0 {
		errors = append(errors, fmt.Errorf("Blob Attempts %d is invalid, must be greater than 0", value))
	}

	return
}

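// validateArmStorageBlobSize ensures the configured page blob size is a
// multiple of 512 bytes.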
func validateArmStorageBlobSize(v interface{}, k string) (ws []string, errors []error) {
	value := v.(int)

	if value%512 != 0 {
		errors = append(errors, fmt.Errorf("Blob Size %d is invalid, must be a multiple of 512", value))
	}

	return
}

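// validateArmStorageBlobType ensures the blob type is either "block" or
// "page" (case-insensitive).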
func validateArmStorageBlobType(v interface{}, k string) (ws []string, errors []error) {
	value := strings.ToLower(v.(string))
	validTypes := map[string]struct{}{
		"block": {},
		"page":  {},
	}

	if _, ok := validTypes[value]; !ok {
		errors = append(errors, fmt.Errorf("Blob type %q is invalid, must be %q or %q", value, "block", "page"))
	}
	return
}

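// resourceArmStorageBlobCreate creates the blob: it either starts a
// server-side copy from source_uri, or creates a block/page blob and, when a
// local source file is given, uploads its contents in parallel.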
func resourceArmStorageBlobCreate(d *schema.ResourceData, meta interface{}) error {
	armClient := meta.(*ArmClient)

	resourceGroupName := d.Get("resource_group_name").(string)
	storageAccountName := d.Get("storage_account_name").(string)

	blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName)
	if err != nil {
		return err
	}
	if !accountExists {
		return fmt.Errorf("Storage Account %q Not Found", storageAccountName)
	}

	name := d.Get("name").(string)
	blobType := d.Get("type").(string)
	cont := d.Get("storage_container_name").(string)
	sourceUri := d.Get("source_uri").(string)

	log.Printf("[INFO] Creating blob %q in storage account %q", name, storageAccountName)
	if sourceUri != "" {
		options := &storage.CopyOptions{}
		container := blobClient.GetContainerReference(cont)
		blob := container.GetBlobReference(name)
		err := blob.Copy(sourceUri, options)
		if err != nil {
			return fmt.Errorf("Error creating storage blob on Azure: %s", err)
		}
	} else {
		switch strings.ToLower(blobType) {
		case "block":
			options := &storage.PutBlobOptions{}
			container := blobClient.GetContainerReference(cont)
			blob := container.GetBlobReference(name)
			err := blob.CreateBlockBlob(options)
			if err != nil {
				return fmt.Errorf("Error creating storage blob on Azure: %s", err)
			}

			source := d.Get("source").(string)
			if source != "" {
				parallelism := d.Get("parallelism").(int)
				attempts := d.Get("attempts").(int)
				if err := resourceArmStorageBlobBlockUploadFromSource(cont, name, source, blobClient, parallelism, attempts); err != nil {
					return fmt.Errorf("Error creating storage blob on Azure: %s", err)
				}
			}
		case "page":
			source := d.Get("source").(string)
			if source != "" {
				parallelism := d.Get("parallelism").(int)
				attempts := d.Get("attempts").(int)
				if err := resourceArmStorageBlobPageUploadFromSource(cont, name, source, blobClient, parallelism, attempts); err != nil {
					return fmt.Errorf("Error creating storage blob on Azure: %s", err)
				}
			} else {
				size := int64(d.Get("size").(int))
				options := &storage.PutBlobOptions{}

				container := blobClient.GetContainerReference(cont)
				blob := container.GetBlobReference(name)
				blob.Properties.ContentLength = size
				err := blob.PutPageBlob(options)
				if err != nil {
					return fmt.Errorf("Error creating storage blob on Azure: %s", err)
				}
			}
		}
	}

	d.SetId(name)
	return resourceArmStorageBlobRead(d, meta)
}

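// resourceArmStorageBlobPage describes one chunk of a page blob upload: its
// byte offset within the blob and a reader over that section of the source
// file.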
type resourceArmStorageBlobPage struct {
	offset  int64
	section *io.SectionReader
}

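// resourceArmStorageBlobPageUploadFromSource creates an empty page blob large
// enough to hold the source file and uploads its non-empty ranges using a
// pool of worker goroutines.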
func resourceArmStorageBlobPageUploadFromSource(container, name, source string, client *storage.BlobStorageClient, parallelism, attempts int) error {
	workerCount := parallelism * runtime.NumCPU()

	file, err := os.Open(source)
	if err != nil {
		return fmt.Errorf("Error opening source file for upload %q: %s", source, err)
	}
	defer file.Close()

	blobSize, pageList, err := resourceArmStorageBlobPageSplit(file)
	if err != nil {
		return fmt.Errorf("Error splitting source file %q into pages: %s", source, err)
	}

	options := &storage.PutBlobOptions{}
	containerRef := client.GetContainerReference(container)
	blob := containerRef.GetBlobReference(name)
	blob.Properties.ContentLength = blobSize
	err = blob.PutPageBlob(options)
	if err != nil {
		return fmt.Errorf("Error creating storage blob on Azure: %s", err)
	}

	pages := make(chan resourceArmStorageBlobPage, len(pageList))
	errors := make(chan error, len(pageList))
	wg := &sync.WaitGroup{}
	wg.Add(len(pageList))

	total := int64(0)
	for _, page := range pageList {
		total += page.section.Size()
		pages <- page
	}
	close(pages)

	for i := 0; i < workerCount; i++ {
		go resourceArmStorageBlobPageUploadWorker(resourceArmStorageBlobPageUploadContext{
			container: container,
			name:      name,
			source:    source,
			blobSize:  blobSize,
			client:    client,
			pages:     pages,
			errors:    errors,
			wg:        wg,
			attempts:  attempts,
		})
	}

	wg.Wait()

	if len(errors) > 0 {
		return fmt.Errorf("Error while uploading source file %q: %s", source, <-errors)
	}

	return nil
}

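// resourceArmStorageBlobPageSplit scans the source file in 4KB pages and
// returns the total blob size together with the non-empty ranges (coalesced
// up to 4MB each) that actually need to be uploaded.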
func resourceArmStorageBlobPageSplit(file *os.File) (int64, []resourceArmStorageBlobPage, error) {
	const (
		minPageSize int64 = 4 * 1024
		maxPageSize int64 = 4 * 1024 * 1024
	)

	info, err := file.Stat()
	if err != nil {
		return int64(0), nil, fmt.Errorf("Could not stat file %q: %s", file.Name(), err)
	}

	blobSize := info.Size()
	if info.Size()%minPageSize != 0 {
		blobSize = info.Size() + (minPageSize - (info.Size() % minPageSize))
	}

	emptyPage := make([]byte, minPageSize)

	type byteRange struct {
		offset int64
		length int64
	}

	var nonEmptyRanges []byteRange
	var currentRange byteRange
	for i := int64(0); i < blobSize; i += minPageSize {
		pageBuf := make([]byte, minPageSize)
		_, err = file.ReadAt(pageBuf, i)
		if err != nil && err != io.EOF {
			return int64(0), nil, fmt.Errorf("Could not read chunk at %d: %s", i, err)
		}

		if bytes.Equal(pageBuf, emptyPage) {
			if currentRange.length != 0 {
				nonEmptyRanges = append(nonEmptyRanges, currentRange)
			}
			currentRange = byteRange{
				offset: i + minPageSize,
			}
		} else {
			currentRange.length += minPageSize
			if currentRange.length == maxPageSize || (currentRange.offset+currentRange.length == blobSize) {
				nonEmptyRanges = append(nonEmptyRanges, currentRange)
				currentRange = byteRange{
					offset: i + minPageSize,
				}
			}
		}
	}

	var pages []resourceArmStorageBlobPage
	for _, nonEmptyRange := range nonEmptyRanges {
		pages = append(pages, resourceArmStorageBlobPage{
			offset:  nonEmptyRange.offset,
			section: io.NewSectionReader(file, nonEmptyRange.offset, nonEmptyRange.length),
		})
	}

	// Return the rounded-up blob size rather than the raw file size: Azure
	// requires page blob sizes and page ranges to be 512-byte aligned, and
	// blobSize is already padded to a multiple of the 4KB page size.
	return blobSize, pages, nil
}

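// resourceArmStorageBlobPageUploadContext carries the shared state handed to
// each page upload worker goroutine.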
type resourceArmStorageBlobPageUploadContext struct {
	container string
	name      string
	source    string
	blobSize  int64
	client    *storage.BlobStorageClient
	pages     chan resourceArmStorageBlobPage
	errors    chan error
	wg        *sync.WaitGroup
	attempts  int
}

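// resourceArmStorageBlobPageUploadWorker drains the pages channel, writing
// each range to the page blob and retrying up to the configured number of
// attempts before reporting an error.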
func resourceArmStorageBlobPageUploadWorker(ctx resourceArmStorageBlobPageUploadContext) {
	for page := range ctx.pages {
		start := page.offset
		end := page.offset + page.section.Size() - 1
		if end > ctx.blobSize-1 {
			end = ctx.blobSize - 1
		}
		size := end - start + 1

		chunk := make([]byte, size)
		_, err := page.section.Read(chunk)
		if err != nil && err != io.EOF {
			ctx.errors <- fmt.Errorf("Error reading source file %q at offset %d: %s", ctx.source, page.offset, err)
			ctx.wg.Done()
			continue
		}

		for x := 0; x < ctx.attempts; x++ {
			container := ctx.client.GetContainerReference(ctx.container)
			blob := container.GetBlobReference(ctx.name)
			blobRange := storage.BlobRange{
				Start: uint64(start),
				End:   uint64(end),
			}
			options := &storage.PutPageOptions{}
			reader := bytes.NewReader(chunk)
			err = blob.WriteRange(blobRange, reader, options)
			if err == nil {
				break
			}
		}
		if err != nil {
			ctx.errors <- fmt.Errorf("Error writing page at offset %d for file %q: %s", page.offset, ctx.source, err)
			ctx.wg.Done()
			continue
		}

		ctx.wg.Done()
	}
}

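// resourceArmStorageBlobBlock pairs a generated block ID with a reader over
// the corresponding section of the source file.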
type resourceArmStorageBlobBlock struct {
	section *io.SectionReader
	id      string
}

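// resourceArmStorageBlobBlockUploadFromSource splits the source file into
// blocks, uploads them concurrently as uncommitted blocks, and finally
// commits the block list to assemble the block blob.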
func resourceArmStorageBlobBlockUploadFromSource(container, name, source string, client *storage.BlobStorageClient, parallelism, attempts int) error {
	workerCount := parallelism * runtime.NumCPU()

	file, err := os.Open(source)
	if err != nil {
		return fmt.Errorf("Error opening source file for upload %q: %s", source, err)
	}
	defer file.Close()

	blockList, parts, err := resourceArmStorageBlobBlockSplit(file)
	if err != nil {
		return fmt.Errorf("Error reading and splitting source file for upload %q: %s", source, err)
	}

	wg := &sync.WaitGroup{}
	blocks := make(chan resourceArmStorageBlobBlock, len(parts))
	errors := make(chan error, len(parts))

	wg.Add(len(parts))
	for _, p := range parts {
		blocks <- p
	}
	close(blocks)

	for i := 0; i < workerCount; i++ {
		go resourceArmStorageBlobBlockUploadWorker(resourceArmStorageBlobBlockUploadContext{
			client:    client,
			source:    source,
			container: container,
			name:      name,
			blocks:    blocks,
			errors:    errors,
			wg:        wg,
			attempts:  attempts,
		})
	}

	wg.Wait()

	if len(errors) > 0 {
		return fmt.Errorf("Error while uploading source file %q: %s", source, <-errors)
	}

	containerReference := client.GetContainerReference(container)
	blobReference := containerReference.GetBlobReference(name)
	options := &storage.PutBlockListOptions{}
	err = blobReference.PutBlockList(blockList, options)
	if err != nil {
		return fmt.Errorf("Error updating block list for source file %q: %s", source, err)
	}

	return nil
}

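// resourceArmStorageBlobBlockSplit cuts the source file into 4MB blocks,
// assigning each a random base64-encoded block ID, and returns both the block
// list to commit and the file sections to upload.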
func resourceArmStorageBlobBlockSplit(file *os.File) ([]storage.Block, []resourceArmStorageBlobBlock, error) {
	const (
		idSize          = 64
		blockSize int64 = 4 * 1024 * 1024
	)
	var parts []resourceArmStorageBlobBlock
	var blockList []storage.Block

	info, err := file.Stat()
	if err != nil {
		return nil, nil, fmt.Errorf("Could not stat source file %q: %s", file.Name(), err)
	}

	for i := int64(0); i < info.Size(); i = i + blockSize {
		entropy := make([]byte, idSize)
		_, err = rand.Read(entropy)
		if err != nil {
			return nil, nil, fmt.Errorf("Error generating a random block ID for source file %q: %s", file.Name(), err)
		}

		sectionSize := blockSize
		remainder := info.Size() - i
		if remainder < blockSize {
			sectionSize = remainder
		}

		block := storage.Block{
			ID:     base64.StdEncoding.EncodeToString(entropy),
			Status: storage.BlockStatusUncommitted,
		}

		blockList = append(blockList, block)

		parts = append(parts, resourceArmStorageBlobBlock{
			id:      block.ID,
			section: io.NewSectionReader(file, i, sectionSize),
		})
	}

	return blockList, parts, nil
}

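// resourceArmStorageBlobBlockUploadContext carries the shared state handed to
// each block upload worker goroutine.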
type resourceArmStorageBlobBlockUploadContext struct {
	client    *storage.BlobStorageClient
	container string
	name      string
	source    string
	attempts  int
	blocks    chan resourceArmStorageBlobBlock
	errors    chan error
	wg        *sync.WaitGroup
}

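// resourceArmStorageBlobBlockUploadWorker drains the blocks channel, uploading
// each block and retrying up to the configured number of attempts before
// reporting an error.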
func resourceArmStorageBlobBlockUploadWorker(ctx resourceArmStorageBlobBlockUploadContext) {
	for block := range ctx.blocks {
		buffer := make([]byte, block.section.Size())

		_, err := block.section.Read(buffer)
		if err != nil {
			ctx.errors <- fmt.Errorf("Error reading source file %q: %s", ctx.source, err)
			ctx.wg.Done()
			continue
		}

		for i := 0; i < ctx.attempts; i++ {
			container := ctx.client.GetContainerReference(ctx.container)
			blob := container.GetBlobReference(ctx.name)
			options := &storage.PutBlockOptions{}
			err = blob.PutBlock(block.id, buffer, options)
			if err == nil {
				break
			}
		}
		if err != nil {
			ctx.errors <- fmt.Errorf("Error uploading block %q for source file %q: %s", block.id, ctx.source, err)
			ctx.wg.Done()
			continue
		}

		ctx.wg.Done()
	}
}

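// resourceArmStorageBlobRead refreshes the blob's state, removing it from
// state if the storage account or the blob itself no longer exists, and
// stores the blob URL in the computed "url" attribute.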
func resourceArmStorageBlobRead(d *schema.ResourceData, meta interface{}) error {
	armClient := meta.(*ArmClient)

	resourceGroupName := d.Get("resource_group_name").(string)
	storageAccountName := d.Get("storage_account_name").(string)

	blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName)
	if err != nil {
		return err
	}
	if !accountExists {
		log.Printf("[DEBUG] Storage account %q not found, removing blob %q from state", storageAccountName, d.Id())
		d.SetId("")
		return nil
	}

	exists, err := resourceArmStorageBlobExists(d, meta)
	if err != nil {
		return err
	}

	if !exists {
		// Exists already removed this from state
		return nil
	}

	name := d.Get("name").(string)
	storageContainerName := d.Get("storage_container_name").(string)

	container := blobClient.GetContainerReference(storageContainerName)
	blob := container.GetBlobReference(name)
	url := blob.GetURL()
	if url == "" {
		log.Printf("[INFO] URL for %q is empty", name)
	}
	d.Set("url", url)

	return nil
}

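// resourceArmStorageBlobExists reports whether the blob still exists, clearing
// the resource ID when it (or its storage account) has gone away.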
func resourceArmStorageBlobExists(d *schema.ResourceData, meta interface{}) (bool, error) {
	armClient := meta.(*ArmClient)

	resourceGroupName := d.Get("resource_group_name").(string)
	storageAccountName := d.Get("storage_account_name").(string)

	blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName)
	if err != nil {
		return false, err
	}
	if !accountExists {
		log.Printf("[DEBUG] Storage account %q not found, removing blob %q from state", storageAccountName, d.Id())
		d.SetId("")
		return false, nil
	}

	name := d.Get("name").(string)
	storageContainerName := d.Get("storage_container_name").(string)

	log.Printf("[INFO] Checking for existence of storage blob %q.", name)
	container := blobClient.GetContainerReference(storageContainerName)
	blob := container.GetBlobReference(name)
	exists, err := blob.Exists()
	if err != nil {
		return false, fmt.Errorf("error testing existence of storage blob %q: %s", name, err)
	}

	if !exists {
		log.Printf("[INFO] Storage blob %q no longer exists, removing from state...", name)
		d.SetId("")
	}

	return exists, nil
}

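// resourceArmStorageBlobDelete deletes the blob if it exists; a missing
// storage account is treated as the blob already being gone.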
func resourceArmStorageBlobDelete(d *schema.ResourceData, meta interface{}) error {
	armClient := meta.(*ArmClient)

	resourceGroupName := d.Get("resource_group_name").(string)
	storageAccountName := d.Get("storage_account_name").(string)

	blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName)
	if err != nil {
		return err
	}
	if !accountExists {
		log.Printf("[INFO] Storage Account %q doesn't exist, so the blob won't exist", storageAccountName)
		return nil
	}

	name := d.Get("name").(string)
	storageContainerName := d.Get("storage_container_name").(string)

	log.Printf("[INFO] Deleting storage blob %q", name)
	options := &storage.DeleteBlobOptions{}
	container := blobClient.GetContainerReference(storageContainerName)
	blob := container.GetBlobReference(name)
	_, err = blob.DeleteIfExists(options)
	if err != nil {
		return fmt.Errorf("Error deleting storage blob %q: %s", name, err)
	}

	d.SetId("")
	return nil
}