github.com/kwoods/terraform@v0.6.11-0.20160809170336-13497db7138e/builtin/providers/azurerm/resource_arm_storage_blob.go (about)

     1  package azurerm
     2  
     3  import (
     4  	"bytes"
     5  	"crypto/rand"
     6  	"encoding/base64"
     7  	"fmt"
     8  	"io"
     9  	"log"
    10  	"os"
    11  	"runtime"
    12  	"strings"
    13  	"sync"
    14  
    15  	"github.com/Azure/azure-sdk-for-go/storage"
    16  	"github.com/hashicorp/terraform/helper/schema"
    17  )
    18  
// resourceArmStorageBlob defines the azurerm_storage_blob resource: its schema
// and the Create/Read/Exists/Delete entry points. Every argument is ForceNew,
// so changing any of them destroys and recreates the blob.
func resourceArmStorageBlob() *schema.Resource {
	return &schema.Resource{
		Create: resourceArmStorageBlobCreate,
		Read:   resourceArmStorageBlobRead,
		Exists: resourceArmStorageBlobExists,
		Delete: resourceArmStorageBlobDelete,

		Schema: map[string]*schema.Schema{
			// Name of the blob within its container.
			"name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			// Resource group holding the storage account.
			"resource_group_name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			// Storage account the blob lives in.
			"storage_account_name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			// Container within the storage account.
			"storage_container_name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			// Blob kind: "block" or "page" (validated case-insensitively).
			"type": {
				Type:         schema.TypeString,
				Required:     true,
				ForceNew:     true,
				ValidateFunc: validateArmStorageBlobType,
			},
			// Size in bytes for an empty page blob; must be a multiple of 512.
			// Ignored when "source" is set (the file determines the size).
			"size": {
				Type:         schema.TypeInt,
				Optional:     true,
				ForceNew:     true,
				Default:      0,
				ValidateFunc: validateArmStorageBlobSize,
			},
			// Optional local file whose contents are uploaded into the blob.
			"source": {
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
			// Computed URL of the blob, populated on read.
			"url": {
				Type:     schema.TypeString,
				Computed: true,
			},
			// Upload worker multiplier; effective worker count is
			// parallelism * NumCPU (see the upload helpers).
			"parallelism": {
				Type:         schema.TypeInt,
				Optional:     true,
				Default:      8,
				ForceNew:     true,
				ValidateFunc: validateArmStorageBlobParallelism,
			},
			// Per-chunk upload retry budget.
			"attempts": {
				Type:         schema.TypeInt,
				Optional:     true,
				Default:      1,
				ForceNew:     true,
				ValidateFunc: validateArmStorageBlobAttempts,
			},
		},
	}
}
    86  
    87  func validateArmStorageBlobParallelism(v interface{}, k string) (ws []string, errors []error) {
    88  	value := v.(int)
    89  
    90  	if value <= 0 {
    91  		errors = append(errors, fmt.Errorf("Blob Parallelism %q is invalid, must be greater than 0", value))
    92  	}
    93  
    94  	return
    95  }
    96  
    97  func validateArmStorageBlobAttempts(v interface{}, k string) (ws []string, errors []error) {
    98  	value := v.(int)
    99  
   100  	if value <= 0 {
   101  		errors = append(errors, fmt.Errorf("Blob Attempts %q is invalid, must be greater than 0", value))
   102  	}
   103  
   104  	return
   105  }
   106  
   107  func validateArmStorageBlobSize(v interface{}, k string) (ws []string, errors []error) {
   108  	value := v.(int)
   109  
   110  	if value%512 != 0 {
   111  		errors = append(errors, fmt.Errorf("Blob Size %q is invalid, must be a multiple of 512", value))
   112  	}
   113  
   114  	return
   115  }
   116  
   117  func validateArmStorageBlobType(v interface{}, k string) (ws []string, errors []error) {
   118  	value := strings.ToLower(v.(string))
   119  	validTypes := map[string]struct{}{
   120  		"block": struct{}{},
   121  		"page":  struct{}{},
   122  	}
   123  
   124  	if _, ok := validTypes[value]; !ok {
   125  		errors = append(errors, fmt.Errorf("Blob type %q is invalid, must be %q or %q", value, "block", "page"))
   126  	}
   127  	return
   128  }
   129  
   130  func resourceArmStorageBlobCreate(d *schema.ResourceData, meta interface{}) error {
   131  	armClient := meta.(*ArmClient)
   132  
   133  	resourceGroupName := d.Get("resource_group_name").(string)
   134  	storageAccountName := d.Get("storage_account_name").(string)
   135  
   136  	blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName)
   137  	if err != nil {
   138  		return err
   139  	}
   140  	if !accountExists {
   141  		return fmt.Errorf("Storage Account %q Not Found", storageAccountName)
   142  	}
   143  
   144  	name := d.Get("name").(string)
   145  	blobType := d.Get("type").(string)
   146  	cont := d.Get("storage_container_name").(string)
   147  
   148  	log.Printf("[INFO] Creating blob %q in storage account %q", name, storageAccountName)
   149  	switch strings.ToLower(blobType) {
   150  	case "block":
   151  		if err := blobClient.CreateBlockBlob(cont, name); err != nil {
   152  			return fmt.Errorf("Error creating storage blob on Azure: %s", err)
   153  		}
   154  
   155  		source := d.Get("source").(string)
   156  		if source != "" {
   157  			parallelism := d.Get("parallelism").(int)
   158  			attempts := d.Get("attempts").(int)
   159  			if err := resourceArmStorageBlobBlockUploadFromSource(cont, name, source, blobClient, parallelism, attempts); err != nil {
   160  				return fmt.Errorf("Error creating storage blob on Azure: %s", err)
   161  			}
   162  		}
   163  	case "page":
   164  		source := d.Get("source").(string)
   165  		if source != "" {
   166  			parallelism := d.Get("parallelism").(int)
   167  			attempts := d.Get("attempts").(int)
   168  			if err := resourceArmStorageBlobPageUploadFromSource(cont, name, source, blobClient, parallelism, attempts); err != nil {
   169  				return fmt.Errorf("Error creating storage blob on Azure: %s", err)
   170  			}
   171  		} else {
   172  			size := int64(d.Get("size").(int))
   173  			if err := blobClient.PutPageBlob(cont, name, size, map[string]string{}); err != nil {
   174  				return fmt.Errorf("Error creating storage blob on Azure: %s", err)
   175  			}
   176  		}
   177  	}
   178  
   179  	d.SetId(name)
   180  	return resourceArmStorageBlobRead(d, meta)
   181  }
   182  
// resourceArmStorageBlobPage is one unit of page-blob upload work: a bounded
// reader over a contiguous non-empty section of the source file, plus the byte
// offset at which that section must be written into the blob.
type resourceArmStorageBlobPage struct {
	offset  int64             // byte offset of this section within the blob
	section *io.SectionReader // reader over the matching bytes of the source file
}
   187  
   188  func resourceArmStorageBlobPageUploadFromSource(container, name, source string, client *storage.BlobStorageClient, parallelism, attempts int) error {
   189  	workerCount := parallelism * runtime.NumCPU()
   190  
   191  	file, err := os.Open(source)
   192  	if err != nil {
   193  		return fmt.Errorf("Error opening source file for upload %q: %s", source, err)
   194  	}
   195  	defer file.Close()
   196  
   197  	blobSize, pageList, err := resourceArmStorageBlobPageSplit(file)
   198  	if err != nil {
   199  		return fmt.Errorf("Error splitting source file %q into pages: %s", source, err)
   200  	}
   201  
   202  	if err := client.PutPageBlob(container, name, blobSize, map[string]string{}); err != nil {
   203  		return fmt.Errorf("Error creating storage blob on Azure: %s", err)
   204  	}
   205  
   206  	pages := make(chan resourceArmStorageBlobPage, len(pageList))
   207  	errors := make(chan error, len(pageList))
   208  	wg := &sync.WaitGroup{}
   209  	wg.Add(len(pageList))
   210  
   211  	total := int64(0)
   212  	for _, page := range pageList {
   213  		total += page.section.Size()
   214  		pages <- page
   215  	}
   216  	close(pages)
   217  
   218  	for i := 0; i < workerCount; i++ {
   219  		go resourceArmStorageBlobPageUploadWorker(resourceArmStorageBlobPageUploadContext{
   220  			container: container,
   221  			name:      name,
   222  			source:    source,
   223  			blobSize:  blobSize,
   224  			client:    client,
   225  			pages:     pages,
   226  			errors:    errors,
   227  			wg:        wg,
   228  			attempts:  attempts,
   229  		})
   230  	}
   231  
   232  	wg.Wait()
   233  
   234  	if len(errors) > 0 {
   235  		return fmt.Errorf("Error while uploading source file %q: %s", source, <-errors)
   236  	}
   237  
   238  	return nil
   239  }
   240  
   241  func resourceArmStorageBlobPageSplit(file *os.File) (int64, []resourceArmStorageBlobPage, error) {
   242  	const (
   243  		minPageSize int64 = 4 * 1024
   244  		maxPageSize int64 = 4 * 1024 * 1024
   245  	)
   246  
   247  	info, err := file.Stat()
   248  	if err != nil {
   249  		return int64(0), nil, fmt.Errorf("Could not stat file %q: %s", file.Name(), err)
   250  	}
   251  
   252  	blobSize := info.Size()
   253  	if info.Size()%minPageSize != 0 {
   254  		blobSize = info.Size() + (minPageSize - (info.Size() % minPageSize))
   255  	}
   256  
   257  	emptyPage := make([]byte, minPageSize)
   258  
   259  	type byteRange struct {
   260  		offset int64
   261  		length int64
   262  	}
   263  
   264  	var nonEmptyRanges []byteRange
   265  	var currentRange byteRange
   266  	for i := int64(0); i < blobSize; i += minPageSize {
   267  		pageBuf := make([]byte, minPageSize)
   268  		_, err = file.ReadAt(pageBuf, i)
   269  		if err != nil && err != io.EOF {
   270  			return int64(0), nil, fmt.Errorf("Could not read chunk at %d: %s", i, err)
   271  		}
   272  
   273  		if bytes.Equal(pageBuf, emptyPage) {
   274  			if currentRange.length != 0 {
   275  				nonEmptyRanges = append(nonEmptyRanges, currentRange)
   276  			}
   277  			currentRange = byteRange{
   278  				offset: i + minPageSize,
   279  			}
   280  		} else {
   281  			currentRange.length += minPageSize
   282  			if currentRange.length == maxPageSize || (currentRange.offset+currentRange.length == blobSize) {
   283  				nonEmptyRanges = append(nonEmptyRanges, currentRange)
   284  				currentRange = byteRange{
   285  					offset: i + minPageSize,
   286  				}
   287  			}
   288  		}
   289  	}
   290  
   291  	var pages []resourceArmStorageBlobPage
   292  	for _, nonEmptyRange := range nonEmptyRanges {
   293  		pages = append(pages, resourceArmStorageBlobPage{
   294  			offset:  nonEmptyRange.offset,
   295  			section: io.NewSectionReader(file, nonEmptyRange.offset, nonEmptyRange.length),
   296  		})
   297  	}
   298  
   299  	return info.Size(), pages, nil
   300  }
   301  
// resourceArmStorageBlobPageUploadContext carries the shared state for the
// page-upload worker goroutines spawned by
// resourceArmStorageBlobPageUploadFromSource.
type resourceArmStorageBlobPageUploadContext struct {
	container string                          // target storage container name
	name      string                          // target blob name
	source    string                          // local source file path (used in error messages)
	blobSize  int64                           // total size of the page blob in bytes
	client    *storage.BlobStorageClient      // shared Azure blob client
	pages     chan resourceArmStorageBlobPage // work queue; closed by the producer
	errors    chan error                      // buffered sink for per-page failures
	wg        *sync.WaitGroup                 // decremented once per processed page
	attempts  int                             // per-page upload retry budget
}
   313  
   314  func resourceArmStorageBlobPageUploadWorker(ctx resourceArmStorageBlobPageUploadContext) {
   315  	for page := range ctx.pages {
   316  		start := page.offset
   317  		end := page.offset + page.section.Size() - 1
   318  		if end > ctx.blobSize-1 {
   319  			end = ctx.blobSize - 1
   320  		}
   321  		size := end - start + 1
   322  
   323  		chunk := make([]byte, size)
   324  		_, err := page.section.Read(chunk)
   325  		if err != nil && err != io.EOF {
   326  			ctx.errors <- fmt.Errorf("Error reading source file %q at offset %d: %s", ctx.source, page.offset, err)
   327  			ctx.wg.Done()
   328  			continue
   329  		}
   330  
   331  		for x := 0; x < ctx.attempts; x++ {
   332  			err = ctx.client.PutPage(ctx.container, ctx.name, start, end, storage.PageWriteTypeUpdate, chunk, map[string]string{})
   333  			if err == nil {
   334  				break
   335  			}
   336  		}
   337  		if err != nil {
   338  			ctx.errors <- fmt.Errorf("Error writing page at offset %d for file %q: %s", page.offset, ctx.source, err)
   339  			ctx.wg.Done()
   340  			continue
   341  		}
   342  
   343  		ctx.wg.Done()
   344  	}
   345  }
   346  
// resourceArmStorageBlobBlock is one unit of block-blob upload work: a bounded
// reader over a single block of the source file together with its
// base64-encoded block ID.
type resourceArmStorageBlobBlock struct {
	section *io.SectionReader // reader over this block's bytes in the source file
	id      string            // base64-encoded random block ID
}
   351  
   352  func resourceArmStorageBlobBlockUploadFromSource(container, name, source string, client *storage.BlobStorageClient, parallelism, attempts int) error {
   353  	workerCount := parallelism * runtime.NumCPU()
   354  
   355  	file, err := os.Open(source)
   356  	if err != nil {
   357  		return fmt.Errorf("Error opening source file for upload %q: %s", source, err)
   358  	}
   359  	defer file.Close()
   360  
   361  	blockList, parts, err := resourceArmStorageBlobBlockSplit(file)
   362  	if err != nil {
   363  		return fmt.Errorf("Error reading and splitting source file for upload %q: %s", source, err)
   364  	}
   365  
   366  	wg := &sync.WaitGroup{}
   367  	blocks := make(chan resourceArmStorageBlobBlock, len(parts))
   368  	errors := make(chan error, len(parts))
   369  
   370  	wg.Add(len(parts))
   371  	for _, p := range parts {
   372  		blocks <- p
   373  	}
   374  	close(blocks)
   375  
   376  	for i := 0; i < workerCount; i++ {
   377  		go resourceArmStorageBlobBlockUploadWorker(resourceArmStorageBlobBlockUploadContext{
   378  			client:    client,
   379  			source:    source,
   380  			container: container,
   381  			name:      name,
   382  			blocks:    blocks,
   383  			errors:    errors,
   384  			wg:        wg,
   385  			attempts:  attempts,
   386  		})
   387  	}
   388  
   389  	wg.Wait()
   390  
   391  	if len(errors) > 0 {
   392  		return fmt.Errorf("Error while uploading source file %q: %s", source, <-errors)
   393  	}
   394  
   395  	err = client.PutBlockList(container, name, blockList)
   396  	if err != nil {
   397  		return fmt.Errorf("Error updating block list for source file %q: %s", source, err)
   398  	}
   399  
   400  	return nil
   401  }
   402  
   403  func resourceArmStorageBlobBlockSplit(file *os.File) ([]storage.Block, []resourceArmStorageBlobBlock, error) {
   404  	const (
   405  		idSize          = 64
   406  		blockSize int64 = 4 * 1024 * 1024
   407  	)
   408  	var parts []resourceArmStorageBlobBlock
   409  	var blockList []storage.Block
   410  
   411  	info, err := file.Stat()
   412  	if err != nil {
   413  		return nil, nil, fmt.Errorf("Error stating source file %q: %s", file.Name(), err)
   414  	}
   415  
   416  	for i := int64(0); i < info.Size(); i = i + blockSize {
   417  		entropy := make([]byte, idSize)
   418  		_, err = rand.Read(entropy)
   419  		if err != nil {
   420  			return nil, nil, fmt.Errorf("Error generating a random block ID for source file %q: %s", file.Name(), err)
   421  		}
   422  
   423  		sectionSize := blockSize
   424  		remainder := info.Size() - i
   425  		if remainder < blockSize {
   426  			sectionSize = remainder
   427  		}
   428  
   429  		block := storage.Block{
   430  			ID:     base64.StdEncoding.EncodeToString(entropy),
   431  			Status: storage.BlockStatusUncommitted,
   432  		}
   433  
   434  		blockList = append(blockList, block)
   435  
   436  		parts = append(parts, resourceArmStorageBlobBlock{
   437  			id:      block.ID,
   438  			section: io.NewSectionReader(file, i, sectionSize),
   439  		})
   440  	}
   441  
   442  	return blockList, parts, nil
   443  }
   444  
// resourceArmStorageBlobBlockUploadContext carries the shared state for the
// block-upload worker goroutines spawned by
// resourceArmStorageBlobBlockUploadFromSource.
type resourceArmStorageBlobBlockUploadContext struct {
	client    *storage.BlobStorageClient       // shared Azure blob client
	container string                           // target storage container name
	name      string                           // target blob name
	source    string                           // local source file path (used in error messages)
	attempts  int                              // per-block upload retry budget
	blocks    chan resourceArmStorageBlobBlock // work queue; closed by the producer
	errors    chan error                       // buffered sink for per-block failures
	wg        *sync.WaitGroup                  // decremented once per processed block
}
   455  
   456  func resourceArmStorageBlobBlockUploadWorker(ctx resourceArmStorageBlobBlockUploadContext) {
   457  	for block := range ctx.blocks {
   458  		buffer := make([]byte, block.section.Size())
   459  
   460  		_, err := block.section.Read(buffer)
   461  		if err != nil {
   462  			ctx.errors <- fmt.Errorf("Error reading source file %q: %s", ctx.source, err)
   463  			ctx.wg.Done()
   464  			continue
   465  		}
   466  
   467  		for i := 0; i < ctx.attempts; i++ {
   468  			err = ctx.client.PutBlock(ctx.container, ctx.name, block.id, buffer)
   469  			if err == nil {
   470  				break
   471  			}
   472  		}
   473  		if err != nil {
   474  			ctx.errors <- fmt.Errorf("Error uploading block %q for source file %q: %s", block.id, ctx.source, err)
   475  			ctx.wg.Done()
   476  			continue
   477  		}
   478  
   479  		ctx.wg.Done()
   480  	}
   481  }
   482  
   483  func resourceArmStorageBlobRead(d *schema.ResourceData, meta interface{}) error {
   484  	armClient := meta.(*ArmClient)
   485  
   486  	resourceGroupName := d.Get("resource_group_name").(string)
   487  	storageAccountName := d.Get("storage_account_name").(string)
   488  
   489  	blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName)
   490  	if err != nil {
   491  		return err
   492  	}
   493  	if !accountExists {
   494  		log.Printf("[DEBUG] Storage account %q not found, removing blob %q from state", storageAccountName, d.Id())
   495  		d.SetId("")
   496  		return nil
   497  	}
   498  
   499  	exists, err := resourceArmStorageBlobExists(d, meta)
   500  	if err != nil {
   501  		return err
   502  	}
   503  
   504  	if !exists {
   505  		// Exists already removed this from state
   506  		return nil
   507  	}
   508  
   509  	name := d.Get("name").(string)
   510  	storageContainerName := d.Get("storage_container_name").(string)
   511  
   512  	url := blobClient.GetBlobURL(storageContainerName, name)
   513  	if url == "" {
   514  		log.Printf("[INFO] URL for %q is empty", name)
   515  	}
   516  	d.Set("url", url)
   517  
   518  	return nil
   519  }
   520  
   521  func resourceArmStorageBlobExists(d *schema.ResourceData, meta interface{}) (bool, error) {
   522  	armClient := meta.(*ArmClient)
   523  
   524  	resourceGroupName := d.Get("resource_group_name").(string)
   525  	storageAccountName := d.Get("storage_account_name").(string)
   526  
   527  	blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName)
   528  	if err != nil {
   529  		return false, err
   530  	}
   531  	if !accountExists {
   532  		log.Printf("[DEBUG] Storage account %q not found, removing blob %q from state", storageAccountName, d.Id())
   533  		d.SetId("")
   534  		return false, nil
   535  	}
   536  
   537  	name := d.Get("name").(string)
   538  	storageContainerName := d.Get("storage_container_name").(string)
   539  
   540  	log.Printf("[INFO] Checking for existence of storage blob %q.", name)
   541  	exists, err := blobClient.BlobExists(storageContainerName, name)
   542  	if err != nil {
   543  		return false, fmt.Errorf("error testing existence of storage blob %q: %s", name, err)
   544  	}
   545  
   546  	if !exists {
   547  		log.Printf("[INFO] Storage blob %q no longer exists, removing from state...", name)
   548  		d.SetId("")
   549  	}
   550  
   551  	return exists, nil
   552  }
   553  
   554  func resourceArmStorageBlobDelete(d *schema.ResourceData, meta interface{}) error {
   555  	armClient := meta.(*ArmClient)
   556  
   557  	resourceGroupName := d.Get("resource_group_name").(string)
   558  	storageAccountName := d.Get("storage_account_name").(string)
   559  
   560  	blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName)
   561  	if err != nil {
   562  		return err
   563  	}
   564  	if !accountExists {
   565  		log.Printf("[INFO]Storage Account %q doesn't exist so the blob won't exist", storageAccountName)
   566  		return nil
   567  	}
   568  
   569  	name := d.Get("name").(string)
   570  	storageContainerName := d.Get("storage_container_name").(string)
   571  
   572  	log.Printf("[INFO] Deleting storage blob %q", name)
   573  	if _, err = blobClient.DeleteBlobIfExists(storageContainerName, name, map[string]string{}); err != nil {
   574  		return fmt.Errorf("Error deleting storage blob %q: %s", name, err)
   575  	}
   576  
   577  	d.SetId("")
   578  	return nil
   579  }