github.com/mohanarpit/terraform@v0.6.16-0.20160909104007-291f29853544/builtin/providers/azurerm/resource_arm_storage_blob.go

package azurerm

import (
	"bytes"
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"io"
	"log"
	"os"
	"runtime"
	"strings"
	"sync"

	"github.com/Azure/azure-sdk-for-go/storage"
	"github.com/hashicorp/terraform/helper/schema"
)

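// resourceArmStorageBlob defines the azurerm_storage_blob resource: a block or
// page blob inside an existing storage container, optionally uploaded from a
// local file ("source") or copied from an existing blob ("source_uri").
//
// Illustrative configuration (values are examples; attribute names come from
// the schema below):
//
//	resource "azurerm_storage_blob" "example" {
//	  name                   = "app.vhd"
//	  resource_group_name    = "example-rg"
//	  storage_account_name   = "examplestorage"
//	  storage_container_name = "vhds"
//	  type                   = "page"
//	  source                 = "app.vhd"
//	}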
func resourceArmStorageBlob() *schema.Resource {
	return &schema.Resource{
		Create: resourceArmStorageBlobCreate,
		Read:   resourceArmStorageBlobRead,
		Exists: resourceArmStorageBlobExists,
		Delete: resourceArmStorageBlobDelete,

		Schema: map[string]*schema.Schema{
			"name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"resource_group_name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"storage_account_name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"storage_container_name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"type": {
				Type:         schema.TypeString,
				Optional:     true,
				ForceNew:     true,
				ValidateFunc: validateArmStorageBlobType,
			},
			"size": {
				Type:         schema.TypeInt,
				Optional:     true,
				ForceNew:     true,
				Default:      0,
				ValidateFunc: validateArmStorageBlobSize,
			},
			"source": {
				Type:          schema.TypeString,
				Optional:      true,
				ForceNew:      true,
				ConflictsWith: []string{"source_uri"},
			},
			"source_uri": {
				Type:          schema.TypeString,
				Optional:      true,
				ForceNew:      true,
				ConflictsWith: []string{"source"},
			},
			"url": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"parallelism": {
				Type:         schema.TypeInt,
				Optional:     true,
				Default:      8,
				ForceNew:     true,
				ValidateFunc: validateArmStorageBlobParallelism,
			},
			"attempts": {
				Type:         schema.TypeInt,
				Optional:     true,
				Default:      1,
				ForceNew:     true,
				ValidateFunc: validateArmStorageBlobAttempts,
			},
		},
	}
}

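// validateArmStorageBlobParallelism ensures the "parallelism" attribute is a
// positive integer.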
func validateArmStorageBlobParallelism(v interface{}, k string) (ws []string, errors []error) {
	value := v.(int)

	if value <= 0 {
		errors = append(errors, fmt.Errorf("Blob Parallelism %d is invalid, must be greater than 0", value))
	}

	return
}

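// validateArmStorageBlobAttempts ensures the "attempts" attribute is a
// positive integer.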
func validateArmStorageBlobAttempts(v interface{}, k string) (ws []string, errors []error) {
	value := v.(int)

	if value <= 0 {
		errors = append(errors, fmt.Errorf("Blob Attempts %d is invalid, must be greater than 0", value))
	}

	return
}

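// validateArmStorageBlobSize ensures the "size" attribute is a multiple of
// 512 bytes, as Azure requires for page blobs.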
func validateArmStorageBlobSize(v interface{}, k string) (ws []string, errors []error) {
	value := v.(int)

	if value%512 != 0 {
		errors = append(errors, fmt.Errorf("Blob Size %d is invalid, must be a multiple of 512", value))
	}

	return
}

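// validateArmStorageBlobType ensures the "type" attribute is either "block"
// or "page" (case-insensitive).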
func validateArmStorageBlobType(v interface{}, k string) (ws []string, errors []error) {
	value := strings.ToLower(v.(string))
	validTypes := map[string]struct{}{
		"block": {},
		"page":  {},
	}

	if _, ok := validTypes[value]; !ok {
		errors = append(errors, fmt.Errorf("Blob type %q is invalid, must be %q or %q", value, "block", "page"))
	}
	return
}

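// resourceArmStorageBlobCreate creates the blob. When "source_uri" is set the
// blob is created as a server-side copy; otherwise a block or page blob is
// created and, if "source" is set, its contents are uploaded in parallel.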
func resourceArmStorageBlobCreate(d *schema.ResourceData, meta interface{}) error {
	armClient := meta.(*ArmClient)

	resourceGroupName := d.Get("resource_group_name").(string)
	storageAccountName := d.Get("storage_account_name").(string)

	blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName)
	if err != nil {
		return err
	}
	if !accountExists {
		return fmt.Errorf("Storage Account %q Not Found", storageAccountName)
	}

	name := d.Get("name").(string)
	blobType := d.Get("type").(string)
	cont := d.Get("storage_container_name").(string)
	sourceUri := d.Get("source_uri").(string)

	log.Printf("[INFO] Creating blob %q in storage account %q", name, storageAccountName)
	if sourceUri != "" {
		if err := blobClient.CopyBlob(cont, name, sourceUri); err != nil {
			return fmt.Errorf("Error creating storage blob on Azure: %s", err)
		}
	} else {
		switch strings.ToLower(blobType) {
		case "block":
			if err := blobClient.CreateBlockBlob(cont, name); err != nil {
				return fmt.Errorf("Error creating storage blob on Azure: %s", err)
			}

			source := d.Get("source").(string)
			if source != "" {
				parallelism := d.Get("parallelism").(int)
				attempts := d.Get("attempts").(int)
				if err := resourceArmStorageBlobBlockUploadFromSource(cont, name, source, blobClient, parallelism, attempts); err != nil {
					return fmt.Errorf("Error creating storage blob on Azure: %s", err)
				}
			}
		case "page":
			source := d.Get("source").(string)
			if source != "" {
				parallelism := d.Get("parallelism").(int)
				attempts := d.Get("attempts").(int)
				if err := resourceArmStorageBlobPageUploadFromSource(cont, name, source, blobClient, parallelism, attempts); err != nil {
					return fmt.Errorf("Error creating storage blob on Azure: %s", err)
				}
			} else {
				size := int64(d.Get("size").(int))
				if err := blobClient.PutPageBlob(cont, name, size, map[string]string{}); err != nil {
					return fmt.Errorf("Error creating storage blob on Azure: %s", err)
				}
			}
		}
	}

	d.SetId(name)
	return resourceArmStorageBlobRead(d, meta)
}

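// resourceArmStorageBlobPage is a single non-empty page range of the source
// file, queued for upload by a page-upload worker.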
type resourceArmStorageBlobPage struct {
	offset  int64
	section *io.SectionReader
}

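// resourceArmStorageBlobPageUploadFromSource creates a page blob sized to
// cover the source file and uploads its non-empty 4KB-aligned ranges using
// parallelism*NumCPU workers, retrying each page up to "attempts" times.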
func resourceArmStorageBlobPageUploadFromSource(container, name, source string, client *storage.BlobStorageClient, parallelism, attempts int) error {
	workerCount := parallelism * runtime.NumCPU()

	file, err := os.Open(source)
	if err != nil {
		return fmt.Errorf("Error opening source file for upload %q: %s", source, err)
	}
	defer file.Close()

	blobSize, pageList, err := resourceArmStorageBlobPageSplit(file)
	if err != nil {
		return fmt.Errorf("Error splitting source file %q into pages: %s", source, err)
	}

	if err := client.PutPageBlob(container, name, blobSize, map[string]string{}); err != nil {
		return fmt.Errorf("Error creating storage blob on Azure: %s", err)
	}

	pages := make(chan resourceArmStorageBlobPage, len(pageList))
	errors := make(chan error, len(pageList))
	wg := &sync.WaitGroup{}
	wg.Add(len(pageList))

	total := int64(0)
	for _, page := range pageList {
		total += page.section.Size()
		pages <- page
	}
	close(pages)

	for i := 0; i < workerCount; i++ {
		go resourceArmStorageBlobPageUploadWorker(resourceArmStorageBlobPageUploadContext{
			container: container,
			name:      name,
			source:    source,
			blobSize:  blobSize,
			client:    client,
			pages:     pages,
			errors:    errors,
			wg:        wg,
			attempts:  attempts,
		})
	}

	wg.Wait()

	if len(errors) > 0 {
		return fmt.Errorf("Error while uploading source file %q: %s", source, <-errors)
	}

	return nil
}

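// resourceArmStorageBlobPageSplit scans the file in 4KB chunks and returns the
// page-aligned blob size together with the non-empty ranges (coalesced up to
// 4MB each) that actually need to be uploaded; all-zero chunks are skipped.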
func resourceArmStorageBlobPageSplit(file *os.File) (int64, []resourceArmStorageBlobPage, error) {
	const (
		minPageSize int64 = 4 * 1024
		maxPageSize int64 = 4 * 1024 * 1024
	)

	info, err := file.Stat()
	if err != nil {
		return int64(0), nil, fmt.Errorf("Could not stat file %q: %s", file.Name(), err)
	}

	blobSize := info.Size()
	if info.Size()%minPageSize != 0 {
		blobSize = info.Size() + (minPageSize - (info.Size() % minPageSize))
	}

	emptyPage := make([]byte, minPageSize)

	type byteRange struct {
		offset int64
		length int64
	}

	var nonEmptyRanges []byteRange
	var currentRange byteRange
	for i := int64(0); i < blobSize; i += minPageSize {
		pageBuf := make([]byte, minPageSize)
		_, err = file.ReadAt(pageBuf, i)
		if err != nil && err != io.EOF {
			return int64(0), nil, fmt.Errorf("Could not read chunk at %d: %s", i, err)
		}

		if bytes.Equal(pageBuf, emptyPage) {
			if currentRange.length != 0 {
				nonEmptyRanges = append(nonEmptyRanges, currentRange)
			}
			currentRange = byteRange{
				offset: i + minPageSize,
			}
		} else {
			currentRange.length += minPageSize
			if currentRange.length == maxPageSize || (currentRange.offset+currentRange.length == blobSize) {
				nonEmptyRanges = append(nonEmptyRanges, currentRange)
				currentRange = byteRange{
					offset: i + minPageSize,
				}
			}
		}
	}

	var pages []resourceArmStorageBlobPage
	for _, nonEmptyRange := range nonEmptyRanges {
		pages = append(pages, resourceArmStorageBlobPage{
			offset:  nonEmptyRange.offset,
			section: io.NewSectionReader(file, nonEmptyRange.offset, nonEmptyRange.length),
		})
	}

	// Return the rounded-up size: creating the page blob with the raw file size
	// would fail whenever the file is not itself a multiple of 512 bytes.
	return blobSize, pages, nil
}

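// resourceArmStorageBlobPageUploadContext carries the shared state handed to
// each page-upload worker goroutine.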
type resourceArmStorageBlobPageUploadContext struct {
	container string
	name      string
	source    string
	blobSize  int64
	client    *storage.BlobStorageClient
	pages     chan resourceArmStorageBlobPage
	errors    chan error
	wg        *sync.WaitGroup
	attempts  int
}

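// resourceArmStorageBlobPageUploadWorker drains the pages channel, writing
// each range with PutPage and retrying up to "attempts" times; failures are
// reported on the errors channel and the WaitGroup is decremented per page.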
func resourceArmStorageBlobPageUploadWorker(ctx resourceArmStorageBlobPageUploadContext) {
	for page := range ctx.pages {
		start := page.offset
		end := page.offset + page.section.Size() - 1
		if end > ctx.blobSize-1 {
			end = ctx.blobSize - 1
		}
		size := end - start + 1

		chunk := make([]byte, size)
		_, err := page.section.Read(chunk)
		if err != nil && err != io.EOF {
			ctx.errors <- fmt.Errorf("Error reading source file %q at offset %d: %s", ctx.source, page.offset, err)
			ctx.wg.Done()
			continue
		}

		for x := 0; x < ctx.attempts; x++ {
			err = ctx.client.PutPage(ctx.container, ctx.name, start, end, storage.PageWriteTypeUpdate, chunk, map[string]string{})
			if err == nil {
				break
			}
		}
		if err != nil {
			ctx.errors <- fmt.Errorf("Error writing page at offset %d for file %q: %s", page.offset, ctx.source, err)
			ctx.wg.Done()
			continue
		}

		ctx.wg.Done()
	}
}

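// resourceArmStorageBlobBlock is a single block of the source file together
// with the random, base64-encoded block ID it will be uploaded under.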
type resourceArmStorageBlobBlock struct {
	section *io.SectionReader
	id      string
}

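// resourceArmStorageBlobBlockUploadFromSource splits the source file into 4MB
// blocks, uploads them with parallelism*NumCPU workers (retrying each block up
// to "attempts" times) and finally commits the block list.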
func resourceArmStorageBlobBlockUploadFromSource(container, name, source string, client *storage.BlobStorageClient, parallelism, attempts int) error {
	workerCount := parallelism * runtime.NumCPU()

	file, err := os.Open(source)
	if err != nil {
		return fmt.Errorf("Error opening source file for upload %q: %s", source, err)
	}
	defer file.Close()

	blockList, parts, err := resourceArmStorageBlobBlockSplit(file)
	if err != nil {
		return fmt.Errorf("Error reading and splitting source file for upload %q: %s", source, err)
	}

	wg := &sync.WaitGroup{}
	blocks := make(chan resourceArmStorageBlobBlock, len(parts))
	errors := make(chan error, len(parts))

	wg.Add(len(parts))
	for _, p := range parts {
		blocks <- p
	}
	close(blocks)

	for i := 0; i < workerCount; i++ {
		go resourceArmStorageBlobBlockUploadWorker(resourceArmStorageBlobBlockUploadContext{
			client:    client,
			source:    source,
			container: container,
			name:      name,
			blocks:    blocks,
			errors:    errors,
			wg:        wg,
			attempts:  attempts,
		})
	}

	wg.Wait()

	if len(errors) > 0 {
		return fmt.Errorf("Error while uploading source file %q: %s", source, <-errors)
	}

	err = client.PutBlockList(container, name, blockList)
	if err != nil {
		return fmt.Errorf("Error updating block list for source file %q: %s", source, err)
	}

	return nil
}

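// resourceArmStorageBlobBlockSplit cuts the file into 4MB sections, assigning
// each a random 64-byte ID (base64-encoded), and returns both the uncommitted
// block list and the matching sections to upload.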
func resourceArmStorageBlobBlockSplit(file *os.File) ([]storage.Block, []resourceArmStorageBlobBlock, error) {
	const (
		idSize          = 64
		blockSize int64 = 4 * 1024 * 1024
	)
	var parts []resourceArmStorageBlobBlock
	var blockList []storage.Block

	info, err := file.Stat()
	if err != nil {
		return nil, nil, fmt.Errorf("Could not stat file %q: %s", file.Name(), err)
	}

	for i := int64(0); i < info.Size(); i = i + blockSize {
		entropy := make([]byte, idSize)
		_, err = rand.Read(entropy)
		if err != nil {
			return nil, nil, fmt.Errorf("Error generating a random block ID for source file %q: %s", file.Name(), err)
		}

		sectionSize := blockSize
		remainder := info.Size() - i
		if remainder < blockSize {
			sectionSize = remainder
		}

		block := storage.Block{
			ID:     base64.StdEncoding.EncodeToString(entropy),
			Status: storage.BlockStatusUncommitted,
		}

		blockList = append(blockList, block)

		parts = append(parts, resourceArmStorageBlobBlock{
			id:      block.ID,
			section: io.NewSectionReader(file, i, sectionSize),
		})
	}

	return blockList, parts, nil
}

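// resourceArmStorageBlobBlockUploadContext carries the shared state handed to
// each block-upload worker goroutine.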
type resourceArmStorageBlobBlockUploadContext struct {
	client    *storage.BlobStorageClient
	container string
	name      string
	source    string
	attempts  int
	blocks    chan resourceArmStorageBlobBlock
	errors    chan error
	wg        *sync.WaitGroup
}

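// resourceArmStorageBlobBlockUploadWorker drains the blocks channel, uploading
// each section with PutBlock and retrying up to "attempts" times; failures are
// reported on the errors channel and the WaitGroup is decremented per block.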
func resourceArmStorageBlobBlockUploadWorker(ctx resourceArmStorageBlobBlockUploadContext) {
	for block := range ctx.blocks {
		buffer := make([]byte, block.section.Size())

		_, err := block.section.Read(buffer)
		if err != nil {
			ctx.errors <- fmt.Errorf("Error reading source file %q: %s", ctx.source, err)
			ctx.wg.Done()
			continue
		}

		for i := 0; i < ctx.attempts; i++ {
			err = ctx.client.PutBlock(ctx.container, ctx.name, block.id, buffer)
			if err == nil {
				break
			}
		}
		if err != nil {
			ctx.errors <- fmt.Errorf("Error uploading block %q for source file %q: %s", block.id, ctx.source, err)
			ctx.wg.Done()
			continue
		}

		ctx.wg.Done()
	}
}

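// resourceArmStorageBlobRead refreshes the blob's state, removing it from
// state when the storage account or the blob itself no longer exists, and
// stores the blob URL in the "url" attribute.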
func resourceArmStorageBlobRead(d *schema.ResourceData, meta interface{}) error {
	armClient := meta.(*ArmClient)

	resourceGroupName := d.Get("resource_group_name").(string)
	storageAccountName := d.Get("storage_account_name").(string)

	blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName)
	if err != nil {
		return err
	}
	if !accountExists {
		log.Printf("[DEBUG] Storage account %q not found, removing blob %q from state", storageAccountName, d.Id())
		d.SetId("")
		return nil
	}

	exists, err := resourceArmStorageBlobExists(d, meta)
	if err != nil {
		return err
	}

	if !exists {
		// Exists already removed this from state
		return nil
	}

	name := d.Get("name").(string)
	storageContainerName := d.Get("storage_container_name").(string)

	url := blobClient.GetBlobURL(storageContainerName, name)
	if url == "" {
		log.Printf("[INFO] URL for %q is empty", name)
	}
	d.Set("url", url)

	return nil
}

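// resourceArmStorageBlobExists reports whether the blob still exists, clearing
// the resource ID when either the storage account or the blob has gone away.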
func resourceArmStorageBlobExists(d *schema.ResourceData, meta interface{}) (bool, error) {
	armClient := meta.(*ArmClient)

	resourceGroupName := d.Get("resource_group_name").(string)
	storageAccountName := d.Get("storage_account_name").(string)

	blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName)
	if err != nil {
		return false, err
	}
	if !accountExists {
		log.Printf("[DEBUG] Storage account %q not found, removing blob %q from state", storageAccountName, d.Id())
		d.SetId("")
		return false, nil
	}

	name := d.Get("name").(string)
	storageContainerName := d.Get("storage_container_name").(string)

	log.Printf("[INFO] Checking for existence of storage blob %q.", name)
	exists, err := blobClient.BlobExists(storageContainerName, name)
	if err != nil {
		return false, fmt.Errorf("error testing existence of storage blob %q: %s", name, err)
	}

	if !exists {
		log.Printf("[INFO] Storage blob %q no longer exists, removing from state...", name)
		d.SetId("")
	}

	return exists, nil
}

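// resourceArmStorageBlobDelete deletes the blob if it still exists; a missing
// storage account is treated as the blob already being gone.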
func resourceArmStorageBlobDelete(d *schema.ResourceData, meta interface{}) error {
	armClient := meta.(*ArmClient)

	resourceGroupName := d.Get("resource_group_name").(string)
	storageAccountName := d.Get("storage_account_name").(string)

	blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName)
	if err != nil {
		return err
	}
	if !accountExists {
		log.Printf("[INFO] Storage Account %q doesn't exist so the blob won't exist", storageAccountName)
		return nil
	}

	name := d.Get("name").(string)
	storageContainerName := d.Get("storage_container_name").(string)

	log.Printf("[INFO] Deleting storage blob %q", name)
	if _, err = blobClient.DeleteBlobIfExists(storageContainerName, name, map[string]string{}); err != nil {
		return fmt.Errorf("Error deleting storage blob %q: %s", name, err)
	}

	d.SetId("")
	return nil
}