github.com/vieux/docker@v0.6.3-0.20161004191708-e097c2a938c7/cli/command/formatter/disk_usage.go

package formatter

import (
	"bytes"
	"fmt"
	"strings"
	"text/template"

	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
	units "github.com/docker/go-units"
)

const (
	defaultDiskUsageImageTableFormat     = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.VirtualSize}}\t{{.SharedSize}}\t{{.UniqueSize}}\t{{.Containers}}"
	defaultDiskUsageContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Names}}"
	defaultDiskUsageVolumeTableFormat    = "table {{.Name}}\t{{.Links}}\t{{.Size}}"
	defaultDiskUsageTableFormat          = "table {{.Type}}\t{{.TotalCount}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}"

	typeHeader        = "TYPE"
	totalHeader       = "TOTAL"
	activeHeader      = "ACTIVE"
	reclaimableHeader = "RECLAIMABLE"
	containersHeader  = "CONTAINERS"
	sharedSizeHeader  = "SHARED SIZE"
	uniqueSizeHeader  = "UNIQUE SIZE"
)

// DiskUsageContext contains disk usage specific information required by the
// formatter; it embeds a Context struct.
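//
// A minimal usage sketch (hypothetical caller, not part of this file; the
// layersSize, images, containers, and volumes variables are placeholders):
//
//	duCtx := DiskUsageContext{
//		Context:    Context{Output: os.Stdout},
//		LayersSize: layersSize,
//		Images:     images,
//		Containers: containers,
//		Volumes:    volumes,
//	}
//	duCtx.Write()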
type DiskUsageContext struct {
	Context
	Verbose    bool
	LayersSize int64
	Images     []*types.Image
	Containers []*types.Container
	Volumes    []*types.Volume
}

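// startSubsection resets the context's buffer and header, applies the given
// format, and returns the parsed template used to render the next table
// section.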
func (ctx *DiskUsageContext) startSubsection(format string) (*template.Template, error) {
	ctx.buffer = bytes.NewBufferString("")
	ctx.header = ""
	ctx.Format = Format(format)
	ctx.preFormat()

	return ctx.parseFormat()
}

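// Write renders the disk usage report to ctx.Output. By default it prints a
// single summary table with one row per resource type (images, containers,
// local volumes); when Verbose is set it prints a detailed table for each
// resource type instead. Template errors abort the output silently.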
func (ctx *DiskUsageContext) Write() {
	if !ctx.Verbose {
		ctx.buffer = bytes.NewBufferString("")
		ctx.Format = defaultDiskUsageTableFormat
		ctx.preFormat()

		tmpl, err := ctx.parseFormat()
		if err != nil {
			return
		}

		err = ctx.contextFormat(tmpl, &diskUsageImagesContext{
			totalSize: ctx.LayersSize,
			images:    ctx.Images,
		})
		if err != nil {
			return
		}
		err = ctx.contextFormat(tmpl, &diskUsageContainersContext{
			containers: ctx.Containers,
		})
		if err != nil {
			return
		}

		err = ctx.contextFormat(tmpl, &diskUsageVolumesContext{
			volumes: ctx.Volumes,
		})
		if err != nil {
			return
		}

		ctx.postFormat(tmpl, &diskUsageContainersContext{containers: []*types.Container{}})

		return
	}

	// First images
	tmpl, err := ctx.startSubsection(defaultDiskUsageImageTableFormat)
	if err != nil {
		return
	}

	ctx.Output.Write([]byte("Images space usage:\n\n"))
	for _, i := range ctx.Images {
		repo := "<none>"
		tag := "<none>"
		if len(i.RepoTags) > 0 && !isDangling(*i) {
			// Only show the first tag
			ref, err := reference.ParseNamed(i.RepoTags[0])
			if err != nil {
				continue
			}
			if nt, ok := ref.(reference.NamedTagged); ok {
				repo = ref.Name()
				tag = nt.Tag()
			}
		}

		err = ctx.contextFormat(tmpl, &imageContext{
			repo:  repo,
			tag:   tag,
			trunc: true,
			i:     *i,
		})
		if err != nil {
			return
		}
	}
	ctx.postFormat(tmpl, &imageContext{})

	// Now containers
	ctx.Output.Write([]byte("\nContainers space usage:\n\n"))
	tmpl, err = ctx.startSubsection(defaultDiskUsageContainerTableFormat)
	if err != nil {
		return
	}
	for _, c := range ctx.Containers {
		// Don't display the virtual size
		c.SizeRootFs = 0
		err = ctx.contextFormat(tmpl, &containerContext{
			trunc: true,
			c:     *c,
		})
		if err != nil {
			return
		}
	}
	ctx.postFormat(tmpl, &containerContext{})

	// And volumes
	ctx.Output.Write([]byte("\nLocal Volumes space usage:\n\n"))
	tmpl, err = ctx.startSubsection(defaultDiskUsageVolumeTableFormat)
	if err != nil {
		return
	}
	for _, v := range ctx.Volumes {
		err = ctx.contextFormat(tmpl, &volumeContext{
			v: *v,
		})
		if err != nil {
			return
		}
	}
	ctx.postFormat(tmpl, &volumeContext{v: types.Volume{}})
}

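// diskUsageImagesContext provides the "Images" row of the non-verbose summary
// table.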
type diskUsageImagesContext struct {
	HeaderContext
	totalSize int64
	images    []*types.Image
}

func (c *diskUsageImagesContext) Type() string {
	c.AddHeader(typeHeader)
	return "Images"
}

func (c *diskUsageImagesContext) TotalCount() string {
	c.AddHeader(totalHeader)
	return fmt.Sprintf("%d", len(c.images))
}

func (c *diskUsageImagesContext) Active() string {
	c.AddHeader(activeHeader)
	used := 0
	for _, i := range c.images {
		if i.Containers > 0 {
			used++
		}
	}

	return fmt.Sprintf("%d", used)
}

func (c *diskUsageImagesContext) Size() string {
	c.AddHeader(sizeHeader)
	return units.HumanSize(float64(c.totalSize))
}

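// Reclaimable reports the total layer size minus the size of images used by
// at least one container, with a percentage of the total when the total is
// non-zero.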
func (c *diskUsageImagesContext) Reclaimable() string {
	var used int64

	c.AddHeader(reclaimableHeader)
	for _, i := range c.images {
		if i.Containers != 0 {
			used += i.Size
		}
	}

	reclaimable := c.totalSize - used
	if c.totalSize > 0 {
		return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/c.totalSize)
	}
	return units.HumanSize(float64(reclaimable))
}

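// diskUsageContainersContext provides the "Containers" row of the non-verbose
// summary table.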
type diskUsageContainersContext struct {
	HeaderContext
	verbose    bool
	containers []*types.Container
}

func (c *diskUsageContainersContext) Type() string {
	c.AddHeader(typeHeader)
	return "Containers"
}

func (c *diskUsageContainersContext) TotalCount() string {
	c.AddHeader(totalHeader)
	return fmt.Sprintf("%d", len(c.containers))
}

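// isActive reports whether the container is running, paused, or restarting.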
func (c *diskUsageContainersContext) isActive(container types.Container) bool {
	return strings.Contains(container.State, "running") ||
		strings.Contains(container.State, "paused") ||
		strings.Contains(container.State, "restarting")
}

func (c *diskUsageContainersContext) Active() string {
	c.AddHeader(activeHeader)
	used := 0
	for _, container := range c.containers {
		if c.isActive(*container) {
			used++
		}
	}

	return fmt.Sprintf("%d", used)
}

func (c *diskUsageContainersContext) Size() string {
	var size int64

	c.AddHeader(sizeHeader)
	for _, container := range c.containers {
		size += container.SizeRw
	}

	return units.HumanSize(float64(size))
}

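// Reclaimable sums the writable-layer size (SizeRw) of containers that are
// not active, with a percentage of the total writable-layer size when that
// total is non-zero.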
func (c *diskUsageContainersContext) Reclaimable() string {
	var reclaimable int64
	var totalSize int64

	c.AddHeader(reclaimableHeader)
	for _, container := range c.containers {
		if !c.isActive(*container) {
			reclaimable += container.SizeRw
		}
		totalSize += container.SizeRw
	}

	if totalSize > 0 {
		return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize)
	}

	return units.HumanSize(float64(reclaimable))
}

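// diskUsageVolumesContext provides the "Local Volumes" row of the non-verbose
// summary table.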
type diskUsageVolumesContext struct {
	HeaderContext
	verbose bool
	volumes []*types.Volume
}

func (c *diskUsageVolumesContext) Type() string {
	c.AddHeader(typeHeader)
	return "Local Volumes"
}

func (c *diskUsageVolumesContext) TotalCount() string {
	c.AddHeader(totalHeader)
	return fmt.Sprintf("%d", len(c.volumes))
}

func (c *diskUsageVolumesContext) Active() string {
	c.AddHeader(activeHeader)

	used := 0
	for _, v := range c.volumes {
		if v.RefCount > 0 {
			used++
		}
	}

	return fmt.Sprintf("%d", used)
}

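// Size sums the sizes of all volumes, skipping entries whose size is reported
// as -1 (size not available).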
func (c *diskUsageVolumesContext) Size() string {
	var size int64

	c.AddHeader(sizeHeader)
	for _, v := range c.volumes {
		if v.Size != -1 {
			size += v.Size
		}
	}

	return units.HumanSize(float64(size))
}

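// Reclaimable sums the size of volumes that are not referenced by any
// container (RefCount == 0), with a percentage of the total volume size when
// that total is non-zero.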
func (c *diskUsageVolumesContext) Reclaimable() string {
	var reclaimable int64
	var totalSize int64

	c.AddHeader(reclaimableHeader)
	for _, v := range c.volumes {
		if v.Size != -1 {
			if v.RefCount == 0 {
				reclaimable += v.Size
			}
			totalSize += v.Size
		}
	}

	if totalSize > 0 {
		return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize)
	}

	return units.HumanSize(float64(reclaimable))
}