github.com/filecoin-project/bacalhau@v0.3.23-0.20230228154132-45c989550ace/pkg/compute/capacity/disk/calculator.go (about)

     1  package disk
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  
     7  	"github.com/filecoin-project/bacalhau/pkg/executor"
     8  	"github.com/filecoin-project/bacalhau/pkg/model"
     9  )
    10  
// DiskUsageCalculatorParams holds the dependencies needed to construct a
// DiskUsageCalculator via NewDiskUsageCalculator.
type DiskUsageCalculatorParams struct {
	// Executors resolves the executor implementation for a job's engine,
	// which is used to look up the size of each input volume.
	Executors executor.ExecutorProvider
}
    14  
// DiskUsageCalculator estimates the disk space a job requires by querying
// the job's executor for the size of each input volume.
type DiskUsageCalculator struct {
	executors executor.ExecutorProvider
}
    18  
    19  func NewDiskUsageCalculator(params DiskUsageCalculatorParams) *DiskUsageCalculator {
    20  	return &DiskUsageCalculator{
    21  		executors: params.Executors,
    22  	}
    23  }
    24  
    25  func (c *DiskUsageCalculator) Calculate(
    26  	ctx context.Context, job model.Job, parsedUsage model.ResourceUsageData) (model.ResourceUsageData, error) {
    27  	requirements := model.ResourceUsageData{}
    28  
    29  	e, err := c.executors.Get(ctx, job.Spec.Engine)
    30  	if err != nil {
    31  		return model.ResourceUsageData{}, fmt.Errorf("error getting job disk space requirements: %w", err)
    32  	}
    33  
    34  	var totalDiskRequirements uint64 = 0
    35  
    36  	for _, input := range job.Spec.Inputs {
    37  		volumeSize, err := e.GetVolumeSize(ctx, input)
    38  		if err != nil {
    39  			return model.ResourceUsageData{}, fmt.Errorf("error getting job disk space requirements: %w", err)
    40  		}
    41  		totalDiskRequirements += volumeSize
    42  	}
    43  
    44  	// TODO: think about the fact that each shard might be different sizes
    45  	//  this is probably good enough for now
    46  	totalShards := job.Spec.ExecutionPlan.TotalShards
    47  	if totalShards == 0 {
    48  		totalShards = 1
    49  	}
    50  	// update the job requirements disk space with what we calculated
    51  	requirements.Disk = totalDiskRequirements / uint64(totalShards)
    52  
    53  	return requirements, nil
    54  }