github.com/projecteru2/core@v0.0.0-20240321043226-06bcc1c23f58/cluster/calcium/sendlarge.go

package calcium

import (
	"context"
	"io"
	"sync"

	"github.com/pkg/errors"
	"github.com/projecteru2/core/log"
	"github.com/projecteru2/core/types"
	"github.com/projecteru2/core/utils"
)

// SendLargeFile sends large files to workloads by streaming chunks
func (c *Calcium) SendLargeFile(ctx context.Context, inputChan chan *types.SendLargeFileOptions) chan *types.SendMessage {
	resp := make(chan *types.SendMessage)
	wg := &sync.WaitGroup{}
	utils.SentryGo(func() {
		defer close(resp)
		senders := make(map[string]*workloadSender)
		// for each file
		for data := range inputChan {
			for _, id := range data.IDs {
				if _, ok := senders[id]; !ok {
					log.Debugf(ctx, "[SendLargeFile] create sender for %s", id)
					// For each workload, create a sender that streams the same file chunks; every chunk carries the file's metadata.
					// `wg.Add` must be called outside `newWorkloadSender` to guarantee it runs before `wg.Wait()`,
					// otherwise the goroutine spawned inside `c.newWorkloadSender` could be killed early.
					wg.Add(1)
					sender := c.newWorkloadSender(ctx, id, resp, wg)
					senders[id] = sender
				}
				senders[id].send(data)
			}
		}
		for _, sender := range senders {
			sender.close()
		}
		wg.Wait()
	})
	return resp
}
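
// sendLargeFileExample is a hypothetical caller sketch, not part of the original
// file: it only illustrates how the input channel and the response channel of
// SendLargeFile are meant to be used together. The 1 MiB chunk size and the file
// mode below are assumptions for illustration; field names follow how they are
// read elsewhere in this file.
func sendLargeFileExample(ctx context.Context, c *Calcium, ids []string, dst string, content []byte) {
	input := make(chan *types.SendLargeFileOptions)
	resp := c.SendLargeFile(ctx, input)
	utils.SentryGo(func() {
		// closing the input channel ends SendLargeFile's send loop
		defer close(input)
		const chunkSize = 1 << 20
		for off := 0; off < len(content); off += chunkSize {
			end := off + chunkSize
			if end > len(content) {
				end = len(content)
			}
			// every chunk repeats the destination and metadata, as the senders expect
			input <- &types.SendLargeFileOptions{
				IDs:   ids,
				Dst:   dst,
				Size:  int64(len(content)),
				Mode:  0o644,
				UID:   0,
				GID:   0,
				Chunk: content[off:end],
			}
		}
	})
	// one message is produced per workload and file
	for msg := range resp {
		if msg.Error != nil {
			log.Errorf(ctx, msg.Error, "[sendLargeFileExample] send %s to %s failed", msg.Path, msg.ID)
		}
	}
}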
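
// workloadSender streams the chunks of one file to a single workload: chunks
// queued on buffer by SendLargeFile are forwarded to the engine through an
// io.Pipe, and the result is reported on resp.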
type workloadSender struct {
	calcium *Calcium
	id      string
	wg      *sync.WaitGroup
	buffer  chan *types.SendLargeFileOptions
	resp    chan *types.SendMessage
}
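
// newWorkloadSender starts a goroutine that drains the sender's buffer: the
// first chunk opens an io.Pipe and kicks off the engine copy reading from it,
// and every chunk is then written into the pipe. A chunk for a different
// destination aborts the stream.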
func (c *Calcium) newWorkloadSender(ctx context.Context, ID string, resp chan *types.SendMessage, wg *sync.WaitGroup) *workloadSender {
	sender := &workloadSender{
		calcium: c,
		id:      ID,
		wg:      wg,
		buffer:  make(chan *types.SendLargeFileOptions, 10),
		resp:    resp,
	}
	utils.SentryGo(func() {
		var writer *io.PipeWriter
		curFile := ""
		for data := range sender.buffer {
			if curFile != "" && curFile != data.Dst {
				log.Warnf(ctx, "[newWorkloadSender] received a different file %s while sending %s", data.Dst, curFile)
				break
			}
			// first chunk of a new file: set up the pipe and start the engine copy
			if curFile == "" {
				log.Debugf(ctx, "[newWorkloadSender] received new file %s for %s", data.Dst, sender.id)
				curFile = data.Dst
				pr, pw := io.Pipe()
				writer = pw
				// the pipe reader and the file's metadata are bound as arguments here,
				// so later loop iterations cannot change what this copy goroutine sees
				utils.SentryGo(func(ID, name string, size int64, content io.Reader, uid, gid int, mode int64) func() {
					return func() {
						defer wg.Done()
						if err := sender.calcium.withWorkloadLocked(ctx, ID, false, func(ctx context.Context, workload *types.Workload) error {
							err := errors.WithStack(workload.Engine.VirtualizationCopyChunkTo(ctx, ID, name, size, content, uid, gid, mode))
							resp <- &types.SendMessage{ID: ID, Path: name, Error: err}
							return nil
						}); err != nil {
							resp <- &types.SendMessage{ID: ID, Error: err}
						}
					}
				}(ID, curFile, data.Size, pr, data.UID, data.GID, data.Mode))
			}
			n, err := writer.Write(data.Chunk)
			if err != nil || n != len(data.Chunk) {
				log.Errorf(ctx, err, "[newWorkloadSender] send file %s to engine failed", curFile)
				break
			}
		}
		if writer != nil {
			// closing the writer lets the engine-side reader see EOF; skip when no chunk ever arrived
			writer.Close()
		}
	})
	return sender
}
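
// pipeSketch is a reduced, self-contained illustration (not used by the code
// above) of the io.Pipe pattern the sender goroutine relies on: the consumer
// reads from the pipe until the write end is closed, so closing the writer is
// what signals EOF. copyDst here is a hypothetical stand-in for the engine's
// VirtualizationCopyChunkTo call.
func pipeSketch(chunks [][]byte, copyDst func(io.Reader) error) error {
	pr, pw := io.Pipe()
	errCh := make(chan error, 1)
	go func() {
		err := copyDst(pr)
		// closing the read end unblocks pending writes if the consumer stops early
		pr.CloseWithError(err)
		errCh <- err
	}()
	for _, chunk := range chunks {
		if _, err := pw.Write(chunk); err != nil {
			// a write error means the consumer gave up; propagate it to the reader side too
			pw.CloseWithError(err)
			return err
		}
	}
	pw.Close()
	return <-errCh
}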
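
// send queues one chunk for the sender goroutine; it blocks once the buffered
// channel (capacity 10) is full.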
func (s *workloadSender) send(chunk *types.SendLargeFileOptions) {
	s.buffer <- chunk
}
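
// close signals that no more chunks will arrive; the sender goroutine then
// closes the pipe writer so the engine-side copy sees EOF.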
func (s *workloadSender) close() {
	close(s.buffer)
}