github.com/projecteru2/core@v0.0.0-20240321043226-06bcc1c23f58/rpc/rpc.go

     1  package rpc
     2  
     3  import (
     4  	"archive/tar"
     5  	"bufio"
     6  	"fmt"
     7  	"io"
     8  	"path/filepath"
     9  	"runtime"
    10  	"sync"
    11  	"time"
    12  
    13  	"github.com/projecteru2/core/cluster"
    14  	"github.com/projecteru2/core/log"
    15  	pb "github.com/projecteru2/core/rpc/gen"
    16  	"github.com/projecteru2/core/types"
    17  	"github.com/projecteru2/core/utils"
    18  	"github.com/projecteru2/core/version"
    19  
    20  	"golang.org/x/net/context"
    21  	grpcstatus "google.golang.org/grpc/status"
    22  )
    23  
    24  // Vibranium is the implementation of the gRPC server interface.
    25  // Many data types are transformed between gRPC messages and core types.
    26  type Vibranium struct {
    27  	cluster cluster.Cluster
    28  	config  types.Config
    29  	counter sync.WaitGroup
    30  	stop    chan struct{}
    31  	TaskNum int
    32  }
    33  
    34  // Info shows core info
    35  func (v *Vibranium) Info(context.Context, *pb.Empty) (*pb.CoreInfo, error) {
    36  	return &pb.CoreInfo{
    37  		Version:       version.VERSION,
    38  		Revison:       version.REVISION,
    39  		BuildAt:       version.BUILTAT,
    40  		GolangVersion: runtime.Version(),
    41  		OsArch:        fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
    42  		Identifier:    v.cluster.GetIdentifier(),
    43  	}, nil
    44  }
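
        // A minimal client-side sketch of calling Info, assuming the generated
        // pb.NewCoreRPCClient constructor and an already-dialed *grpc.ClientConn:
        //
        //	client := pb.NewCoreRPCClient(conn)
        //	info, err := client.Info(ctx, &pb.Empty{})
        //	if err == nil {
        //		fmt.Println(info.Version, info.OsArch)
        //	}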
    45  
    46  // WatchServiceStatus pushes sibling services
    47  func (v *Vibranium) WatchServiceStatus(_ *pb.Empty, stream pb.CoreRPC_WatchServiceStatusServer) (err error) {
    48  	task := v.newTask(stream.Context(), "WatchServiceStatus", false)
    49  	defer task.done()
    50  	ch, err := v.cluster.WatchServiceStatus(task.context)
    51  	if err != nil {
    52  		return grpcstatus.Error(WatchServiceStatus, err.Error())
    53  	}
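        	// Relay status updates until the watch channel closes, the client
        	// context is cancelled, or the server begins shutting down.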
    54  	for {
    55  		select {
    56  		case status, ok := <-ch:
    57  			if !ok {
    58  				return nil
    59  			}
    60  			s := toRPCServiceStatus(status)
    61  			if err = stream.Send(s); err != nil {
    62  				v.logUnsentMessages(task.context, "WatchServiceStatus", err, s)
    63  				return grpcstatus.Error(WatchServiceStatus, err.Error())
    64  			}
    65  		case <-task.context.Done():
    66  			return nil
    67  		case <-v.stop:
    68  			return nil
    69  		}
    70  	}
    71  }
    72  
    73  // ListNetworks lists networks for the pod
    74  func (v *Vibranium) ListNetworks(ctx context.Context, opts *pb.ListNetworkOptions) (*pb.Networks, error) {
    75  	task := v.newTask(ctx, "ListNetworks", false)
    76  	defer task.done()
    77  	networks, err := v.cluster.ListNetworks(task.context, opts.Podname, opts.Driver)
    78  	if err != nil {
    79  		return nil, grpcstatus.Error(ListNetworks, err.Error())
    80  	}
    81  
    82  	ns := []*pb.Network{}
    83  	for _, n := range networks {
    84  		ns = append(ns, toRPCNetwork(n))
    85  	}
    86  	return &pb.Networks{Networks: ns}, nil
    87  }
    88  
    89  // ConnectNetwork connects a target to a network
    90  func (v *Vibranium) ConnectNetwork(ctx context.Context, opts *pb.ConnectNetworkOptions) (*pb.Network, error) {
    91  	task := v.newTask(ctx, "ConnectNetwork", false)
    92  	defer task.done()
    93  	subnets, err := v.cluster.ConnectNetwork(task.context, opts.Network, opts.Target, opts.Ipv4, opts.Ipv6)
    94  	if err != nil {
    95  		return nil, grpcstatus.Error(ConnectNetwork, err.Error())
    96  	}
    97  	return &pb.Network{Name: opts.Network, Subnets: subnets}, nil
    98  }
    99  
   100  // DisconnectNetwork disconnects a target from a network
   101  func (v *Vibranium) DisconnectNetwork(ctx context.Context, opts *pb.DisconnectNetworkOptions) (*pb.Empty, error) {
   102  	task := v.newTask(ctx, "DisconnectNetwork", false)
   103  	defer task.done()
   104  	if err := v.cluster.DisconnectNetwork(task.context, opts.Network, opts.Target, opts.Force); err != nil {
   105  		return nil, grpcstatus.Error(DisconnectNetwork, err.Error())
   106  	}
   107  	return &pb.Empty{}, nil
   108  }
   109  
   110  // AddPod saves a pod, and returns it to client
   111  func (v *Vibranium) AddPod(ctx context.Context, opts *pb.AddPodOptions) (*pb.Pod, error) {
   112  	task := v.newTask(ctx, "AddPod", false)
   113  	defer task.done()
   114  	p, err := v.cluster.AddPod(task.context, opts.Name, opts.Desc)
   115  	if err != nil {
   116  		return nil, grpcstatus.Error(AddPod, err.Error())
   117  	}
   118  
   119  	return toRPCPod(p), nil
   120  }
   121  
   122  // RemovePod removes a pod only if it's empty
   123  func (v *Vibranium) RemovePod(ctx context.Context, opts *pb.RemovePodOptions) (*pb.Empty, error) {
   124  	task := v.newTask(ctx, "RemovePod", false)
   125  	defer task.done()
   126  	if err := v.cluster.RemovePod(task.context, opts.Name); err != nil {
   127  		return nil, grpcstatus.Error(RemovePod, err.Error())
   128  	}
   129  	return &pb.Empty{}, nil
   130  }
   131  
   132  // GetPod shows a pod
   133  func (v *Vibranium) GetPod(ctx context.Context, opts *pb.GetPodOptions) (*pb.Pod, error) {
   134  	task := v.newTask(ctx, "GetPod", false)
   135  	defer task.done()
   136  	p, err := v.cluster.GetPod(task.context, opts.Name)
   137  	if err != nil {
   138  		return nil, grpcstatus.Error(GetPod, err.Error())
   139  	}
   140  
   141  	return toRPCPod(p), nil
   142  }
   143  
   144  // ListPods returns a list of pods
   145  func (v *Vibranium) ListPods(ctx context.Context, _ *pb.Empty) (*pb.Pods, error) {
   146  	task := v.newTask(ctx, "ListPods", false)
   147  	defer task.done()
   148  	ps, err := v.cluster.ListPods(task.context)
   149  	if err != nil {
   150  		return nil, grpcstatus.Error(ListPods, err.Error())
   151  	}
   152  
   153  	pods := []*pb.Pod{}
   154  	for _, p := range ps {
   155  		pods = append(pods, toRPCPod(p))
   156  	}
   157  
   158  	return &pb.Pods{Pods: pods}, nil
   159  }
   160  
   161  // GetPodResource gets the resource usage of the pod's nodes
   162  func (v *Vibranium) GetPodResource(opts *pb.GetPodOptions, stream pb.CoreRPC_GetPodResourceServer) error {
   163  	task := v.newTask(stream.Context(), "GetPodResource", false)
   164  	defer task.done()
   165  	ch, err := v.cluster.PodResource(task.context, opts.Name)
   166  	if err != nil {
   167  		return grpcstatus.Error(PodResource, err.Error())
   168  	}
   169  	for msg := range ch {
   170  		if err := stream.Send(toRPCNodeResource(msg)); err != nil {
   171  			v.logUnsentMessages(task.context, "GetPodResource", err, msg)
   172  		}
   173  	}
   174  	return nil
   175  }
   176  
   177  // GetNodeResource checks node resource
   178  func (v *Vibranium) GetNodeResource(ctx context.Context, opts *pb.GetNodeResourceOptions) (*pb.NodeResource, error) {
   179  	task := v.newTask(ctx, "GetNodeResource", false)
   180  	defer task.done()
   181  	nr, err := v.cluster.NodeResource(task.context, opts.GetOpts().Nodename, opts.Fix)
   182  	if err != nil {
   183  		return nil, grpcstatus.Error(GetNodeResource, err.Error())
   184  	}
   185  
   186  	return toRPCNodeResource(nr), nil
   187  }
   188  
   189  // AddNode saves a node and returns it to client
   190  // Method must be called synchronously, or nothing will be returned
   191  func (v *Vibranium) AddNode(ctx context.Context, opts *pb.AddNodeOptions) (*pb.Node, error) {
   192  	task := v.newTask(ctx, "AddNode", false)
   193  	defer task.done()
   194  	addNodeOpts := toCoreAddNodeOptions(opts)
   195  	n, err := v.cluster.AddNode(task.context, addNodeOpts)
   196  	if err != nil {
   197  		return nil, grpcstatus.Error(AddNode, err.Error())
   198  	}
   199  
   200  	return toRPCNode(n), nil
   201  }
   202  
   203  // RemoveNode removes the node from etcd
   204  func (v *Vibranium) RemoveNode(ctx context.Context, opts *pb.RemoveNodeOptions) (*pb.Empty, error) {
   205  	task := v.newTask(ctx, "RemoveNode", false)
   206  	defer task.done()
   207  	if err := v.cluster.RemoveNode(task.context, opts.Nodename); err != nil {
   208  		return nil, grpcstatus.Error(RemoveNode, err.Error())
   209  	}
   210  	return &pb.Empty{}, nil
   211  }
   212  
   213  // ListPodNodes returns a list of nodes for the pod
   214  func (v *Vibranium) ListPodNodes(opts *pb.ListNodesOptions, stream pb.CoreRPC_ListPodNodesServer) error {
   215  	task := v.newTask(stream.Context(), "ListPodNodes", false)
   216  	defer task.done()
   217  
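        	// Prefer the client-supplied timeout; fall back to the configured
        	// connection timeout when it is absent or non-positive.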
   218  	timeout := time.Duration(opts.TimeoutInSecond) * time.Second
   219  	if opts.TimeoutInSecond <= 0 {
   220  		timeout = v.config.ConnectionTimeout
   221  	}
   222  	ctx, cancel := context.WithTimeout(task.context, timeout)
   223  	defer cancel()
   224  
   225  	ch, err := v.cluster.ListPodNodes(ctx, toCoreListNodesOptions(opts))
   226  	if err != nil {
   227  		return grpcstatus.Error(ListPodNodes, err.Error())
   228  	}
   229  
   230  	for msg := range ch {
   231  		if err := stream.Send(toRPCNode(msg)); err != nil {
   232  			v.logUnsentMessages(task.context, "PodNodesStream", err, msg)
   233  		}
   234  	}
   235  	return nil
   236  }
   237  
   238  // GetNode gets a node
   239  func (v *Vibranium) GetNode(ctx context.Context, opts *pb.GetNodeOptions) (*pb.Node, error) {
   240  	task := v.newTask(ctx, "GetNode", false)
   241  	defer task.done()
   242  	n, err := v.cluster.GetNode(task.context, opts.Nodename)
   243  	if err != nil {
   244  		return nil, grpcstatus.Error(GetNode, err.Error())
   245  	}
   246  
   247  	return toRPCNode(n), nil
   248  }
   249  
   250  // GetNodeEngineInfo gets a node's engine info
   251  func (v *Vibranium) GetNodeEngineInfo(ctx context.Context, opts *pb.GetNodeOptions) (*pb.Engine, error) {
   252  	task := v.newTask(ctx, "GetNodeEngine", false)
   253  	defer task.done()
   254  	e, err := v.cluster.GetNodeEngineInfo(task.context, opts.Nodename)
   255  	if err != nil {
   256  		return nil, grpcstatus.Error(GetNodeEngine, err.Error())
   257  	}
   258  
   259  	return toRPCEngine(e), nil
   260  }
   261  
   262  // SetNode sets node meta
   263  func (v *Vibranium) SetNode(ctx context.Context, opts *pb.SetNodeOptions) (*pb.Node, error) {
   264  	task := v.newTask(ctx, "SetNode", false)
   265  	defer task.done()
   266  	setNodeOpts, err := toCoreSetNodeOptions(opts)
   267  	if err != nil {
   268  		return nil, grpcstatus.Error(SetNode, err.Error())
   269  	}
   270  	n, err := v.cluster.SetNode(task.context, setNodeOpts)
   271  	if err != nil {
   272  		return nil, grpcstatus.Error(SetNode, err.Error())
   273  	}
   274  	return toRPCNode(n), nil
   275  }
   276  
   277  // GetNodeStatus gets the reported status of a node
   278  func (v *Vibranium) GetNodeStatus(ctx context.Context, opts *pb.GetNodeStatusOptions) (*pb.NodeStatusStreamMessage, error) {
   279  	task := v.newTask(ctx, "GetNodeStatus", false)
   280  	defer task.done()
   281  	status, err := v.cluster.GetNodeStatus(task.context, opts.Nodename)
   282  	if err != nil {
   283  		return nil, grpcstatus.Error(GetNodeStatus, err.Error())
   284  	}
   285  	return &pb.NodeStatusStreamMessage{
   286  		Nodename: status.Nodename,
   287  		Podname:  status.Podname,
   288  		Alive:    status.Alive,
   289  	}, nil
   290  }
   291  
   292  // SetNodeStatus sets the status of a node for reporting
   293  func (v *Vibranium) SetNodeStatus(ctx context.Context, opts *pb.SetNodeStatusOptions) (*pb.Empty, error) {
   294  	task := v.newTask(ctx, "SetNodeStatus", false)
   295  	defer task.done()
   296  	if err := v.cluster.SetNodeStatus(task.context, opts.Nodename, opts.Ttl); err != nil {
   297  		return nil, grpcstatus.Error(SetNodeStatus, err.Error())
   298  	}
   299  	return &pb.Empty{}, nil
   300  }
   301  
   302  // NodeStatusStream watches and streams node status
   303  func (v *Vibranium) NodeStatusStream(_ *pb.Empty, stream pb.CoreRPC_NodeStatusStreamServer) error {
   304  	task := v.newTask(stream.Context(), "NodeStatusStream", true)
   305  	defer task.done()
   306  
   307  	ch := v.cluster.NodeStatusStream(task.context)
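        	// Forward node status updates until the channel closes or the server stops.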
   308  	for {
   309  		select {
   310  		case m, ok := <-ch:
   311  			if !ok {
   312  				return nil
   313  			}
   314  			r := &pb.NodeStatusStreamMessage{
   315  				Nodename: m.Nodename,
   316  				Podname:  m.Podname,
   317  				Alive:    m.Alive,
   318  			}
   319  			if m.Error != nil {
   320  				r.Error = m.Error.Error()
   321  			}
   322  			if err := stream.Send(r); err != nil {
   323  				v.logUnsentMessages(task.context, "NodeStatusStream", err, m)
   324  			}
   325  		case <-v.stop:
   326  			return nil
   327  		}
   328  	}
   329  }
   330  
   331  // GetWorkloadsStatus gets workloads status
   332  func (v *Vibranium) GetWorkloadsStatus(ctx context.Context, opts *pb.WorkloadIDs) (*pb.WorkloadsStatus, error) {
   333  	task := v.newTask(ctx, "GetWorkloadsStatus", false)
   334  	defer task.done()
   335  
   336  	workloadsStatus, err := v.cluster.GetWorkloadsStatus(task.context, opts.IDs)
   337  	if err != nil {
   338  		return nil, grpcstatus.Error(GetWorkloadsStatus, err.Error())
   339  	}
   340  	return toRPCWorkloadsStatus(workloadsStatus), nil
   341  }
   342  
   343  // SetWorkloadsStatus sets workloads status
   344  func (v *Vibranium) SetWorkloadsStatus(ctx context.Context, opts *pb.SetWorkloadsStatusOptions) (*pb.WorkloadsStatus, error) {
   345  	task := v.newTask(ctx, "SetWorkloadsStatus", false)
   346  	defer task.done()
   347  
   348  	var err error
   349  	statusData := []*types.StatusMeta{}
   350  	ttls := map[string]int64{}
   351  	for _, status := range opts.Status {
   352  		r := &types.StatusMeta{
   353  			ID:        status.Id,
   354  			Running:   status.Running,
   355  			Healthy:   status.Healthy,
   356  			Networks:  status.Networks,
   357  			Extension: status.Extension,
   358  
   359  			Appname:    status.Appname,
   360  			Nodename:   status.Nodename,
   361  			Entrypoint: status.Entrypoint,
   362  		}
   363  		statusData = append(statusData, r)
   364  		ttls[status.Id] = status.Ttl
   365  	}
   366  
   367  	status, err := v.cluster.SetWorkloadsStatus(task.context, statusData, ttls)
   368  	if err != nil {
   369  		return nil, grpcstatus.Error(SetWorkloadsStatus, err.Error())
   370  	}
   371  	return toRPCWorkloadsStatus(status), nil
   372  }
   373  
   374  // WorkloadStatusStream watches and streams workload status
   375  func (v *Vibranium) WorkloadStatusStream(opts *pb.WorkloadStatusStreamOptions, stream pb.CoreRPC_WorkloadStatusStreamServer) error {
   376  	task := v.newTask(stream.Context(), "WorkloadStatusStream", true)
   377  	defer task.done()
   378  	logger := log.WithFunc("vibranium.WorkloadStatusStream").WithField("app", opts.Appname)
   379  
   380  	logger.Info(task.context, "WorkloadStatusStream start")
   381  	defer logger.Info(task.context, "WorkloadStatusStream stop")
   382  
   383  	ch := v.cluster.WorkloadStatusStream(
   384  		task.context,
   385  		opts.Appname, opts.Entrypoint, opts.Nodename, opts.Labels,
   386  	)
   387  	for {
   388  		select {
   389  		case m, ok := <-ch:
   390  			if !ok {
   391  				return nil
   392  			}
   393  			r := &pb.WorkloadStatusStreamMessage{Id: m.ID, Delete: m.Delete}
   394  			if m.Error != nil {
   395  				r.Error = m.Error.Error()
   396  			} else if m.Workload != nil {
   397  				if workload, err := toRPCWorkload(task.context, m.Workload); err != nil {
   398  					r.Error = err.Error()
   399  				} else {
   400  					r.Workload = workload
   401  					r.Status = toRPCWorkloadStatus(m.Workload.StatusMeta)
   402  				}
   403  			}
   404  			if err := stream.Send(r); err != nil {
   405  				v.logUnsentMessages(task.context, "WorkloadStatusStream", err, m)
   406  			}
   407  		case <-v.stop:
   408  			return nil
   409  		}
   410  	}
   411  }
   412  
   413  // CalculateCapacity calculates capacity for each node
   414  func (v *Vibranium) CalculateCapacity(ctx context.Context, opts *pb.DeployOptions) (*pb.CapacityMessage, error) {
   415  	task := v.newTask(ctx, "CalculateCapacity", true)
   416  	defer task.done()
   417  	deployOpts, err := toCoreDeployOptions(opts)
   418  	if err != nil {
   419  		return nil, grpcstatus.Error(CalculateCapacity, err.Error())
   420  	}
   421  	m, err := v.cluster.CalculateCapacity(task.context, deployOpts)
   422  	if err != nil {
   423  		return nil, grpcstatus.Error(CalculateCapacity, err.Error())
   424  	}
   425  	return toRPCCapacityMessage(m), nil
   426  }
   427  
   428  // GetWorkload gets a workload
   429  // More information will be shown
   430  func (v *Vibranium) GetWorkload(ctx context.Context, ID *pb.WorkloadID) (*pb.Workload, error) {
   431  	task := v.newTask(ctx, "GetWorkload", false)
   432  	defer task.done()
   433  	workload, err := v.cluster.GetWorkload(task.context, ID.Id)
   434  	if err != nil {
   435  		return nil, grpcstatus.Error(GetWorkload, err.Error())
   436  	}
   437  
   438  	return toRPCWorkload(task.context, workload)
   439  }
   440  
   441  // GetWorkloads gets multiple workloads
   442  // Like GetWorkload, detailed information is returned
   443  func (v *Vibranium) GetWorkloads(ctx context.Context, cids *pb.WorkloadIDs) (*pb.Workloads, error) {
   444  	task := v.newTask(ctx, "GetWorkloads", false)
   445  	defer task.done()
   446  	workloads, err := v.cluster.GetWorkloads(task.context, cids.GetIDs())
   447  	if err != nil {
   448  		return nil, grpcstatus.Error(GetWorkloads, err.Error())
   449  	}
   450  
   451  	return toRPCWorkloads(task.context, workloads, nil), nil
   452  }
   453  
   454  // ListWorkloads by appname with optional entrypoint and nodename
   455  func (v *Vibranium) ListWorkloads(opts *pb.ListWorkloadsOptions, stream pb.CoreRPC_ListWorkloadsServer) error {
   456  	task := v.newTask(stream.Context(), "ListWorkloads", true)
   457  	defer task.done()
   458  	lsopts := &types.ListWorkloadsOptions{
   459  		Appname:    opts.Appname,
   460  		Entrypoint: opts.Entrypoint,
   461  		Nodename:   opts.Nodename,
   462  		Limit:      opts.Limit,
   463  		Labels:     opts.Labels,
   464  	}
   465  	workloads, err := v.cluster.ListWorkloads(task.context, lsopts)
   466  	if err != nil {
   467  		return grpcstatus.Error(ListWorkloads, err.Error())
   468  	}
   469  
   470  	for _, c := range toRPCWorkloads(task.context, workloads, opts.Labels).Workloads {
   471  		if err = stream.Send(c); err != nil {
   472  			v.logUnsentMessages(task.context, "ListWorkloads", err, c)
   473  			return grpcstatus.Error(ListWorkloads, err.Error())
   474  		}
   475  	}
   476  	return nil
   477  }
   478  
   479  // ListNodeWorkloads lists workloads on a node
   480  func (v *Vibranium) ListNodeWorkloads(ctx context.Context, opts *pb.GetNodeOptions) (*pb.Workloads, error) {
   481  	task := v.newTask(ctx, "ListNodeWorkloads", false)
   482  	defer task.done()
   483  	workloads, err := v.cluster.ListNodeWorkloads(task.context, opts.Nodename, opts.Labels)
   484  	if err != nil {
   485  		return nil, grpcstatus.Error(ListNodeWorkloads, err.Error())
   486  	}
   487  	return toRPCWorkloads(task.context, workloads, nil), nil
   488  }
   489  
   490  // Copy copies files from multiple workloads
   491  func (v *Vibranium) Copy(opts *pb.CopyOptions, stream pb.CoreRPC_CopyServer) error {
   492  	task := v.newTask(stream.Context(), "Copy", true)
   493  	defer task.done()
   494  	logger := log.WithFunc("vibranium.Copy")
   495  
   496  	copyOpts := toCoreCopyOptions(opts)
   497  	ch, err := v.cluster.Copy(task.context, copyOpts)
   498  	if err != nil {
   499  		return grpcstatus.Error(Copy, err.Error())
   500  	}
   501  	// 4 KiB read buffer for streaming the tar-packed file contents
   502  	p := make([]byte, 4096)
   503  	for m := range ch {
   504  		msg := &pb.CopyMessage{
   505  			Id:   m.ID,
   506  			Path: m.Path,
   507  		}
   508  		if m.Error != nil {
   509  			msg.Error = m.Error.Error()
   510  			if err := stream.Send(msg); err != nil {
   511  				v.logUnsentMessages(task.context, "Copy", err, m)
   512  			}
   513  			continue
   514  		}
   515  
   516  		r, w := io.Pipe()
   517  		utils.SentryGo(func(m *types.CopyMessage) func() {
   518  			return func() {
   519  				var err error
   520  				defer func() {
   521  					w.CloseWithError(err) //nolint
   522  				}()
   523  
   524  				tw := tar.NewWriter(w)
   525  				defer tw.Close()
   526  				header := &tar.Header{
   527  					Name: filepath.Base(m.Filename),
   528  					Uid:  m.UID,
   529  					Gid:  m.GID,
   530  					Mode: m.Mode,
   531  					Size: int64(len(m.Content)),
   532  				}
   533  				if err = tw.WriteHeader(header); err != nil {
   534  					logger.Error(task.context, err, "Error during writing tarball header")
   535  					return
   536  				}
   537  				if _, err = tw.Write(m.Content); err != nil {
   538  					logger.Error(task.context, err, "Error during writing tarball content")
   539  					return
   540  				}
   541  			}
   542  		}(m))
   543  
   544  		for {
   545  			n, err := r.Read(p)
   546  			if err != nil {
   547  				if err != io.EOF {
   548  					logger.Error(task.context, err, "Error during buffer resp")
   549  					msg.Error = err.Error()
   550  					if err = stream.Send(msg); err != nil {
   551  						v.logUnsentMessages(task.context, "Copy", err, m)
   552  					}
   553  				}
   554  				break
   555  			}
   556  			if n > 0 {
   557  				msg.Data = p[:n]
   558  				if err = stream.Send(msg); err != nil {
   559  					v.logUnsentMessages(task.context, "Copy", err, m)
   560  				}
   561  			}
   562  		}
   563  	}
   564  	return nil
   565  }
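
        // A hedged sketch of consuming the Copy stream on the client side: each
        // message's Data field carries a slice of a tar archive for the file named
        // in Path, so for a single target file the chunks can be concatenated and
        // read back with archive/tar (client and opts below are illustrative only):
        //
        //	stream, _ := client.Copy(ctx, opts) // opts is a *pb.CopyOptions
        //	var buf bytes.Buffer
        //	for {
        //		msg, err := stream.Recv()
        //		if err != nil {
        //			break // io.EOF once the server finishes
        //		}
        //		buf.Write(msg.Data)
        //	}
        //	tr := tar.NewReader(&buf)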
   566  
   567  // Send sends files to some containers
   568  func (v *Vibranium) Send(opts *pb.SendOptions, stream pb.CoreRPC_SendServer) error {
   569  	task := v.newTask(stream.Context(), "Send", true)
   570  	defer task.done()
   571  
   572  	sendOpts, err := toCoreSendOptions(opts)
   573  	if err != nil {
   574  		return grpcstatus.Error(Send, err.Error())
   575  	}
   576  
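        	// For each file, feed its chunks to the cluster through a dedicated
        	// channel and relay the per-workload results back to the client.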
   577  	for _, file := range sendOpts.Files {
   578  		dc := make(chan *types.SendLargeFileOptions)
   579  		ch := v.cluster.SendLargeFile(task.context, dc)
   580  		utils.SentryGo(func() {
   581  			defer close(dc)
   582  			data := toSendLargeFileChunks(file, sendOpts.IDs)
   583  			for _, chunk := range data {
   584  				dc <- chunk
   585  			}
   586  		})
   587  
   588  		for m := range ch {
   589  			msg := &pb.SendMessage{
   590  				Id:   m.ID,
   591  				Path: m.Path,
   592  			}
   593  			if m.Error != nil {
   594  				msg.Error = m.Error.Error()
   595  			}
   596  			if err := stream.Send(msg); err != nil {
   597  				v.logUnsentMessages(task.context, "Send", err, m)
   598  			}
   599  		}
   600  	}
   601  	return nil
   602  }
   603  
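        // SendLargeFile receives file chunks from the client stream, forwards them
        // to the cluster, and streams the per-workload results back.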
   604  func (v *Vibranium) SendLargeFile(stream pb.CoreRPC_SendLargeFileServer) error {
   605  	task := v.newTask(stream.Context(), "SendLargeFile", true)
   606  	defer task.done()
   607  
   608  	inputChan := make(chan *types.SendLargeFileOptions)
   609  	resp := v.cluster.SendLargeFile(task.context, inputChan)
   610  	utils.SentryGo(func() {
   611  		defer close(inputChan)
   612  		for {
   613  			req, err := stream.Recv()
   614  			if err == io.EOF {
   615  				break
   616  			}
   617  			if err != nil {
    618  				log.Errorf(task.context, err, "[SendLargeFile] receive from rpc failed")
   619  				return
   620  			}
   621  			data, err := toSendLargeFileOptions(req)
   622  			if err != nil {
    623  				log.Errorf(task.context, err, "[SendLargeFile] transform data failed")
   624  				return
   625  			}
   626  			inputChan <- data
   627  		}
   628  	})
   629  
   630  	for m := range resp {
   631  		msg := &pb.SendMessage{
   632  			Id:   m.ID,
   633  			Path: m.Path,
   634  		}
   635  		if m.Error != nil {
   636  			msg.Error = m.Error.Error()
   637  		}
   638  		if err := stream.Send(msg); err != nil {
   639  			v.logUnsentMessages(task.context, "SendLargeFile", err, m)
   640  		}
   641  	}
   642  	return nil
   643  }
   644  
   645  // BuildImage builds an image and streams back the build messages
   646  func (v *Vibranium) BuildImage(opts *pb.BuildImageOptions, stream pb.CoreRPC_BuildImageServer) error {
   647  	task := v.newTask(stream.Context(), "BuildImage", true)
   648  	defer task.done()
   649  
   650  	buildOpts, err := toCoreBuildOptions(opts)
   651  	if err != nil {
   652  		return grpcstatus.Error(BuildImage, err.Error())
   653  	}
   654  	ch, err := v.cluster.BuildImage(task.context, buildOpts)
   655  	if err != nil {
   656  		return grpcstatus.Error(BuildImage, err.Error())
   657  	}
   658  
   659  	for m := range ch {
   660  		if err = stream.Send(toRPCBuildImageMessage(m)); err != nil {
   661  			v.logUnsentMessages(task.context, "BuildImage", err, m)
   662  		}
   663  	}
   664  	return nil
   665  }
   666  
   667  // CacheImage caches an image
   668  func (v *Vibranium) CacheImage(opts *pb.CacheImageOptions, stream pb.CoreRPC_CacheImageServer) error {
   669  	task := v.newTask(stream.Context(), "CacheImage", true)
   670  	defer task.done()
   671  
   672  	ch, err := v.cluster.CacheImage(task.context, toCoreCacheImageOptions(opts))
   673  	if err != nil {
   674  		return grpcstatus.Error(CacheImage, err.Error())
   675  	}
   676  
   677  	for m := range ch {
   678  		if err = stream.Send(toRPCCacheImageMessage(m)); err != nil {
   679  			v.logUnsentMessages(task.context, "CacheImage", err, m)
   680  		}
   681  	}
   682  	return nil
   683  }
   684  
   685  // RemoveImage removes an image
   686  func (v *Vibranium) RemoveImage(opts *pb.RemoveImageOptions, stream pb.CoreRPC_RemoveImageServer) error {
   687  	task := v.newTask(stream.Context(), "RemoveImage", true)
   688  	defer task.done()
   689  
   690  	ch, err := v.cluster.RemoveImage(task.context, toCoreRemoveImageOptions(opts))
   691  	if err != nil {
   692  		return grpcstatus.Error(RemoveImage, err.Error())
   693  	}
   694  
   695  	for m := range ch {
   696  		if err = stream.Send(toRPCRemoveImageMessage(m)); err != nil {
   697  			v.logUnsentMessages(task.context, "RemoveImage", err, m)
   698  		}
   699  	}
   700  	return nil
   701  }
   702  
   703  // ListImage lists images
   704  func (v *Vibranium) ListImage(opts *pb.ListImageOptions, stream pb.CoreRPC_ListImageServer) error {
   705  	task := v.newTask(stream.Context(), "ListImage", true)
   706  	defer task.done()
   707  
   708  	ch, err := v.cluster.ListImage(task.context, toCoreListImageOptions(opts))
   709  	if err != nil {
   710  		return grpcstatus.Error(ListImage, err.Error())
   711  	}
   712  
   713  	for msg := range ch {
   714  		if err = stream.Send(toRPCListImageMessage(msg)); err != nil {
   715  			v.logUnsentMessages(task.context, "ListImage", err, msg)
   716  		}
   717  	}
   718  
   719  	return nil
   720  }
   721  
   722  // CreateWorkload creates workloads
   723  func (v *Vibranium) CreateWorkload(opts *pb.DeployOptions, stream pb.CoreRPC_CreateWorkloadServer) error {
   724  	task := v.newTask(stream.Context(), "CreateWorkload", true)
   725  	defer task.done()
   726  
   727  	deployOpts, err := toCoreDeployOptions(opts)
   728  	if err != nil {
   729  		return grpcstatus.Error(CreateWorkload, err.Error())
   730  	}
   731  
   732  	ch, err := v.cluster.CreateWorkload(task.context, deployOpts)
   733  	if err != nil {
   734  		return grpcstatus.Error(CreateWorkload, err.Error())
   735  	}
   736  	for m := range ch {
   737  		log.WithFunc("vibranium.CreateWorkload").Debugf(task.context, "create workload message: %+v", m)
   738  		if err = stream.Send(toRPCCreateWorkloadMessage(m)); err != nil {
   739  			v.logUnsentMessages(task.context, "CreateWorkload", err, m)
   740  		}
   741  	}
   742  	return nil
   743  }
   744  
   745  // ReplaceWorkload replaces workloads
   746  func (v *Vibranium) ReplaceWorkload(opts *pb.ReplaceOptions, stream pb.CoreRPC_ReplaceWorkloadServer) error {
   747  	task := v.newTask(stream.Context(), "ReplaceWorkload", true)
   748  	defer task.done()
   749  
   750  	replaceOpts, err := toCoreReplaceOptions(opts)
   751  	if err != nil {
   752  		return grpcstatus.Error(ReplaceWorkload, err.Error())
   753  	}
   754  
   755  	ch, err := v.cluster.ReplaceWorkload(task.context, replaceOpts)
   756  	if err != nil {
   757  		return grpcstatus.Error(ReplaceWorkload, err.Error())
   758  	}
   759  
   760  	for m := range ch {
   761  		if err = stream.Send(toRPCReplaceWorkloadMessage(m)); err != nil {
   762  			v.logUnsentMessages(task.context, "ReplaceWorkload", err, m)
   763  		}
   764  	}
   765  	return nil
   766  }
   767  
   768  // RemoveWorkload removes workloads
   769  func (v *Vibranium) RemoveWorkload(opts *pb.RemoveWorkloadOptions, stream pb.CoreRPC_RemoveWorkloadServer) error {
   770  	task := v.newTask(stream.Context(), "RemoveWorkload", true)
   771  	defer task.done()
   772  
   773  	IDs := opts.GetIDs()
   774  	force := opts.GetForce()
   775  
   776  	if len(IDs) == 0 {
   777  		return types.ErrNoWorkloadIDs
   778  	}
   779  	ch, err := v.cluster.RemoveWorkload(task.context, IDs, force)
   780  	if err != nil {
   781  		return grpcstatus.Error(RemoveWorkload, err.Error())
   782  	}
   783  
   784  	for m := range ch {
   785  		if err = stream.Send(toRPCRemoveWorkloadMessage(m)); err != nil {
   786  			v.logUnsentMessages(task.context, "RemoveWorkload", err, m)
   787  		}
   788  	}
   789  
   790  	return nil
   791  }
   792  
   793  // DissociateWorkload dissociates workloads
   794  func (v *Vibranium) DissociateWorkload(opts *pb.DissociateWorkloadOptions, stream pb.CoreRPC_DissociateWorkloadServer) error {
   795  	task := v.newTask(stream.Context(), "DissociateWorkload", true)
   796  	defer task.done()
   797  
   798  	IDs := opts.GetIDs()
   799  	if len(IDs) == 0 {
   800  		return types.ErrNoWorkloadIDs
   801  	}
   802  
   803  	ch, err := v.cluster.DissociateWorkload(task.context, IDs)
   804  	if err != nil {
   805  		return grpcstatus.Error(DissociateWorkload, err.Error())
   806  	}
   807  
   808  	for m := range ch {
   809  		if err = stream.Send(toRPCDissociateWorkloadMessage(m)); err != nil {
   810  			v.logUnsentMessages(task.context, "DissociateWorkload", err, m)
   811  		}
   812  	}
   813  
   814  	return nil
   815  }
   816  
   817  // ControlWorkload controls workloads
   818  func (v *Vibranium) ControlWorkload(opts *pb.ControlWorkloadOptions, stream pb.CoreRPC_ControlWorkloadServer) error {
   819  	task := v.newTask(stream.Context(), "ControlWorkload", true)
   820  	defer task.done()
   821  
   822  	IDs := opts.GetIDs()
   823  	t := opts.GetType()
   824  	force := opts.GetForce()
   825  
   826  	if len(IDs) == 0 {
   827  		return types.ErrNoWorkloadIDs
   828  	}
   829  
   830  	ch, err := v.cluster.ControlWorkload(task.context, IDs, t, force)
   831  	if err != nil {
   832  		return grpcstatus.Error(ControlWorkload, err.Error())
   833  	}
   834  
   835  	for m := range ch {
   836  		if err = stream.Send(toRPCControlWorkloadMessage(m)); err != nil {
   837  			v.logUnsentMessages(task.context, "ControlWorkload", err, m)
   838  		}
   839  	}
   840  
   841  	return nil
   842  }
   843  
   844  // ExecuteWorkload runs a command in a running workload
   845  func (v *Vibranium) ExecuteWorkload(stream pb.CoreRPC_ExecuteWorkloadServer) error {
   846  	task := v.newTask(stream.Context(), "ExecuteWorkload", true)
   847  	defer task.done()
   848  
   849  	opts, err := stream.Recv()
   850  	if err != nil {
   851  		return grpcstatus.Error(ExecuteWorkload, err.Error())
   852  	}
   853  	var executeWorkloadOpts *types.ExecuteWorkloadOptions
   854  	if executeWorkloadOpts, err = toCoreExecuteWorkloadOptions(opts); err != nil {
   855  		return grpcstatus.Error(ExecuteWorkload, err.Error())
   856  	}
   857  
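        	// When stdin is open, keep relaying the client's input to the workload
        	// until the stream ends or errors.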
   858  	inCh := make(chan []byte)
   859  	utils.SentryGo(func() {
   860  		defer close(inCh)
   861  		if opts.OpenStdin {
   862  			for {
   863  				execWorkloadOpt, err := stream.Recv()
   864  				if execWorkloadOpt == nil || err != nil {
   865  					log.WithFunc("vibranium.ExecuteWorkload").Error(task.context, err, "Recv command error")
   866  					return
   867  				}
   868  				inCh <- execWorkloadOpt.ReplCmd
   869  			}
   870  		}
   871  	})
   872  
   873  	for m := range v.cluster.ExecuteWorkload(task.context, executeWorkloadOpts, inCh) {
   874  		if err = stream.Send(toRPCAttachWorkloadMessage(m)); err != nil {
   875  			v.logUnsentMessages(task.context, "ExecuteWorkload", err, m)
   876  		}
   877  	}
   878  	return nil
   879  }
   880  
   881  // ReallocResource reallocates resources for a workload
   882  func (v *Vibranium) ReallocResource(ctx context.Context, opts *pb.ReallocOptions) (msg *pb.ReallocResourceMessage, err error) {
   883  	task := v.newTask(ctx, "ReallocResource", true)
   884  	defer task.done()
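        	// The response always carries an error string; it stays empty on success.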
   885  	defer func() {
   886  		errString := ""
   887  		if err != nil {
   888  			errString = err.Error()
   889  		}
   890  		msg = &pb.ReallocResourceMessage{Error: errString}
   891  	}()
   892  
   893  	if opts.Id == "" {
   894  		return msg, grpcstatus.Errorf(ReallocResource, "%+v", types.ErrNoWorkloadIDs)
   895  	}
   896  
   897  	if err := v.cluster.ReallocResource(
   898  		task.context,
   899  		&types.ReallocOptions{
   900  			ID:        opts.Id,
   901  			Resources: toCoreResources(opts.Resources),
   902  		},
   903  	); err != nil {
   904  		return msg, grpcstatus.Error(ReallocResource, err.Error())
   905  	}
   906  
   907  	return msg, nil
   908  }
   909  
   910  // LogStream gets workload logs
   911  func (v *Vibranium) LogStream(opts *pb.LogStreamOptions, stream pb.CoreRPC_LogStreamServer) error {
   912  	task := v.newTask(stream.Context(), "LogStream", true)
   913  	defer task.done()
   914  
   915  	ID := opts.GetId()
   916  	logger := log.WithFunc("vibranium.LogStream").WithField("ID", ID)
   917  
   918  	logger.Info(task.context, "Get log start")
   919  	defer logger.Info(task.context, "Get log done")
   920  	ch, err := v.cluster.LogStream(task.context, &types.LogStreamOptions{
   921  		ID:     ID,
   922  		Tail:   opts.Tail,
   923  		Since:  opts.Since,
   924  		Until:  opts.Until,
   925  		Follow: opts.Follow,
   926  	})
   927  	if err != nil {
   928  		return grpcstatus.Error(LogStream, err.Error())
   929  	}
   930  
   931  	for {
   932  		select {
   933  		case m, ok := <-ch:
   934  			if !ok {
   935  				return nil
   936  			}
   937  			if err = stream.Send(toRPCLogStreamMessage(m)); err != nil {
   938  				v.logUnsentMessages(task.context, "LogStream", err, m)
   939  			}
   940  		case <-v.stop:
   941  			return nil
   942  		}
   943  	}
   944  }
   945  
   946  // RunAndWait is lambda: it creates workloads, attaches to them, and streams back their output
   947  func (v *Vibranium) RunAndWait(stream pb.CoreRPC_RunAndWaitServer) error {
   948  	task := v.newTask(stream.Context(), "RunAndWait", true)
   949  	RunAndWaitOptions, err := stream.Recv()
   950  	if err != nil {
   951  		task.done()
   952  		return grpcstatus.Error(RunAndWait, err.Error())
   953  	}
   954  	logger := log.WithFunc("vibranium.RunAndWait")
   955  
   956  	if RunAndWaitOptions.DeployOptions == nil {
   957  		task.done()
   958  		return grpcstatus.Error(RunAndWait, types.ErrNoDeployOpts.Error())
   959  	}
   960  
   961  	opts := RunAndWaitOptions.DeployOptions
   962  	deployOpts, err := toCoreDeployOptions(opts)
   963  	if err != nil {
   964  		task.done()
   965  		return grpcstatus.Error(RunAndWait, err.Error())
   966  	}
   967  
   968  	ctx, cancel := context.WithCancel(task.context)
   969  	if RunAndWaitOptions.Async {
   970  		timeout := v.config.GlobalTimeout
   971  		if RunAndWaitOptions.AsyncTimeout != 0 {
   972  			timeout = time.Second * time.Duration(RunAndWaitOptions.AsyncTimeout)
   973  		}
   974  		ctx, cancel = context.WithTimeout(context.TODO(), timeout) // not use the stream ctx
   975  		// force mark stdin to false
   976  		opts.OpenStdin = false
   977  	}
   978  
   979  	inCh := make(chan []byte)
   980  	utils.SentryGo(func() {
   981  		defer close(inCh)
   982  		if !opts.OpenStdin {
   983  			return
   984  		}
   985  		for {
   986  			RunAndWaitOptions, err := stream.Recv()
   987  			if RunAndWaitOptions == nil || err != nil {
   988  				logger.Error(ctx, err, "Recv command")
   989  				break
   990  			}
   991  			inCh <- RunAndWaitOptions.Cmd
   992  		}
   993  	})
   994  
   995  	IDs, ch, err := v.cluster.RunAndWait(ctx, deployOpts, inCh)
   996  	if err != nil {
   997  		task.done()
   998  		cancel()
   999  		return grpcstatus.Error(RunAndWait, err.Error())
  1000  	}
  1001  
  1002  	// send workload IDs to client first
  1003  	for _, ID := range IDs {
  1004  		if err = stream.Send(&pb.AttachWorkloadMessage{
  1005  			WorkloadId:    ID,
  1006  			Data:          []byte(""),
  1007  			StdStreamType: pb.StdStreamType_TYPEWORKLOADID,
  1008  		}); err != nil {
  1009  			v.logUnsentMessages(ctx, "RunAndWait: first message send failed", err, ID)
  1010  		}
  1011  	}
  1012  
  1013  	// then deal with the remaining messages
  1014  	runAndWait := func(f func(<-chan *types.AttachWorkloadMessage)) {
  1015  		defer task.done()
  1016  		defer cancel()
  1017  		f(ch)
  1018  	}
  1019  
  1020  	if !RunAndWaitOptions.Async {
  1021  		runAndWait(func(ch <-chan *types.AttachWorkloadMessage) {
  1022  			for m := range ch {
  1023  				if err = stream.Send(toRPCAttachWorkloadMessage(m)); err != nil {
  1024  					v.logUnsentMessages(ctx, "RunAndWait", err, m)
  1025  				}
  1026  			}
  1027  		})
  1028  		return nil
  1029  	}
  1030  
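        	// Async mode: detach from the client and drain the attach messages in the
        	// background, logging the output line by line.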
  1031  	utils.SentryGo(func() {
  1032  		runAndWait(func(ch <-chan *types.AttachWorkloadMessage) {
  1033  			r, w := io.Pipe()
  1034  			utils.SentryGo(func() {
  1035  				defer w.Close()
  1036  				for m := range ch {
  1037  					if _, err := w.Write(m.Data); err != nil {
  1038  						logger.Error(ctx, err, "iterate and forward AttachWorkloadMessage")
  1039  					}
  1040  				}
  1041  			})
  1042  			bufReader := bufio.NewReader(r)
  1043  			for {
  1044  				var (
  1045  					line, part []byte
  1046  					isPrefix   bool
  1047  					err        error
  1048  				)
  1049  				for {
  1050  					if part, isPrefix, err = bufReader.ReadLine(); err != nil {
  1051  						if err != io.EOF {
  1052  							logger.Error(ctx, err, "read error")
  1053  						}
  1054  						return
  1055  					}
  1056  					line = append(line, part...)
  1057  					if !isPrefix {
  1058  						break
  1059  					}
  1060  				}
  1061  				logger.Info(ctx, string(line))
  1062  			}
  1063  		})
  1064  	})
  1065  	return nil
  1066  }
  1067  
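        // RawEngine passes a raw request through to the engine on a node and returns
        // its response.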
  1068  func (v *Vibranium) RawEngine(ctx context.Context, opts *pb.RawEngineOptions) (*pb.RawEngineMessage, error) {
  1069  	task := v.newTask(ctx, "RawEngine", true)
  1070  	defer task.done()
  1071  
  1072  	rawEngineOpts, err := toCoreRawEngineOptions(opts)
  1073  	if err != nil {
  1074  		return nil, grpcstatus.Error(RawEngineStatus, err.Error())
  1075  	}
  1076  
  1077  	msg, err := v.cluster.RawEngine(task.context, rawEngineOpts)
  1078  
  1079  	if err != nil {
  1080  		return nil, grpcstatus.Error(RawEngineStatus, err.Error())
  1081  	}
  1082  	return toRPCRawEngineMessage(msg), nil
  1083  }
  1084  
  1085  func (v *Vibranium) logUnsentMessages(ctx context.Context, msgType string, err error, msg any) {
  1086  	log.WithFunc("vibranium.logUnsentMessages").Infof(ctx, "Unsent (%s) streamed message due to (%+v): (%+v)", msgType, err, msg)
  1087  }
  1088  
  1089  // New creates a new Vibranium instance
  1090  func New(cluster cluster.Cluster, config types.Config, stop chan struct{}) *Vibranium {
  1091  	return &Vibranium{cluster: cluster, config: config, counter: sync.WaitGroup{}, stop: stop}
  1092  }
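
        // A hedged sketch of wiring Vibranium into a gRPC server, assuming the
        // generated pb.RegisterCoreRPCServer and an existing cluster, config, and
        // stop channel:
        //
        //	grpcServer := grpc.NewServer()
        //	pb.RegisterCoreRPCServer(grpcServer, New(cluster, config, stop))
        //	_ = grpcServer.Serve(listener)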