github.com/pachyderm/pachyderm@v1.13.4/src/client/admin/v1_9/pps/pps.proto (about)

     1  syntax = "proto3";
     2  
     3  package pps_1_9;
     4  option go_package = "github.com/pachyderm/pachyderm/src/client/admin/v1_9/pps";
     5  
     6  import "google/protobuf/empty.proto";
     7  import "google/protobuf/timestamp.proto";
     8  import "google/protobuf/duration.proto";
     9  
    10  import "gogoproto/gogo.proto";
    11  
    12  import "client/admin/v1_9/pfs/pfs.proto";
    13  
// Secret references a Kubernetes secret whose contents are exposed to a
// pipeline's user code, either as a file mounted at 'mount_path' or as the
// environment variable 'env_var'.
message Secret {
  // Name must be the name of the secret in kubernetes.
  string name = 1;
  // Key of the secret to load into env_var, this field only has meaning if EnvVar != "".
  string key = 4;
  // Path at which the secret is mounted inside the worker container.
  string mount_path = 2;
  // If set, the secret's value is exposed as this environment variable.
  string env_var = 3;
}
    22  
// Transform describes the user code a pipeline or job runs: the container
// image, the command to execute, and the environment it runs in.
message Transform {
  // Container image in which 'cmd' is run.
  string image = 1;
  // Command run for each datum.
  repeated string cmd = 2;
  // Command run on error (NOTE(review): error-handler semantics inferred
  // from the name; enforced by the worker, not visible here).
  repeated string err_cmd = 13;
  // Environment variables set in the worker container.
  map<string, string> env = 3;
  // Kubernetes secrets exposed to the user code (see Secret).
  repeated Secret secrets = 4;
  // Names of Kubernetes image-pull secrets used when pulling 'image'.
  repeated string image_pull_secrets = 9;
  // Lines fed to cmd's standard input.
  repeated string stdin = 5;
  // Lines fed to err_cmd's standard input.
  repeated string err_stdin = 14;
  // Exit codes, in addition to 0, that are treated as success
  // (presumed from the name — confirm against the worker code).
  repeated int64 accept_return_code = 6;
  // If true, enable additional debug logging in the worker.
  bool debug = 7;
  // User that the worker process runs as.
  string user = 10;
  // Working directory in which 'cmd' is run.
  string working_dir = 11;
  // Path to a Dockerfile (NOTE(review): usage not visible in this file).
  string dockerfile = 12;
}
    38  
message TFJob {
  // tf_job is a serialized Kubeflow TFJob spec. Pachyderm sends this directly
  // to a kubernetes cluster on which kubeflow has been installed, instead of
  // creating a pipeline ReplicationController as it normally would.
  string tf_job = 1 [(gogoproto.customname) = "TFJob"];
}

// Egress tells a pipeline to push its output to an external URL in addition
// to its output repo (NOTE(review): egress semantics live in the server).
message Egress {
  string URL = 1;
}

// Job uniquely identifies a Pachyderm job.
message Job {
  string id = 1 [(gogoproto.customname) = "ID"];
}
    53  
// JobState is the lifecycle state of a job. FAILURE, SUCCESS and KILLED are
// terminal (see InspectJobRequest.block_state, which blocks until one of the
// terminal success/failure states is reached).
enum JobState {
  JOB_STARTING = 0;
  JOB_RUNNING = 1;
  JOB_FAILURE = 2;
  JOB_SUCCESS = 3;
  JOB_KILLED = 4;
  JOB_MERGING = 5;
}
    62  
// Service configures a pipeline that runs as a long-lived network service
// rather than processing discrete datums.
message Service {
  // Port the user code listens on inside the worker pod.
  int32 internal_port = 1;
  // Port at which the service is exposed outside the cluster.
  int32 external_port = 2;
  string ip = 3 [(gogoproto.customname) = "IP"];
  // Kubernetes service type (NOTE(review): inferred from the name — e.g.
  // "NodePort"; confirm against the server code).
  string type = 4;
  // Annotations applied to the Kubernetes service object.
  map<string, string> annotations = 5;
}

// Spout configures a pipeline that produces output without consuming a
// conventional PFS input.
message Spout {
  // If true, each write overwrites the spout's previous output.
  bool overwrite = 1;
  // Optional service exposed alongside the spout.
  Service service = 2;
  // Marker used to record the spout's progress (NOTE(review): exact
  // semantics live in the worker — confirm).
  string marker = 3;
}
    76  
// PFSInput is a pipeline input that reads files from a PFS repo.
message PFSInput {
  // Name by which this input is exposed to the user code.
  string name = 1;
  string repo = 2;
  string branch = 3;
  // Specific commit to read (NOTE(review): presumably set on jobs while
  // pipelines use 'branch' — confirm).
  string commit = 4;
  // Glob pattern that partitions the repo's files into datums.
  string glob = 5;
  // Capture spec used to match datums across the inputs of a join.
  string join_on = 8;
  // If true, files are downloaded lazily rather than up front.
  bool lazy = 6;
  // EmptyFiles, if true, will cause files from this PFS input to be
  // presented as empty files. This is useful in shuffle pipelines where you
  // want to read the names of files and reorganize them using symlinks.
  bool empty_files = 7;
}

// CronInput is a pipeline input that ticks on a schedule rather than on new
// commits.
message CronInput {
  string name = 1;
  string repo = 2;
  string commit = 3;
  // Cron schedule specification.
  string spec = 4;
  // Overwrite, if true, will expose a single datum that gets overwritten each
  // tick. If false, it will create a new datum for each tick.
  bool overwrite = 6;
  // Time from which the schedule is computed.
  google.protobuf.Timestamp start = 5;
}

// GitInput is a pipeline input backed by a git repository (see also
// PipelineInfo.githook_url).
message GitInput {
  string name = 1;
  string url = 2 [(gogoproto.customname) = "URL"];
  string branch = 3;
  string commit = 4;
}
   108  
// Input is the recursive input specification of a pipeline. Exactly one
// field is expected to be set per Input node; this predates oneof usage
// here, so exclusivity is enforced by the server, not the schema.
message Input {
  PFSInput pfs = 6;
  repeated Input join = 7;
  repeated Input cross = 2;
  repeated Input union = 3;
  CronInput cron = 4;
  GitInput git = 5;
}

// JobInput describes one input commit of a job (NOTE(review): appears to be
// the older, pre-Input job-level input form — confirm where it is still
// produced).
message JobInput {
  string name = 4;
  pfs_1_9.Commit commit = 1;
  string glob = 2;
  bool lazy = 3;
}
   124  
// ParallelismSpec determines how many workers a pipeline or job runs with.
// At most one of 'constant' and 'coefficient' should be nonzero.
message ParallelismSpec {
  reserved 1;

  // Starts the pipeline/job with 'constant' workers, unless 'constant' is
  // zero. If 'constant' is zero (which is the zero value of ParallelismSpec),
  // then Pachyderm will choose the number of workers that is started,
  // (currently it chooses the number of workers in the cluster)
  uint64 constant = 2;

  // Starts the pipeline/job with number of workers equal to 'coefficient' * N,
  // where N is the number of nodes in the kubernetes cluster.
  //
  // For example, if each Kubernetes node has four CPUs, you might set
  // 'coefficient' to four, so that there are four Pachyderm workers per
  // Kubernetes node, and each Pachyderm worker gets one CPU. If you want to
  // reserve half the nodes in your cluster for other tasks, you might set
  // 'coefficient' to 0.5.
  double coefficient = 3;
}

// HashtreeSpec sets the number of shards into which pps splits a pipeline's
// output commits (sharded commits are implemented in Pachyderm 1.8+ only)
message HashtreeSpec {
  uint64 constant = 1;
}
   150  
// InputFile identifies one input file of a datum by path and content hash.
message InputFile {
  // This file's absolute path within its pfs repo.
  string path = 4;

  // This file's hash
  bytes hash = 5;
}

// Datum identifies a single datum within a job.
message Datum {
  // ID is the hash computed from all the files
  string id = 1 [(gogoproto.customname) = "ID"];
  Job job = 2;
}

// DatumState is the processing state of a datum. Note: unlike JobState, the
// zero value (FAILED) carries business meaning; it is kept as-is for wire
// compatibility with existing v1.9 data.
enum DatumState {
    FAILED = 0;
    SUCCESS = 1;
    SKIPPED = 2;
    STARTING = 3;
    RECOVERED = 4;
}
   172  
// DatumInfo is the result of inspecting a single datum (see InspectDatum /
// ListDatum).
message DatumInfo {
  Datum datum = 1;
  DatumState state = 2;
  // Timing and byte counts for processing this datum.
  ProcessStats stats = 3;
  // File in PFS associated with this datum's state (NOTE(review): likely a
  // location in the stats repo — confirm).
  pfs_1_9.File pfs_state = 4;
  // The input files that make up this datum.
  repeated pfs_1_9.FileInfo data = 5;
}

// Aggregate holds summary statistics over a set of numeric samples.
message Aggregate {
  int64 count = 1;
  double mean = 2;
  double stddev = 3;
  double fifth_percentile = 4;
  double ninety_fifth_percentile = 5;
}

// ProcessStats records timing and data volume for processing one or more
// datums.
message ProcessStats {
  google.protobuf.Duration download_time = 1;
  google.protobuf.Duration process_time = 2;
  google.protobuf.Duration upload_time = 3;
  uint64 download_bytes = 4;
  uint64 upload_bytes = 5;
}

// AggregateProcessStats is ProcessStats aggregated over many datums — one
// Aggregate per ProcessStats field.
message AggregateProcessStats {
  Aggregate download_time = 1;
  Aggregate process_time = 2;
  Aggregate upload_time = 3;
  Aggregate download_bytes = 4;
  Aggregate upload_bytes = 5;
}
   204  
// WorkerStatus reports what a single worker is currently doing.
message WorkerStatus {
  string worker_id = 1 [(gogoproto.customname) = "WorkerID"];
  string job_id = 2 [(gogoproto.customname) = "JobID"];
  // Input files of the datum currently being processed.
  repeated pps_1_9.InputFile data = 3;
  // Started is the time processing on the current datum began.
  google.protobuf.Timestamp started = 4;
  ProcessStats stats = 5;
  // Number of datums waiting in this worker's queue.
  int64 queue_size = 6;
}

// ResourceSpec describes the amount of resources that pipeline pods should
// request from kubernetes, for scheduling.
message ResourceSpec {
  reserved 3;

  // The number of CPUs each worker needs (partial values are allowed, and
  // encouraged)
  float cpu = 1;

  // The amount of memory each worker needs (in bytes, with allowed
  // SI suffixes: M, K, G, Mi, Ki, Gi, etc.)
  string memory = 2;

  // The spec for GPU resources.
  GPUSpec gpu = 5;

  // The amount of ephemeral storage each worker needs (in bytes, with allowed
  // SI suffixes: M, K, G, Mi, Ki, Gi, etc.)
  string disk = 4;
}

message GPUSpec {
  // The type of GPU (nvidia.com/gpu or amd.com/gpu for example).
  string type = 1;
  // The number of GPUs to request.
  int64 number = 2;
}
   242  
// EtcdJobInfo is the portion of the JobInfo that gets stored in etcd during
// job execution. It contains fields which change over the lifetime of the job
// but aren't used in the execution of the job.
message EtcdJobInfo {
  Job job = 1;
  Pipeline pipeline = 2;
  pfs_1_9.Commit output_commit = 3;
  // Job restart count (e.g. due to datum failure)
  uint64 restart = 4;

  // Counts of how many times we processed or skipped a datum
  int64 data_processed = 5;
  int64 data_skipped = 6;
  int64 data_total = 7;
  int64 data_failed = 8;
  int64 data_recovered = 15;

  // Download/process/upload time and download/upload bytes
  ProcessStats stats = 9;

  pfs_1_9.Commit stats_commit = 10;
  JobState state = 11;
  // Human-readable explanation of 'state'.
  string reason = 12;
  google.protobuf.Timestamp started = 13;
  google.protobuf.Timestamp finished = 14;
}
   269  
// JobInfo is the complete description of a job, as returned by InspectJob,
// ListJob(Stream) and FlushJob. Fields annotated "requires
// ListJobRequest.Full" are only populated when the Full flag is set on the
// request (see ListJobRequest.full).
message JobInfo {
  reserved 4, 5, 28, 34;
  Job job = 1;
  Transform transform = 2;                     // requires ListJobRequest.Full
  Pipeline pipeline = 3;
  uint64 pipeline_version = 13;                // requires ListJobRequest.Full
  pfs_1_9.Commit spec_commit = 47;
  ParallelismSpec parallelism_spec = 12;       // requires ListJobRequest.Full
  Egress egress = 15;                          // requires ListJobRequest.Full
  Job parent_job = 6;
  google.protobuf.Timestamp started = 7;
  google.protobuf.Timestamp finished = 8;
  pfs_1_9.Commit output_commit = 9;
  JobState state = 10;
  string reason = 35;  // reason explains why the job is in the current state
  Service service = 14;                        // requires ListJobRequest.Full
  Spout spout = 45;                            // requires ListJobRequest.Full
  pfs_1_9.Repo output_repo = 18;
  string output_branch = 17;                   // requires ListJobRequest.Full
  // Job restart count (mirrors EtcdJobInfo.restart).
  uint64 restart = 20;
  // Datum counts (mirror the EtcdJobInfo fields of the same names).
  int64 data_processed = 22;
  int64 data_skipped = 30;
  int64 data_failed = 40;
  int64 data_recovered = 46;
  int64 data_total = 23;
  ProcessStats stats = 31;
  repeated WorkerStatus worker_status = 24;
  ResourceSpec resource_requests = 25;         // requires ListJobRequest.Full
  ResourceSpec resource_limits = 36;           // requires ListJobRequest.Full
  Input input = 26;                            // requires ListJobRequest.Full
  pfs_1_9.BranchInfo new_branch = 27;
  pfs_1_9.Commit stats_commit = 29;
  bool enable_stats = 32;                      // requires ListJobRequest.Full
  string salt = 33;                            // requires ListJobRequest.Full
  ChunkSpec chunk_spec = 37;                   // requires ListJobRequest.Full
  google.protobuf.Duration datum_timeout = 38; // requires ListJobRequest.Full
  google.protobuf.Duration job_timeout = 39;   // requires ListJobRequest.Full
  int64 datum_tries = 41;                      // requires ListJobRequest.Full
  SchedulingSpec scheduling_spec = 42;         // requires ListJobRequest.Full
  string pod_spec = 43;                        // requires ListJobRequest.Full
  string pod_patch = 44;                       // requires ListJobRequest.Full
}
   312  
// WorkerState is the state of a single worker pod.
enum WorkerState {
  POD_RUNNING = 0;
  POD_SUCCESS = 1;
  POD_FAILED = 2;
}

// Worker is a single worker pod and its state.
message Worker {
  string name = 1;
  WorkerState state = 2;
}

// JobInfos is a list of JobInfo (the response type of ListJob).
message JobInfos {
  repeated JobInfo job_info = 1;
}

// Pipeline identifies a pipeline by name.
message Pipeline {
  string name = 1;
}
   331  
// PipelineState is the lifecycle state of a pipeline, tracked in
// EtcdPipelineInfo.state and surfaced in PipelineInfo.state.
enum PipelineState {
  // There is an EtcdPipelineInfo + spec commit, but no RC
  // This happens when a pipeline has been created but not yet picked up by a
  // PPS server.
  PIPELINE_STARTING = 0;
  // A pipeline has a spec commit and a service + RC
  // This is the normal state of a pipeline.
  PIPELINE_RUNNING = 1;
  // Equivalent to STARTING (there is an EtcdPipelineInfo + commit, but no RC)
  // After some error caused runPipeline to exit, but before the pipeline is
  // re-run. This is when the exponential backoff is in effect.
  PIPELINE_RESTARTING = 2;
  // We have retried too many times and we have given up on this pipeline (or
  // the pipeline image doesn't exist)
  PIPELINE_FAILURE = 3;
  // The pipeline has been explicitly paused by the user (the pipeline spec's
  // Stopped field should be true if the pipeline is in this state)
  PIPELINE_PAUSED = 4;
  // The pipeline is fully functional, but there are no commits to process.
  PIPELINE_STANDBY = 5;
}
   353  
// EtcdPipelineInfo is proto that Pachd stores in etcd for each pipeline. It
// tracks the state of the pipeline, and points to its metadata in PFS (and,
// by pointing to a PFS commit, de facto tracks the pipeline's version)
message EtcdPipelineInfo {
  PipelineState state = 1;
  // Human-readable explanation of 'state'.
  string reason = 4;
  // Commit in the pipeline's spec repo holding its current spec.
  pfs_1_9.Commit spec_commit = 2;
  // Count of this pipeline's jobs per state (keys are JobState values).
  map<int32, int32> job_counts = 3;
  // Auth token used by the pipeline (see API.ActivateAuth).
  string auth_token = 5;
  JobState last_job_state = 6;
  // Current worker parallelism (NOTE(review): how this relates to
  // ParallelismSpec is resolved server-side — confirm).
  uint64 parallelism = 7;
}
   366  
// PipelineInfo is the complete description of a pipeline, returned by
// InspectPipeline/ListPipeline. Most fields come from the pipeline spec
// stored in PFS; a few (state, stopped, job_counts, last_job_state) are
// filled in from etcd — see the inline comments.
message PipelineInfo {
  reserved 3, 4, 22, 26, 27, 18;
  string id = 17 [(gogoproto.customname) = "ID"];
  Pipeline pipeline = 1;
  uint64 version = 11;
  Transform transform = 2;
  // tf_job encodes a Kubeflow TFJob spec. Pachyderm uses this to create TFJobs
  // when running in a kubernetes cluster on which kubeflow has been installed.
  // Exactly one of 'tf_job' and 'transform' should be set
  TFJob tf_job = 46 [(gogoproto.customname) = "TFJob"];
  ParallelismSpec parallelism_spec = 10;
  HashtreeSpec hashtree_spec = 42;
  Egress egress = 15;
  google.protobuf.Timestamp created_at = 6;

  // state indicates the current state of the pipeline. This is not stored in
  // PFS along with the rest of this data structure--PPS.InspectPipeline fills
  // it in
  PipelineState state = 7;
  // same for stopped field
  bool stopped = 38;
  string recent_error = 8;

  // job_counts and last_job_state indicates the number of jobs within this
  // pipeline in a given state and the state of the most recently created job,
  // respectively. This is not stored in PFS along with the rest of this data
  // structure--PPS.InspectPipeline fills it in from the EtcdPipelineInfo.
  map<int32, int32> job_counts = 9;
  JobState last_job_state = 43;

  string output_branch = 16;
  ResourceSpec resource_requests = 19;
  ResourceSpec resource_limits = 31;
  Input input = 20;
  string description = 21;
  string cache_size = 23;
  bool enable_stats = 24;
  string salt = 25;

  // reason includes any error messages associated with a failed pipeline
  string reason = 28;
  int64 max_queue_size = 29;
  Service service = 30;
  Spout spout = 45;
  ChunkSpec chunk_spec = 32;
  google.protobuf.Duration datum_timeout = 33;
  google.protobuf.Duration job_timeout = 34;
  string githook_url = 35 [(gogoproto.customname) = "GithookURL"];
  pfs_1_9.Commit spec_commit = 36;
  bool standby = 37;
  int64 datum_tries = 39;
  SchedulingSpec scheduling_spec = 40;
  string pod_spec = 41; // deprecated, see CreatePipelineRequest.pod_spec
  string pod_patch = 44;
}
   422  
// PipelineInfos is a list of PipelineInfo (the response type of ListPipeline).
message PipelineInfos {
  repeated PipelineInfo pipeline_info = 1;
}

// CreateJobRequest creates a job directly. In normal operation jobs are
// created by pipelines; most of these fields exist so that extracted jobs
// can be restored with their original state (see the comment below).
message CreateJobRequest {
  reserved 3, 4, 1, 10, 7, 9, 8, 12, 11, 13, 14, 21, 15, 16, 17, 18, 19, 20, 22, 23, 24;
  Pipeline pipeline = 2;
  pfs_1_9.Commit output_commit = 25;

  // Fields below should only be set when restoring an extracted job.
  uint64 restart = 26;

  // Counts of how many times we processed or skipped a datum
  int64 data_processed = 27;
  int64 data_skipped = 28;
  int64 data_total = 29;
  int64 data_failed = 30;
  int64 data_recovered = 31;

  // Download/process/upload time and download/upload bytes
  ProcessStats stats = 32;

  pfs_1_9.Commit stats_commit = 33;
  JobState state = 34;
  string reason = 35;
  google.protobuf.Timestamp started = 36;
  google.protobuf.Timestamp finished = 37;
}
   451  
// InspectJobRequest looks up a single job by ID or by its output commit.
message InspectJobRequest {
  // Callers should set either Job or OutputCommit, not both.
  Job job = 1;
  pfs_1_9.Commit output_commit = 3;
  bool block_state = 2; // block until state is either JOB_FAILURE or JOB_SUCCESS
}

// ListJobRequest filters which jobs ListJob/ListJobStream return.
message ListJobRequest {
  Pipeline pipeline = 1;                // nil means all pipelines
  repeated pfs_1_9.Commit input_commit = 2; // nil means all inputs
  pfs_1_9.Commit output_commit = 3;         // nil means all outputs

  // History indicates return jobs from historical versions of pipelines
  // semantics are:
  // 0: Return jobs from the current version of the pipeline or pipelines.
  // 1: Return the above and jobs from the next most recent version
  // 2: etc.
  //-1: Return jobs from all historical versions.
  int64 history = 4;

  // Full indicates whether the result should include all pipeline details in
  // each JobInfo, or limited information including name and status, but
  // excluding information in the pipeline spec. Leaving this "false" can make
  // the call significantly faster in clusters with a large number of pipelines
  // and jobs.
  // Note that if 'input_commit' is set, this field is coerced to "true"
  bool full = 5;
}
   480  
// FlushJobRequest selects jobs downstream of 'commits' (optionally limited
// to 'to_pipelines') whose completion FlushJob streams as JobInfos.
message FlushJobRequest {
  repeated pfs_1_9.Commit commits = 1;
  repeated Pipeline to_pipelines = 2;
}

// DeleteJobRequest deletes the given job.
message DeleteJobRequest {
  Job job = 1;
}

// StopJobRequest stops (kills) the given job.
message StopJobRequest {
  Job job = 1;
}

// UpdateJobStateRequest moves a job into the given state (internal; see
// API.UpdateJobState).
message UpdateJobStateRequest {
  Job job = 1;
  JobState state = 2;
  // Human-readable explanation for the state change.
  string reason = 3;
}
   499  
// GetLogsRequest selects which worker logs API.GetLogs streams back as
// LogMessages.
message GetLogsRequest {
  reserved 4;
  // The pipeline from which we want to get logs (required if the job in 'job'
  // was created as part of a pipeline. To get logs from a non-orphan job
  // without the pipeline that created it, you need to use ElasticSearch).
  Pipeline pipeline = 2;

  // The job from which we want to get logs.
  Job job = 1;

  // Names of input files from which we want processing logs. This may contain
  // multiple files, to query pipelines that contain multiple inputs. Each
  // filter may be an absolute path of a file within a pps repo, or it may be
  // a hash for that file (to search for files at specific versions)
  repeated string data_filters = 3;

  // If set, only return logs for this datum.
  Datum datum = 6;

  // If true get logs from the master process
  bool master = 5;

  // Continue to follow new logs as they become available.
  bool follow = 7;

  // If nonzero, the number of lines from the end of the logs to return.  Note:
  // tail applies per container, so you will get tail * <number of pods> total
  // lines back.
  int64 tail = 8;
}
   529  
// LogMessage is a log line from a PPS worker, annotated with metadata
// indicating when and why the line was logged.
message LogMessage {
  // The job and pipeline for which a PFS file is being processed (if the job
  // is an orphan job, pipeline name and ID will be unset)
  string pipeline_name = 1;
  string job_id = 3 [(gogoproto.customname) = "JobID"];
  string worker_id = 7 [(gogoproto.customname) = "WorkerID"];
  string datum_id = 9 [(gogoproto.customname) = "DatumID"];
  // True if this line came from the master process (cf. GetLogsRequest.master).
  bool master = 10;

  // The PFS files being processed (one per pipeline/job input)
  repeated InputFile data = 4;

  // User is true if log message comes from the user's code.
  bool user = 8;

  // The message logged, and the time at which it was logged
  google.protobuf.Timestamp ts = 5;
  string message = 6;
}
   551  
// RestartDatumRequest re-runs the datums of 'job' matched by 'data_filters'
// (filters have the same form as GetLogsRequest.data_filters).
message RestartDatumRequest {
  Job job = 1;
  repeated string data_filters = 2;
}

// InspectDatumRequest returns the DatumInfo for a single datum.
message InspectDatumRequest {
  Datum datum = 1;
}

// ListDatumRequest pages through the datums of a job.
message ListDatumRequest {
  Job job = 1;
  // Number of datums per page; zero presumably means no paging — confirm.
  int64 page_size = 2;
  // Zero-based page index (NOTE(review): base not visible here — confirm).
  int64 page = 3;
}

// ListDatumResponse is one page of datums (see ListDatumRequest).
message ListDatumResponse {
  repeated DatumInfo datum_infos = 1;
  int64 total_pages = 2;
  int64 page = 3;
}

// ListDatumStreamResponse is identical to ListDatumResponse, except that only
// one DatumInfo is present (as these responses are streamed)
message ListDatumStreamResponse {
  DatumInfo datum_info = 1;
  // total_pages is only set in the first response (and set to 0 in all other
  // responses)
  int64 total_pages = 2;
  // page is only set in the first response (and set to 0 in all other
  // responses)
  int64 page = 3;
}
   584  
// ChunkSpec specifies how a pipeline should chunk its datums.
message ChunkSpec {
  // number, if nonzero, specifies that each chunk should contain `number`
  // datums. Chunks may contain fewer if the total number of datums don't
  // divide evenly.
  int64 number = 1;
  // size_bytes, if nonzero, specifies a target size for each chunk of datums.
  // Chunks may be larger or smaller than size_bytes, but will usually be
  // pretty close to size_bytes in size.
  int64 size_bytes = 2;
}

// SchedulingSpec maps onto Kubernetes pod scheduling options for the
// pipeline's worker pods (nodeSelector and priorityClassName).
message SchedulingSpec {
  map<string, string> node_selector = 1;
  string priority_class_name = 2;
}
   601  
// CreatePipelineRequest creates (or, if 'update' is set, updates) a
// pipeline. Field semantics mirror the identically-named PipelineInfo
// fields.
message CreatePipelineRequest {
  reserved 3, 4, 11, 15, 19;
  Pipeline pipeline = 1;
  // tf_job encodes a Kubeflow TFJob spec. Pachyderm uses this to create TFJobs
  // when running in a kubernetes cluster on which kubeflow has been installed.
  // Exactly one of 'tf_job' and 'transform' should be set
  TFJob tf_job = 35 [(gogoproto.customname) = "TFJob"];
  Transform transform = 2;
  ParallelismSpec parallelism_spec = 7;
  HashtreeSpec hashtree_spec = 31;
  Egress egress = 9;
  // If true, update the existing pipeline of this name instead of creating
  // a new one.
  bool update = 5;
  string output_branch = 10;
  ResourceSpec resource_requests = 12;
  ResourceSpec resource_limits = 22;
  Input input = 13;
  string description = 14;
  string cache_size = 16;
  bool enable_stats = 17;
  // Reprocess forces the pipeline to reprocess all datums.
  // It only has meaning if Update is true
  bool reprocess = 18;
  int64 max_queue_size = 20;
  Service service = 21;
  Spout spout = 33;
  ChunkSpec chunk_spec = 23;
  google.protobuf.Duration datum_timeout = 24;
  google.protobuf.Duration job_timeout = 25;
  string salt = 26;
  bool standby = 27;
  int64 datum_tries = 28;
  SchedulingSpec scheduling_spec = 29;
  string pod_spec = 30; // deprecated, use pod_patch below
  string pod_patch = 32; // a json patch will be applied to the pipeline's pod_spec before it's created;
  // Spec commit to restore (NOTE(review): presumably only set when restoring
  // an extracted pipeline, given this file's admin context — confirm).
  pfs_1_9.Commit spec_commit = 34;
}
   638  
// InspectPipelineRequest returns the PipelineInfo for a single pipeline.
message InspectPipelineRequest {
  Pipeline pipeline = 1;
}

// ListPipelineRequest filters which pipelines ListPipeline returns.
message ListPipelineRequest {
  // If non-nil, only return info about a single pipeline, this is redundant
  // with InspectPipeline unless history is non-zero.
  Pipeline pipeline = 1;
  // History indicates how many historical versions you want returned. Its
  // semantics are:
  // 0: Return the current version of the pipeline or pipelines.
  // 1: Return the above and the next most recent version
  // 2: etc.
  //-1: Return all historical versions.
  int64 history = 2;
}

// DeletePipelineRequest deletes a pipeline (or, with 'all', every pipeline).
message DeletePipelineRequest {
  reserved 2, 3;
  Pipeline pipeline = 1;
  // If true, delete all pipelines; 'pipeline' is then presumably ignored —
  // confirm against the server.
  bool all = 4;
  // If true, force deletion (NOTE(review): exact force semantics live in the
  // server — confirm).
  bool force = 5;
}
   662  
// StartPipelineRequest resumes a stopped pipeline.
message StartPipelineRequest {
  Pipeline pipeline = 1;
}

// StopPipelineRequest pauses a pipeline.
message StopPipelineRequest {
  Pipeline pipeline = 1;
}

// RunPipelineRequest triggers a run of a pipeline outside its normal
// input-driven scheduling, with the given provenance.
message RunPipelineRequest {
  reserved 3;
  Pipeline pipeline = 1;
  // Input commits the triggered job should run with.
  repeated pfs_1_9.CommitProvenance provenance = 2;
  string job_id = 4 [(gogoproto.customname) = "JobID"];
}

// RunCronRequest triggers a tick of a cron-input pipeline immediately
// (NOTE(review): inferred from the RPC name — confirm).
message RunCronRequest {
  Pipeline pipeline = 1;
}


message GarbageCollectRequest {
    // Memory is how much memory to use in computing which objects are alive. A
    // larger number will result in more precise garbage collection (at the
    // cost of more memory usage).
    int64 memory_bytes = 1;
}
// GarbageCollectResponse is empty; it is defined (rather than using
// google.protobuf.Empty) so that fields can be added later without breaking
// the RPC signature.
message GarbageCollectResponse {}

message ActivateAuthRequest {}
message ActivateAuthResponse {}
   693  
// API is the PPS (Pachyderm Pipeline System) service, v1.9 schema, retained
// here for the admin extract/restore compatibility path.
service API {
  rpc CreateJob(CreateJobRequest) returns (Job) {}
  rpc InspectJob(InspectJobRequest) returns (JobInfo) {}
  // ListJob returns information about current and past Pachyderm jobs. This is
  // deprecated in favor of ListJobStream
  rpc ListJob(ListJobRequest) returns (JobInfos) {}
  // ListJobStream returns information about current and past Pachyderm jobs.
  rpc ListJobStream(ListJobRequest) returns (stream JobInfo) {}
  // FlushJob streams JobInfos for jobs selected by the request as they finish.
  rpc FlushJob(FlushJobRequest) returns (stream JobInfo) {}
  rpc DeleteJob(DeleteJobRequest) returns (google.protobuf.Empty) {}
  rpc StopJob(StopJobRequest) returns (google.protobuf.Empty) {}
  rpc InspectDatum(InspectDatumRequest) returns (DatumInfo) {}
  // ListDatum returns information about each datum fed to a Pachyderm job. This
  // is deprecated in favor of ListDatumStream
  rpc ListDatum(ListDatumRequest) returns (ListDatumResponse) {}
  // ListDatumStream returns information about each datum fed to a Pachyderm job
  rpc ListDatumStream(ListDatumRequest) returns (stream ListDatumStreamResponse) {}
  rpc RestartDatum(RestartDatumRequest) returns (google.protobuf.Empty) {}

  rpc CreatePipeline(CreatePipelineRequest) returns (google.protobuf.Empty) {}
  rpc InspectPipeline(InspectPipelineRequest) returns (PipelineInfo) {}
  rpc ListPipeline(ListPipelineRequest) returns (PipelineInfos) {}
  rpc DeletePipeline(DeletePipelineRequest) returns (google.protobuf.Empty) {}
  rpc StartPipeline(StartPipelineRequest) returns (google.protobuf.Empty) {}
  rpc StopPipeline(StopPipelineRequest) returns (google.protobuf.Empty) {}
  rpc RunPipeline(RunPipelineRequest) returns (google.protobuf.Empty) {}
  rpc RunCron(RunCronRequest) returns (google.protobuf.Empty) {}

  // DeleteAll deletes everything
  rpc DeleteAll(google.protobuf.Empty) returns (google.protobuf.Empty) {}
  rpc GetLogs(GetLogsRequest) returns (stream LogMessage) {}

  // Garbage collection
  rpc GarbageCollect(GarbageCollectRequest) returns (GarbageCollectResponse) {}

  // An internal call that causes PPS to put itself into an auth-enabled state
  // (all pipeline have tokens, correct permissions, etcd)
  rpc ActivateAuth(ActivateAuthRequest) returns (ActivateAuthResponse) {}

  // An internal call used to move a job from one state to another
  rpc UpdateJobState(UpdateJobStateRequest) returns(google.protobuf.Empty) {}
}