sigs.k8s.io/prow@v0.0.0-20240503223140-c5e374dc7eb1/pkg/crier/reporters/gcs/reporter.go

/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package gcs

import (
	"context"
	"encoding/json"
	"fmt"
	"path"
	"time"

	"github.com/GoogleCloudPlatform/testgrid/metadata"
	"github.com/sirupsen/logrus"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	utilpointer "k8s.io/utils/pointer"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	prowv1 "sigs.k8s.io/prow/pkg/apis/prowjobs/v1"
	"sigs.k8s.io/prow/pkg/config"
	"sigs.k8s.io/prow/pkg/crier/reporters/gcs/util"
	"sigs.k8s.io/prow/pkg/io"
	"sigs.k8s.io/prow/pkg/io/providers"
	"sigs.k8s.io/prow/pkg/pod-utils/clone"
	"sigs.k8s.io/prow/pkg/pod-utils/downwardapi"
)

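// reporterName is the name under which crier records this reporter's
// report state for each ProwJob.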
const reporterName = "gcsreporter"

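// gcsReporter writes job status files (started.json, finished.json) and a
// prowjob.json snapshot to the job's blob-storage destination, as resolved
// from the Prow configuration.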
type gcsReporter struct {
	cfg    config.Getter
	dryRun bool
	opener io.Opener
}

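// Report uploads the job state files and a snapshot of the ProwJob itself.
// It is invoked by crier on every ProwJob update and bounds each attempt
// with a 20-second timeout.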
func (gr *gcsReporter) Report(ctx context.Context, log *logrus.Entry, pj *prowv1.ProwJob) ([]*prowv1.ProwJob, *reconcile.Result, error) {
	ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
	defer cancel()

	_, _, err := util.GetJobDestination(gr.cfg, pj)
	if err != nil {
		log.WithError(err).Info("Not uploading prowjob because we couldn't find a destination")
		return []*prowv1.ProwJob{pj}, nil, nil
	}
	stateErr := gr.reportJobState(ctx, log, pj)
	prowjobErr := gr.reportProwjob(ctx, log, pj)

	return []*prowv1.ProwJob{pj}, nil, utilerrors.NewAggregate([]error{stateErr, prowjobErr})
}

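// reportJobState uploads started.json and, once the job is complete,
// finished.json; errors from the two uploads are aggregated.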
func (gr *gcsReporter) reportJobState(ctx context.Context, log *logrus.Entry, pj *prowv1.ProwJob) error {
	startedErr := gr.reportStartedJob(ctx, log, pj)
	var finishedErr error
	if pj.Complete() {
		finishedErr = gr.reportFinishedJob(ctx, log, pj)
	}
	return utilerrors.NewAggregate([]error{startedErr, finishedErr})
}

// reportStartedJob uploads a started.json for the job. This will almost certainly
// happen before the pod itself gets to upload one, at which point the pod will
// upload its own. A started.json already uploaded by the pod is never overwritten;
// one previously uploaded by crier may be overwritten once there is something new
// to add (e.g. clone records).
func (gr *gcsReporter) reportStartedJob(ctx context.Context, log *logrus.Entry, pj *prowv1.ProwJob) error {
	bucketName, dir, err := util.GetJobDestination(gr.cfg, pj)
	if err != nil {
		return fmt.Errorf("failed to get job destination: %w", err)
	}

	if gr.dryRun {
		log.WithFields(logrus.Fields{"bucketName": bucketName, "dir": dir}).Debug("Would upload started.json")
		return nil
	}

	// Best-effort read of any existing started.json; it is only overwritten later
	// if it was uploaded by crier and there is something new to record (a clone record).
	var existingStarted metadata.Started
	var existing bool
	startedFilePath, err := providers.StoragePath(bucketName, path.Join(dir, prowv1.StartedStatusFile))
	if err != nil {
		// The started.json storage path is invalid, so nothing can be uploaded;
		// fail immediately.
		return fmt.Errorf("failed to resolve started.json path: %v", err)
	}

	content, err := io.ReadContent(ctx, log, gr.opener, startedFilePath)
	if err != nil {
		if !io.IsNotExist(err) {
			log.WithError(err).Warn("Failed to read started.json.")
		}
	} else {
		err = json.Unmarshal(content, &existingStarted)
		if err != nil {
			log.WithError(err).Warn("Failed to unmarshal started.json.")
		} else {
			existing = true
		}
	}

	if existing && (existingStarted.Metadata == nil || existingStarted.Metadata["uploader"] != "crier") {
		// started.json was uploaded by the job's pod (pod-utils), not by crier;
		// leave it alone.
		log.Debug("Uploaded by pod-utils, skipping")
		return nil
	}

	staticRevision := downwardapi.GetRevisionFromRefs(pj.Spec.Refs, pj.Spec.ExtraRefs)
	if pj.Spec.Refs == nil || (existingStarted.RepoCommit != "" && existingStarted.RepoCommit != staticRevision) {
		// RepoCommit can only be "", the BaseRef, or the final resolved SHA,
		// which shouldn't change for a given presubmit job. Avoid querying GCS
		// if this was already done.
		log.Debug("RepoCommit already resolved before, skipping")
		return nil
	}

	// Try to read clone records
	cloneRecord := make([]clone.Record, 0)
	cloneRecordFilePath, err := providers.StoragePath(bucketName, path.Join(dir, prowv1.CloneRecordFile))
	if err != nil {
		// This is a user config error
		log.WithError(err).Debug("Failed to resolve clone-records.json path.")
	} else {
		cloneRecordBytes, err := io.ReadContent(ctx, log, gr.opener, cloneRecordFilePath)
		if err != nil {
			if !io.IsNotExist(err) {
				log.WithError(err).Warn("Failed to read clone records.")
			}
		} else {
			if err := json.Unmarshal(cloneRecordBytes, &cloneRecord); err != nil {
				log.WithError(err).Warn("Failed to unmarshal clone records.")
			}
		}
	}
	s := downwardapi.PjToStarted(pj, cloneRecord)
	s.Metadata = metadata.Metadata{"uploader": "crier"}

	output, err := json.MarshalIndent(s, "", "\t")
	if err != nil {
		return fmt.Errorf("failed to marshal started metadata: %w", err)
	}

	// Overwrite only if the existing started.json was uploaded by crier (existing)
	// and there may be something new to add.
	// Use a dedicated variable for readability.
	overwrite := existing
	overwriteOpt := io.WriterOptions{PreconditionDoesNotExist: utilpointer.Bool(!overwrite)}
	return io.WriteContent(ctx, log, gr.opener, startedFilePath, output, overwriteOpt)
}

// reportFinishedJob uploads a finished.json for the job, iff one did not already exist.
func (gr *gcsReporter) reportFinishedJob(ctx context.Context, log *logrus.Entry, pj *prowv1.ProwJob) error {
	output, err := util.MarshalFinishedJSON(pj)
	if err != nil {
		return fmt.Errorf("failed to marshal finished metadata: %w", err)
	}

	bucketName, dir, err := util.GetJobDestination(gr.cfg, pj)
	if err != nil {
		return fmt.Errorf("failed to get job destination: %w", err)
	}

	if gr.dryRun {
		log.WithFields(logrus.Fields{"bucketName": bucketName, "dir": dir}).Debug("Would upload finished.json")
		return nil
	}
	// PreconditionDoesNotExist: true means the file is only created if it does
	// not already exist.
	overwriteOpt := io.WriterOptions{PreconditionDoesNotExist: utilpointer.Bool(true)}
	finishedFilePath, err := providers.StoragePath(bucketName, path.Join(dir, prowv1.FinishedStatusFile))
	if err != nil {
		return fmt.Errorf("failed to resolve finished.json path: %v", err)
	}
	return io.WriteContent(ctx, log, gr.opener, finishedFilePath, output, overwriteOpt)
}

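// reportProwjob uploads a JSON snapshot of the ProwJob object, overwriting
// any previous snapshot on every update.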
func (gr *gcsReporter) reportProwjob(ctx context.Context, log *logrus.Entry, pj *prowv1.ProwJob) error {
	// Unconditionally dump the ProwJob to GCS on every job update.
	output, err := util.MarshalProwJob(pj)
	if err != nil {
		return fmt.Errorf("failed to marshal ProwJob: %w", err)
	}

	bucketName, dir, err := util.GetJobDestination(gr.cfg, pj)
	if err != nil {
		return fmt.Errorf("failed to get job destination: %w", err)
	}

	if gr.dryRun {
		log.WithFields(logrus.Fields{"bucketName": bucketName, "dir": dir}).Debug("Would upload prowjob.json")
		return nil
	}
	overWriteOpts := io.WriterOptions{PreconditionDoesNotExist: utilpointer.Bool(false)}
	prowJobFilePath, err := providers.StoragePath(bucketName, path.Join(dir, prowv1.ProwJobFile))
	if err != nil {
		return fmt.Errorf("failed to resolve prowjob.json path: %v", err)
	}
	return io.WriteContent(ctx, log, gr.opener, prowJobFilePath, output, overWriteOpts)
}

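// GetName returns the name of this reporter as tracked by crier.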
func (gr *gcsReporter) GetName() string {
	return reporterName
}

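// ShouldReport reports whether crier should hand this ProwJob to this reporter yet.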
func (gr *gcsReporter) ShouldReport(_ context.Context, _ *logrus.Entry, pj *prowv1.ProwJob) bool {
	// We can only report jobs once they have a build ID. By denying responsibility
	// for it until it has one, crier will not mark us as having handled it until
	// it is possible for us to handle it, ensuring that we get a chance to see it.
	return pj.Status.BuildID != ""
}

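// New constructs a gcsReporter. cfg supplies the Prow configuration used to
// resolve each job's storage destination, opener performs the blob reads and
// writes, and dryRun makes the reporter log what it would upload instead of
// writing anything.
//
// A minimal usage sketch (the surrounding crier wiring is illustrative only;
// cfg and opener are assumed to be an already-constructed config.Getter and
// io.Opener):
//
//	reporter := gcs.New(cfg, opener, false /* dryRun */)
//	_ = reporter.GetName() // "gcsreporter"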
func New(cfg config.Getter, opener io.Opener, dryRun bool) *gcsReporter {
	return &gcsReporter{
		cfg:    cfg,
		dryRun: dryRun,
		opener: opener,
	}
}