github.com/jaylevin/jenkins-library@v1.230.4/cmd/protecodeExecuteScan.go (about)

     1  package cmd
     2  
     3  import (
     4  	"encoding/json"
     5  	"fmt"
     6  	"io"
     7  	"os"
     8  	"path"
     9  	"path/filepath"
    10  	"regexp"
    11  	"strconv"
    12  	"strings"
    13  	"time"
    14  
    15  	"github.com/pkg/errors"
    16  
    17  	"github.com/SAP/jenkins-library/pkg/command"
    18  	piperDocker "github.com/SAP/jenkins-library/pkg/docker"
    19  	"github.com/SAP/jenkins-library/pkg/log"
    20  	"github.com/SAP/jenkins-library/pkg/piperutils"
    21  	"github.com/SAP/jenkins-library/pkg/protecode"
    22  	"github.com/SAP/jenkins-library/pkg/telemetry"
    23  	"github.com/SAP/jenkins-library/pkg/toolrecord"
    24  	"github.com/SAP/jenkins-library/pkg/versioning"
    25  )
    26  
const (
	// webReportPath is the format string for a product's Protecode WebUI URL (args: server URL, product ID).
	webReportPath  = "%s/#/product/%v/"
	// scanResultFile is the JSON file the raw poll result is written to.
	scanResultFile = "protecodescan_vulns.json"
	// stepResultFile is the JSON step report written by protecode.WriteReport.
	stepResultFile = "protecodeExecuteScan.json"
)
    32  
// protecodeUtils bundles the file-system operations and the Docker image
// download capability this step needs; having it as an interface allows
// mocking in tests.
type protecodeUtils interface {
	piperutils.FileUtils
	piperDocker.Download
}
    37  
// protecodeUtilsBundle is the production implementation of protecodeUtils,
// composed of the real file-system helper and the real Docker client.
type protecodeUtilsBundle struct {
	*piperutils.Files
	*piperDocker.Client
}
    42  
    43  func protecodeExecuteScan(config protecodeExecuteScanOptions, telemetryData *telemetry.CustomData, influx *protecodeExecuteScanInflux) {
    44  	c := command.Command{}
    45  	// reroute command output to loging framework
    46  	c.Stdout(log.Writer())
    47  	c.Stderr(log.Writer())
    48  
    49  	//create client for sending api request
    50  	log.Entry().Debug("Create protecode client")
    51  	client := createProtecodeClient(&config)
    52  
    53  	dClientOptions := piperDocker.ClientOptions{ImageName: config.ScanImage, RegistryURL: config.DockerRegistryURL, LocalPath: config.FilePath, ImageFormat: "legacy"}
    54  	dClient := &piperDocker.Client{}
    55  	dClient.SetOptions(dClientOptions)
    56  
    57  	utils := protecodeUtilsBundle{
    58  		Client: dClient,
    59  		Files:  &piperutils.Files{},
    60  	}
    61  
    62  	influx.step_data.fields.protecode = false
    63  	if err := runProtecodeScan(&config, influx, client, utils, "./cache"); err != nil {
    64  		log.Entry().WithError(err).Fatal("Failed to execute protecode scan.")
    65  	}
    66  	influx.step_data.fields.protecode = true
    67  }
    68  
// runProtecodeScan prepares the artifact to scan and then executes the scan.
// The artifact is determined in priority order: no fetchURL and no filePath
// configured -> download the Docker image into cachePath; filePath configured
// -> split it into directory (kept in config.FilePath) and file name; only
// fetchURL configured -> derive the file name from the URL. Afterwards the
// cache directory and the upload file are cleaned up.
func runProtecodeScan(config *protecodeExecuteScanOptions, influx *protecodeExecuteScanInflux, client protecode.Protecode, utils protecodeUtils, cachePath string) error {
	// make sure cache exists
	if err := utils.MkdirAll(cachePath, 0755); err != nil {
		return err
	}

	// Point DOCKER_CONFIG at the provided credentials, if any.
	correctDockerConfigEnvVar(config)

	var fileName, filePath string
	var err error

	if len(config.FetchURL) == 0 && len(config.FilePath) == 0 {
		// Nothing local configured: pull the image to scan into the cache.
		log.Entry().Debugf("Get docker image: %v, %v, %v", config.ScanImage, config.DockerRegistryURL, config.FilePath)
		fileName, filePath, err = getDockerImage(utils, config, cachePath)
		if err != nil {
			return errors.Wrap(err, "failed to get Docker image")
		}
		if len(config.FilePath) <= 0 {
			(*config).FilePath = filePath
			log.Entry().Debugf("Filepath for upload image: %v", config.FilePath)
		}
	} else if len(config.FilePath) > 0 {
		// Split the configured path into directory (kept in config.FilePath)
		// and the bare file name used for the upload.
		parts := strings.Split(config.FilePath, "/")
		pathFragment := strings.Join(parts[:len(parts)-1], "/")
		if len(pathFragment) > 0 {
			(*config).FilePath = pathFragment
		} else {
			(*config).FilePath = "./"
		}
		fileName = parts[len(parts)-1]

	} else if len(config.FetchURL) > 0 {
		// Get filename from a fetch URL
		fileName = filepath.Base(config.FetchURL)
		log.Entry().Debugf("[DEBUG] ===> Filepath from fetch URL: %v", fileName)
	}

	log.Entry().Debug("Execute protecode scan")
	if err := executeProtecodeScan(influx, client, config, fileName, utils); err != nil {
		return err
	}

	// Cleanup: remove the upload file at function return and the cache now.
	// NOTE(review): the defer is only registered after a successful scan, so
	// the file is not removed on the error path above — presumably intentional
	// to allow inspection; confirm before changing.
	defer utils.FileRemove(config.FilePath)

	if err := utils.RemoveAll(cachePath); err != nil {
		log.Entry().Warnf("Error during cleanup folder %v", err)
	}

	return nil
}
   119  
   120  // TODO: extract to version utils
   121  func handleArtifactVersion(artifactVersion string) string {
   122  	matches, _ := regexp.MatchString("([\\d\\.]){1,}-[\\d]{14}([\\Wa-z\\d]{41})?", artifactVersion)
   123  	if matches {
   124  		split := strings.SplitN(artifactVersion, ".", 2)
   125  		log.Entry().WithField("old", artifactVersion).WithField("new", split[0]).Debug("Trimming version to major version digit.")
   126  		return split[0]
   127  	}
   128  	return artifactVersion
   129  }
   130  
   131  func getDockerImage(utils protecodeUtils, config *protecodeExecuteScanOptions, cachePath string) (string, string, error) {
   132  	m := regexp.MustCompile("[\\s@:/]")
   133  
   134  	tarFileName := fmt.Sprintf("%s.tar", m.ReplaceAllString(config.ScanImage, "-"))
   135  	tarFilePath, err := filepath.Abs(filepath.Join(cachePath, tarFileName))
   136  
   137  	if err != nil {
   138  		return "", "", err
   139  	}
   140  
   141  	if _, err = utils.DownloadImage(config.ScanImage, tarFilePath); err != nil {
   142  		return "", "", errors.Wrap(err, "failed to download docker image")
   143  	}
   144  
   145  	return filepath.Base(tarFilePath), filepath.Dir(tarFilePath), nil
   146  }
   147  
// executeProtecodeScan runs the full scan workflow against the protecode
// backend: it resolves or creates the product (honoring replaceProductID and
// VerifyOnly), polls for the scan result, writes report files and links,
// persists influx data, and finally enforces the severe-vulnerability policy.
// Returns an error when the product id is invalid, the scan failed on the
// server, the report cannot be read, or the compliance check fails.
func executeProtecodeScan(influx *protecodeExecuteScanInflux, client protecode.Protecode, config *protecodeExecuteScanOptions, fileName string, utils protecodeUtils) error {
	reportPath := "./"

	log.Entry().Debugf("[DEBUG] ===> Load existing product Group:%v, VerifyOnly:%v, Filename:%v, replaceProductId:%v", config.Group, config.VerifyOnly, fileName, config.ReplaceProductID)

	productID := -1

	// If replaceProductId is not provided then switch to automatic existing product detection
	if config.ReplaceProductID > 0 {

		log.Entry().Infof("replaceProductID has been provided (%v) and checking ...", config.ReplaceProductID)
		// log.Entry().Debugf("[DEBUG] ===> ReplaceProductID has been provided and required to verify it: %v", config.ReplaceProductID)

		// Validate provided product id, if not valid id then throw an error
		if client.VerifyProductID(config.ReplaceProductID) {
			log.Entry().Infof("replaceProductID has been checked and it's valid")
			// log.Entry().Debugf("[DEBUG] ===> ReplaceProductID exists")
			productID = config.ReplaceProductID
		} else {
			log.Entry().Debugf("[DEBUG] ===> ReplaceProductID doesn't exist")
			return fmt.Errorf("ERROR -> the product id is not valid '%d'", config.ReplaceProductID)
		}

	} else {
		// Get existing product id by filename
		log.Entry().Infof("replaceProductID is not provided and automatic search starts from group: %v ... ", config.Group)
		// log.Entry().Debugf("[DEBUG] ===> ReplaceProductID hasn't provided and automatic search starts... ")
		productID = client.LoadExistingProduct(config.Group, fileName)

		if productID > 0 {
			log.Entry().Infof("Automatic search completed and found following product id: %v", productID)
			// log.Entry().Debugf("[DEBUG] ===> Returned productID: %v", productID)
		} else {
			log.Entry().Infof("Automatic search completed but not found any similar product scan, now starts new scan creation")
		}
	}

	// check if no existing is found
	// Upload the binary / declare the fetch URL; may create a new product.
	productID = uploadScanOrDeclareFetch(utils, *config, productID, client, fileName)

	log.Entry().Debugf("[DEBUG] ===> After 'uploadScanOrDeclareFetch' returned productID: %v", productID)

	if productID <= 0 {
		return fmt.Errorf("the product id is not valid '%d'", productID)
	}

	//pollForResult
	log.Entry().Debugf("Poll for scan result %v", productID)
	result := client.PollForResult(productID, config.TimeoutMinutes)
	// write results to file
	// NOTE(review): Marshal and FileWrite errors are intentionally ignored here;
	// the raw result file is best-effort.
	jsonData, _ := json.Marshal(result)
	utils.FileWrite(filepath.Join(reportPath, scanResultFile), jsonData, 0644)

	//check if result is ok else notify
	if protecode.HasFailed(result) {
		log.SetErrorCategory(log.ErrorService)
		return fmt.Errorf("protecode scan failed: %v/products/%v", config.ServerURL, productID)
	}

	//loadReport
	log.Entry().Debugf("Load report %v for %v", config.ReportFileName, productID)
	resp := client.LoadReport(config.ReportFileName, productID)

	buf, err := io.ReadAll(*resp)

	if err != nil {
		return fmt.Errorf("unable to process protecode report %v", err)
	}

	if err = utils.FileWrite(config.ReportFileName, buf, 0644); err != nil {
		log.Entry().Warningf("failed to write report: %s", err)
	}

	//clean scan from server
	log.Entry().Debugf("Delete scan %v for %v", config.CleanupMode, productID)
	client.DeleteScan(config.CleanupMode, productID)

	//count vulnerabilities
	log.Entry().Debug("Parse scan result")
	parsedResult, vulns := client.ParseResultForInflux(result.Result, config.ExcludeCVEs)

	log.Entry().Debug("Write report to filesystem")
	if err := protecode.WriteReport(
		protecode.ReportData{
			ServerURL:                   config.ServerURL,
			FailOnSevereVulnerabilities: config.FailOnSevereVulnerabilities,
			ExcludeCVEs:                 config.ExcludeCVEs,
			Target:                      config.ReportFileName,
			Vulnerabilities:             vulns,
			ProductID:                   fmt.Sprintf("%v", productID),
		}, reportPath, stepResultFile, parsedResult, utils); err != nil {
		log.Entry().Warningf("failed to write report: %v", err)
	}

	log.Entry().Debug("Write influx data")
	setInfluxData(influx, parsedResult)

	// write reports JSON
	reports := []piperutils.Path{
		{Target: config.ReportFileName, Mandatory: true},
		{Target: stepResultFile, Mandatory: true},
		{Target: scanResultFile, Mandatory: true},
	}
	// write links JSON
	webuiURL := fmt.Sprintf(webReportPath, config.ServerURL, productID)
	links := []piperutils.Path{
		{Name: "Protecode WebUI", Target: webuiURL},
		{Name: "Protecode Report", Target: path.Join("artifact", config.ReportFileName), Scope: "job"},
	}

	// write custom report
	scanReport := protecode.CreateCustomReport(fileName, productID, parsedResult, vulns)
	paths, err := protecode.WriteCustomReports(scanReport, fileName, fmt.Sprint(productID), utils)
	if err != nil {
		// do not fail - consider failing later on
		log.Entry().Warning("failed to create custom HTML/MarkDown file ...", err)
	} else {
		reports = append(reports, paths...)
	}

	// create toolrecord file
	toolRecordFileName, err := createToolRecordProtecode("./", config, productID, webuiURL)
	if err != nil {
		// do not fail until the framework is well established
		log.Entry().Warning("TR_PROTECODE: Failed to create toolrecord file ...", err)
	} else {
		reports = append(reports, piperutils.Path{Target: toolRecordFileName})
	}

	piperutils.PersistReportsAndLinks("protecodeExecuteScan", "", reports, links)

	// Enforce the compliance policy last, after all reports have been persisted.
	if config.FailOnSevereVulnerabilities && protecode.HasSevereVulnerabilities(result.Result, config.ExcludeCVEs) {
		log.SetErrorCategory(log.ErrorCompliance)
		return fmt.Errorf("the product is not compliant")
	}
	return nil
}
   285  
   286  func setInfluxData(influx *protecodeExecuteScanInflux, result map[string]int) {
   287  	influx.protecode_data.fields.historical_vulnerabilities = result["historical_vulnerabilities"]
   288  	influx.protecode_data.fields.triaged_vulnerabilities = result["triaged_vulnerabilities"]
   289  	influx.protecode_data.fields.excluded_vulnerabilities = result["excluded_vulnerabilities"]
   290  	influx.protecode_data.fields.minor_vulnerabilities = result["minor_vulnerabilities"]
   291  	influx.protecode_data.fields.major_vulnerabilities = result["major_vulnerabilities"]
   292  	influx.protecode_data.fields.vulnerabilities = result["vulnerabilities"]
   293  }
   294  
   295  func createProtecodeClient(config *protecodeExecuteScanOptions) protecode.Protecode {
   296  	var duration time.Duration = time.Duration(time.Minute * 1)
   297  
   298  	if len(config.TimeoutMinutes) > 0 {
   299  		dur, err := time.ParseDuration(fmt.Sprintf("%vm", config.TimeoutMinutes))
   300  		if err != nil {
   301  			log.Entry().Warnf("Failed to parse timeout %v, switched back to default timeout %v minutes", config.TimeoutMinutes, duration)
   302  		} else {
   303  			duration = dur
   304  		}
   305  	}
   306  
   307  	pc := protecode.Protecode{}
   308  
   309  	protecodeOptions := protecode.Options{
   310  		ServerURL: config.ServerURL,
   311  		Logger:    log.Entry().WithField("package", "SAP/jenkins-library/pkg/protecode"),
   312  		Duration:  duration,
   313  		Username:  config.Username,
   314  		Password:  config.Password,
   315  	}
   316  
   317  	pc.SetOptions(protecodeOptions)
   318  
   319  	return pc
   320  }
   321  
   322  func uploadScanOrDeclareFetch(utils protecodeUtils, config protecodeExecuteScanOptions, productID int, client protecode.Protecode, fileName string) int {
   323  	//check if the LoadExistingProduct) before returns an valid product id, than skip this
   324  	//if !hasExisting(productID, config.VerifyOnly) {
   325  
   326  	log.Entry().Debugf("[DEBUG] ===> In uploadScanOrDeclareFetch: %v", productID)
   327  
   328  	// check if product doesn't exist then create a new one.
   329  	if productID <= 0 {
   330  		log.Entry().Infof("New product creation started ... ")
   331  		// log.Entry().Debugf("[DEBUG] ===> New product creation started: %v", productID)
   332  		productID = uploadFile(utils, config, productID, client, fileName, false)
   333  
   334  		log.Entry().Infof("New product has been successfully created: %v", productID)
   335  		// log.Entry().Debugf("[DEBUG] ===> After uploading [productID < 0] file returned productID: %v", productID)
   336  		return productID
   337  
   338  		// In case product already exists and "VerifyOnly (reuseExisting)" is false then we replace binary without creating a new product.
   339  	} else if (productID > 0) && !config.VerifyOnly {
   340  		log.Entry().Infof("Product already exists and 'VerifyOnly (reuseExisting)' is false then product (%v) binary and scan result will be replaced without creating a new product.", productID)
   341  		// log.Entry().Debugf("[DEBUG] ===> Replace binary entry point started %v", productID)
   342  		productID = uploadFile(utils, config, productID, client, fileName, true)
   343  
   344  		// log.Entry().Debugf("[DEBUG] ===> After uploading file [(productID > 0) && !config.VerifyOnly] returned productID: %v", productID)
   345  		return productID
   346  
   347  		// If product already exists and "reuseExisting" option is enabled then return the latest similar scan result.
   348  	} else {
   349  		log.Entry().Infof("VerifyOnly (reuseExisting) option is enabled and returned productID: %v", productID)
   350  		// log.Entry().Debugf("[DEBUG] ===> VerifyOnly (reuseExisting) option is enabled and returned productID: %v", productID)
   351  		return productID
   352  	}
   353  }
   354  
   355  func uploadFile(utils protecodeUtils, config protecodeExecuteScanOptions, productID int, client protecode.Protecode, fileName string, replaceBinary bool) int {
   356  
   357  	// get calculated version for Version field
   358  	version := getProcessedVersion(&config)
   359  
   360  	if len(config.FetchURL) > 0 {
   361  		log.Entry().Debugf("Declare fetch url %v", config.FetchURL)
   362  		resultData := client.DeclareFetchURL(config.CleanupMode, config.Group, config.FetchURL, version, productID, replaceBinary)
   363  		productID = resultData.Result.ProductID
   364  	} else {
   365  		log.Entry().Debugf("Upload file path: %v", config.FilePath)
   366  		if len(config.FilePath) <= 0 {
   367  			log.Entry().Fatalf("There is no file path configured for upload : %v", config.FilePath)
   368  		}
   369  		pathToFile := filepath.Join(config.FilePath, fileName)
   370  		if exists, err := utils.FileExists(pathToFile); err != nil && !exists {
   371  			log.Entry().Fatalf("There is no file for upload: %v", pathToFile)
   372  		}
   373  
   374  		combinedFileName := fileName
   375  		if len(config.PullRequestName) > 0 {
   376  			combinedFileName = fmt.Sprintf("%v_%v", config.PullRequestName, fileName)
   377  		}
   378  
   379  		resultData := client.UploadScanFile(config.CleanupMode, config.Group, pathToFile, combinedFileName, version, productID, replaceBinary)
   380  		productID = resultData.Result.ProductID
   381  		log.Entry().Debugf("[DEBUG] ===> uploadFile return FINAL product id: %v", productID)
   382  	}
   383  	return productID
   384  }
   385  
   386  func hasExisting(productID int, verifyOnly bool) bool {
   387  	if (productID > 0) || verifyOnly {
   388  		return true
   389  	}
   390  	return false
   391  }
   392  
   393  func correctDockerConfigEnvVar(config *protecodeExecuteScanOptions) {
   394  	path := config.DockerConfigJSON
   395  	if len(path) > 0 {
   396  		log.Entry().Infof("Docker credentials configuration: %v", path)
   397  		path, _ := filepath.Abs(path)
   398  		// use parent directory
   399  		path = filepath.Dir(path)
   400  		os.Setenv("DOCKER_CONFIG", path)
   401  	} else {
   402  		log.Entry().Info("Docker credentials configuration: NONE")
   403  	}
   404  }
   405  
   406  // Calculate version based on versioning model and artifact version or return custom scan version provided by user
   407  func getProcessedVersion(config *protecodeExecuteScanOptions) string {
   408  	processedVersion := config.CustomScanVersion
   409  	if len(processedVersion) > 0 {
   410  		log.Entry().Infof("Using custom version: %v", processedVersion)
   411  	} else {
   412  		if len(config.VersioningModel) > 0 {
   413  			processedVersion = versioning.ApplyVersioningModel(config.VersioningModel, config.Version)
   414  		} else {
   415  			// By default 'major' if <config.VersioningModel> not provided
   416  			processedVersion = versioning.ApplyVersioningModel("major", config.Version)
   417  		}
   418  	}
   419  	return processedVersion
   420  }
   421  
   422  // create toolrecord file for protecode
   423  // todo: check if group and product names can be retrieved
   424  func createToolRecordProtecode(workspace string, config *protecodeExecuteScanOptions, productID int, webuiURL string) (string, error) {
   425  	record := toolrecord.New(workspace, "protecode", config.ServerURL)
   426  	groupURL := config.ServerURL + "/#/groups/" + config.Group
   427  	err := record.AddKeyData("group",
   428  		config.Group,
   429  		config.Group, // todo figure out display name
   430  		groupURL)
   431  	if err != nil {
   432  		return "", err
   433  	}
   434  	err = record.AddKeyData("product",
   435  		strconv.Itoa(productID),
   436  		strconv.Itoa(productID), // todo figure out display name
   437  		webuiURL)
   438  	if err != nil {
   439  		return "", err
   440  	}
   441  	err = record.Persist()
   442  	if err != nil {
   443  		return "", err
   444  	}
   445  	return record.GetFileName(), nil
   446  }