github.com/xgoffin/jenkins-library@v1.154.0/cmd/protecodeExecuteScan.go (about)

     1  package cmd
     2  
     3  import (
     4  	"encoding/json"
     5  	"fmt"
     6  	"io"
     7  	"io/ioutil"
     8  	"os"
     9  	"path"
    10  	"path/filepath"
    11  	"regexp"
    12  	"strconv"
    13  	"strings"
    14  	"time"
    15  
    16  	"github.com/pkg/errors"
    17  
    18  	"github.com/GoogleContainerTools/container-diff/pkg/util"
    19  	"github.com/SAP/jenkins-library/pkg/command"
    20  	piperDocker "github.com/SAP/jenkins-library/pkg/docker"
    21  	"github.com/SAP/jenkins-library/pkg/log"
    22  	StepResults "github.com/SAP/jenkins-library/pkg/piperutils"
    23  	"github.com/SAP/jenkins-library/pkg/protecode"
    24  	"github.com/SAP/jenkins-library/pkg/telemetry"
    25  	"github.com/SAP/jenkins-library/pkg/toolrecord"
    26  	"github.com/SAP/jenkins-library/pkg/versioning"
    27  )
    28  
const (
	// webReportPath is the format string for the protecode WebUI product link:
	// server URL followed by the numeric product id
	webReportPath  = "%s/#/product/%v/"
	// scanResultFile is the raw scan result JSON written to reportPath
	scanResultFile = "protecodescan_vulns.json"
	// stepResultFile is the aggregated step report JSON written to reportPath
	stepResultFile = "protecodeExecuteScan.json"
)

// reportPath is the directory all report files are written to
var reportPath = "./"

// cachePath is the local root directory used to cache downloaded docker images
var cachePath = "./cache"

// cacheProtecodeImagePath is the image download location inside the cache
var cacheProtecodeImagePath = "/protecode/Image"

// cacheProtecodePath is the protecode sub-tree of the cache removed during cleanup
var cacheProtecodePath = "/protecode"
    39  
    40  func protecodeExecuteScan(config protecodeExecuteScanOptions, telemetryData *telemetry.CustomData, influx *protecodeExecuteScanInflux) {
    41  	c := command.Command{}
    42  	// reroute command output to loging framework
    43  	c.Stdout(log.Writer())
    44  	c.Stderr(log.Writer())
    45  
    46  	dClient := createDockerClient(&config)
    47  	influx.step_data.fields.protecode = false
    48  	if err := runProtecodeScan(&config, influx, dClient); err != nil {
    49  		log.Entry().WithError(err).Fatal("Failed to execute protecode scan.")
    50  	}
    51  	influx.step_data.fields.protecode = true
    52  }
    53  
// runProtecodeScan determines the artifact to scan (a freshly downloaded
// docker image, a configured file path, or just a file name derived from a
// fetch URL), executes the protecode scan, and cleans up the local cache
// afterwards.
//
// NOTE(review): config.FilePath is mutated here to hold only the directory of
// the upload file; the file name is passed separately to executeProtecodeScan.
func runProtecodeScan(config *protecodeExecuteScanOptions, influx *protecodeExecuteScanInflux, dClient piperDocker.Download) error {
	correctDockerConfigEnvVar(config)
	var fileName, filePath string
	var err error
	//create client for sending api request
	log.Entry().Debug("Create protecode client")
	client := createClient(config)
	// neither fetch URL nor file path configured: download the docker image
	if len(config.FetchURL) == 0 && len(config.FilePath) == 0 {
		log.Entry().Debugf("Get docker image: %v, %v, %v, %v", config.ScanImage, config.DockerRegistryURL, config.FilePath, config.IncludeLayers)
		fileName, filePath, err = getDockerImage(dClient, config)
		if err != nil {
			return errors.Wrap(err, "failed to get Docker image")
		}
		if len(config.FilePath) <= 0 {
			(*config).FilePath = filePath
			log.Entry().Debugf("Filepath for upload image: %v", config.FilePath)
		}
	} else if len(config.FilePath) > 0 {
		// split the configured path into directory (kept in config.FilePath)
		// and file name (passed on separately)
		parts := strings.Split(config.FilePath, "/")
		pathFragment := strings.Join(parts[:len(parts)-1], "/")
		if len(pathFragment) > 0 {
			(*config).FilePath = pathFragment
		} else {
			(*config).FilePath = "./"
		}
		fileName = parts[len(parts)-1]

	} else if len(config.FetchURL) > 0 {
		// Get filename from a fetch URL
		fileName = filepath.Base(config.FetchURL)
		log.Entry().Debugf("[DEBUG] ===> Filepath from fetch URL: %v", fileName)
	}

	log.Entry().Debug("Execute protecode scan")
	if err := executeProtecodeScan(influx, client, config, fileName, writeReportToFile); err != nil {
		return err
	}

	// cleanup: remove the upload path and the protecode cache; failures only warn
	defer os.Remove(config.FilePath)

	if err := os.RemoveAll(filepath.Join(cachePath, cacheProtecodePath)); err != nil {
		log.Entry().Warnf("Error during cleanup folder %v", err)
	}

	return nil
}
   100  
   101  // TODO: extract to version utils
   102  func handleArtifactVersion(artifactVersion string) string {
   103  	matches, _ := regexp.MatchString("([\\d\\.]){1,}-[\\d]{14}([\\Wa-z\\d]{41})?", artifactVersion)
   104  	if matches {
   105  		split := strings.SplitN(artifactVersion, ".", 2)
   106  		log.Entry().WithField("old", artifactVersion).WithField("new", split[0]).Debug("Trimming version to major version digit.")
   107  		return split[0]
   108  	}
   109  	return artifactVersion
   110  }
   111  
   112  func getDockerImage(dClient piperDocker.Download, config *protecodeExecuteScanOptions) (string, string, error) {
   113  
   114  	cacheImagePath := filepath.Join(cachePath, cacheProtecodeImagePath)
   115  	deletePath := filepath.Join(cachePath, cacheProtecodePath)
   116  	err := os.RemoveAll(deletePath)
   117  
   118  	os.Mkdir(cacheImagePath, 600)
   119  
   120  	imageSource, err := dClient.GetImageSource()
   121  	if err != nil {
   122  		log.SetErrorCategory(log.ErrorConfiguration)
   123  		return "", "", errors.Wrap(err, "failed to get docker image")
   124  	}
   125  	image, err := dClient.DownloadImageToPath(imageSource, cacheImagePath)
   126  	if err != nil {
   127  		return "", "", errors.Wrap(err, "failed to download docker image")
   128  	}
   129  
   130  	var fileName string
   131  	if util.IsTar(config.ScanImage) {
   132  		fileName = config.ScanImage
   133  	} else {
   134  		fileName = getTarName(config)
   135  		tarFilePath := filepath.Join(cachePath, fileName)
   136  		tarFile, err := os.Create(tarFilePath)
   137  		if err != nil {
   138  			log.SetErrorCategory(log.ErrorCustom)
   139  			return "", "", errors.Wrap(err, "failed to create tar for the docker image")
   140  		}
   141  		defer tarFile.Close()
   142  		if err := os.Chmod(tarFilePath, 0644); err != nil {
   143  			log.SetErrorCategory(log.ErrorCustom)
   144  			return "", "", errors.Wrap(err, "failed to set permissions on tar for the docker image")
   145  		}
   146  		if err = dClient.TarImage(tarFile, image); err != nil {
   147  			return "", "", errors.Wrap(err, "failed to tar the docker image")
   148  		}
   149  	}
   150  
   151  	resultFilePath := config.FilePath
   152  
   153  	if len(config.FilePath) <= 0 {
   154  		resultFilePath = cachePath
   155  	}
   156  
   157  	return fileName, resultFilePath, nil
   158  }
   159  
// executeProtecodeScan resolves the protecode product id (either the
// configured replaceProductID after validation, or by automatic lookup via
// group and file name), uploads or declares the scan target, polls until the
// server-side scan finished, and persists scan results, HTML/Markdown
// reports, links, a toolrecord file and influx metrics.
// It returns an error when the product id is invalid, the server-side scan
// failed, or severe vulnerabilities were found while
// failOnSevereVulnerabilities is enabled; report-writing problems only warn.
func executeProtecodeScan(influx *protecodeExecuteScanInflux, client protecode.Protecode, config *protecodeExecuteScanOptions, fileName string, writeReportToFile func(resp io.ReadCloser, reportFileName string) error) error {

	log.Entry().Debugf("[DEBUG] ===> Load existing product Group:%v, VerifyOnly:%v, Filename:%v, replaceProductId:%v", config.Group, config.VerifyOnly, fileName, config.ReplaceProductID)

	// -1 signals "no existing product found yet"
	productID := -1

	// If replaceProductId is not provided then switch to automatic existing product detection
	if config.ReplaceProductID > 0 {

		log.Entry().Infof("replaceProductID has been provided (%v) and checking ...", config.ReplaceProductID)
		// log.Entry().Debugf("[DEBUG] ===> ReplaceProductID has been provided and required to verify it: %v", config.ReplaceProductID)

		// Validate provided product id, if not valid id then throw an error
		if client.VerifyProductID(config.ReplaceProductID) {
			log.Entry().Infof("replaceProductID has been checked and it's valid")
			// log.Entry().Debugf("[DEBUG] ===> ReplaceProductID exists")
			productID = config.ReplaceProductID
		} else {
			log.Entry().Debugf("[DEBUG] ===> ReplaceProductID doesn't exist")
			return fmt.Errorf("ERROR -> the product id is not valid '%d'", config.ReplaceProductID)
		}

	} else {
		// Get existing product id by filename
		log.Entry().Infof("replaceProductID is not provided and automatic search starts from group: %v ... ", config.Group)
		// log.Entry().Debugf("[DEBUG] ===> ReplaceProductID hasn't provided and automatic search starts... ")
		productID = client.LoadExistingProduct(config.Group, fileName)

		if productID > 0 {
			log.Entry().Infof("Automatic search completed and found following product id: %v", productID)
			// log.Entry().Debugf("[DEBUG] ===> Returned productID: %v", productID)
		} else {
			log.Entry().Infof("Automatic search completed but not found any similar product scan, now starts new scan creation")
		}
	}

	// check if no existing is found; creates a new product, replaces the
	// binary, or reuses the existing scan depending on productID/VerifyOnly
	productID = uploadScanOrDeclareFetch(*config, productID, client, fileName)

	log.Entry().Debugf("[DEBUG] ===> After 'uploadScanOrDeclareFetch' returned productID: %v", productID)

	if productID <= 0 {
		return fmt.Errorf("the product id is not valid '%d'", productID)
	}

	//pollForResult
	log.Entry().Debugf("Poll for scan result %v", productID)
	result := client.PollForResult(productID, config.TimeoutMinutes)
	// write results to file
	// NOTE(review): marshal/write errors are deliberately ignored here — the
	// raw result file is best-effort
	jsonData, _ := json.Marshal(result)
	ioutil.WriteFile(filepath.Join(reportPath, scanResultFile), jsonData, 0644)

	//check if result is ok else notify
	if protecode.HasFailed(result) {
		log.SetErrorCategory(log.ErrorService)
		return fmt.Errorf("protecode scan failed: %v/products/%v", config.ServerURL, productID)
	}

	//loadReport
	log.Entry().Debugf("Load report %v for %v", config.ReportFileName, productID)
	resp := client.LoadReport(config.ReportFileName, productID)
	//save report to filesystem
	if err := writeReportToFile(*resp, config.ReportFileName); err != nil {
		log.Entry().Warningf("failed to write report: %s", err)
	}
	//clean scan from server
	log.Entry().Debugf("Delete scan %v for %v", config.CleanupMode, productID)
	client.DeleteScan(config.CleanupMode, productID)

	//count vulnerabilities
	log.Entry().Debug("Parse scan result")
	parsedResult, vulns := client.ParseResultForInflux(result.Result, config.ExcludeCVEs)

	log.Entry().Debug("Write report to filesystem")
	if err := protecode.WriteReport(
		protecode.ReportData{
			ServerURL:                   config.ServerURL,
			FailOnSevereVulnerabilities: config.FailOnSevereVulnerabilities,
			ExcludeCVEs:                 config.ExcludeCVEs,
			Target:                      config.ReportFileName,
			Vulnerabilities:             vulns,
			ProductID:                   fmt.Sprintf("%v", productID),
		}, reportPath, stepResultFile, parsedResult, ioutil.WriteFile); err != nil {
		log.Entry().Warningf("failed to write report: %v", err)
	}

	log.Entry().Debug("Write influx data")
	setInfluxData(influx, parsedResult)

	// write reports JSON
	reports := []StepResults.Path{
		{Target: config.ReportFileName, Mandatory: true},
		{Target: stepResultFile, Mandatory: true},
		{Target: scanResultFile, Mandatory: true},
	}
	// write links JSON
	webuiURL := fmt.Sprintf(webReportPath, config.ServerURL, productID)
	links := []StepResults.Path{
		{Name: "Protecode WebUI", Target: webuiURL},
		{Name: "Protecode Report", Target: path.Join("artifact", config.ReportFileName), Scope: "job"},
	}

	// write custom report
	scanReport := protecode.CreateCustomReport(fileName, productID, parsedResult, vulns)
	paths, err := protecode.WriteCustomReports(scanReport, fileName, fmt.Sprint(productID))
	if err != nil {
		// do not fail - consider failing later on
		log.Entry().Warning("failed to create custom HTML/MarkDown file ...", err)
	} else {
		reports = append(reports, paths...)
	}

	// create toolrecord file
	toolRecordFileName, err := createToolRecordProtecode("./", config, productID, webuiURL)
	if err != nil {
		// do not fail until the framework is well established
		log.Entry().Warning("TR_PROTECODE: Failed to create toolrecord file ...", err)
	} else {
		reports = append(reports, StepResults.Path{Target: toolRecordFileName})
	}

	StepResults.PersistReportsAndLinks("protecodeExecuteScan", "", reports, links)

	// compliance gate: severe vulnerabilities fail the step when configured
	if config.FailOnSevereVulnerabilities && protecode.HasSevereVulnerabilities(result.Result, config.ExcludeCVEs) {
		log.SetErrorCategory(log.ErrorCompliance)
		return fmt.Errorf("the product is not compliant")
	}
	return nil
}
   289  
   290  func setInfluxData(influx *protecodeExecuteScanInflux, result map[string]int) {
   291  	influx.protecode_data.fields.historical_vulnerabilities = result["historical_vulnerabilities"]
   292  	influx.protecode_data.fields.triaged_vulnerabilities = result["triaged_vulnerabilities"]
   293  	influx.protecode_data.fields.excluded_vulnerabilities = result["excluded_vulnerabilities"]
   294  	influx.protecode_data.fields.minor_vulnerabilities = result["minor_vulnerabilities"]
   295  	influx.protecode_data.fields.major_vulnerabilities = result["major_vulnerabilities"]
   296  	influx.protecode_data.fields.vulnerabilities = result["vulnerabilities"]
   297  }
   298  
   299  func createClient(config *protecodeExecuteScanOptions) protecode.Protecode {
   300  
   301  	var duration time.Duration = time.Duration(time.Minute * 1)
   302  
   303  	if len(config.TimeoutMinutes) > 0 {
   304  		dur, err := time.ParseDuration(fmt.Sprintf("%vm", config.TimeoutMinutes))
   305  		if err != nil {
   306  			log.Entry().Warnf("Failed to parse timeout %v, switched back to default timeout %v minutes", config.TimeoutMinutes, duration)
   307  		} else {
   308  			duration = dur
   309  		}
   310  	}
   311  
   312  	pc := protecode.Protecode{}
   313  
   314  	protecodeOptions := protecode.Options{
   315  		ServerURL: config.ServerURL,
   316  		Logger:    log.Entry().WithField("package", "SAP/jenkins-library/pkg/protecode"),
   317  		Duration:  duration,
   318  		Username:  config.Username,
   319  		Password:  config.Password,
   320  	}
   321  
   322  	pc.SetOptions(protecodeOptions)
   323  
   324  	return pc
   325  }
   326  
   327  func createDockerClient(config *protecodeExecuteScanOptions) piperDocker.Download {
   328  
   329  	dClientOptions := piperDocker.ClientOptions{ImageName: config.ScanImage, RegistryURL: config.DockerRegistryURL, LocalPath: config.FilePath, IncludeLayers: config.IncludeLayers}
   330  	dClient := &piperDocker.Client{}
   331  	dClient.SetOptions(dClientOptions)
   332  
   333  	return dClient
   334  }
   335  
   336  func uploadScanOrDeclareFetch(config protecodeExecuteScanOptions, productID int, client protecode.Protecode, fileName string) int {
   337  	//check if the LoadExistingProduct) before returns an valid product id, than skip this
   338  	//if !hasExisting(productID, config.VerifyOnly) {
   339  
   340  	log.Entry().Debugf("[DEBUG] ===> In uploadScanOrDeclareFetch: %v", productID)
   341  
   342  	// check if product doesn't exist then create a new one.
   343  	if productID <= 0 {
   344  		log.Entry().Infof("New product creation started ... ")
   345  		// log.Entry().Debugf("[DEBUG] ===> New product creation started: %v", productID)
   346  		productID = uploadFile(config, productID, client, fileName, false)
   347  
   348  		log.Entry().Infof("New product has been successfully created: %v", productID)
   349  		// log.Entry().Debugf("[DEBUG] ===> After uploading [productID < 0] file returned productID: %v", productID)
   350  		return productID
   351  
   352  		// In case product already exists and "VerifyOnly (reuseExisting)" is false then we replace binary without creating a new product.
   353  	} else if (productID > 0) && !config.VerifyOnly {
   354  		log.Entry().Infof("Product already exists and 'VerifyOnly (reuseExisting)' is false then product (%v) binary and scan result will be replaced without creating a new product.", productID)
   355  		// log.Entry().Debugf("[DEBUG] ===> Replace binary entry point started %v", productID)
   356  		productID = uploadFile(config, productID, client, fileName, true)
   357  
   358  		// log.Entry().Debugf("[DEBUG] ===> After uploading file [(productID > 0) && !config.VerifyOnly] returned productID: %v", productID)
   359  		return productID
   360  
   361  		// If product already exists and "reuseExisting" option is enabled then return the latest similar scan result.
   362  	} else {
   363  		log.Entry().Infof("VerifyOnly (reuseExisting) option is enabled and returned productID: %v", productID)
   364  		// log.Entry().Debugf("[DEBUG] ===> VerifyOnly (reuseExisting) option is enabled and returned productID: %v", productID)
   365  		return productID
   366  	}
   367  }
   368  
   369  func uploadFile(config protecodeExecuteScanOptions, productID int, client protecode.Protecode, fileName string, replaceBinary bool) int {
   370  
   371  	// get calculated version for Version field
   372  	version := getProcessedVersion(&config)
   373  
   374  	if len(config.FetchURL) > 0 {
   375  		log.Entry().Debugf("Declare fetch url %v", config.FetchURL)
   376  		resultData := client.DeclareFetchURL(config.CleanupMode, config.Group, config.FetchURL, version, productID, replaceBinary)
   377  		productID = resultData.Result.ProductID
   378  	} else {
   379  		log.Entry().Debugf("Upload file path: %v", config.FilePath)
   380  		if len(config.FilePath) <= 0 {
   381  			log.Entry().Fatalf("There is no file path configured for upload : %v", config.FilePath)
   382  		}
   383  		pathToFile := filepath.Join(config.FilePath, fileName)
   384  		if !(fileExists(pathToFile)) {
   385  			log.Entry().Fatalf("There is no file for upload: %v", pathToFile)
   386  		}
   387  
   388  		combinedFileName := fileName
   389  		if len(config.PullRequestName) > 0 {
   390  			combinedFileName = fmt.Sprintf("%v_%v", config.PullRequestName, fileName)
   391  		}
   392  
   393  		resultData := client.UploadScanFile(config.CleanupMode, config.Group, pathToFile, combinedFileName, version, productID, replaceBinary)
   394  		productID = resultData.Result.ProductID
   395  		log.Entry().Debugf("[DEBUG] ===> uploadFile return FINAL product id: %v", productID)
   396  	}
   397  	return productID
   398  }
   399  
   400  func fileExists(filename string) bool {
   401  	info, err := os.Stat(filename)
   402  	if os.IsNotExist(err) {
   403  		return false
   404  	}
   405  	return !info.IsDir()
   406  }
   407  
   408  func hasExisting(productID int, verifyOnly bool) bool {
   409  	if (productID > 0) || verifyOnly {
   410  		return true
   411  	}
   412  	return false
   413  }
   414  
   415  var writeReportToFile = func(resp io.ReadCloser, reportFileName string) error {
   416  	filePath := filepath.Join(reportPath, reportFileName)
   417  	f, err := os.Create(filePath)
   418  	if err == nil {
   419  		defer f.Close()
   420  		_, err = io.Copy(f, resp)
   421  	}
   422  
   423  	return err
   424  }
   425  
   426  func correctDockerConfigEnvVar(config *protecodeExecuteScanOptions) {
   427  	path := config.DockerConfigJSON
   428  	if len(path) > 0 {
   429  		log.Entry().Infof("Docker credentials configuration: %v", path)
   430  		path, _ := filepath.Abs(path)
   431  		// use parent directory
   432  		path = filepath.Dir(path)
   433  		os.Setenv("DOCKER_CONFIG", path)
   434  	} else {
   435  		log.Entry().Info("Docker credentials configuration: NONE")
   436  	}
   437  }
   438  
   439  func getTarName(config *protecodeExecuteScanOptions) string {
   440  
   441  	// remove original version
   442  	fileName := strings.TrimSuffix(config.ScanImage, ":"+config.Version)
   443  	// remove sha digest if exists
   444  	sha256 := "@sha256"
   445  	if index := strings.Index(fileName, sha256); index > -1 {
   446  		fileName = fileName[:index]
   447  	}
   448  
   449  	version := getProcessedVersion(config)
   450  	if len(version) > 0 {
   451  		fileName = fileName + "_" + version
   452  	}
   453  
   454  	fileName = strings.ReplaceAll(fileName, "/", "_")
   455  
   456  	return fileName + ".tar"
   457  }
   458  
   459  // Calculate version based on versioning model and artifact version or return custom scan version provided by user
   460  func getProcessedVersion(config *protecodeExecuteScanOptions) string {
   461  	processedVersion := config.CustomScanVersion
   462  	if len(processedVersion) > 0 {
   463  		log.Entry().Infof("Using custom version: %v", processedVersion)
   464  	} else {
   465  		if len(config.VersioningModel) > 0 {
   466  			processedVersion = versioning.ApplyVersioningModel(config.VersioningModel, config.Version)
   467  		} else {
   468  			// By default 'major' if <config.VersioningModel> not provided
   469  			processedVersion = versioning.ApplyVersioningModel("major", config.Version)
   470  		}
   471  	}
   472  	return processedVersion
   473  }
   474  
   475  // create toolrecord file for protecode
   476  // todo: check if group and product names can be retrieved
   477  func createToolRecordProtecode(workspace string, config *protecodeExecuteScanOptions, productID int, webuiURL string) (string, error) {
   478  	record := toolrecord.New(workspace, "protecode", config.ServerURL)
   479  	groupURL := config.ServerURL + "/#/groups/" + config.Group
   480  	err := record.AddKeyData("group",
   481  		config.Group,
   482  		config.Group, // todo figure out display name
   483  		groupURL)
   484  	if err != nil {
   485  		return "", err
   486  	}
   487  	err = record.AddKeyData("product",
   488  		strconv.Itoa(productID),
   489  		strconv.Itoa(productID), // todo figure out display name
   490  		webuiURL)
   491  	if err != nil {
   492  		return "", err
   493  	}
   494  	err = record.Persist()
   495  	if err != nil {
   496  		return "", err
   497  	}
   498  	return record.GetFileName(), nil
   499  }