github.com/google/syzkaller@v0.0.0-20240517125934-c0f1611a36d6/pkg/covermerger/covermerger.go

// Copyright 2024 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.

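// Package covermerger merges per-file kernel coverage records collected on
// different repo/branch/commit versions into coverage for a single base revision.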
package covermerger

import (
	"encoding/csv"
	"fmt"
	"io"
	"strconv"

	"golang.org/x/exp/maps"
)

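// Column names of the input coverage CSV; the actual field order is taken from
// the header row of the stream. An illustrative header (column order is arbitrary):
//
//	kernel_repo,kernel_branch,kernel_commit,arch,file_path,sl,sc,el,ec,hit_count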
const (
	keyKernelRepo   = "kernel_repo"
	keyKernelBranch = "kernel_branch"
	keyKernelCommit = "kernel_commit"
	keyFilePath     = "file_path"
	keyStartLine    = "sl"
	keyStartCol     = "sc"
	keyEndLine      = "el"
	keyEndCol       = "ec"
	keyHitCount     = "hit_count"
	keyArch         = "arch"
)

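// FileRecord is a single CSV row, keyed by column name.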
type FileRecord map[string]string

// FileRecords is a list of rows; AggregateStreamData groups them by target file.
type FileRecords []FileRecord

// RepoBranchCommit identifies a single kernel source revision.
type RepoBranchCommit struct {
	Repo   string
	Branch string
	Commit string
}

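// RepoBranchCommit returns the kernel revision the record was collected on.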
func (fr FileRecord) RepoBranchCommit() RepoBranchCommit {
	return RepoBranchCommit{
		fr[keyKernelRepo],
		fr[keyKernelBranch],
		fr[keyKernelCommit],
	}
}

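// Frame is the source range covered by a record: start/end line and column.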
type Frame struct {
	StartLine int
	StartCol  int
	EndLine   int
	EndCol    int
}

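// Frame parses the frame columns of the record; it panics if any of them is not an integer.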
func (fr FileRecord) Frame() Frame {
	f := Frame{}
	var err error
	if f.StartCol, err = strconv.Atoi(fr[keyStartCol]); err != nil {
		panic(fmt.Sprintf("failed to Atoi(%s)", fr[keyStartCol]))
	}
	if f.StartLine, err = strconv.Atoi(fr[keyStartLine]); err != nil {
		panic(fmt.Sprintf("failed to Atoi(%s)", fr[keyStartLine]))
	}
	if f.EndCol, err = strconv.Atoi(fr[keyEndCol]); err != nil {
		panic(fmt.Sprintf("failed to Atoi(%s)", fr[keyEndCol]))
	}
	if f.EndLine, err = strconv.Atoi(fr[keyEndLine]); err != nil {
		panic(fmt.Sprintf("failed to Atoi(%s)", fr[keyEndLine]))
	}
	return f
}

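// HitCount parses the hit_count column; it panics if the value is not an integer.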
func (fr FileRecord) HitCount() int {
	if hitCount, err := strconv.Atoi(fr[keyHitCount]); err != nil {
		panic(fmt.Sprintf("failed to Atoi(%s)", fr[keyHitCount]))
	} else {
		return hitCount
	}
}

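// Arch returns the architecture the record was collected on.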
func (fr FileRecord) Arch() string {
	return fr[keyArch]
}

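// MergeResult is the merged coverage of a single file: per-line hit counts
// and whether the file exists in the base revision.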
type MergeResult struct {
	HitCounts  map[int]int
	FileExists bool
}

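// FileCoverageMerger accumulates the coverage records of one file and produces the merged result.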
type FileCoverageMerger interface {
	AddRecord(rbc RepoBranchCommit, arch string, f Frame, hitCount int)
	Result() *MergeResult
}

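// batchFileData merges all records of one target file: it fetches the file text
// for every repo/branch/commit seen in the records (plus the base) and feeds the
// records into a per-file merger.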
func batchFileData(c *Config, targetFilePath string, records FileRecords, base RepoBranchCommit,
) (*MergeResult, error) {
	repoBranchCommitsMap := make(map[RepoBranchCommit]bool)
	for _, record := range records {
		repoBranchCommitsMap[record.RepoBranchCommit()] = true
	}
	repoBranchCommitsMap[base] = true
	repoBranchCommits := maps.Keys(repoBranchCommitsMap)
	fileVersions, err := getFileVersions(c, targetFilePath, repoBranchCommits)
	if err != nil {
		return nil, fmt.Errorf("failed to getFileVersions: %w", err)
	}
	merger := makeFileLineCoverMerger(fileVersions, base)
	for _, record := range records {
		merger.AddRecord(
			record.RepoBranchCommit(),
			record.Arch(),
			record.Frame(),
			record.HitCount())
	}
	return merger.Result(), nil
}

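// makeRecord zips a CSV row with the schema (header) into a FileRecord;
// it panics if the row and the schema have different lengths.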
func makeRecord(fields, schema []string) FileRecord {
	record := make(FileRecord)
	if len(fields) != len(schema) {
		panic("fields size and schema size are not equal")
	}
	for i, v := range fields {
		k := schema[i]
		record[k] = v
	}
	return record
}

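// Config holds the merger settings: the working directory used by the merger
// and whether cloning the kernel repos should be skipped.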
type Config struct {
	Workdir       string
	skipRepoClone bool
}

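// AggregateStreamData reads coverage records from a CSV stream (header row first)
// and returns the merged per-file results against the base revision. Records of
// the same file are expected to be contiguous in the stream; every time file_path
// changes, the accumulated batch is merged.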
func AggregateStreamData(c *Config, stream io.Reader, base RepoBranchCommit,
) (map[string]*MergeResult, error) {
	stat := make(map[string]*MergeResult)
	var schema []string
	targetFile := ""
	var records FileRecords
	csvReader := csv.NewReader(stream)
	if fields, err := csvReader.Read(); err != nil {
		return nil, fmt.Errorf("failed to read schema: %w", err)
	} else {
		schema = fields
	}
	for {
		fields, err := csvReader.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, fmt.Errorf("failed to read CSV line: %w", err)
		}
		record := makeRecord(fields, schema)
		curTargetFile := record[keyFilePath]
		if targetFile == "" {
			targetFile = curTargetFile
		}
		if curTargetFile != targetFile {
			// The file path changed: merge everything accumulated for the previous file.
			if stat[targetFile], err = batchFileData(c, targetFile, records, base); err != nil {
				return nil, fmt.Errorf("failed to batchFileData(%s): %w", targetFile, err)
			}
			records = nil
			targetFile = curTargetFile
		}
		records = append(records, record)
	}
	if records != nil {
		// Merge the records of the last file in the stream.
		var err error
		if stat[targetFile], err = batchFileData(c, targetFile, records, base); err != nil {
			return nil, fmt.Errorf("failed to batchFileData(%s): %w", targetFile, err)
		}
	}

	return stat, nil
}