github.com/cilium/ebpf@v0.15.1-0.20240517100537-8079b37aa138/internal/io.go

package internal

import (
	"bufio"
	"bytes"
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"sync"
)

// NewBufferedSectionReader wraps an io.ReaderAt in an appropriately-sized
// buffered reader. It is a convenience function for reading subsections of
// ELF sections while minimizing the number of read() syscalls made.
//
// Syscall overhead is non-negligible in continuous integration contexts
// where ELFs might be accessed over virtual filesystems with poor random
// access performance. Buffering reads makes sense because (sub)sections
// end up being read completely anyway.
//
// Use instead of the r.Seek() + io.LimitReader() pattern.
func NewBufferedSectionReader(ra io.ReaderAt, off, n int64) *bufio.Reader {
	// Clamp the size of the buffer to one page to avoid slurping large parts
	// of a file into memory. bufio.NewReader uses a hardcoded default buffer
	// of 4096. Allow arches with larger pages to allocate more, but don't
	// allocate a fixed 4k buffer if we only need to read a small segment.
	buf := n
	if ps := int64(os.Getpagesize()); n > ps {
		buf = ps
	}

	return bufio.NewReaderSize(io.NewSectionReader(ra, off, n), int(buf))
}

// DiscardZeroes makes sure that all written bytes are zero
// before discarding them.
type DiscardZeroes struct{}

func (DiscardZeroes) Write(p []byte) (int, error) {
	for _, b := range p {
		if b != 0 {
			return 0, errors.New("encountered non-zero byte")
		}
	}
	return len(p), nil
}

// ReadAllCompressed decompresses a gzipped file into memory.
func ReadAllCompressed(file string) ([]byte, error) {
	fh, err := os.Open(file)
	if err != nil {
		return nil, err
	}
	defer fh.Close()

	gz, err := gzip.NewReader(fh)
	if err != nil {
		return nil, err
	}
	defer gz.Close()

	return io.ReadAll(gz)
}

// ReadUint64FromFile reads a uint64 from a file.
//
// format specifies the contents of the file in fmt.Scanf syntax.
func ReadUint64FromFile(format string, path ...string) (uint64, error) {
	filename := filepath.Join(path...)
	data, err := os.ReadFile(filename)
	if err != nil {
		return 0, fmt.Errorf("reading file %q: %w", filename, err)
	}

	var value uint64
	n, err := fmt.Fscanf(bytes.NewReader(data), format, &value)
	if err != nil {
		return 0, fmt.Errorf("parsing file %q: %w", filename, err)
	}
	if n != 1 {
		return 0, fmt.Errorf("parsing file %q: expected 1 item, got %d", filename, n)
	}

	return value, nil
}

type uint64FromFileKey struct {
	format, path string
}

var uint64FromFileCache = struct {
	sync.RWMutex
	values map[uint64FromFileKey]uint64
}{
	values: map[uint64FromFileKey]uint64{},
}

// ReadUint64FromFileOnce is like ReadUint64FromFile but memoizes the result.
func ReadUint64FromFileOnce(format string, path ...string) (uint64, error) {
	filename := filepath.Join(path...)
	key := uint64FromFileKey{format, filename}

	uint64FromFileCache.RLock()
	if value, ok := uint64FromFileCache.values[key]; ok {
		uint64FromFileCache.RUnlock()
		return value, nil
	}
	uint64FromFileCache.RUnlock()

	value, err := ReadUint64FromFile(format, filename)
	if err != nil {
		return 0, err
	}

	uint64FromFileCache.Lock()
	defer uint64FromFileCache.Unlock()

	if value, ok := uint64FromFileCache.values[key]; ok {
		// Someone else got here before us, use what is cached.
		return value, nil
	}

	uint64FromFileCache.values[key] = value
	return value, nil
}
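
// Illustrative usage sketch of the helpers above. This function is not part
// of the upstream io.go; the file paths, offset and sizes it uses are
// assumptions chosen only for demonstration.
func exampleIOHelpers() error {
	// Parse a single decimal integer from a procfs-style file. The format
	// string follows fmt.Fscanf syntax, and ReadUint64FromFileOnce memoizes
	// the result per (format, joined path) pair.
	pidMax, err := ReadUint64FromFileOnce("%d\n", "/proc/sys/kernel/pid_max")
	if err != nil {
		return err
	}
	_ = pidMax

	// Read a 64-byte window starting at offset 0x40 of a file through a
	// buffered section reader, so that repeated small reads of the window
	// don't each turn into a separate read() syscall.
	f, err := os.Open("/bin/true") // assumed path, for illustration only
	if err != nil {
		return err
	}
	defer f.Close()

	buf := make([]byte, 64)
	if _, err := io.ReadFull(NewBufferedSectionReader(f, 0x40, 64), buf); err != nil {
		return err
	}

	return nil
}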