github.com/u-root/u-root@v7.0.1-0.20200915234505-ad7babab0a8e+incompatible/pkg/boot/kexec/memory_linux.go

     1  // Copyright 2015-2019 the u-root Authors. All rights reserved
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package kexec
     6  
     7  import (
     8  	"debug/elf"
     9  	"fmt"
    10  	"io"
    11  	"io/ioutil"
    12  	"log"
    13  	"os"
    14  	"path"
    15  	"path/filepath"
    16  	"reflect"
    17  	"sort"
    18  	"strconv"
    19  	"strings"
    20  	"unsafe"
    21  )
    22  
    23  var pageMask = uint(os.Getpagesize() - 1)
    24  
    25  // ErrNotEnoughSpace is returned by the FindSpace family of functions if no
    26  // range is large enough to accommodate the request.
    27  type ErrNotEnoughSpace struct {
    28  	Size uint
    29  }
    30  
    31  func (e ErrNotEnoughSpace) Error() string {
    32  	return fmt.Sprintf("not enough space to allocate %#x bytes", e.Size)
    33  }
    34  
    35  // Range represents a contiguous uintptr interval [Start, Start+Size).
    36  type Range struct {
    37  	// Start is the inclusive start of the range.
    38  	Start uintptr
    39  
    40  	// Size is the number of elements in the range.
    41  	//
    42  	// Start+Size is the exclusive end of the range.
    43  	Size uint
    44  }
    45  
    46  // RangeFromInterval returns a Range representing [start, end).
    47  func RangeFromInterval(start, end uintptr) Range {
    48  	return Range{
    49  		Start: start,
    50  		Size:  uint(end - start),
    51  	}
    52  }
    53  
    54  // String returns [Start, Start+Size) as a string.
    55  func (r Range) String() string {
    56  	return fmt.Sprintf("[%#x, %#x)", r.Start, r.End())
    57  }
    58  
    59  // End returns the uintptr *after* the end of the interval.
    60  func (r Range) End() uintptr {
    61  	return r.Start + uintptr(r.Size)
    62  }
    63  
    64  // Adjacent returns true if r and r2 do not overlap, but are immediately next
    65  // to each other.
    66  func (r Range) Adjacent(r2 Range) bool {
    67  	return r2.End() == r.Start || r.End() == r2.Start
    68  }
    69  
    70  // Contains returns true iff p is in the interval described by r.
    71  func (r Range) Contains(p uintptr) bool {
    72  	return r.Start <= p && p < r.End()
    73  }
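// Illustrative sketch of how the Range helpers above behave; the addresses
// are hypothetical.
//
//	r := RangeFromInterval(0x1000, 0x3000) // {Start: 0x1000, Size: 0x2000}
//	_ = r.End()                            // 0x3000 (exclusive)
//	_ = r.Contains(0x2fff)                 // true
//	_ = r.Contains(0x3000)                 // false: End() is exclusive
//	_ = r.Adjacent(Range{Start: 0x3000, Size: 0x100}) // true: the ranges touch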
    74  
    75  func min(a, b uintptr) uintptr {
    76  	if a < b {
    77  		return a
    78  	}
    79  	return b
    80  }
    81  
    82  func max(a, b uintptr) uintptr {
    83  	if a > b {
    84  		return a
    85  	}
    86  	return b
    87  }
    88  
    89  // Intersect returns the continuous range of points common to r and r2 if there
    90  // is one.
    91  func (r Range) Intersect(r2 Range) *Range {
    92  	if !r.Overlaps(r2) {
    93  		return nil
    94  	}
    95  	i := RangeFromInterval(max(r.Start, r2.Start), min(r.End(), r2.End()))
    96  	return &i
    97  }
    98  
    99  // Minus removes all points in r2 from r.
   100  func (r Range) Minus(r2 Range) []Range {
   101  	var result []Range
   102  	if r.Contains(r2.Start) && r.Start != r2.Start {
   103  		result = append(result, Range{
   104  			Start: r.Start,
   105  			Size:  uint(r2.Start - r.Start),
   106  		})
   107  	}
   108  	if r.Contains(r2.End()) && r.End() != r2.End() {
   109  		result = append(result, Range{
   110  			Start: r2.End(),
   111  			Size:  uint(r.End() - r2.End()),
   112  		})
   113  	}
   114  	// Neither end was in r?
   115  	//
   116  	// Either r is a subset of r2 and r disappears completely, or they are
   117  	// completely disjunct.
   118  	if len(result) == 0 && r.Disjunct(r2) {
   119  		result = append(result, r)
   120  	}
   121  	return result
   122  }
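// Illustrative sketch: Minus punches a hole out of a range and returns up to
// two remainders; the addresses are hypothetical.
//
//	r := Range{Start: 0x1000, Size: 0x3000}    // [0x1000, 0x4000)
//	hole := Range{Start: 0x2000, Size: 0x1000} // [0x2000, 0x3000)
//	left := r.Minus(hole)
//	// left is {Start: 0x1000, Size: 0x1000} and {Start: 0x3000, Size: 0x1000}.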
   123  
   124  // Overlaps returns true if r and r2 overlap.
   125  func (r Range) Overlaps(r2 Range) bool {
   126  	return r.Start < r2.End() && r2.Start < r.End()
   127  }
   128  
   129  // IsSupersetOf returns true if r2 in r.
   130  func (r Range) IsSupersetOf(r2 Range) bool {
   131  	return r.Start <= r2.Start && r.End() >= r2.End()
   132  }
   133  
   134  // Disjunct returns true if r and r2 do not overlap.
   135  func (r Range) Disjunct(r2 Range) bool {
   136  	return !r.Overlaps(r2)
   137  }
   138  
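// toSlice returns a byte slice that aliases the memory in [r.Start, r.End())
// without copying it; the caller must ensure that range is mapped and valid
// for the lifetime of the slice.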
   139  func (r Range) toSlice() []byte {
   140  	var data []byte
   141  
   142  	sh := (*reflect.SliceHeader)(unsafe.Pointer(&data))
   143  	sh.Data = r.Start
   144  	sh.Len = int(r.Size)
   145  	sh.Cap = int(r.Size)
   146  
   147  	return data
   148  }
   149  
   150  // Ranges is a list of non-overlapping ranges.
   151  type Ranges []Range
   152  
   153  // Minus removes all points in r from all ranges in rs.
   154  func (rs Ranges) Minus(r Range) Ranges {
   155  	var ram Ranges
   156  	for _, oldRange := range rs {
   157  		ram = append(ram, oldRange.Minus(r)...)
   158  	}
   159  	return ram
   160  }
   161  
    162  // FindSpace finds a contiguous piece of sz points within Ranges and returns
    163  // the Range pointing to it.
   164  func (rs Ranges) FindSpace(sz uint) (space Range, err error) {
   165  	return rs.FindSpaceAbove(sz, 0)
   166  }
   167  
    168  // MaxAddr is the highest address in a 64-bit address space.
   169  const MaxAddr = ^uintptr(0)
   170  
    171  // FindSpaceAbove finds a contiguous piece of sz points within Ranges and
    172  // returns a Range with space.Start >= minAddr.
   173  func (rs Ranges) FindSpaceAbove(sz uint, minAddr uintptr) (space Range, err error) {
   174  	return rs.FindSpaceIn(sz, RangeFromInterval(minAddr, MaxAddr))
   175  }
   176  
    177  // FindSpaceIn finds a contiguous piece of sz points within Ranges and returns
    178  // a Range with space.Start >= limit.Start and space.End() <= limit.End().
   179  func (rs Ranges) FindSpaceIn(sz uint, limit Range) (space Range, err error) {
   180  	for _, r := range rs {
   181  		if overlap := r.Intersect(limit); overlap != nil && overlap.Size >= sz {
   182  			return Range{Start: overlap.Start, Size: sz}, nil
   183  		}
   184  	}
   185  	return Range{}, ErrNotEnoughSpace{Size: sz}
   186  }
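// Illustrative sketch: finding space in a hypothetical set of free ranges.
//
//	free := Ranges{
//		{Start: 0x1000, Size: 0x1000},
//		{Start: 0x100000, Size: 0x100000},
//	}
//	free.FindSpace(0x2000)              // {0x100000, 0x2000}: the first range is too small
//	free.FindSpaceAbove(0x1000, 0x2000) // {0x100000, 0x1000}: skips the range below 0x2000
//	free.FindSpace(0x1000000)           // fails with ErrNotEnoughSpace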
   187  
   188  // Sort sorts ranges by their start point.
   189  func (rs Ranges) Sort() {
   190  	sort.Slice(rs, func(i, j int) bool {
   191  		return rs[i].Start < rs[j].Start
   192  	})
   193  }
   194  
    195  // pool stores the byte slices pointed to by Segment.Buf in order to prevent
    196  // the underlying arrays from being collected by the garbage collector.
   197  var pool [][]byte
   198  
    199  // Segment defines a kexec segment: a user-space buffer and its target physical range.
   200  type Segment struct {
   201  	// Buf is a buffer in user space.
   202  	Buf Range
   203  
    204  // Phys is the physical address range the buffer contents are loaded into.
   205  	Phys Range
   206  }
   207  
    208  // NewSegment creates a new Segment.
    209  // Segments should be created with NewSegment to prevent the data pointed to
    210  // by Segment.Buf from being collected by the garbage collector.
   211  func NewSegment(buf []byte, phys Range) Segment {
   212  	if buf == nil {
   213  		return Segment{
   214  			Buf: Range{
   215  				Start: 0,
   216  				Size:  0,
   217  			},
   218  			Phys: phys,
   219  		}
   220  	}
   221  	pool = append(pool, buf)
   222  	return Segment{
   223  		Buf: Range{
   224  			Start: uintptr((unsafe.Pointer(&buf[0]))),
   225  			Size:  uint(len(buf)),
   226  		},
   227  		Phys: phys,
   228  	}
   229  }
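// Illustrative sketch: wrapping a user-space buffer in a Segment destined for
// a hypothetical physical address.
//
//	data := []byte("kernel command line")
//	seg := NewSegment(data, Range{Start: 0x90000, Size: uint(len(data))})
//	// seg.Buf describes data in this process; seg.Phys is where kexec copies it.
//	_ = seg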
   230  
   231  func (s Segment) String() string {
   232  	return fmt.Sprintf("(virt: %s, phys: %s)", s.Buf, s.Phys)
   233  }
   234  
   235  func (s *Segment) tryMerge(s2 Segment) (ok bool) {
   236  	if s.Phys.Disjunct(s2.Phys) {
   237  		return false
   238  	}
   239  
    240  	// Virtual memory ranges should never overlap, so simply
    241  	// concatenate the two buffers.
   242  	a := s.Buf.toSlice()
   243  	b := s2.Buf.toSlice()
   244  	c := append(a, b...)
   245  
   246  	phys := s.Phys
    247  	// s.Phys and s2.Phys overlap; extend phys so it also covers s2.Phys.
   248  	if !s.Phys.IsSupersetOf(s2.Phys) {
   249  		phys.Size = uint(s2.Phys.Start-s.Phys.Start) + s2.Phys.Size
   250  	}
   251  
   252  	*s = NewSegment(c, phys)
   253  	return true
   254  }
   255  
   256  func alignUp(p uint) uint {
   257  	return (p + pageMask) &^ pageMask
   258  }
   259  
   260  func alignUpPtr(p uintptr) uintptr {
   261  	return uintptr(alignUp(uint(p)))
   262  }
   263  
   264  // AlignPhys fixes s to the kexec_load preconditions.
   265  //
   266  // s's physical addresses must be multiples of the page size.
   267  //
   268  // E.g. if page size is 0x1000:
   269  // Segment {
   270  //   Buf:  {Start: 0x1011, Size: 0x1022}
   271  //   Phys: {Start: 0x2011, Size: 0x1022}
   272  // }
   273  // has to become
   274  // Segment {
   275  //   Buf:  {Start: 0x1000, Size: 0x1033}
   276  //   Phys: {Start: 0x2000, Size: 0x2000}
   277  // }
   278  func AlignPhys(s Segment) Segment {
   279  	orig := s.Phys.Start
   280  	// Find the page address of the starting point.
   281  	s.Phys.Start = s.Phys.Start &^ uintptr(pageMask)
   282  
   283  	diff := orig - s.Phys.Start
   284  
   285  	// Round up to page size.
   286  	s.Phys.Size = alignUp(s.Phys.Size + uint(diff))
   287  
   288  	if s.Buf.Start < diff && diff > 0 {
   289  		panic("cannot have virtual memory address within first page")
   290  	}
   291  	s.Buf.Start -= diff
   292  
   293  	if s.Buf.Size > 0 {
   294  		s.Buf.Size += uint(diff)
   295  	}
   296  	return s
   297  }
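// Illustrative sketch: the example from the AlignPhys comment above, expressed
// as a call (page size assumed to be 0x1000).
//
//	s := Segment{
//		Buf:  Range{Start: 0x1011, Size: 0x1022},
//		Phys: Range{Start: 0x2011, Size: 0x1022},
//	}
//	s = AlignPhys(s)
//	// s.Buf  is now {Start: 0x1000, Size: 0x1033}
//	// s.Phys is now {Start: 0x2000, Size: 0x2000}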
   298  
   299  // Segments is a collection of segments.
   300  type Segments []Segment
   301  
   302  // PhysContains returns whether p exists in any of segs' physical memory
   303  // ranges.
   304  func (segs Segments) PhysContains(p uintptr) bool {
   305  	for _, s := range segs {
   306  		if s.Phys.Contains(p) {
   307  			return true
   308  		}
   309  	}
   310  	return false
   311  }
   312  
   313  // Insert inserts s assuming it does not overlap with an existing segment.
   314  func (segs *Segments) Insert(s Segment) {
   315  	*segs = append(*segs, s)
   316  	segs.sort()
   317  }
   318  
   319  func (segs Segments) sort() {
   320  	sort.Slice(segs, func(i, j int) bool {
   321  		return segs[i].Phys.Start < segs[j].Phys.Start
   322  	})
   323  }
   324  
    325  // Dedup merges segments in segs whose physical ranges overlap.
   326  func Dedup(segs Segments) Segments {
   327  	var s Segments
   328  	sort.Slice(segs, func(i, j int) bool {
   329  		if segs[i].Phys.Start == segs[j].Phys.Start {
   330  			// let segs[i] be the superset of segs[j]
   331  			return segs[i].Phys.Size > segs[j].Phys.Size
   332  		}
   333  		return segs[i].Phys.Start < segs[j].Phys.Start
   334  	})
   335  
   336  	for _, seg := range segs {
   337  		doIt := true
   338  		for i := range s {
   339  			if merged := s[i].tryMerge(seg); merged {
   340  				doIt = false
   341  				break
   342  			}
   343  		}
   344  		if doIt {
   345  			s = append(s, seg)
   346  		}
   347  	}
   348  	return s
   349  }
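// Illustrative sketch: two segments whose physical ranges overlap are merged
// into one; the addresses are hypothetical.
//
//	a := NewSegment(make([]byte, 0x30), Range{Start: 0x1000, Size: 0x30})
//	b := NewSegment(make([]byte, 0x30), Range{Start: 0x1020, Size: 0x30})
//	merged := Dedup(Segments{a, b})
//	// merged contains a single Segment whose Phys covers [0x1000, 0x1050).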
   350  
   351  // Memory provides routines to work with physical memory ranges.
   352  type Memory struct {
   353  	// Phys defines the layout of physical memory.
   354  	//
   355  	// Phys is used to tell loaded operating systems what memory is usable
   356  	// as RAM, and what memory is reserved (for ACPI or other reasons).
   357  	Phys MemoryMap
   358  
   359  	// Segments are the segments used to load a new operating system.
   360  	//
   361  	// Each segment also contains a physical memory region it maps to.
   362  	Segments Segments
   363  }
   364  
    365  // LoadElfSegments adds the loadable (PT_LOAD) segments of the ELF in r to m.Segments.
   366  func (m *Memory) LoadElfSegments(r io.ReaderAt) error {
   367  	f, err := elf.NewFile(r)
   368  	if err != nil {
   369  		return err
   370  	}
   371  
   372  	for _, p := range f.Progs {
   373  		if p.Type != elf.PT_LOAD {
   374  			continue
   375  		}
   376  
   377  		var d []byte
    378  		// Only load the segment if there is data. The kexec call will zero out the rest of the buffer (all of it if Filesz is 0):
   379  		// | bufsz bytes are copied from the source buffer to the target kernel buffer. If bufsz is less than memsz, then the excess bytes in the kernel buffer are zeroed out.
   380  		// http://man7.org/linux/man-pages/man2/kexec_load.2.html
   381  		if p.Filesz != 0 {
   382  			d = make([]byte, p.Filesz)
   383  			n, err := r.ReadAt(d, int64(p.Off))
   384  			if err != nil {
   385  				return err
   386  			}
   387  			if n < len(d) {
   388  				return fmt.Errorf("not all data of the segment was read")
   389  			}
   390  		}
   391  		// TODO(hugelgupf): check if this is within availableRAM??
   392  		s := NewSegment(d, Range{
   393  			Start: uintptr(p.Paddr),
   394  			Size:  uint(p.Memsz),
   395  		})
   396  		m.Segments.Insert(s)
   397  	}
   398  	return nil
   399  }
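// Illustrative sketch: loading the PT_LOAD segments of a kernel image into
// m.Segments; the path is hypothetical.
//
//	f, err := os.Open("/tmp/kernel.elf")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//
//	var m Memory
//	if err := m.LoadElfSegments(f); err != nil {
//		return err
//	}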
   400  
    401  // ParseMemoryMap reads the firmware-provided memory map from /sys/firmware/memmap.
   402  func (m *Memory) ParseMemoryMap() error {
   403  	p, err := ParseMemoryMap()
   404  	if err != nil {
   405  		return err
   406  	}
   407  	m.Phys = p
   408  	return nil
   409  }
   410  
   411  var memoryMapRoot = "/sys/firmware/memmap/"
   412  
    413  // ParseMemoryMap reads the firmware-provided memory map from /sys/firmware/memmap.
   414  func ParseMemoryMap() (MemoryMap, error) {
   415  	return internalParseMemoryMap(memoryMapRoot)
   416  }
   417  
   418  func internalParseMemoryMap(memoryMapDir string) (MemoryMap, error) {
   419  	type memRange struct {
   420  		// start and end addresses are inclusive
   421  		start, end uintptr
   422  		typ        RangeType
   423  	}
   424  
   425  	ranges := make(map[string]memRange)
   426  	walker := func(name string, info os.FileInfo, err error) error {
   427  		if err != nil {
   428  			return err
   429  		}
   430  		if info.IsDir() {
   431  			return nil
   432  		}
   433  
   434  		const (
   435  			// file names
   436  			start = "start"
   437  			end   = "end"
   438  			typ   = "type"
   439  		)
   440  
   441  		base := path.Base(name)
   442  		if base != start && base != end && base != typ {
   443  			return fmt.Errorf("unexpected file %q", name)
   444  		}
   445  		dir := path.Dir(name)
   446  
   447  		b, err := ioutil.ReadFile(name)
   448  		if err != nil {
   449  			return fmt.Errorf("error reading file %q: %v", name, err)
   450  		}
   451  
   452  		data := strings.TrimSpace(string(b))
   453  		r := ranges[dir]
   454  		if base == typ {
   455  			typ, ok := sysfsToRangeType[data]
   456  			if !ok {
   457  				log.Printf("Sysfs file %q contains unrecognized memory map type %q, defaulting to Reserved", name, data)
   458  				r.typ = RangeReserved
   459  			} else {
   460  				r.typ = typ
   461  			}
   462  			ranges[dir] = r
   463  			return nil
   464  		}
   465  
   466  		v, err := strconv.ParseUint(data, 0, 64)
   467  		if err != nil {
   468  			return err
   469  		}
   470  		switch base {
   471  		case start:
   472  			r.start = uintptr(v)
   473  		case end:
   474  			r.end = uintptr(v)
   475  		}
   476  		ranges[dir] = r
   477  		return nil
   478  	}
   479  
   480  	if err := filepath.Walk(memoryMapDir, walker); err != nil {
   481  		return nil, err
   482  	}
   483  
   484  	var phys []TypedRange
   485  	for _, r := range ranges {
   486  		// Range's end address is exclusive, while Linux's sysfs prints
   487  		// the end address inclusive.
   488  		//
   489  		// E.g. sysfs will contain
   490  		//
   491  		// start: 0x100, end: 0x1ff
   492  		//
   493  		// while we represent
   494  		//
   495  		// start: 0x100, size: 0x100.
   496  		phys = append(phys, TypedRange{
   497  			Range: RangeFromInterval(r.start, r.end+1),
   498  			Type:  r.typ,
   499  		})
   500  	}
   501  	sort.Slice(phys, func(i, j int) bool {
   502  		return phys[i].Start < phys[j].Start
   503  	})
   504  	return phys, nil
   505  }
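// For reference, the sysfs tree walked above has one numbered directory per
// firmware-provided range, each containing start, end and type files; the
// entries and values below are illustrative:
//
//	/sys/firmware/memmap/0/start  "0x0"
//	/sys/firmware/memmap/0/end    "0x9ffff"
//	/sys/firmware/memmap/0/type   "System RAM"
//	/sys/firmware/memmap/1/start  "0xa0000"
//	/sys/firmware/memmap/1/end    "0xfffff"
//	/sys/firmware/memmap/1/type   "Reserved"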
   506  
    507  // M1 is 1 Megabyte (in bytes).
   508  const M1 = 1 << 20
   509  
    510  // FindSpace returns a range of physical memory in which a buffer of size sz
    511  // can be stored during the next AddKexecSegment call.
   512  func (m Memory) FindSpace(sz uint) (Range, error) {
   513  	// Allocate full pages.
   514  	sz = alignUp(sz)
   515  
   516  	// Don't use memory below 1M, just in case.
   517  	return m.AvailableRAM().FindSpaceAbove(sz, M1)
   518  }
   519  
   520  // ReservePhys reserves page-aligned sz bytes in the physical memmap within
   521  // the given limit address range.
   522  func (m *Memory) ReservePhys(sz uint, limit Range) (Range, error) {
   523  	sz = alignUp(sz)
   524  
   525  	r, err := m.AvailableRAM().FindSpaceIn(sz, limit)
   526  	if err != nil {
   527  		return Range{}, err
   528  	}
   529  
   530  	m.Phys.Insert(TypedRange{
   531  		Range: r,
   532  		Type:  RangeReserved,
   533  	})
   534  	return r, nil
   535  }
   536  
   537  // AddPhysSegment reserves len(d) bytes in the physical memmap within limit and
   538  // adds a kexec segment with d in that range.
   539  func (m *Memory) AddPhysSegment(d []byte, limit Range) (Range, error) {
   540  	r, err := m.ReservePhys(uint(len(d)), limit)
   541  	if err != nil {
   542  		return Range{}, err
   543  	}
   544  	m.Segments.Insert(NewSegment(d, r))
   545  	return r, nil
   546  }
   547  
    548  // AddKexecSegment adds d as a new kexec segment.
   549  func (m *Memory) AddKexecSegment(d []byte) (Range, error) {
   550  	r, err := m.FindSpace(uint(len(d)))
   551  	if err != nil {
   552  		return Range{}, err
   553  	}
   554  	m.Segments.Insert(NewSegment(d, r))
   555  	return r, nil
   556  }
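// Illustrative sketch: the typical flow for placing a blob in free RAM above
// 1M; blob is a hypothetical []byte.
//
//	var m Memory
//	if err := m.ParseMemoryMap(); err != nil {
//		return err
//	}
//	r, err := m.AddKexecSegment(blob)
//	if err != nil {
//		return err
//	}
//	// r is the physical range chosen for blob; m.Segments now records it.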
   557  
   558  // AvailableRAM returns page-aligned unused regions of RAM.
   559  //
   560  // AvailableRAM takes all RAM-marked pages in the memory map and subtracts the
    561  // kexec segments already allocated. The returned ranges begin at page boundaries.
   562  //
    563  // E.g. if the page size is 4K and the RAM segments are
    564  //            [{start:0 size:8192} {start:8192 size:8000}]
    565  // and the kexec segments are
    566  //            [{start:40 size:50} {start:8000 size:2000}]
    567  // the result should be
    568  //            [{start:0 size:40} {start:4096 size:3904} {start:12288 size:3904}]
   569  func (m Memory) AvailableRAM() Ranges {
   570  	ram := m.Phys.FilterByType(RangeRAM)
   571  
   572  	// Remove all points in Segments from available RAM.
   573  	for _, s := range m.Segments {
   574  		ram = ram.Minus(s.Phys)
   575  	}
   576  
   577  	// Only return Ranges starting at an aligned size.
   578  	var alignedRanges Ranges
   579  	for _, r := range ram {
   580  		alignedStart := alignUpPtr(r.Start)
   581  		if alignedStart < r.End() {
   582  			alignedRanges = append(alignedRanges, Range{
   583  				Start: alignedStart,
   584  				Size:  r.Size - uint(alignedStart-r.Start),
   585  			})
   586  		}
   587  	}
   588  	return alignedRanges
   589  }
   590  
    591  // RangeType defines the type of a TypedRange, based on the Linux kernel
    592  // string provided by the firmware memory map.
   593  type RangeType string
   594  
   595  // These are the range types we know Linux uses.
   596  const (
   597  	RangeRAM      RangeType = "System RAM"
   598  	RangeDefault  RangeType = "Default"
   599  	RangeACPI     RangeType = "ACPI Tables"
   600  	RangeNVS      RangeType = "ACPI Non-volatile Storage"
   601  	RangeReserved RangeType = "Reserved"
   602  )
   603  
   604  // String implements fmt.Stringer.
   605  func (r RangeType) String() string {
   606  	return string(r)
   607  }
   608  
   609  var sysfsToRangeType = map[string]RangeType{
   610  	"System RAM":                RangeRAM,
   611  	"Default":                   RangeDefault,
   612  	"ACPI Tables":               RangeACPI,
   613  	"ACPI Non-volatile Storage": RangeNVS,
   614  	"Reserved":                  RangeReserved,
   615  	"reserved":                  RangeReserved,
   616  }
   617  
    618  // TypedRange represents a range of physical memory along with its type.
   619  type TypedRange struct {
   620  	Range
   621  	Type RangeType
   622  }
   623  
   624  func (tr TypedRange) String() string {
   625  	return fmt.Sprintf("{addr: %s, type: %s}", tr.Range, tr.Type)
   626  }
   627  
   628  // MemoryMap defines the layout of physical memory.
   629  //
   630  // MemoryMap defines which ranges in memory are usable RAM and which are
   631  // reserved for various reasons.
   632  type MemoryMap []TypedRange
   633  
   634  // FilterByType only returns ranges of the given typ.
   635  func (m MemoryMap) FilterByType(typ RangeType) Ranges {
   636  	var rs Ranges
   637  	for _, tr := range m {
   638  		if tr.Type == typ {
   639  			rs = append(rs, tr.Range)
   640  		}
   641  	}
   642  	return rs
   643  }
   644  
   645  func (m MemoryMap) sort() {
   646  	sort.Slice(m, func(i, j int) bool {
   647  		return m[i].Start < m[j].Start
   648  	})
   649  }
   650  
   651  // Insert a new TypedRange into the memory map, removing chunks of other ranges
   652  // as necessary.
   653  //
   654  // Assumes that TypedRange is a valid range -- no checking.
   655  func (m *MemoryMap) Insert(r TypedRange) {
   656  	var newMap MemoryMap
   657  
   658  	// Remove points in r from all existing physical ranges.
   659  	for _, q := range *m {
   660  		split := q.Range.Minus(r.Range)
   661  		for _, r2 := range split {
   662  			newMap = append(newMap, TypedRange{Range: r2, Type: q.Type})
   663  		}
   664  	}
   665  
   666  	newMap = append(newMap, r)
   667  	newMap.sort()
   668  	*m = newMap
   669  }
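// Illustrative sketch: reserving a hole in the middle of a RAM range; the
// addresses are hypothetical.
//
//	mm := MemoryMap{
//		{Range: Range{Start: 0x0, Size: 0x10000}, Type: RangeRAM},
//	}
//	mm.Insert(TypedRange{
//		Range: Range{Start: 0x4000, Size: 0x1000},
//		Type:  RangeReserved,
//	})
//	// mm is now:
//	//   {addr: [0x0, 0x4000), type: System RAM}
//	//   {addr: [0x4000, 0x5000), type: Reserved}
//	//   {addr: [0x5000, 0x10000), type: System RAM}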