github.com/shaardie/u-root@v4.0.1-0.20190127173353-f24a1c26aa2e+incompatible/pkg/kexec/memory_linux.go

// Copyright 2015-2019 the u-root Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package kexec

import (
	"debug/elf"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"syscall"
	"unsafe"

	"golang.org/x/sys/unix"
)

var pageMask = uint(os.Getpagesize() - 1)

// Range represents a contiguous uintptr interval [Start, Start+Size).
type Range struct {
	// Start is the inclusive start of the range.
	Start uintptr
	// Size is the size of the range.
	// Start+Size is the exclusive end of the range.
	Size uint
}

// Overlaps returns true if r and r2 overlap.
func (r Range) Overlaps(r2 Range) bool {
	return r.Start < (r2.Start+uintptr(r2.Size)) && r2.Start < (r.Start+uintptr(r.Size))
}

// IsSupersetOf returns true if the range r2 is entirely contained within r.
func (r Range) IsSupersetOf(r2 Range) bool {
	return r.Start <= r2.Start && (r.Start+uintptr(r.Size)) >= (r2.Start+uintptr(r2.Size))
}

// Disjunct returns true if r and r2 do not overlap.
func (r Range) Disjunct(r2 Range) bool {
	return !r.Overlaps(r2)
}

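// Illustrative sketch of how the Range predicates above relate, using
// arbitrary example addresses:
//
//	a := Range{Start: 0x1000, Size: 0x2000} // [0x1000, 0x3000)
//	b := Range{Start: 0x2000, Size: 0x0800} // [0x2000, 0x2800)
//	c := Range{Start: 0x3000, Size: 0x1000} // [0x3000, 0x4000)
//
//	a.Overlaps(b)     // true:  the intervals share [0x2000, 0x2800)
//	a.IsSupersetOf(b) // true:  b lies entirely inside a
//	a.Overlaps(c)     // false: a ends where c starts (the end is exclusive)
//	a.Disjunct(c)     // true
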
// toSlice returns a byte slice that aliases the memory described by r
// without copying it.
func (r Range) toSlice() []byte {
	var data []byte

	sh := (*reflect.SliceHeader)(unsafe.Pointer(&data))
	sh.Data = r.Start
	sh.Len = int(r.Size)
	sh.Cap = int(r.Size)

	return data
}

// pool stores the byte slices pointed to by Segment.Buf so that their
// underlying arrays are not reclaimed by the garbage collector.
var pool [][]byte

// Segment defines kernel memory layout.
type Segment struct {
	// Buf is a buffer in user space.
	Buf Range
	// Phys is the physical address range the buffer will be loaded into.
	Phys Range
}

// NewSegment creates a new Segment.
// Segments should be created with NewSegment so that the data pointed to
// by Segment.Buf is kept alive and not collected by the garbage collector.
func NewSegment(buf []byte, phys Range) Segment {
	pool = append(pool, buf)
	return Segment{
		Buf: Range{
			Start: uintptr((unsafe.Pointer(&buf[0]))),
			Size:  uint(len(buf)),
		},
		Phys: phys,
	}
}

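// Minimal usage sketch: building a Segment from an in-memory buffer and a
// hypothetical physical destination (0x100000 is only an example value):
//
//	data := []byte("kernel command line\x00")
//	seg := NewSegment(data, Range{Start: 0x100000, Size: uint(len(data))})
//	fmt.Println(seg) // (virt: 0x... + 0x14 | phys: 0x100000 + 0x14)
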
func (s Segment) String() string {
	return fmt.Sprintf("(virt: %#x + %#x | phys: %#x + %#x)", s.Buf.Start, s.Buf.Size, s.Phys.Start, s.Phys.Size)
}

// ptrToSlice returns a byte slice of length size that aliases the memory
// starting at ptr without copying it.
func ptrToSlice(ptr uintptr, size int) []byte {
	var data []byte

	sh := (*reflect.SliceHeader)(unsafe.Pointer(&data))
	sh.Data = ptr
	sh.Len = size
	sh.Cap = size

	return data
}

// tryMerge merges s2 into s if their physical ranges overlap and reports
// whether a merge took place.
func (s *Segment) tryMerge(s2 Segment) (ok bool) {
	if s.Phys.Disjunct(s2.Phys) {
		return false
	}

	// The virtual buffers should never overlap; simply concatenate them.
	a := s.Buf.toSlice()
	b := s2.Buf.toSlice()
	c := append(a, b...)

	phys := s.Phys
	// s and s2 overlap only partially; extend the physical range to cover both.
	if !s.Phys.IsSupersetOf(s2.Phys) {
		phys.Size = uint(s2.Phys.Start-s.Phys.Start) + s2.Phys.Size
	}

	*s = NewSegment(c, phys)
	return true
}

// alignUp rounds p up to the next multiple of the page size.
func alignUp(p uint) uint {
	return (p + pageMask) &^ pageMask
}

// alignUpPtr rounds the pointer p up to the next multiple of the page size.
func alignUpPtr(p uintptr) uintptr {
	return uintptr(alignUp(uint(p)))
}

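// Worked example, assuming a 4 KiB page size (pageMask == 0xfff):
//
//	alignUp(0x1001) // (0x1001 + 0xfff) &^ 0xfff == 0x2000
//	alignUp(0x2000) // already aligned, stays 0x2000
//	alignUp(0)      // 0
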
// AlignPhys fixes s to the kexec_load preconditions.
//
// s's physical addresses must be multiples of the page size.
//
// E.g. if page size is 0x1000:
// Segment {
//   Buf:  {Start: 0x1011, Size: 0x1022}
//   Phys: {Start: 0x2011, Size: 0x1022}
// }
// has to become
// Segment {
//   Buf:  {Start: 0x1000, Size: 0x1033}
//   Phys: {Start: 0x2000, Size: 0x2000}
// }
func AlignPhys(s Segment) Segment {
	orig := s.Phys.Start
	// Find the page address of the starting point.
	s.Phys.Start = s.Phys.Start &^ uintptr(pageMask)

	diff := orig - s.Phys.Start
	// Round up to page size.
	s.Phys.Size = alignUp(s.Phys.Size + uint(diff))

	if s.Buf.Start < diff {
		panic("cannot have virtual memory address within first page")
	}
	s.Buf.Start -= diff

	if s.Buf.Size > 0 {
		s.Buf.Size += uint(diff)
	}
	return s
}

// Dedup merges segments in segs as much as possible.
func Dedup(segs []Segment) []Segment {
	var s []Segment
	sort.Slice(segs, func(i, j int) bool {
		if segs[i].Phys.Start == segs[j].Phys.Start {
			// let segs[i] be the superset of segs[j]
			return segs[i].Phys.Size > segs[j].Phys.Size
		}
		return segs[i].Phys.Start < segs[j].Phys.Start
	})

	for _, seg := range segs {
		doIt := true
		for i := range s {
			if merged := s[i].tryMerge(seg); merged {
				doIt = false
				break
			}
		}
		if doIt {
			s = append(s, seg)
		}
	}
	return s
}

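// Illustrative sketch: two segments with overlapping physical ranges collapse
// into one, while a disjoint one survives (buffers and addresses are arbitrary
// example values):
//
//	segs := []Segment{
//		NewSegment(make([]byte, 0x30), Range{Start: 0x1000, Size: 0x30}),
//		NewSegment(make([]byte, 0x30), Range{Start: 0x1020, Size: 0x30}),
//		NewSegment(make([]byte, 0x10), Range{Start: 0x9000, Size: 0x10}),
//	}
//	segs = Dedup(segs)
//	// len(segs) == 2: the first two were merged into a single segment
//	// covering phys [0x1000, 0x1050); the third is untouched.
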
// Load loads the given segments into memory to be executed on a kexec-reboot.
//
// It is assumed that segments contains the next kernel's code and text
// segments, and that entry is its entry point, either the kernel entry
// point or that of a trampoline.
func Load(entry uintptr, segments []Segment, flags uint64) error {
	for i := range segments {
		segments[i] = AlignPhys(segments[i])
	}

	segments = Dedup(segments)
	ok := false
	for _, s := range segments {
		ok = ok || (s.Phys.Start <= entry && entry < s.Phys.Start+uintptr(s.Phys.Size))
	}
	if !ok {
		return fmt.Errorf("entry point %#x is not covered by any segment", entry)
	}

	return rawLoad(entry, segments, flags)
}

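// Minimal usage sketch; real use requires CAP_SYS_BOOT, the physical address
// is made up, and buildNextKernelImage is a hypothetical helper returning the
// kernel bytes:
//
//	code := buildNextKernelImage()
//	seg := NewSegment(code, Range{Start: 0x1000000, Size: uint(len(code))})
//	if err := Load(0x1000000, []Segment{seg}, 0); err != nil {
//		log.Fatalf("kexec load failed: %v", err)
//	}
//	// A later reboot(2) with LINUX_REBOOT_CMD_KEXEC jumps to the new kernel.
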
// ErrKexec is the error type returned by kexec.
// It describes the entry point, flags, errno and kernel layout.
type ErrKexec struct {
	Entry    uintptr
	Segments []Segment
	Flags    uint64
	Errno    syscall.Errno
}

func (e ErrKexec) Error() string {
	return fmt.Sprintf("entry %x, flags %x, errno %d, segments %v", e.Entry, e.Flags, e.Errno, e.Segments)
}

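// Illustrative sketch: a caller can recover the raw errno from a failed load
// with a type assertion on ErrKexec:
//
//	if err := Load(entry, segs, 0); err != nil {
//		if kerr, ok := err.(ErrKexec); ok && kerr.Errno == syscall.EPERM {
//			log.Fatal("kexec_load needs CAP_SYS_BOOT")
//		}
//		log.Fatal(err)
//	}
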
// rawLoad is a wrapper around kexec_load(2) syscall.
// Preconditions:
// - segments must not overlap
// - segments must be full pages
func rawLoad(entry uintptr, segments []Segment, flags uint64) error {
	if _, _, errno := unix.Syscall6(
		unix.SYS_KEXEC_LOAD,
		entry,
		uintptr(len(segments)),
		uintptr(unsafe.Pointer(&segments[0])),
		uintptr(flags),
		0, 0); errno != 0 {
		return ErrKexec{
			Entry:    entry,
			Segments: segments,
			Flags:    flags,
			Errno:    errno,
		}
	}
	return nil
}

// LoadElfSegments reads the loadable ELF segments from r and adds them to
// the memory layout.
func (m *Memory) LoadElfSegments(r io.ReaderAt) error {
	f, err := elf.NewFile(r)
	if err != nil {
		return err
	}

	for _, p := range f.Progs {
		if p.Type != elf.PT_LOAD {
			continue
		}
		d := make([]byte, p.Filesz)
		n, err := r.ReadAt(d, int64(p.Off))
		if err != nil {
			return err
		}
		if n < len(d) {
			return fmt.Errorf("only %d of %d bytes of the segment were read", n, len(d))
		}
		s := NewSegment(d, Range{
			Start: uintptr(p.Paddr),
			Size:  uint(p.Memsz),
		})

		m.Segments = append(m.Segments, s)
	}
	return nil
}

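// Minimal usage sketch: reading the PT_LOAD segments of a kernel ELF image
// into a Memory (the path is only an example):
//
//	f, err := os.Open("/boot/vmlinux")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer f.Close()
//
//	var mem Memory
//	if err := mem.LoadElfSegments(f); err != nil {
//		log.Fatal(err)
//	}
//	// mem.Segments now holds one Segment per PT_LOAD program header.
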
// memoryMapRoot is the sysfs directory exposing the firmware memory map.
var memoryMapRoot = "/sys/firmware/memmap/"

// ParseMemoryMap reads the firmware-provided memory map
// from /sys/firmware/memmap.
func (m *Memory) ParseMemoryMap() error {
	type memRange struct {
		// start and end addresses are inclusive
		start, end uintptr
		typ        RangeType
	}

	ranges := make(map[string]memRange)
	walker := func(name string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() {
			return nil
		}

		const (
			// file names
			start = "start"
			end   = "end"
			typ   = "type"
		)

		base := path.Base(name)
		if base != start && base != end && base != typ {
			return fmt.Errorf("unexpected file %q", name)
		}
		dir := path.Dir(name)

		b, err := ioutil.ReadFile(name)
		if err != nil {
			return fmt.Errorf("error reading file %q: %v", name, err)
		}

		data := strings.TrimSpace(string(b))
		r := ranges[dir]
		if base == typ {
			r.typ = RangeType(data)
			ranges[dir] = r
			return nil
		}

		v, err := strconv.ParseUint(data, 0, 64)
		if err != nil {
			return err
		}
		switch base {
		case start:
			r.start = uintptr(v)
		case end:
			r.end = uintptr(v)
		}
		ranges[dir] = r
		return nil
	}

	if err := filepath.Walk(memoryMapRoot, walker); err != nil {
		return err
	}

	for _, r := range ranges {
		m.Phys = append(m.Phys, TypedAddressRange{
			Range: Range{
				Start: r.start,
				// The end address from sysfs is inclusive.
				Size: uint(r.end-r.start) + 1,
			},
			Type: r.typ,
		})
	}
	sort.Slice(m.Phys, func(i, j int) bool {
		return m.Phys[i].Start < m.Phys[j].Start
	})
	return nil
}

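// Illustrative sketch of the sysfs layout this parser expects and of typical
// use; the entry number and addresses are example values:
//
//	/sys/firmware/memmap/0/start -> 0x0
//	/sys/firmware/memmap/0/end   -> 0x9fbff
//	/sys/firmware/memmap/0/type  -> System RAM
//
//	var mem Memory
//	if err := mem.ParseMemoryMap(); err != nil {
//		log.Fatal(err)
//	}
//	// mem.Phys is now sorted by physical start address.
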
// RangeType defines the type of a TypedAddressRange, based on the Linux
// kernel string provided by the firmware memory map.
type RangeType string

const (
	RangeRAM     RangeType = "System RAM"
	RangeDefault RangeType = "Default"
	RangeNVACPI  RangeType = "ACPI Non-volatile Storage"
	RangeACPI    RangeType = "ACPI Tables"
	RangeNVS     RangeType = "Reserved"
)

// Memory provides routines to work with physical memory ranges.
type Memory struct {
	// Phys is the physical memory layout, e.g. as read from the
	// firmware memory map.
	Phys []TypedAddressRange

	// Segments are the segments to be passed to kexec.
	Segments []Segment
}

// TypedAddressRange represents a range of physical memory together with
// its firmware-reported type.
type TypedAddressRange struct {
	Range
	Type RangeType
}

// ErrNotEnoughSpace is returned by FindSpace when no available RAM range
// is large enough for the requested size.
var ErrNotEnoughSpace = errors.New("not enough space")

// FindSpace returns the start address of a physical memory region where a
// buffer of size sz can be stored by a subsequent AddKexecSegment call.
func (m Memory) FindSpace(sz uint) (start uintptr, err error) {
	sz = alignUp(sz)
	ranges := m.availableRAM()
	for _, r := range ranges {
		// Don't use memory below 1M, just in case.
		if uint(r.Start)+r.Size < 1048576 {
			continue
		}
		if r.Size >= sz {
			return r.Start, nil
		}
	}
	return 0, ErrNotEnoughSpace
}

// addKexecSegment wraps d in a page-aligned Segment at addr and inserts it
// into m.Segments, keeping the segments sorted by physical start address.
func (m *Memory) addKexecSegment(addr uintptr, d []byte) {
	s := NewSegment(d, Range{
		Start: addr,
		Size:  uint(len(d)),
	})
	s = AlignPhys(s)
	m.Segments = append(m.Segments, s)
	sort.Slice(m.Segments, func(i, j int) bool {
		return m.Segments[i].Phys.Start < m.Segments[j].Phys.Start
	})
}

// AddKexecSegment finds free physical memory for d, adds it as a new kexec
// segment, and returns the physical address at which it will be loaded.
func (m *Memory) AddKexecSegment(d []byte) (addr uintptr, err error) {
	size := uint(len(d))
	start, err := m.FindSpace(size)
	if err != nil {
		return 0, err
	}
	m.addKexecSegment(start, d)
	return start, nil
}

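// Minimal usage sketch: placing an extra blob (e.g. a kernel command line)
// in free RAM before calling Load:
//
//	var mem Memory
//	if err := mem.ParseMemoryMap(); err != nil {
//		log.Fatal(err)
//	}
//	addr, err := mem.AddKexecSegment([]byte("console=ttyS0\x00"))
//	if err != nil {
//		log.Fatal(err)
//	}
//	// addr is where the blob will live after kexec; pass it to the next
//	// kernel, then call Load(entry, mem.Segments, 0).
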
// availableRAM subtracts the physical ranges of the kexec segments from the
// RAM ranges in m.Phys, aligning the beginning of each resulting range up to
// a page boundary.
//
// E.g. if the page size is 4K, the RAM ranges are
//            [{start:0 size:8192} {start:8192 size:8000}]
// and the kexec segments are
//            [{start:40 size:50} {start:8000 size:2000}]
// the result is
//            [{start:0 size:40} {start:4096 size:8000-4096} {start:12288 size:16192-12288}]
func (m Memory) availableRAM() (avail []TypedAddressRange) {
	type point struct {
		// x is a point coordinate on an axis.
		x uintptr
		// start is true if the point is the beginning of a segment.
		start bool
		// ram is true if the point is part of a RAM segment.
		ram bool
	}
	// points stores starting and ending points of segments
	// sorted by coordinate.
	var points []point
	addPoint := func(r Range, ram bool) {
		points = append(points,
			point{x: r.Start, start: true, ram: ram},
			point{x: r.Start + uintptr(r.Size) - 1, start: false, ram: ram},
		)
	}

	for _, s := range m.Phys {
		if s.Type == RangeRAM {
			addPoint(s.Range, true)
		}
	}
	for _, s := range m.Segments {
		addPoint(s.Phys, false)
	}

	sort.Slice(points, func(i, j int) bool {
		return points[i].x < points[j].x
	})

	add := func(start, end uintptr, ramRange, kexecRange bool) {
		if !ramRange || kexecRange {
			return
		}
		start = alignUpPtr(start)
		if start >= end {
			return
		}
		avail = append(avail, TypedAddressRange{
			Range: Range{
				Start: start,
				Size:  uint(end-start) + 1,
			},
			Type: RangeRAM,
		})
	}

	var start uintptr
	var ramRange bool
	var kexecRange bool
	for _, p := range points {
		switch {
		case p.start && p.ram:
			start = p.x
		case p.start && !p.ram:
			if start != p.x {
				add(start, p.x-1, ramRange, kexecRange)
			}
		case !p.start && p.ram:
			add(start, p.x, ramRange, kexecRange)
		case !p.start && !p.ram:
			if ramRange {
				start = p.x + 1
			}
		}

		if p.ram {
			ramRange = p.start
		} else {
			kexecRange = p.start
		}
	}

	return avail
}