github.com/jlmucb/cloudproxy@v0.0.0-20170830161738-b5aa0b619bc4/cpvmm/vmm/bootstrap/bootstrap_e820.c (about) 1 /* 2 * bootstrap_e820.c: support functions for manipulating the e820 table 3 * File: bootstrap_e820.c 4 * Description: e820 file handling 5 * Author: John Manferdelli form tboot source 6 * 7 * Copyright (c) 2006-2010, Intel Corporation 8 * All rights reserved. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 14 * * Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * * Redistributions in binary form must reproduce the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer in the documentation and/or other materials provided 19 * with the distribution. 20 * * Neither the name of the Intel Corporation nor the names of its 21 * contributors may be used to endorse or promote products derived 22 * from this software without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 25 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 28 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 29 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 30 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 31 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 33 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 35 * OF THE POSSIBILITY OF SUCH DAMAGE. 
36 * 37 */ 38 39 #include "bootstrap_types.h" 40 #include "bootstrap_print.h" 41 #include "multiboot.h" 42 #include "e820.h" 43 44 unsigned int max_e820_entries= 0; 45 unsigned int g_nr_map= 0; 46 memory_map_t *g_copy_e820_map = NULL; 47 48 49 // copy of bootloader/BIOS e820 table with adjusted entries 50 51 52 static inline void split64b(uint64_t val, uint32_t *val_lo, uint32_t *val_hi) 53 { 54 *val_lo = (uint32_t)(val & 0xffffffff); 55 *val_hi = (uint32_t)(val >> 32); 56 } 57 58 static inline uint64_t combine64b(uint32_t val_lo, uint32_t val_hi) 59 { 60 return ((uint64_t)val_hi << 32) | (uint64_t)val_lo; 61 } 62 63 static inline uint64_t e820_base_64(memory_map_t *entry) 64 { 65 return combine64b(entry->base_addr_low, entry->base_addr_high); 66 } 67 68 static inline uint64_t e820_length_64(memory_map_t *entry) 69 { 70 return combine64b(entry->length_low, entry->length_high); 71 } 72 73 74 // print_e820_map 75 // Prints copied e820 map w/o any header (i.e. just entries, indented by a tab) 76 void print_map(memory_map_t *e820, int nr_map) 77 { 78 int i; 79 for ( i = 0; i < nr_map; i++ ) { 80 memory_map_t *entry = &e820[i]; 81 uint64_t base_addr, length; 82 83 base_addr = e820_base_64(entry); 84 length = e820_length_64(entry); 85 86 bprint("\t%016Lx - %016Lx (%d)\n", 87 (unsigned long long)base_addr, 88 (unsigned long long)(base_addr + length), 89 entry->type); 90 } 91 } 92 93 94 static bool insert_after_region(memory_map_t *e820map, unsigned int *nr_map, 95 unsigned int pos, uint64_t addr, uint64_t size, 96 uint32_t type) 97 { 98 unsigned int i; 99 100 /* no more room */ 101 if ( *nr_map + 1 > max_e820_entries) 102 return false; 103 104 /* shift (copy) everything up one entry */ 105 for ( i = *nr_map - 1; i > pos; i--) 106 e820map[i+1] = e820map[i]; 107 108 /* now add our entry */ 109 split64b(addr, &(e820map[pos+1].base_addr_low), 110 &(e820map[pos+1].base_addr_high)); 111 split64b(size, &(e820map[pos+1].length_low), 112 &(e820map[pos+1].length_high)); 113 
e820map[pos+1].type = type; 114 e820map[pos+1].size = sizeof(memory_map_t) - sizeof(uint32_t); 115 116 (*nr_map)++; 117 118 return true; 119 } 120 121 122 static void remove_region(memory_map_t *e820map, unsigned int *nr_map, 123 unsigned int pos) 124 { 125 unsigned int i; 126 127 /* shift (copy) everything down one entry */ 128 for ( i = pos; i < *nr_map - 1; i++) 129 e820map[i] = e820map[i+1]; 130 131 (*nr_map)--; 132 } 133 134 135 static bool protect_region(memory_map_t *e820map, unsigned int *nr_map, 136 uint64_t new_addr, uint64_t new_size, 137 uint32_t new_type) 138 { 139 uint64_t addr, tmp_addr, size, tmp_size; 140 uint32_t type; 141 unsigned int i; 142 143 if ( new_size == 0 ) 144 return true; 145 /* check for wrap */ 146 if ( new_addr + new_size < new_addr ) 147 return false; 148 149 /* find where our region belongs in the table and insert it */ 150 for ( i = 0; i < *nr_map; i++ ) { 151 addr = e820_base_64(&e820map[i]); 152 size = e820_length_64(&e820map[i]); 153 type = e820map[i].type; 154 /* is our region at the beginning of the current map region? */ 155 if ( new_addr == addr ) { 156 if ( !insert_after_region(e820map, nr_map, i-1, new_addr, new_size, 157 new_type) ) 158 return false; 159 break; 160 } 161 /* are we w/in the current map region? */ 162 else if ( new_addr > addr && new_addr < (addr + size) ) { 163 if ( !insert_after_region(e820map, nr_map, i, new_addr, new_size, 164 new_type) ) 165 return false; 166 /* fixup current region */ 167 tmp_addr = e820_base_64(&e820map[i]); 168 split64b(new_addr - tmp_addr, &(e820map[i].length_low), 169 &(e820map[i].length_high)); 170 i++; /* adjust to always be that of our region */ 171 /* insert a copy of current region (before adj) after us so */ 172 /* that rest of code can be common with previous case */ 173 if ( !insert_after_region(e820map, nr_map, i, addr, size, type) ) 174 return false; 175 break; 176 } 177 /* is our region in a gap in the map? 
*/ 178 else if ( addr > new_addr ) { 179 if ( !insert_after_region(e820map, nr_map, i-1, new_addr, new_size, 180 new_type) ) 181 return false; 182 break; 183 } 184 } 185 /* if we reached the end of the map without finding an overlapping */ 186 /* region, insert us at the end (note that this test won't trigger */ 187 /* for the second case above because the insert() will have incremented */ 188 /* nr_map and so i++ will still be less) */ 189 if ( i == *nr_map ) { 190 if ( !insert_after_region(e820map, nr_map, i-1, new_addr, new_size, 191 new_type) ) 192 return false; 193 return true; 194 } 195 196 i++; /* move to entry after our inserted one (we're not at end yet) */ 197 198 tmp_addr = e820_base_64(&e820map[i]); 199 tmp_size = e820_length_64(&e820map[i]); 200 201 /* did we split the (formerly) previous region? */ 202 if ( (new_addr >= tmp_addr) && 203 ((new_addr + new_size) < (tmp_addr + tmp_size)) ) { 204 /* then adjust the current region (adj size first) */ 205 split64b((tmp_addr + tmp_size) - (new_addr + new_size), 206 &(e820map[i].length_low), &(e820map[i].length_high)); 207 split64b(new_addr + new_size, 208 &(e820map[i].base_addr_low), &(e820map[i].base_addr_high)); 209 return true; 210 } 211 212 /* if our region completely covers any existing regions, delete them */ 213 while ( (i < *nr_map) && ((new_addr + new_size) >= 214 (tmp_addr + tmp_size)) ) { 215 remove_region(e820map, nr_map, i); 216 tmp_addr = e820_base_64(&e820map[i]); 217 tmp_size = e820_length_64(&e820map[i]); 218 } 219 220 /* finally, if our region partially overlaps an existing region, */ 221 /* then truncate the existing region */ 222 if ( i < *nr_map ) { 223 tmp_addr = e820_base_64(&e820map[i]); 224 tmp_size = e820_length_64(&e820map[i]); 225 if ( (new_addr + new_size) > tmp_addr ) { 226 split64b((tmp_addr + tmp_size) - (new_addr + new_size), 227 &(e820map[i].length_low), &(e820map[i].length_high)); 228 split64b(new_addr + new_size, &(e820map[i].base_addr_low), 229 
&(e820map[i].base_addr_high)); 230 } 231 } 232 233 return true; 234 } 235 236 /* 237 * is_overlapped 238 * Detect whether two ranges are overlapped. 239 * return: true = overlapped 240 */ 241 static bool is_overlapped(uint64_t base, uint64_t end, uint64_t e820_base, 242 uint64_t e820_end) 243 { 244 uint64_t length = end - base, e820_length = e820_end - e820_base; 245 uint64_t min, max; 246 247 min = (base < e820_base)?base:e820_base; 248 max = (end > e820_end)?end:e820_end; 249 250 /* overlapping */ 251 if ( (max - min) < (length + e820_length) ) 252 return true; 253 254 if ( (max - min) == (length + e820_length) 255 && ( ((length == 0) && (base > e820_base) && (base < e820_end)) 256 || ((e820_length == 0) && (e820_base > base) && 257 (e820_base < end)) ) ) 258 return true; 259 260 return false; 261 } 262 263 264 // -------------------------------------------------------------------------------- 265 266 267 268 // copy_e820_map 269 // Copies the raw e820 map from bootloader to new table with room for expansion 270 // return: false = error (no table or table too big for new space) 271 bool copy_e820_map(const multiboot_info_t *mbi) 272 { 273 g_nr_map = 0; 274 275 if ( mbi->flags & MBI_MEMMAP ) { 276 #ifdef JLMDEBUG1 277 bprint("original e820 map:\n"); 278 print_map((memory_map_t *)mbi->mmap_addr, 279 mbi->mmap_length/sizeof(memory_map_t)); 280 #endif 281 282 uint32_t entry_offset = 0; 283 284 while ( entry_offset < mbi->mmap_length && 285 g_nr_map < max_e820_entries) { 286 memory_map_t *entry = (memory_map_t *) 287 (mbi->mmap_addr + entry_offset); 288 289 /* we want to support unordered and/or overlapping entries */ 290 /* so use protect_region() to insert into existing map, since */ 291 /* it handles these cases */ 292 if ( !protect_region(g_copy_e820_map, &g_nr_map, 293 e820_base_64(entry), e820_length_64(entry), 294 entry->type) ) 295 return false; 296 entry_offset += entry->size + sizeof(entry->size); 297 } 298 if ( g_nr_map == max_e820_entries) { 299 
bprint("Too many e820 entries\n"); 300 return false; 301 } 302 } 303 else if ( mbi->flags & MBI_MEMLIMITS ) { 304 bprint("no e820 map, mem_lower=%x, mem_upper=%x\n", 305 mbi->mem_lower, mbi->mem_upper); 306 307 /* lower limit is 0x00000000 - <mem_lower>*0x400 (i.e. in kb) */ 308 g_copy_e820_map[0].base_addr_low = 0; 309 g_copy_e820_map[0].base_addr_high = 0; 310 g_copy_e820_map[0].length_low = mbi->mem_lower << 10; 311 g_copy_e820_map[0].length_high = 0; 312 g_copy_e820_map[0].type = E820_RAM; 313 g_copy_e820_map[0].size = sizeof(memory_map_t) - sizeof(uint32_t); 314 315 /* upper limit is 0x00100000 - <mem_upper>*0x400 */ 316 g_copy_e820_map[1].base_addr_low = 0x100000; 317 g_copy_e820_map[1].base_addr_high = 0; 318 split64b((uint64_t)mbi->mem_upper << 10, 319 &(g_copy_e820_map[1].length_low), 320 &(g_copy_e820_map[1].length_high)); 321 g_copy_e820_map[1].type = E820_RAM; 322 g_copy_e820_map[1].size = sizeof(memory_map_t) - sizeof(uint32_t); 323 324 g_nr_map = 2; 325 } 326 else { 327 bprint("no e820 map nor memory limits provided\n"); 328 return false; 329 } 330 331 return true; 332 } 333 334 335 void replace_e820_map(multiboot_info_t *mbi) 336 { 337 /* replace original with the copy */ 338 mbi->mmap_addr = (uint32_t)g_copy_e820_map; 339 mbi->mmap_length = g_nr_map * sizeof(memory_map_t); 340 mbi->flags |= MBI_MEMMAP; /* in case only MBI_MEMLIMITS was set */ 341 } 342 343 344 bool e820_protect_region(uint64_t addr, uint64_t size, uint32_t type) 345 { 346 return protect_region(g_copy_e820_map, &g_nr_map, addr, size, type); 347 } 348 349 /* 350 * e820_check_region 351 * 352 * Given a range, check which kind of range it covers 353 * return: E820_GAP, it covers gap in e820 map; 354 * E820_MIXED, it covers at least two different kinds of ranges; 355 * E820_XXX, it covers E820_XXX range only; 356 * it will not return 0. 
 */
uint32_t e820_check_region(uint64_t base, uint64_t length)
{
    memory_map_t* e820_entry;
    uint64_t end = base + length, e820_base, e820_end, e820_length;
    uint32_t type;
    uint32_t ret = 0;                  /* 0 = no overlapping range found yet */
    bool gap = true;   /* suppose there is always a virtual gap at first */
    unsigned int i;

    e820_base = 0;
    e820_length = 0;

    /* walk the map visiting, alternately, the gap before entry i and then
     * entry i itself: the comma update advances i only after the non-gap
     * pass, and continue also runs this update (assumes the copied map is
     * sorted and non-overlapping, which protect_region() maintains) */
    for ( i = 0; i < g_nr_map; i = gap ? i : i+1, gap = !gap ) {
        e820_entry = &g_copy_e820_map[i];
        if ( gap ) {
            /* deal with the gap in e820 map: it runs from the end of the
             * previous region (e820_base + e820_length from the last pass,
             * 0 initially) up to the start of entry i */
            e820_base = e820_base + e820_length;
            e820_length = e820_base_64(e820_entry) - e820_base;
            type = E820_GAP;
        }
        else {
            /* deal with the normal item in e820 map */
            e820_base = e820_base_64(e820_entry);
            e820_length = e820_length_64(e820_entry);
            type = e820_entry->type;
        }

        if ( e820_length == 0 )
            continue;   /* if the range is zero, then skip */

        e820_end = e820_base + e820_length;

        if ( !is_overlapped(base, end, e820_base, e820_end) )
            continue;   /* if no overlapping, then skip */

        /* if the value of ret is not assigned before,
           then set ret to type directly */
        if ( ret == 0 ) {
            ret = type;
            continue;
        }

        /* if the value of ret is assigned before but ret is equal to type,
           then no need to do anything */
        if ( ret == type )
            continue;

        /* if the value of ret is assigned before but it is GAP,
           then no need to do anything since any type merged with GAP is GAP */
        if ( ret == E820_GAP )
            continue;

        /* if the value of ret is assigned before but it is not GAP and type
           is GAP now this time, then set ret to GAP since any type merged
           with GAP is GAP. */
        if ( type == E820_GAP ) {
            ret = E820_GAP;
            continue;
        }

        /* if the value of ret is assigned before but both ret and type are
           not GAP and their values are not equal, then set ret to MIXED
           since any two non-GAP values are merged into MIXED if they are
           not equal. */
        ret = E820_MIXED;
    }

    /* deal with the last gap: from the end of the final region to 2^64-1 */
    if ( is_overlapped(base, end, e820_base + e820_length, (uint64_t)-1) )
        ret = E820_GAP;

    /* print the result */
    bprint(" (range from %016Lx to %016Lx is in ", base, base + length);
    switch (ret) {
        case E820_RAM:
            bprint("E820_RAM)\n"); break;
        case E820_RESERVED:
            bprint("E820_RESERVED)\n"); break;
        case E820_ACPI:
            bprint("E820_ACPI)\n"); break;
        case E820_NVS:
            bprint("E820_NVS)\n"); break;
        case E820_UNUSABLE:
            bprint("E820_UNUSABLE)\n"); break;
        case E820_GAP:
            bprint("E820_GAP)\n"); break;
        case E820_MIXED:
            bprint("E820_MIXED)\n"); break;
        default:
            bprint("UNKNOWN)\n");
    }

    return ret;
}


// e820_reserve_ram
// Given the range, any ram range in e820 is in it, change type to reserved.
456 // return: false = error 457 bool e820_reserve_ram(uint64_t base, uint64_t length) 458 { 459 memory_map_t* e820_entry; 460 uint64_t e820_base, e820_length, e820_end; 461 uint64_t end; 462 unsigned int i; 463 464 if ( length == 0 ) 465 return true; 466 467 end = base + length; 468 469 /* find where our region should cover the ram in e820 */ 470 for ( i = 0; i < g_nr_map; i++ ) { 471 e820_entry = &g_copy_e820_map[i]; 472 e820_base = e820_base_64(e820_entry); 473 e820_length = e820_length_64(e820_entry); 474 e820_end = e820_base + e820_length; 475 476 /* if not ram, no need to deal with */ 477 if ( e820_entry->type != E820_RAM ) 478 continue; 479 480 /* if the range is before the current ram range, skip the ram range */ 481 if ( end <= e820_base ) 482 continue; 483 /* if the range is after the current ram range, skip the ram range */ 484 if ( base >= e820_end ) 485 continue; 486 487 /* case 1: the current ram range is within the range: 488 base, e820_base, e820_end, end */ 489 if ( (base <= e820_base) && (e820_end <= end) ) 490 e820_entry->type = E820_RESERVED; 491 /* case 2: overlapping: 492 base, e820_base, end, e820_end */ 493 else if ( (e820_base >= base) && (end > e820_base) && 494 (e820_end > end) ) { 495 /* split the current ram map */ 496 if ( !insert_after_region(g_copy_e820_map, &g_nr_map, i-1, 497 e820_base, (end - e820_base), 498 E820_RESERVED) ) 499 return false; 500 /* fixup the current ram map */ 501 i++; 502 split64b(end, &(g_copy_e820_map[i].base_addr_low), 503 &(g_copy_e820_map[i].base_addr_high)); 504 split64b(e820_end - end, &(g_copy_e820_map[i].length_low), 505 &(g_copy_e820_map[i].length_high)); 506 /* no need to check more */ 507 break; 508 } 509 /* case 3: overlapping: 510 e820_base, base, e820_end, end */ 511 else if ( (base > e820_base) && (e820_end > base) && 512 (end >= e820_end) ) { 513 /* fixup the current ram map */ 514 split64b((base - e820_base), &(g_copy_e820_map[i].length_low), 515 &(g_copy_e820_map[i].length_high)); 516 /* split 
the current ram map */ 517 if ( !insert_after_region(g_copy_e820_map, &g_nr_map, i, base, 518 (e820_end - base), E820_RESERVED) ) 519 return false; 520 i++; 521 } 522 /* case 4: the range is within the current ram range: 523 e820_base, base, end, e820_end */ 524 else if ( (base > e820_base) && (e820_end > end) ) { 525 /* fixup the current ram map */ 526 split64b((base - e820_base), &(g_copy_e820_map[i].length_low), 527 &(g_copy_e820_map[i].length_high)); 528 /* split the current ram map */ 529 if ( !insert_after_region(g_copy_e820_map, &g_nr_map, i, base, 530 length, E820_RESERVED) ) 531 return false; 532 i++; 533 /* fixup the rest of the current ram map */ 534 if ( !insert_after_region(g_copy_e820_map, &g_nr_map, i, end, 535 (e820_end - end), e820_entry->type) ) 536 return false; 537 i++; 538 /* no need to check more */ 539 break; 540 } 541 else { 542 bprint("we should never get here\n"); 543 return false; 544 } 545 } 546 547 return true; 548 } 549 550 551 void print_e820_map(void) 552 { 553 print_map(g_copy_e820_map, g_nr_map); 554 } 555 556 557 bool get_ram_ranges(uint64_t *min_lo_ram, uint64_t *max_lo_ram, 558 uint64_t *min_hi_ram, uint64_t *max_hi_ram) 559 { 560 if ( min_lo_ram == NULL || max_lo_ram == NULL || 561 min_hi_ram == NULL || max_hi_ram == NULL ) 562 return false; 563 564 *min_lo_ram = *min_hi_ram = ~0ULL; 565 *max_lo_ram = *max_hi_ram = 0; 566 bool found_reserved_region = false; 567 unsigned int i; 568 569 for ( i = 0; i < g_nr_map; i++ ) { 570 memory_map_t *entry = &g_copy_e820_map[i]; 571 uint64_t base = e820_base_64(entry); 572 uint64_t limit = base + e820_length_64(entry); 573 574 if ( entry->type == E820_RAM ) { 575 /* if range straddles 4GB boundary, that is an error */ 576 if ( base < 0x100000000ULL && limit > 0x100000000ULL ) { 577 bprint("e820 memory range straddles 4GB boundary\n"); 578 return false; 579 } 580 581 /* 582 * some BIOSes put legacy USB buffers in reserved regions <4GB, 583 * which if DMA protected cause SMM to hang, so make 
sure that 584 * we don't overlap any of these even if that wastes RAM 585 */ 586 if ( !found_reserved_region ) { 587 if ( base < 0x100000000ULL && base < *min_lo_ram ) 588 *min_lo_ram = base; 589 if ( limit <= 0x100000000ULL && limit > *max_lo_ram ) 590 *max_lo_ram = limit; 591 } 592 else { /* need to reserve low RAM above reserved regions */ 593 if ( base < 0x100000000ULL ) { 594 bprint("discarding RAM above reserved regions: 0x%Lx - 0x%Lx\n", base, limit); 595 if ( !e820_reserve_ram(base, limit - base) ) 596 return false; 597 } 598 } 599 600 if ( base >= 0x100000000ULL && base < *min_hi_ram ) 601 *min_hi_ram = base; 602 if ( limit > 0x100000000ULL && limit > *max_hi_ram ) 603 *max_hi_ram = limit; 604 } 605 else { 606 /* parts of low memory may be reserved for cseg, ISA hole, 607 etc. but these seem OK to DMA protect, so ignore reserved 608 regions <0x100000 */ 609 if ( *min_lo_ram != ~0ULL && limit > 0x100000ULL ) 610 found_reserved_region = true; 611 } 612 } 613 614 /* no low RAM found */ 615 if ( *min_lo_ram >= *max_lo_ram ) { 616 bprint("no low ram in e820 map\n"); 617 return false; 618 } 619 /* no high RAM found */ 620 if ( *min_hi_ram >= *max_hi_ram ) 621 *min_hi_ram = *max_hi_ram = 0; 622 623 return true; 624 } 625 626 627 uint32_t get_num_e820_ents() 628 { 629 return g_nr_map; 630 } 631 632 633 /* find highest (< <limit>) RAM region of at least <size> bytes */ 634 void get_highest_sized_ram(uint64_t size, uint64_t limit, 635 uint64_t *ram_base, uint64_t *ram_size) 636 { 637 uint64_t last_fit_base = 0, last_fit_size = 0; 638 unsigned int i; 639 640 if ( ram_base == NULL || ram_size == NULL ) 641 return; 642 643 for ( i = 0; i < g_nr_map; i++ ) { 644 memory_map_t *entry = &g_copy_e820_map[i]; 645 646 if ( entry->type == E820_RAM ) { 647 uint64_t base = e820_base_64(entry); 648 uint64_t length = e820_length_64(entry); 649 650 /* over 4GB so use the last region that fit */ 651 if ( base + length > limit ) 652 break; 653 if ( size <= length ) { 654 
last_fit_base = base; 655 last_fit_size = length; 656 } 657 } 658 } 659 660 *ram_base = last_fit_base; 661 *ram_size = last_fit_size; 662 } 663