1 /* 2 * kaslr.c 3 * 4 * This contains the routines needed to generate a reasonable level of 5 * entropy to choose a randomized kernel base address offset in support 6 * of Kernel Address Space Layout Randomization (KASLR). Additionally 7 * handles walking the physical memory maps (and tracking memory regions 8 * to avoid) in order to select a physical memory location that can 9 * contain the entire properly aligned running kernel image. 10 * 11 */ 12 13 /* 14 * isspace() in linux/ctype.h is expected by next_args() to filter 15 * out "space/lf/tab". While boot/ctype.h conflicts with linux/ctype.h, 16 * since isdigit() is implemented in both of them. Hence disable it 17 * here. 18 */ 19 #define BOOT_CTYPE_H 20 21 /* 22 * _ctype[] in lib/ctype.c is needed by isspace() of linux/ctype.h. 23 * While both lib/ctype.c and lib/cmdline.c will bring EXPORT_SYMBOL 24 * which is meaningless and will cause compiling error in some cases. 25 * So do not include linux/export.h and define EXPORT_SYMBOL(sym) 26 * as empty. 27 */ 28 #define _LINUX_EXPORT_H 29 #define EXPORT_SYMBOL(sym) 30 31 #include "misc.h" 32 #include "error.h" 33 #include "../string.h" 34 35 #include <generated/compile.h> 36 #include <linux/module.h> 37 #include <linux/uts.h> 38 #include <linux/utsname.h> 39 #include <linux/ctype.h> 40 #include <generated/utsrelease.h> 41 42 /* Macros used by the included decompressor code below. */ 43 #define STATIC 44 #include <linux/decompress/mm.h> 45 46 extern unsigned long get_cmd_line_ptr(void); 47 48 /* Simplified build-specific string for starting entropy. */ 49 static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@" 50 LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION; 51 52 static unsigned long rotate_xor(unsigned long hash, const void *area, 53 size_t size) 54 { 55 size_t i; 56 unsigned long *ptr = (unsigned long *)area; 57 58 for (i = 0; i < size / sizeof(hash); i++) { 59 /* Rotate by odd number of bits and XOR. */ 60 hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7); 61 hash ^= ptr[i]; 62 } 63 64 return hash; 65 } 66 67 /* Attempt to create a simple but unpredictable starting entropy. */ 68 static unsigned long get_boot_seed(void) 69 { 70 unsigned long hash = 0; 71 72 hash = rotate_xor(hash, build_str, sizeof(build_str)); 73 hash = rotate_xor(hash, boot_params, sizeof(*boot_params)); 74 75 return hash; 76 } 77 78 #define KASLR_COMPRESSED_BOOT 79 #include "../../lib/kaslr.c" 80 81 struct mem_vector { 82 unsigned long long start; 83 unsigned long long size; 84 }; 85 86 /* Only supporting at most 4 unusable memmap regions with kaslr */ 87 #define MAX_MEMMAP_REGIONS 4 88 89 static bool memmap_too_large; 90 91 92 /* Store memory limit specified by "mem=nn[KMG]" or "memmap=nn[KMG]" */ 93 unsigned long long mem_limit = ULLONG_MAX; 94 95 96 enum mem_avoid_index { 97 MEM_AVOID_ZO_RANGE = 0, 98 MEM_AVOID_INITRD, 99 MEM_AVOID_CMDLINE, 100 MEM_AVOID_BOOTPARAMS, 101 MEM_AVOID_MEMMAP_BEGIN, 102 MEM_AVOID_MEMMAP_END = MEM_AVOID_MEMMAP_BEGIN + MAX_MEMMAP_REGIONS - 1, 103 MEM_AVOID_MAX, 104 }; 105 106 static struct mem_vector mem_avoid[MEM_AVOID_MAX]; 107 108 static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two) 109 { 110 /* Item one is entirely before item two. */ 111 if (one->start + one->size <= two->start) 112 return false; 113 /* Item one is entirely after item two. */ 114 if (one->start >= two->start + two->size) 115 return false; 116 return true; 117 } 118 119 char *skip_spaces(const char *str) 120 { 121 while (isspace(*str)) 122 ++str; 123 return (char *)str; 124 } 125 #include "../../../../lib/ctype.c" 126 #include "../../../../lib/cmdline.c" 127 128 static int 129 parse_memmap(char *p, unsigned long long *start, unsigned long long *size) 130 { 131 char *oldp; 132 133 if (!p) 134 return -EINVAL; 135 136 /* We don't care about this option here */ 137 if (!strncmp(p, "exactmap", 8)) 138 return -EINVAL; 139 140 oldp = p; 141 *size = memparse(p, &p); 142 if (p == oldp) 143 return -EINVAL; 144 145 switch (*p) { 146 case '#': 147 case '$': 148 case '!': 149 *start = memparse(p + 1, &p); 150 return 0; 151 case '@': 152 /* memmap=nn@ss specifies usable region, should be skipped */ 153 *size = 0; 154 /* Fall through */ 155 default: 156 /* 157 * If w/o offset, only size specified, memmap=nn[KMG] has the 158 * same behaviour as mem=nn[KMG]. It limits the max address 159 * system can use. Region above the limit should be avoided. 160 */ 161 *start = 0; 162 return 0; 163 } 164 165 return -EINVAL; 166 } 167 168 static void mem_avoid_memmap(char *str) 169 { 170 static int i; 171 int rc; 172 173 if (i >= MAX_MEMMAP_REGIONS) 174 return; 175 176 while (str && (i < MAX_MEMMAP_REGIONS)) { 177 int rc; 178 unsigned long long start, size; 179 char *k = strchr(str, ','); 180 181 if (k) 182 *k++ = 0; 183 184 rc = parse_memmap(str, &start, &size); 185 if (rc < 0) 186 break; 187 str = k; 188 189 if (start == 0) { 190 /* Store the specified memory limit if size > 0 */ 191 if (size > 0) 192 mem_limit = size; 193 194 continue; 195 } 196 197 mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].start = start; 198 mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].size = size; 199 i++; 200 } 201 202 /* More than 4 memmaps, fail kaslr */ 203 if ((i >= MAX_MEMMAP_REGIONS) && str) 204 memmap_too_large = true; 205 } 206 207 static int handle_mem_memmap(void) 208 { 209 char *args = (char *)get_cmd_line_ptr(); 210 size_t len = strlen((char *)args); 211 char *tmp_cmdline; 212 char *param, *val; 213 u64 mem_size; 214 215 if (!strstr(args, "memmap=") && !strstr(args, "mem=")) 216 return 0; 217 218 tmp_cmdline = malloc(len + 1); 219 if (!tmp_cmdline ) 220 error("Failed to allocate space for tmp_cmdline"); 221 222 memcpy(tmp_cmdline, args, len); 223 tmp_cmdline[len] = 0; 224 args = tmp_cmdline; 225 226 /* Chew leading spaces */ 227 args = skip_spaces(args); 228 229 while (*args) { 230 args = next_arg(args, ¶m, &val); 231 /* Stop at -- */ 232 if (!val && strcmp(param, "--") == 0) { 233 warn("Only '--' specified in cmdline"); 234 free(tmp_cmdline); 235 return -1; 236 } 237 238 if (!strcmp(param, "memmap")) { 239 mem_avoid_memmap(val); 240 } else if (!strcmp(param, "mem")) { 241 char *p = val; 242 243 if (!strcmp(p, "nopentium")) 244 continue; 245 mem_size = memparse(p, &p); 246 if (mem_size == 0) { 247 free(tmp_cmdline); 248 return -EINVAL; 249 } 250 mem_limit = mem_size; 251 } 252 } 253 254 free(tmp_cmdline); 255 return 0; 256 } 257 258 /* 259 * In theory, KASLR can put the kernel anywhere in the range of [16M, 64T). 260 * The mem_avoid array is used to store the ranges that need to be avoided 261 * when KASLR searches for an appropriate random address. We must avoid any 262 * regions that are unsafe to overlap with during decompression, and other 263 * things like the initrd, cmdline and boot_params. This comment seeks to 264 * explain mem_avoid as clearly as possible since incorrect mem_avoid 265 * memory ranges lead to really hard to debug boot failures. 266 * 267 * The initrd, cmdline, and boot_params are trivial to identify for 268 * avoiding. They are MEM_AVOID_INITRD, MEM_AVOID_CMDLINE, and 269 * MEM_AVOID_BOOTPARAMS respectively below. 270 * 271 * What is not obvious how to avoid is the range of memory that is used 272 * during decompression (MEM_AVOID_ZO_RANGE below). This range must cover 273 * the compressed kernel (ZO) and its run space, which is used to extract 274 * the uncompressed kernel (VO) and relocs. 275 * 276 * ZO's full run size sits against the end of the decompression buffer, so 277 * we can calculate where text, data, bss, etc of ZO are positioned more 278 * easily. 279 * 280 * For additional background, the decompression calculations can be found 281 * in header.S, and the memory diagram is based on the one found in misc.c. 282 * 283 * The following conditions are already enforced by the image layouts and 284 * associated code: 285 * - input + input_size >= output + output_size 286 * - kernel_total_size <= init_size 287 * - kernel_total_size <= output_size (see Note below) 288 * - output + init_size >= output + output_size 289 * 290 * (Note that kernel_total_size and output_size have no fundamental 291 * relationship, but output_size is passed to choose_random_location 292 * as a maximum of the two. The diagram is showing a case where 293 * kernel_total_size is larger than output_size, but this case is 294 * handled by bumping output_size.) 295 * 296 * The above conditions can be illustrated by a diagram: 297 * 298 * 0 output input input+input_size output+init_size 299 * | | | | | 300 * | | | | | 301 * |-----|--------|--------|--------------|-----------|--|-------------| 302 * | | | 303 * | | | 304 * output+init_size-ZO_INIT_SIZE output+output_size output+kernel_total_size 305 * 306 * [output, output+init_size) is the entire memory range used for 307 * extracting the compressed image. 308 * 309 * [output, output+kernel_total_size) is the range needed for the 310 * uncompressed kernel (VO) and its run size (bss, brk, etc). 311 * 312 * [output, output+output_size) is VO plus relocs (i.e. the entire 313 * uncompressed payload contained by ZO). This is the area of the buffer 314 * written to during decompression. 315 * 316 * [output+init_size-ZO_INIT_SIZE, output+init_size) is the worst-case 317 * range of the copied ZO and decompression code. (i.e. the range 318 * covered backwards of size ZO_INIT_SIZE, starting from output+init_size.) 319 * 320 * [input, input+input_size) is the original copied compressed image (ZO) 321 * (i.e. it does not include its run size). This range must be avoided 322 * because it contains the data used for decompression. 323 * 324 * [input+input_size, output+init_size) is [_text, _end) for ZO. This 325 * range includes ZO's heap and stack, and must be avoided since it 326 * performs the decompression. 327 * 328 * Since the above two ranges need to be avoided and they are adjacent, 329 * they can be merged, resulting in: [input, output+init_size) which 330 * becomes the MEM_AVOID_ZO_RANGE below. 331 */ 332 static void mem_avoid_init(unsigned long input, unsigned long input_size, 333 unsigned long output) 334 { 335 unsigned long init_size = boot_params->hdr.init_size; 336 u64 initrd_start, initrd_size; 337 u64 cmd_line, cmd_line_size; 338 char *ptr; 339 340 /* 341 * Avoid the region that is unsafe to overlap during 342 * decompression. 343 */ 344 mem_avoid[MEM_AVOID_ZO_RANGE].start = input; 345 mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input; 346 add_identity_map(mem_avoid[MEM_AVOID_ZO_RANGE].start, 347 mem_avoid[MEM_AVOID_ZO_RANGE].size); 348 349 /* Avoid initrd. */ 350 initrd_start = (u64)boot_params->ext_ramdisk_image << 32; 351 initrd_start |= boot_params->hdr.ramdisk_image; 352 initrd_size = (u64)boot_params->ext_ramdisk_size << 32; 353 initrd_size |= boot_params->hdr.ramdisk_size; 354 mem_avoid[MEM_AVOID_INITRD].start = initrd_start; 355 mem_avoid[MEM_AVOID_INITRD].size = initrd_size; 356 /* No need to set mapping for initrd, it will be handled in VO. */ 357 358 /* Avoid kernel command line. */ 359 cmd_line = (u64)boot_params->ext_cmd_line_ptr << 32; 360 cmd_line |= boot_params->hdr.cmd_line_ptr; 361 /* Calculate size of cmd_line. */ 362 ptr = (char *)(unsigned long)cmd_line; 363 for (cmd_line_size = 0; ptr[cmd_line_size++]; ) 364 ; 365 mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line; 366 mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size; 367 add_identity_map(mem_avoid[MEM_AVOID_CMDLINE].start, 368 mem_avoid[MEM_AVOID_CMDLINE].size); 369 370 /* Avoid boot parameters. */ 371 mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params; 372 mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params); 373 add_identity_map(mem_avoid[MEM_AVOID_BOOTPARAMS].start, 374 mem_avoid[MEM_AVOID_BOOTPARAMS].size); 375 376 /* We don't need to set a mapping for setup_data. */ 377 378 /* Mark the memmap regions we need to avoid */ 379 handle_mem_memmap(); 380 381 #ifdef CONFIG_X86_VERBOSE_BOOTUP 382 /* Make sure video RAM can be used. */ 383 add_identity_map(0, PMD_SIZE); 384 #endif 385 } 386 387 /* 388 * Does this memory vector overlap a known avoided area? If so, record the 389 * overlap region with the lowest address. 390 */ 391 static bool mem_avoid_overlap(struct mem_vector *img, 392 struct mem_vector *overlap) 393 { 394 int i; 395 struct setup_data *ptr; 396 unsigned long earliest = img->start + img->size; 397 bool is_overlapping = false; 398 399 for (i = 0; i < MEM_AVOID_MAX; i++) { 400 if (mem_overlaps(img, &mem_avoid[i]) && 401 mem_avoid[i].start < earliest) { 402 *overlap = mem_avoid[i]; 403 earliest = overlap->start; 404 is_overlapping = true; 405 } 406 } 407 408 /* Avoid all entries in the setup_data linked list. */ 409 ptr = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data; 410 while (ptr) { 411 struct mem_vector avoid; 412 413 avoid.start = (unsigned long)ptr; 414 avoid.size = sizeof(*ptr) + ptr->len; 415 416 if (mem_overlaps(img, &avoid) && (avoid.start < earliest)) { 417 *overlap = avoid; 418 earliest = overlap->start; 419 is_overlapping = true; 420 } 421 422 ptr = (struct setup_data *)(unsigned long)ptr->next; 423 } 424 425 return is_overlapping; 426 } 427 428 struct slot_area { 429 unsigned long addr; 430 int num; 431 }; 432 433 #define MAX_SLOT_AREA 100 434 435 static struct slot_area slot_areas[MAX_SLOT_AREA]; 436 437 static unsigned long slot_max; 438 439 static unsigned long slot_area_index; 440 441 static void store_slot_info(struct mem_vector *region, unsigned long image_size) 442 { 443 struct slot_area slot_area; 444 445 if (slot_area_index == MAX_SLOT_AREA) 446 return; 447 448 slot_area.addr = region->start; 449 slot_area.num = (region->size - image_size) / 450 CONFIG_PHYSICAL_ALIGN + 1; 451 452 if (slot_area.num > 0) { 453 slot_areas[slot_area_index++] = slot_area; 454 slot_max += slot_area.num; 455 } 456 } 457 458 static unsigned long slots_fetch_random(void) 459 { 460 unsigned long slot; 461 int i; 462 463 /* Handle case of no slots stored. */ 464 if (slot_max == 0) 465 return 0; 466 467 slot = kaslr_get_random_long("Physical") % slot_max; 468 469 for (i = 0; i < slot_area_index; i++) { 470 if (slot >= slot_areas[i].num) { 471 slot -= slot_areas[i].num; 472 continue; 473 } 474 return slot_areas[i].addr + slot * CONFIG_PHYSICAL_ALIGN; 475 } 476 477 if (i == slot_area_index) 478 debug_putstr("slots_fetch_random() failed!?\n"); 479 return 0; 480 } 481 482 static void process_e820_entry(struct boot_e820_entry *entry, 483 unsigned long minimum, 484 unsigned long image_size) 485 { 486 struct mem_vector region, overlap; 487 struct slot_area slot_area; 488 unsigned long start_orig, end; 489 struct boot_e820_entry cur_entry; 490 491 /* Skip non-RAM entries. */ 492 if (entry->type != E820_TYPE_RAM) 493 return; 494 495 /* On 32-bit, ignore entries entirely above our maximum. */ 496 if (IS_ENABLED(CONFIG_X86_32) && entry->addr >= KERNEL_IMAGE_SIZE) 497 return; 498 499 /* Ignore entries entirely below our minimum. */ 500 if (entry->addr + entry->size < minimum) 501 return; 502 503 /* Ignore entries above memory limit */ 504 end = min(entry->size + entry->addr, mem_limit); 505 if (entry->addr >= end) 506 return; 507 cur_entry.addr = entry->addr; 508 cur_entry.size = end - entry->addr; 509 510 region.start = cur_entry.addr; 511 region.size = cur_entry.size; 512 513 /* Give up if slot area array is full. */ 514 while (slot_area_index < MAX_SLOT_AREA) { 515 start_orig = region.start; 516 517 /* Potentially raise address to minimum location. */ 518 if (region.start < minimum) 519 region.start = minimum; 520 521 /* Potentially raise address to meet alignment needs. */ 522 region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN); 523 524 /* Did we raise the address above this e820 region? */ 525 if (region.start > cur_entry.addr + cur_entry.size) 526 return; 527 528 /* Reduce size by any delta from the original address. */ 529 region.size -= region.start - start_orig; 530 531 /* On 32-bit, reduce region size to fit within max size. */ 532 if (IS_ENABLED(CONFIG_X86_32) && 533 region.start + region.size > KERNEL_IMAGE_SIZE) 534 region.size = KERNEL_IMAGE_SIZE - region.start; 535 536 /* Return if region can't contain decompressed kernel */ 537 if (region.size < image_size) 538 return; 539 540 /* If nothing overlaps, store the region and return. */ 541 if (!mem_avoid_overlap(®ion, &overlap)) { 542 store_slot_info(®ion, image_size); 543 return; 544 } 545 546 /* Store beginning of region if holds at least image_size. */ 547 if (overlap.start > region.start + image_size) { 548 struct mem_vector beginning; 549 550 beginning.start = region.start; 551 beginning.size = overlap.start - region.start; 552 store_slot_info(&beginning, image_size); 553 } 554 555 /* Return if overlap extends to or past end of region. */ 556 if (overlap.start + overlap.size >= region.start + region.size) 557 return; 558 559 /* Clip off the overlapping region and start over. */ 560 region.size -= overlap.start - region.start + overlap.size; 561 region.start = overlap.start + overlap.size; 562 } 563 } 564 565 static unsigned long find_random_phys_addr(unsigned long minimum, 566 unsigned long image_size) 567 { 568 int i; 569 unsigned long addr; 570 571 /* Check if we had too many memmaps. */ 572 if (memmap_too_large) { 573 debug_putstr("Aborted e820 scan (more than 4 memmap= args)!\n"); 574 return 0; 575 } 576 577 /* Make sure minimum is aligned. */ 578 minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN); 579 580 /* Verify potential e820 positions, appending to slots list. */ 581 for (i = 0; i < boot_params->e820_entries; i++) { 582 process_e820_entry(&boot_params->e820_table[i], minimum, 583 image_size); 584 if (slot_area_index == MAX_SLOT_AREA) { 585 debug_putstr("Aborted e820 scan (slot_areas full)!\n"); 586 break; 587 } 588 } 589 590 return slots_fetch_random(); 591 } 592 593 static unsigned long find_random_virt_addr(unsigned long minimum, 594 unsigned long image_size) 595 { 596 unsigned long slots, random_addr; 597 598 /* Make sure minimum is aligned. */ 599 minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN); 600 /* Align image_size for easy slot calculations. */ 601 image_size = ALIGN(image_size, CONFIG_PHYSICAL_ALIGN); 602 603 /* 604 * There are how many CONFIG_PHYSICAL_ALIGN-sized slots 605 * that can hold image_size within the range of minimum to 606 * KERNEL_IMAGE_SIZE? 607 */ 608 slots = (KERNEL_IMAGE_SIZE - minimum - image_size) / 609 CONFIG_PHYSICAL_ALIGN + 1; 610 611 random_addr = kaslr_get_random_long("Virtual") % slots; 612 613 return random_addr * CONFIG_PHYSICAL_ALIGN + minimum; 614 } 615 616 /* 617 * Since this function examines addresses much more numerically, 618 * it takes the input and output pointers as 'unsigned long'. 619 */ 620 void choose_random_location(unsigned long input, 621 unsigned long input_size, 622 unsigned long *output, 623 unsigned long output_size, 624 unsigned long *virt_addr) 625 { 626 unsigned long random_addr, min_addr; 627 628 if (cmdline_find_option_bool("nokaslr")) { 629 warn("KASLR disabled: 'nokaslr' on cmdline."); 630 return; 631 } 632 633 boot_params->hdr.loadflags |= KASLR_FLAG; 634 635 /* Prepare to add new identity pagetables on demand. */ 636 initialize_identity_maps(); 637 638 /* Record the various known unsafe memory ranges. */ 639 mem_avoid_init(input, input_size, *output); 640 641 /* 642 * Low end of the randomization range should be the 643 * smaller of 512M or the initial kernel image 644 * location: 645 */ 646 min_addr = min(*output, 512UL << 20); 647 648 /* Walk e820 and find a random address. */ 649 random_addr = find_random_phys_addr(min_addr, output_size); 650 if (!random_addr) { 651 warn("Physical KASLR disabled: no suitable memory region!"); 652 } else { 653 /* Update the new physical address location. */ 654 if (*output != random_addr) { 655 add_identity_map(random_addr, output_size); 656 *output = random_addr; 657 } 658 659 /* 660 * This loads the identity mapping page table. 661 * This should only be done if a new physical address 662 * is found for the kernel, otherwise we should keep 663 * the old page table to make it be like the "nokaslr" 664 * case. 665 */ 666 finalize_identity_maps(); 667 } 668 669 670 /* Pick random virtual address starting from LOAD_PHYSICAL_ADDR. */ 671 if (IS_ENABLED(CONFIG_X86_64)) 672 random_addr = find_random_virt_addr(LOAD_PHYSICAL_ADDR, output_size); 673 *virt_addr = random_addr; 674 } 675