/*
 * kaslr.c
 *
 * This contains the routines needed to generate a reasonable level of
 * entropy to choose a randomized kernel base address offset in support
 * of Kernel Address Space Layout Randomization (KASLR). Additionally
 * handles walking the physical memory maps (and tracking memory regions
 * to avoid) in order to select a physical memory location that can
 * contain the entire properly aligned running kernel image.
 *
 */
#include "misc.h"
#include "error.h"

#include <asm/msr.h>
#include <asm/archrandom.h>
#include <asm/e820.h>

#include <generated/compile.h>
#include <linux/module.h>
#include <linux/uts.h>
#include <linux/utsname.h>
#include <generated/utsrelease.h>

/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
		LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;

#define I8254_PORT_CONTROL	0x43
#define I8254_PORT_COUNTER0	0x40
#define I8254_CMD_READBACK	0xC0
#define I8254_SELECT_COUNTER0	0x02
#define I8254_STATUS_NOTREADY	0x40
static inline u16 i8254(void)
{
	u16 status, timer;

	do {
		/* outb() takes the value first, then the port. */
		outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
		     I8254_PORT_CONTROL);
		status = inb(I8254_PORT_COUNTER0);
		timer  = inb(I8254_PORT_COUNTER0);
		timer |= inb(I8254_PORT_COUNTER0) << 8;
	} while (status & I8254_STATUS_NOTREADY);

	return timer;
}

static unsigned long rotate_xor(unsigned long hash, const void *area,
				size_t size)
{
	size_t i;
	unsigned long *ptr = (unsigned long *)area;

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}

/* Attempt to create a simple but unpredictable starting entropy. */
static unsigned long get_random_boot(void)
{
	unsigned long hash = 0;

	hash = rotate_xor(hash, build_str, sizeof(build_str));
	hash = rotate_xor(hash, boot_params, sizeof(*boot_params));

	return hash;
}

static unsigned long get_random_long(const char *purpose)
{
#ifdef CONFIG_X86_64
	const unsigned long mix_const = 0x5d6008cbf3848dd3UL;
#else
	const unsigned long mix_const = 0x3f39e593UL;
#endif
	unsigned long raw, random = get_random_boot();
	bool use_i8254 = true;

	debug_putstr(purpose);
	debug_putstr(" KASLR using");

	if (has_cpuflag(X86_FEATURE_RDRAND)) {
		debug_putstr(" RDRAND");
		if (rdrand_long(&raw)) {
			random ^= raw;
			use_i8254 = false;
		}
	}

	if (has_cpuflag(X86_FEATURE_TSC)) {
		debug_putstr(" RDTSC");
		raw = rdtsc();

		random ^= raw;
		use_i8254 = false;
	}

	if (use_i8254) {
		debug_putstr(" i8254");
		random ^= i8254();
	}

	/* Circular multiply for better bit diffusion */
	asm("mul %3"
	    : "=a" (random), "=d" (raw)
	    : "a" (random), "rm" (mix_const));
	random += raw;

	debug_putstr("...\n");

	return random;
}
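/*
 * Editorial sketch, not part of the original flow: a plain-C equivalent
 * of the "mul" diffusion step in get_random_long() above, for a 64-bit
 * build where the compiler provides unsigned __int128. The inline asm
 * leaves the low half of random * mix_const in RAX and the high half in
 * RDX; "random += raw" then folds the high half back into the result.
 * The helper name is hypothetical and nothing calls it.
 */
#ifdef CONFIG_X86_64
static inline unsigned long kaslr_diffuse_sketch(unsigned long random,
						 unsigned long mix_const)
{
	unsigned __int128 product = (unsigned __int128)random * mix_const;

	/* Low 64 bits plus high 64 bits, matching "random += raw". */
	return (unsigned long)product + (unsigned long)(product >> 64);
}
#endif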
struct mem_vector {
	unsigned long start;
	unsigned long size;
};

enum mem_avoid_index {
	MEM_AVOID_ZO_RANGE = 0,
	MEM_AVOID_INITRD,
	MEM_AVOID_CMDLINE,
	MEM_AVOID_BOOTPARAMS,
	MEM_AVOID_MAX,
};

static struct mem_vector mem_avoid[MEM_AVOID_MAX];

static bool mem_contains(struct mem_vector *region, struct mem_vector *item)
{
	/* Item at least partially before region. */
	if (item->start < region->start)
		return false;
	/* Item at least partially after region. */
	if (item->start + item->size > region->start + region->size)
		return false;
	return true;
}

static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
{
	/* Item one is entirely before item two. */
	if (one->start + one->size <= two->start)
		return false;
	/* Item one is entirely after item two. */
	if (one->start >= two->start + two->size)
		return false;
	return true;
}
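/*
 * Editorial worked example (hypothetical values): for a region of
 * [0x100000, 0x300000) and an item of [0x200000, 0x280000), the item
 * lies entirely inside the region, so mem_contains() returns true.
 * Shift the item to [0x280000, 0x310000) and mem_contains() returns
 * false (the item ends past the region), while mem_overlaps() still
 * returns true because the two ranges share [0x280000, 0x300000).
 */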
/*
 * In theory, KASLR can put the kernel anywhere in the range of [16M, 64T).
 * The mem_avoid array is used to store the ranges that need to be avoided
 * when KASLR searches for an appropriate random address. We must avoid any
 * regions that are unsafe to overlap with during decompression, and other
 * things like the initrd, cmdline and boot_params. This comment seeks to
 * explain mem_avoid as clearly as possible since incorrect mem_avoid
 * memory ranges lead to really hard to debug boot failures.
 *
 * The initrd, cmdline, and boot_params are trivial to identify for
 * avoiding. They are MEM_AVOID_INITRD, MEM_AVOID_CMDLINE, and
 * MEM_AVOID_BOOTPARAMS respectively below.
 *
 * What is less obvious is how to avoid the range of memory that is used
 * during decompression (MEM_AVOID_ZO_RANGE below). This range must cover
 * the compressed kernel (ZO) and its run space, which is used to extract
 * the uncompressed kernel (VO) and relocs.
 *
 * ZO's full run size sits against the end of the decompression buffer, so
 * we can calculate where text, data, bss, etc. of ZO are positioned more
 * easily.
 *
 * For additional background, the decompression calculations can be found
 * in header.S, and the memory diagram is based on the one found in misc.c.
 *
 * The following conditions are already enforced by the image layouts and
 * associated code:
 *  - input + input_size >= output + output_size
 *  - kernel_total_size <= init_size
 *  - kernel_total_size <= output_size (see Note below)
 *  - output + init_size >= output + output_size
 *
 * (Note that kernel_total_size and output_size have no fundamental
 * relationship, but output_size is passed to choose_random_location
 * as a maximum of the two. The diagram is showing a case where
 * kernel_total_size is larger than output_size, but this case is
 * handled by bumping output_size.)
 *
 * The above conditions can be illustrated by a diagram:
 *
 * 0   output            input             input+input_size    output+init_size
 * |     |                 |                             |                     |
 * |     |                 |                             |                     |
 * |-----|--------|--------|--------------|-----------|--|--------------------|
 *                |                       |           |
 *                |                       |           |
 * output+init_size-ZO_INIT_SIZE  output+output_size  output+kernel_total_size
 *
 * [output, output+init_size) is the entire memory range used for
 * extracting the compressed image.
 *
 * [output, output+kernel_total_size) is the range needed for the
 * uncompressed kernel (VO) and its run size (bss, brk, etc).
 *
 * [output, output+output_size) is VO plus relocs (i.e. the entire
 * uncompressed payload contained by ZO). This is the area of the buffer
 * written to during decompression.
 *
 * [output+init_size-ZO_INIT_SIZE, output+init_size) is the worst-case
 * range of the copied ZO and decompression code. (i.e. the range
 * covered backwards of size ZO_INIT_SIZE, starting from output+init_size.)
 *
 * [input, input+input_size) is the original copied compressed image (ZO)
 * (i.e. it does not include its run size). This range must be avoided
 * because it contains the data used for decompression.
 *
 * [input+input_size, output+init_size) is [_text, _end) for ZO. This
 * range includes ZO's heap and stack, and must be avoided since it
 * performs the decompression.
 *
 * Since the above two ranges need to be avoided and they are adjacent,
 * they can be merged, resulting in: [input, output+init_size) which
 * becomes the MEM_AVOID_ZO_RANGE below.
 */
static void mem_avoid_init(unsigned long input, unsigned long input_size,
			   unsigned long output)
{
	unsigned long init_size = boot_params->hdr.init_size;
	u64 initrd_start, initrd_size;
	u64 cmd_line, cmd_line_size;
	char *ptr;

	/*
	 * Avoid the region that is unsafe to overlap during
	 * decompression.
	 */
	mem_avoid[MEM_AVOID_ZO_RANGE].start = input;
	mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input;
	add_identity_map(mem_avoid[MEM_AVOID_ZO_RANGE].start,
			 mem_avoid[MEM_AVOID_ZO_RANGE].size);

	/* Avoid initrd. */
	initrd_start  = (u64)boot_params->ext_ramdisk_image << 32;
	initrd_start |= boot_params->hdr.ramdisk_image;
	initrd_size  = (u64)boot_params->ext_ramdisk_size << 32;
	initrd_size |= boot_params->hdr.ramdisk_size;
	mem_avoid[MEM_AVOID_INITRD].start = initrd_start;
	mem_avoid[MEM_AVOID_INITRD].size = initrd_size;
	/* No need to set mapping for initrd, it will be handled in VO. */

	/* Avoid kernel command line. */
	cmd_line  = (u64)boot_params->ext_cmd_line_ptr << 32;
	cmd_line |= boot_params->hdr.cmd_line_ptr;
	/* Calculate size of cmd_line (strlen plus the trailing NUL). */
	ptr = (char *)(unsigned long)cmd_line;
	for (cmd_line_size = 0; ptr[cmd_line_size++]; )
		;
	mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line;
	mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;
	add_identity_map(mem_avoid[MEM_AVOID_CMDLINE].start,
			 mem_avoid[MEM_AVOID_CMDLINE].size);

	/* Avoid boot parameters. */
	mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params;
	mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params);
	add_identity_map(mem_avoid[MEM_AVOID_BOOTPARAMS].start,
			 mem_avoid[MEM_AVOID_BOOTPARAMS].size);

	/* We don't need to set a mapping for setup_data. */

#ifdef CONFIG_X86_VERBOSE_BOOTUP
	/* Make sure video RAM can be used. */
	add_identity_map(0, PMD_SIZE);
#endif
}

/*
 * Does this memory vector overlap a known avoided area? If so, record the
 * overlap region with the lowest address.
 */
static bool mem_avoid_overlap(struct mem_vector *img,
			      struct mem_vector *overlap)
{
	int i;
	struct setup_data *ptr;
	unsigned long earliest = img->start + img->size;
	bool is_overlapping = false;

	for (i = 0; i < MEM_AVOID_MAX; i++) {
		if (mem_overlaps(img, &mem_avoid[i]) &&
		    mem_avoid[i].start < earliest) {
			*overlap = mem_avoid[i];
			earliest = overlap->start;
			is_overlapping = true;
		}
	}

	/* Avoid all entries in the setup_data linked list. */
	ptr = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
	while (ptr) {
		struct mem_vector avoid;

		avoid.start = (unsigned long)ptr;
		avoid.size = sizeof(*ptr) + ptr->len;

		if (mem_overlaps(img, &avoid) && (avoid.start < earliest)) {
			*overlap = avoid;
			earliest = overlap->start;
			is_overlapping = true;
		}

		ptr = (struct setup_data *)(unsigned long)ptr->next;
	}

	return is_overlapping;
}
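/*
 * Editorial worked example (hypothetical addresses): if a candidate
 * image range overlaps both the command line at 0x90000 and an initrd
 * at 0x7000000, each match above lowers 'earliest', so *overlap ends
 * up holding the command line vector: the overlapping avoid-region
 * with the lowest start address, as the function's comment promises.
 */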
static unsigned long slots[KERNEL_IMAGE_SIZE / CONFIG_PHYSICAL_ALIGN];

struct slot_area {
	unsigned long addr;
	int num;
};

#define MAX_SLOT_AREA 100

static struct slot_area slot_areas[MAX_SLOT_AREA];

static unsigned long slot_max;

static unsigned long slot_area_index;

static void store_slot_info(struct mem_vector *region, unsigned long image_size)
{
	struct slot_area slot_area;

	if (slot_area_index == MAX_SLOT_AREA)
		return;

	slot_area.addr = region->start;
	slot_area.num = (region->size - image_size) /
			CONFIG_PHYSICAL_ALIGN + 1;

	if (slot_area.num > 0) {
		slot_areas[slot_area_index++] = slot_area;
		slot_max += slot_area.num;
	}
}

static void slots_append(unsigned long addr)
{
	/* Overflowing the slots list should be impossible. */
	if (slot_max >= KERNEL_IMAGE_SIZE / CONFIG_PHYSICAL_ALIGN)
		return;

	slots[slot_max++] = addr;
}

static unsigned long slots_fetch_random(void)
{
	/* Handle case of no slots stored. */
	if (slot_max == 0)
		return 0;

	return slots[get_random_long("Physical") % slot_max];
}

static void process_e820_entry(struct e820entry *entry,
			       unsigned long minimum,
			       unsigned long image_size)
{
	struct mem_vector region, img, overlap;

	/* Skip non-RAM entries. */
	if (entry->type != E820_RAM)
		return;

	/* Ignore entries entirely above our maximum. */
	if (entry->addr >= KERNEL_IMAGE_SIZE)
		return;

	/* Ignore entries entirely below our minimum. */
	if (entry->addr + entry->size < minimum)
		return;

	region.start = entry->addr;
	region.size = entry->size;

	/* Potentially raise address to minimum location. */
	if (region.start < minimum)
		region.start = minimum;

	/* Potentially raise address to meet alignment requirements. */
	region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);

	/* Did we raise the address above the bounds of this e820 region? */
	if (region.start > entry->addr + entry->size)
		return;

	/* Reduce size by any delta from the original address. */
	region.size -= region.start - entry->addr;

	/* Reduce maximum size to fit end of image within maximum limit. */
	if (region.start + region.size > KERNEL_IMAGE_SIZE)
		region.size = KERNEL_IMAGE_SIZE - region.start;

	/* Walk each aligned slot and check for avoided areas. */
	for (img.start = region.start, img.size = image_size ;
	     mem_contains(&region, &img) ;
	     img.start += CONFIG_PHYSICAL_ALIGN) {
		if (mem_avoid_overlap(&img, &overlap))
			continue;
		slots_append(img.start);
	}
}
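/*
 * Editorial worked example (hypothetical numbers, assuming the x86_64
 * default CONFIG_PHYSICAL_ALIGN of 0x200000): a usable region of
 * [0x1000000, 0x1500000) (5 MiB) and an image_size of 0x200000 give
 * store_slot_info() a count of (0x300000 / 0x200000) + 1 = 2 slots,
 * matching the two aligned starts 0x1000000 and 0x1200000 at which the
 * image still fits; process_e820_entry() walks exactly those candidates.
 */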
static unsigned long find_random_phys_addr(unsigned long minimum,
					   unsigned long image_size)
{
	int i;

	/* Make sure minimum is aligned. */
	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);

	/* Verify potential e820 positions, appending to slots list. */
	for (i = 0; i < boot_params->e820_entries; i++) {
		process_e820_entry(&boot_params->e820_map[i], minimum,
				   image_size);
	}

	return slots_fetch_random();
}

static unsigned long find_random_virt_addr(unsigned long minimum,
					   unsigned long image_size)
{
	unsigned long slots, random_addr;

	/* Make sure minimum is aligned. */
	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
	/* Align image_size for easy slot calculations. */
	image_size = ALIGN(image_size, CONFIG_PHYSICAL_ALIGN);

	/*
	 * How many CONFIG_PHYSICAL_ALIGN-sized slots can hold image_size
	 * within the range of minimum to KERNEL_IMAGE_SIZE?
	 */
	slots = (KERNEL_IMAGE_SIZE - minimum - image_size) /
		 CONFIG_PHYSICAL_ALIGN + 1;

	random_addr = get_random_long("Virtual") % slots;

	return random_addr * CONFIG_PHYSICAL_ALIGN + minimum;
}

/*
 * Since this function examines addresses numerically rather than as
 * pointers, it takes the input and output addresses as 'unsigned long'.
 */
unsigned char *choose_random_location(unsigned long input,
				      unsigned long input_size,
				      unsigned long output,
				      unsigned long output_size)
{
	unsigned long choice = output;
	unsigned long random_addr;

#ifdef CONFIG_HIBERNATION
	if (!cmdline_find_option_bool("kaslr")) {
		warn("KASLR disabled: 'kaslr' not on cmdline (hibernation selected).");
		goto out;
	}
#else
	if (cmdline_find_option_bool("nokaslr")) {
		warn("KASLR disabled: 'nokaslr' on cmdline.");
		goto out;
	}
#endif

	boot_params->hdr.loadflags |= KASLR_FLAG;

	/* Record the various known unsafe memory ranges. */
	mem_avoid_init(input, input_size, output);

	/* Walk e820 and find a random address. */
	random_addr = find_random_phys_addr(output, output_size);
	if (!random_addr) {
		warn("KASLR disabled: could not find suitable E820 region!");
		goto out;
	}

	/* Always enforce the minimum. */
	if (random_addr < choice)
		goto out;

	choice = random_addr;

	add_identity_map(choice, output_size);

	/* This actually loads the identity pagetable on x86_64. */
	finalize_identity_maps();
out:
	return (unsigned char *)choice;
}
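/*
 * Editorial usage sketch: the extraction code is expected to call
 * choose_random_location() along these lines, passing the maximum of
 * the two sizes as output_size, per the "bumping output_size" note in
 * the mem_avoid comment above. The names input_data, input_len and
 * output_len are assumed names from the caller and are not defined in
 * this file; the real call site is not reproduced verbatim here.
 *
 *	output = choose_random_location((unsigned long)input_data,
 *					input_len,
 *					(unsigned long)output,
 *					max(output_len, kernel_total_size));
 *
 * As a worked check on find_random_virt_addr() (hypothetical numbers):
 * with KERNEL_IMAGE_SIZE of 0x40000000, minimum = 0x1000000, an aligned
 * image_size of 0x2000000 and CONFIG_PHYSICAL_ALIGN of 0x200000, there
 * are (0x3d000000 / 0x200000) + 1 = 489 virtual slots to choose from.
 */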