/*
 * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/crc32.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/types.h>

#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/sections.h>

/* randomized base address of the module region, used by module_alloc() */
u64 __read_mostly module_alloc_base;

/*
 * Retrieve the 64-bit /chosen/kaslr-seed property from the FDT, and wipe it
 * so that the seed cannot be read back after boot.
 */
static __init u64 get_kaslr_seed(void *fdt)
{
	int node, len;
	fdt64_t *prop;
	u64 ret;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return 0;

	prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
	if (!prop || len != sizeof(u64))
		return 0;

	ret = fdt64_to_cpu(*prop);
	*prop = 0;
	return ret;
}

/*
 * Return the kernel command line: the /chosen/bootargs property if present,
 * falling back to the built-in CONFIG_CMDLINE (which is used unconditionally
 * if CONFIG_CMDLINE_FORCE is set).
 */
static __init const u8 *get_cmdline(void *fdt)
{
	static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;

	if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
		int node;
		const u8 *prop;

		node = fdt_path_offset(fdt, "/chosen");
		if (node < 0)
			goto out;

		prop = fdt_getprop(fdt, node, "bootargs", NULL);
		if (!prop)
			goto out;
		return prop;
	}
out:
	return default_cmdline;
}

extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
				       pgprot_t prot);

/*
 * This routine will be executed with the kernel mapped at its default virtual
 * address, and if it returns a non-zero offset, the kernel will be remapped,
 * and start_kernel() will be executed from a randomized virtual offset.
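 * (It is called from the early boot code in arch/arm64/kernel/head.S, which
 * passes in the physical address of the FDT and applies the offset returned
 * here when it relocates the kernel image.)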
 * The relocation will cause all absolute references (e.g., static variables
 * containing function pointers) to be reinitialized, and zero-initialized
 * .bss variables will be reset to 0.
 */
u64 __init kaslr_early_init(u64 dt_phys)
{
	void *fdt;
	u64 seed, offset, mask, module_range;
	const u8 *cmdline, *str;
	int size;

	/*
	 * Set a reasonable default for module_alloc_base in case
	 * we end up running with module randomization disabled.
	 */
	module_alloc_base = (u64)_etext - MODULES_VSIZE;

	/*
	 * Try to map the FDT early. If this fails, we simply bail,
	 * and proceed with KASLR disabled. We will make another
	 * attempt at mapping the FDT in setup_machine().
	 */
	early_fixmap_init();
	fdt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
	if (!fdt)
		return 0;

	/*
	 * Retrieve (and wipe) the seed from the FDT.
	 */
	seed = get_kaslr_seed(fdt);
	if (!seed)
		return 0;

	/*
	 * Check if 'nokaslr' appears on the command line, and
	 * return 0 if that is the case.
	 */
	cmdline = get_cmdline(fdt);
	str = strstr(cmdline, "nokaslr");
	if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
		return 0;

	/*
	 * OK, so we are proceeding with KASLR enabled. Calculate a suitable
	 * kernel image offset from the seed. Let's place the kernel in the
	 * lower half of the VMALLOC area (a window of 1 << (VA_BITS - 2)
	 * bytes). Even if we could randomize at page granularity for 16k and
	 * 64k pages, let's always round to 2 MB so we don't interfere with
	 * the ability to map using contiguous PTEs.
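	 * (Worked example, assuming a 48-bit VA configuration: the mask below
	 * covers bits [45:21], so offset becomes a 2 MB aligned value in the
	 * range [0, 1 << 46), taking 25 bits of entropy from the seed.)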
	 */
	mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1);
	offset = seed & mask;

	/*
	 * The kernel Image should not extend across a 1GB/32MB/512MB alignment
	 * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
	 * happens, increase the KASLR offset by the size of the kernel image.
	 */
	if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) !=
	    (((u64)_end + offset) >> SWAPPER_TABLE_SHIFT))
		offset = (offset + (u64)(_end - _text)) & mask;

	if (IS_ENABLED(CONFIG_KASAN))
		/*
		 * KASAN does not expect the module region to intersect the
		 * vmalloc region, since shadow memory is allocated for each
		 * module at load time, whereas the vmalloc region is shadowed
		 * by KASAN zero pages. So keep modules out of the vmalloc
		 * region if KASAN is enabled.
		 */
		return offset;

	if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
		/*
		 * Randomize the module region independently from the core
		 * kernel. This prevents modules from leaking any information
		 * about the address of the kernel itself, but results in
		 * branches between modules and the core kernel that are
		 * resolved via PLTs. (Branches between modules will be
		 * resolved normally.)
		 */
		module_range = VMALLOC_END - VMALLOC_START - MODULES_VSIZE;
		module_alloc_base = VMALLOC_START;
	} else {
		/*
		 * Randomize the module region by setting module_alloc_base to
		 * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
		 * _stext). This guarantees that the resulting region still
		 * covers [_stext, _etext], and that all relative branches can
		 * be resolved without veneers.
		 */
		module_range = MODULES_VSIZE - (u64)(_etext - _stext);
		module_alloc_base = (u64)_etext + offset - MODULES_VSIZE;
	}

	/* use the lower 21 bits to randomize the base of the module region */
	module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
	module_alloc_base &= PAGE_MASK;

	return offset;
}