// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to validating kernel memory
 * permissions: non-executable regions, non-writable regions, and
 * even non-readable regions.
 */
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/objtool.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>

/* Whether or not to fill the target memory area with do_nothing(). */
#define CODE_WRITE      true
#define CODE_AS_IS      false

/* How many bytes to copy to be sure we've copied enough of do_nothing(). */
#define EXEC_SIZE 64

/* This is non-const, so it will end up in the .data section. */
static u8 data_area[EXEC_SIZE];

/* This is const, so it will end up in the .rodata section. */
static const unsigned long rodata = 0xAA55AA55;

/* This is marked __ro_after_init, so it should ultimately be .rodata. */
static unsigned long ro_after_init __ro_after_init = 0x55AA5500;

/*
 * This is a pointer to do_nothing() which is initialized at runtime rather
 * than build time to avoid objtool IBT validation warnings caused by an
 * inlined unrolled memcpy() in execute_location().
 */
static void __ro_after_init *do_nothing_ptr;

/*
 * This just returns to the caller. It is designed to be copied into
 * non-executable memory regions.
 */
static noinline void do_nothing(void)
{
        return;
}

/* Must immediately follow do_nothing for size calculations to work out. */
static noinline void do_overwritten(void)
{
        pr_info("do_overwritten wasn't overwritten!\n");
        return;
}

static noinline void do_almost_nothing(void)
{
        pr_info("do_nothing was hijacked!\n");
}

/*
 * On architectures that use function descriptors (e.g. powerpc64 ELFv1),
 * build a descriptor whose entry point is dst so it can be called through
 * an ordinary function pointer; everywhere else, just return dst.
 */
static void *setup_function_descriptor(func_desc_t *fdesc, void *dst)
{
        if (!have_function_descriptors())
                return dst;

        memcpy(fdesc, do_nothing, sizeof(*fdesc));
        fdesc->addr = (unsigned long)dst;
        barrier();

        return fdesc;
}

static noinline __nocfi void execute_location(void *dst, bool write)
{
        void (*func)(void);
        func_desc_t fdesc;

        pr_info("attempting ok execution at %px\n", do_nothing_ptr);
        do_nothing();

        if (write == CODE_WRITE) {
                memcpy(dst, do_nothing_ptr, EXEC_SIZE);
                flush_icache_range((unsigned long)dst,
                                   (unsigned long)dst + EXEC_SIZE);
        }
        pr_info("attempting bad execution at %px\n", dst);
        func = setup_function_descriptor(&fdesc, dst);
        func();
        pr_err("FAIL: func returned\n");
}
/*
 * Explicitly doing the wrong thing for testing.
 */
ANNOTATE_NOCFI_SYM(execute_location);

static void execute_user_location(void *dst)
{
        int copied;

        /* Intentionally crossing kernel/user memory boundary. */
        void (*func)(void);
        func_desc_t fdesc;
        void *do_nothing_text = dereference_function_descriptor(do_nothing);

        pr_info("attempting ok execution at %px\n", do_nothing_text);
        do_nothing();

        copied = access_process_vm(current, (unsigned long)dst, do_nothing_text,
                                   EXEC_SIZE, FOLL_WRITE);
        if (copied < EXEC_SIZE)
                return;
        pr_info("attempting bad execution at %px\n", dst);
        func = setup_function_descriptor(&fdesc, dst);
        func();
        pr_err("FAIL: func returned\n");
}

static void lkdtm_WRITE_RO(void)
{
        /* Explicitly cast away "const" for the test and make volatile. */
        volatile unsigned long *ptr = (unsigned long *)&rodata;

        pr_info("attempting bad rodata write at %px\n", ptr);
        *ptr ^= 0xabcd1234;
        pr_err("FAIL: survived bad write\n");
}

static void lkdtm_WRITE_RO_AFTER_INIT(void)
{
        volatile unsigned long *ptr = &ro_after_init;

        /*
         * Verify we were written to during init. Since an Oops
         * is considered a "success", a failure is to just skip the
         * real test.
         */
        if ((*ptr & 0xAA) != 0xAA) {
                pr_info("%p was NOT written during init!?\n", ptr);
                return;
        }

        pr_info("attempting bad ro_after_init write at %px\n", ptr);
        *ptr ^= 0xabcd1234;
        pr_err("FAIL: survived bad write\n");
}

static void lkdtm_WRITE_KERN(void)
{
        size_t size;
        volatile unsigned char *ptr;

        size = (unsigned long)dereference_function_descriptor(do_overwritten) -
               (unsigned long)dereference_function_descriptor(do_nothing);
        ptr = dereference_function_descriptor(do_overwritten);

        pr_info("attempting bad %zu byte write at %px\n", size, ptr);
        memcpy((void *)ptr, (unsigned char *)do_nothing, size);
        flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));
        pr_err("FAIL: survived bad write\n");

        do_overwritten();
}

static void lkdtm_WRITE_OPD(void)
{
        size_t size = sizeof(func_desc_t);
        void (*func)(void) = do_nothing;

        if (!have_function_descriptors()) {
                pr_info("XFAIL: Platform doesn't use function descriptors.\n");
                return;
        }
        pr_info("attempting bad %zu bytes write at %px\n", size, do_nothing);
        memcpy(do_nothing, do_almost_nothing, size);
        pr_err("FAIL: survived bad write\n");

        asm("" : "=m"(func));
        func();
}

static void lkdtm_EXEC_DATA(void)
{
        execute_location(data_area, CODE_WRITE);
}

static void lkdtm_EXEC_STACK(void)
{
        u8 stack_area[EXEC_SIZE];
        execute_location(stack_area, CODE_WRITE);
}

static void lkdtm_EXEC_KMALLOC(void)
{
        u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
        execute_location(kmalloc_area, CODE_WRITE);
        kfree(kmalloc_area);
}

static void lkdtm_EXEC_VMALLOC(void)
{
        u32 *vmalloc_area = vmalloc(EXEC_SIZE);
        execute_location(vmalloc_area, CODE_WRITE);
        vfree(vmalloc_area);
}

static void lkdtm_EXEC_RODATA(void)
{
        execute_location(dereference_function_descriptor(lkdtm_rodata_do_nothing),
                         CODE_AS_IS);
}

static void lkdtm_EXEC_USERSPACE(void)
{
        unsigned long user_addr;

        user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
                            PROT_READ | PROT_WRITE | PROT_EXEC,
                            MAP_ANONYMOUS | MAP_PRIVATE, 0);
        if (user_addr >= TASK_SIZE) {
                pr_warn("Failed to allocate user memory\n");
                return;
        }
        execute_user_location((void *)user_addr);
        vm_munmap(user_addr, PAGE_SIZE);
}

static void lkdtm_EXEC_NULL(void)
{
        execute_location(NULL, CODE_AS_IS);
}

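/*
 * Note on the userspace tests: lkdtm_EXEC_USERSPACE() above and the
 * ACCESS_USERSPACE test below expect a direct kernel call into, or
 * dereference of, user memory to fault. Which mechanism enforces that is
 * architecture-specific (typically SMEP/SMAP on x86 or PXN/PAN on arm64);
 * without such enforcement, the "FAIL: survived ..." messages are printed
 * instead of an Oops being triggered.
 */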
static void lkdtm_ACCESS_USERSPACE(void)
{
        unsigned long user_addr, tmp = 0;
        unsigned long *ptr;

        user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
                            PROT_READ | PROT_WRITE | PROT_EXEC,
                            MAP_ANONYMOUS | MAP_PRIVATE, 0);
        if (user_addr >= TASK_SIZE) {
                pr_warn("Failed to allocate user memory\n");
                return;
        }

        if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
                pr_warn("copy_to_user failed\n");
                vm_munmap(user_addr, PAGE_SIZE);
                return;
        }

        ptr = (unsigned long *)user_addr;

        pr_info("attempting bad read at %px\n", ptr);
        tmp = *ptr;
        tmp += 0xc0dec0de;
        pr_err("FAIL: survived bad read\n");

        pr_info("attempting bad write at %px\n", ptr);
        *ptr = tmp;
        pr_err("FAIL: survived bad write\n");

        vm_munmap(user_addr, PAGE_SIZE);
}

static void lkdtm_ACCESS_NULL(void)
{
        unsigned long tmp;
        volatile unsigned long *ptr = (unsigned long *)NULL;

        pr_info("attempting bad read at %px\n", ptr);
        tmp = *ptr;
        tmp += 0xc0dec0de;
        pr_err("FAIL: survived bad read\n");

        pr_info("attempting bad write at %px\n", ptr);
        *ptr = tmp;
        pr_err("FAIL: survived bad write\n");
}

void __init lkdtm_perms_init(void)
{
        do_nothing_ptr = dereference_function_descriptor(do_nothing);

        /* Make sure we can write to __ro_after_init values during __init */
        ro_after_init |= 0xAA;
}

static struct crashtype crashtypes[] = {
        CRASHTYPE(WRITE_RO),
        CRASHTYPE(WRITE_RO_AFTER_INIT),
        CRASHTYPE(WRITE_KERN),
        CRASHTYPE(WRITE_OPD),
        CRASHTYPE(EXEC_DATA),
        CRASHTYPE(EXEC_STACK),
        CRASHTYPE(EXEC_KMALLOC),
        CRASHTYPE(EXEC_VMALLOC),
        CRASHTYPE(EXEC_RODATA),
        CRASHTYPE(EXEC_USERSPACE),
        CRASHTYPE(EXEC_NULL),
        CRASHTYPE(ACCESS_USERSPACE),
        CRASHTYPE(ACCESS_NULL),
};

struct crashtype_category perms_crashtypes = {
        .crashtypes = crashtypes,
        .len = ARRAY_SIZE(crashtypes),
};

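/*
 * A minimal usage sketch, assuming the standard lkdtm debugfs interface
 * (CONFIG_LKDTM): each crashtype registered above is normally triggered
 * by writing its name to the provoke-crash control file, e.g.:
 *
 *   echo WRITE_RO > /sys/kernel/debug/provoke-crash/DIRECT
 *
 * Each test logs an "attempting bad ..." line and is expected to Oops;
 * a "FAIL: survived ..." line indicates the corresponding protection is
 * not being enforced.
 */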