// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to copy_to_user() and copy_from_user()
 * hardening.
 */
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched/task_stack.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

/*
 * Many of the tests here end up using const sizes, but those would
 * normally be ignored by hardened usercopy, so force the compiler
 * into choosing the non-const path to make sure we trigger the
 * hardened usercopy checks by adding "unconst" to all the const copies,
 * and making sure "cache_size" isn't optimized into a const.
 */
static volatile size_t unconst;
static volatile size_t cache_size = 1024;
static struct kmem_cache *whitelist_cache;

static const unsigned char test_text[] = "This is a test.\n";
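/*
 * For example (where "buf" and "ptr" stand in for any kernel buffer
 * and user pointer, respectively):
 *
 *	copy_to_user(ptr, buf, sizeof(buf));		<- constant size
 *	copy_to_user(ptr, buf, sizeof(buf) + unconst);	<- runtime size
 *
 * In current kernels, check_object_size() only calls into the runtime
 * __check_object_size() check when the length is not a builtin
 * constant, so only the second form exercises the code under test.
 */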

/*
 * Instead of adding -Wno-return-local-addr, just pass the stack address
 * through a function to obfuscate it from the compiler.
 */
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
	return stack + unconst;
}

static noinline unsigned char *do_usercopy_stack_callee(int value)
{
	unsigned char buf[128];
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(buf); i++) {
		buf[i] = value & 0xff;
	}

	/*
	 * Put the target buffer in the middle of stack allocation
	 * so that we don't step on future stack users regardless
	 * of stack growth direction.
	 */
	return trick_compiler(&buf[(128/2)-32]);
}

static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
	unsigned long user_addr;
	unsigned char good_stack[32];
	unsigned char *bad_stack;
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(good_stack); i++)
		good_stack[i] = test_text[i % sizeof(test_text)];

	/* This is a pointer to outside our current stack frame. */
	if (bad_frame) {
		bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
	} else {
		/* Put start address just inside stack. */
		bad_stack = task_stack_page(current) + THREAD_SIZE;
		bad_stack -= sizeof(unsigned long);
	}

#ifdef ARCH_HAS_CURRENT_STACK_POINTER
	pr_info("stack     : %px\n", (void *)current_stack_pointer);
#endif
	pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack));
	pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack));

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	if (to_user) {
		pr_info("attempting good copy_to_user of local stack\n");
		if (copy_to_user((void __user *)user_addr, good_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of distant stack\n");
		if (copy_to_user((void __user *)user_addr, bad_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		/*
		 * There is no safe way to perform this write without
		 * usercopy protection: we would be scribbling on
		 * another thread's stack. Test only the bad-frame case.
		 */
		if (!bad_frame)
			goto free_user;

		pr_info("attempting good copy_from_user of local stack\n");
		if (copy_from_user(good_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of distant stack\n");
		if (copy_from_user(bad_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}
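
/*
 * Note: the bad_frame cases above depend on the architecture providing
 * arch_within_stack_frames() so that hardened usercopy can reject an
 * object spanning stack frames; the !bad_frame ("beyond") case should
 * trip on any CONFIG_HARDENED_USERCOPY build, since the object simply
 * extends past the end of the thread stack.
 */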

/*
 * This checks for whole-object size validation with hardened usercopy,
 * with or without usercopy whitelisting.
 */
static void do_usercopy_heap_size(bool to_user)
{
	unsigned long user_addr;
	unsigned char *one, *two;
	void __user *test_user_addr;
	void *test_kern_addr;
	size_t size = unconst + 1024;

	one = kmalloc(size, GFP_KERNEL);
	two = kmalloc(size, GFP_KERNEL);
	if (!one || !two) {
		pr_warn("Failed to allocate kernel memory\n");
		goto free_kernel;
	}

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_kernel;
	}

	memset(one, 'A', size);
	memset(two, 'B', size);

	test_user_addr = (void __user *)(user_addr + 16);
	test_kern_addr = one + 16;

	if (to_user) {
		pr_info("attempting good copy_to_user of correct size\n");
		if (copy_to_user(test_user_addr, test_kern_addr, size / 2)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of too large size\n");
		if (copy_to_user(test_user_addr, test_kern_addr, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user of correct size\n");
		if (copy_from_user(test_kern_addr, test_user_addr, size / 2)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of too large size\n");
		if (copy_from_user(test_kern_addr, test_user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}
	pr_err("FAIL: bad usercopy not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
	kfree(one);
	kfree(two);
}

/*
 * This checks for the specific whitelist window within an object. If this
 * test passes, then do_usercopy_heap_size() tests will pass too.
 */
static void do_usercopy_heap_whitelist(bool to_user)
{
	unsigned long user_alloc;
	unsigned char *buf = NULL;
	unsigned char __user *user_addr;
	size_t offset, size;

	/* Make sure cache was prepared. */
	if (!whitelist_cache) {
		pr_warn("Failed to allocate kernel cache\n");
		return;
	}

	/*
	 * Allocate a buffer with a whitelisted window inside it.
	 */
	buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL);
	if (!buf) {
		pr_warn("Failed to allocate buffer from whitelist cache\n");
		goto free_alloc;
	}

	/* Allocate user memory we'll poke at. */
	user_alloc = vm_mmap(NULL, 0, PAGE_SIZE,
			     PROT_READ | PROT_WRITE | PROT_EXEC,
			     MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_alloc >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_alloc;
	}
	user_addr = (void __user *)user_alloc;

	memset(buf, 'B', cache_size);

	/* Whitelisted window in buffer, from kmem_cache_create_usercopy(). */
	offset = (cache_size / 4) + unconst;
	size = (cache_size / 16) + unconst;

	if (to_user) {
		pr_info("attempting good copy_to_user inside whitelist\n");
		if (copy_to_user(user_addr, buf + offset, size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user outside whitelist\n");
		if (copy_to_user(user_addr, buf + offset - 1, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user inside whitelist\n");
		if (copy_from_user(buf + offset, user_addr, size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user outside whitelist\n");
		if (copy_from_user(buf + offset - 1, user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}
	pr_err("FAIL: bad usercopy not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(user_alloc, PAGE_SIZE);
free_alloc:
	if (buf)
		kmem_cache_free(whitelist_cache, buf);
}
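
/*
 * The offset/size pair used above mirrors the useroffset/usersize
 * window handed to kmem_cache_create_usercopy() in lkdtm_usercopy_init()
 * below: bytes [cache_size / 4, cache_size / 4 + cache_size / 16) of
 * each object are whitelisted, so starting the "bad" copies a single
 * byte earlier should be enough to trip the whitelist check.
 */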

/* Callable tests. */
static void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
{
	do_usercopy_heap_size(true);
}

static void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
{
	do_usercopy_heap_size(false);
}

static void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
{
	do_usercopy_heap_whitelist(true);
}

static void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
{
	do_usercopy_heap_whitelist(false);
}

static void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
	do_usercopy_stack(true, true);
}

static void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
	do_usercopy_stack(false, true);
}

static void lkdtm_USERCOPY_STACK_BEYOND(void)
{
	do_usercopy_stack(true, false);
}

static void lkdtm_USERCOPY_KERNEL(void)
{
	unsigned long user_addr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	pr_info("attempting good copy_to_user from kernel rodata: %px\n",
		test_text);
	if (copy_to_user((void __user *)user_addr, test_text,
			 unconst + sizeof(test_text))) {
		pr_warn("copy_to_user failed unexpectedly?!\n");
		goto free_user;
	}

	pr_info("attempting bad copy_to_user from kernel text: %px\n",
		vm_mmap);
	if (copy_to_user((void __user *)user_addr, function_nocfi(vm_mmap),
			 unconst + PAGE_SIZE)) {
		pr_warn("copy_to_user failed, but lacked Oops\n");
		goto free_user;
	}
	pr_err("FAIL: bad copy_to_user() not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

void __init lkdtm_usercopy_init(void)
{
	/* Prepare a cache with a whitelisted usercopy window. */
	whitelist_cache =
		kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
					   0, 0,
					   cache_size / 4,  /* useroffset */
					   cache_size / 16, /* usersize */
					   NULL);
}

void __exit lkdtm_usercopy_exit(void)
{
	kmem_cache_destroy(whitelist_cache);
}

static struct crashtype crashtypes[] = {
	CRASHTYPE(USERCOPY_HEAP_SIZE_TO),
	CRASHTYPE(USERCOPY_HEAP_SIZE_FROM),
	CRASHTYPE(USERCOPY_HEAP_WHITELIST_TO),
	CRASHTYPE(USERCOPY_HEAP_WHITELIST_FROM),
	CRASHTYPE(USERCOPY_STACK_FRAME_TO),
	CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
	CRASHTYPE(USERCOPY_STACK_BEYOND),
	CRASHTYPE(USERCOPY_KERNEL),
};

struct crashtype_category usercopy_crashtypes = {
	.crashtypes = crashtypes,
	.len = ARRAY_SIZE(crashtypes),
};
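
/*
 * Each test above is reachable through LKDTM's debugfs interface, e.g.:
 *
 *	echo USERCOPY_HEAP_SIZE_TO > /sys/kernel/debug/provoke-crash/DIRECT
 *
 * With CONFIG_HARDENED_USERCOPY=y the "bad" copies should Oops before
 * completing; without it, the tests fall through and report FAIL.
 */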