// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains KASAN shadow initialization code.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/slab.h>

#include <asm/page.h>
#include <asm/pgalloc.h>

#include "kasan.h"

/*
 * This page serves two purposes:
 *   - It is used as early shadow memory. The entire shadow region is
 *     populated with this page before normal shadow memory can be set up.
 *   - Later it is reused as the zero shadow to cover large ranges of memory
 *     that are allowed to be accessed but are not handled by KASAN
 *     (vmalloc/vmemmap ...).
 */
unsigned char kasan_early_shadow_page[PAGE_SIZE] __page_aligned_bss;

#if CONFIG_PGTABLE_LEVELS > 4
p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D] __page_aligned_bss;
static inline bool kasan_p4d_table(pgd_t pgd)
{
	return pgd_page(pgd) == virt_to_page(lm_alias(kasan_early_shadow_p4d));
}
#else
static inline bool kasan_p4d_table(pgd_t pgd)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 3
pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD] __page_aligned_bss;
static inline bool kasan_pud_table(p4d_t p4d)
{
	return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud));
}
#else
static inline bool kasan_pud_table(p4d_t p4d)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2
pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD] __page_aligned_bss;
static inline bool kasan_pmd_table(pud_t pud)
{
	return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd));
}
#else
static inline bool kasan_pmd_table(pud_t pud)
{
	return false;
}
#endif
pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS]
	__page_aligned_bss;

static inline bool kasan_pte_table(pmd_t pmd)
{
	return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte));
}

static inline bool kasan_early_shadow_page_entry(pte_t pte)
{
	return pte_page(pte) == virt_to_page(lm_alias(kasan_early_shadow_page));
}

static __init void *early_alloc(size_t size, int node)
{
	void *ptr = memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
					   MEMBLOCK_ALLOC_ACCESSIBLE, node);

	if (!ptr)
		panic("%s: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
		      __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));

	return ptr;
}

static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	pte_t zero_pte;

	zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_early_shadow_page)),
				PAGE_KERNEL);
	zero_pte = pte_wrprotect(zero_pte);

	while (addr + PAGE_SIZE <= end) {
		set_pte_at(&init_mm, addr, pte, zero_pte);
		addr += PAGE_SIZE;
		pte = pte_offset_kernel(pmd, addr);
	}
}

void __weak __meminit kernel_pte_init(void *addr)
{
}

static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}
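
		/*
		 * Partially covered pmd: a dedicated pte page is needed.
		 * Allocate one if necessary, then let zero_pte_populate()
		 * map each covered page to the shared, write-protected
		 * early shadow page.
		 */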
		if (pmd_none(*pmd)) {
			pte_t *p;

			if (slab_is_available())
				p = pte_alloc_one_kernel(&init_mm);
			else {
				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
				kernel_pte_init(p);
			}
			if (!p)
				return -ENOMEM;

			pmd_populate_kernel(&init_mm, pmd, p);
		}
		zero_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);

	return 0;
}

void __weak __meminit pmd_init(void *addr)
{
}

static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
				unsigned long end)
{
	pud_t *pud = pud_offset(p4d, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
			pmd_t *pmd;

			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pud_none(*pud)) {
			pmd_t *p;

			if (slab_is_available()) {
				p = pmd_alloc(&init_mm, pud, addr);
				if (!p)
					return -ENOMEM;
			} else {
				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
				pmd_init(p);
				pud_populate(&init_mm, pud, p);
			}
		}
		zero_pmd_populate(pud, addr, next);
	} while (pud++, addr = next, addr != end);

	return 0;
}

void __weak __meminit pud_init(void *addr)
{
}

static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
				unsigned long end)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	unsigned long next;

	do {
		next = p4d_addr_end(addr, end);
		if (IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
			pud_t *pud;
			pmd_t *pmd;

			p4d_populate(&init_mm, p4d,
					lm_alias(kasan_early_shadow_pud));
			pud = pud_offset(p4d, addr);
			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (p4d_none(*p4d)) {
			pud_t *p;

			if (slab_is_available()) {
				p = pud_alloc(&init_mm, p4d, addr);
				if (!p)
					return -ENOMEM;
			} else {
				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
				pud_init(p);
				p4d_populate(&init_mm, p4d, p);
			}
		}
		zero_pud_populate(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);

	return 0;
}
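
/*
 * Illustrative usage sketch, not part of the original file: an
 * architecture's early kasan_init() typically calls
 * kasan_populate_early_shadow() below to cover regions that only ever
 * need the zero shadow. The region chosen here is hypothetical:
 *
 *	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
 *				    kasan_mem_to_shadow((void *)VMALLOC_END));
 */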

/**
 * kasan_populate_early_shadow - populate shadow memory region with
 *                               kasan_early_shadow_page
 * @shadow_start: start of the memory range to populate
 * @shadow_end: end of the memory range to populate
 */
int __ref kasan_populate_early_shadow(const void *shadow_start,
					const void *shadow_end)
{
	unsigned long addr = (unsigned long)shadow_start;
	unsigned long end = (unsigned long)shadow_end;
	pgd_t *pgd = pgd_offset_k(addr);
	unsigned long next;

	do {
		next = pgd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
			p4d_t *p4d;
			pud_t *pud;
			pmd_t *pmd;

			/*
			 * kasan_early_shadow_pud should already be populated
			 * with pmds at this point. The [pud,pmd]_populate*()
			 * calls below are needed only for 3- and 2-level page
			 * tables, where there are no real puds/pmds and
			 * pgd_populate()/pud_populate() are no-ops.
			 */
			pgd_populate(&init_mm, pgd,
					lm_alias(kasan_early_shadow_p4d));
			p4d = p4d_offset(pgd, addr);
			p4d_populate(&init_mm, p4d,
					lm_alias(kasan_early_shadow_pud));
			pud = pud_offset(p4d, addr);
			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pgd_none(*pgd)) {
			p4d_t *p;

			if (slab_is_available()) {
				p = p4d_alloc(&init_mm, pgd, addr);
				if (!p)
					return -ENOMEM;
			} else {
				pgd_populate(&init_mm, pgd,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);

	return 0;
}

static void kasan_free_pte(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(ptep_get(pte)))
			return;
	}

	pte_free_kernel(&init_mm, (pte_t *)page_to_virt(pmd_page(*pmd)));
	pmd_clear(pmd);
}

static void kasan_free_pmd(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, (pmd_t *)page_to_virt(pud_page(*pud)));
	pud_clear(pud);
}

static void kasan_free_pud(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	pud_free(&init_mm, (pud_t *)page_to_virt(p4d_page(*p4d)));
	p4d_clear(p4d);
}

static void kasan_free_p4d(p4d_t *p4d_start, pgd_t *pgd)
{
	p4d_t *p4d;
	int i;

	for (i = 0; i < PTRS_PER_P4D; i++) {
		p4d = p4d_start + i;
		if (!p4d_none(*p4d))
			return;
	}

	p4d_free(&init_mm, (p4d_t *)page_to_virt(pgd_page(*pgd)));
	pgd_clear(pgd);
}

static void kasan_remove_pte_table(pte_t *pte, unsigned long addr,
				unsigned long end)
{
	unsigned long next;
	pte_t ptent;

	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		ptent = ptep_get(pte);

		if (!pte_present(ptent))
			continue;

		if (WARN_ON(!kasan_early_shadow_page_entry(ptent)))
			continue;
		pte_clear(&init_mm, addr, pte);
	}
}

static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pmd++) {
		pte_t *pte;

		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (kasan_pte_table(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				pmd_clear(pmd);
				continue;
			}
		}
		pte = pte_offset_kernel(pmd, addr);
		kasan_remove_pte_table(pte, addr, next);
		kasan_free_pte(pte_offset_kernel(pmd, 0), pmd);
	}
}

static void kasan_remove_pud_table(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pud++) {
		pmd_t *pmd, *pmd_base;

		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (kasan_pmd_table(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE)) {
				pud_clear(pud);
				continue;
			}
		}
		pmd = pmd_offset(pud, addr);
		pmd_base = pmd_offset(pud, 0);
		kasan_remove_pmd_table(pmd, addr, next);
		kasan_free_pmd(pmd_base, pud);
	}
}

static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, p4d++) {
		pud_t *pud;

		next = p4d_addr_end(addr, end);

		if (!p4d_present(*p4d))
			continue;

		if (kasan_pud_table(*p4d)) {
			if (IS_ALIGNED(addr, P4D_SIZE) &&
			    IS_ALIGNED(next, P4D_SIZE)) {
				p4d_clear(p4d);
				continue;
			}
		}
		pud = pud_offset(p4d, addr);
		kasan_remove_pud_table(pud, addr, next);
		kasan_free_pud(pud_offset(p4d, 0), p4d);
	}
}

void kasan_remove_zero_shadow(void *start, unsigned long size)
{
	unsigned long addr, end, next;
	pgd_t *pgd;

	addr = (unsigned long)kasan_mem_to_shadow(start);
	end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);

	if (WARN_ON((unsigned long)start % KASAN_MEMORY_PER_SHADOW_PAGE) ||
	    WARN_ON(size % KASAN_MEMORY_PER_SHADOW_PAGE))
		return;

	for (; addr < end; addr = next) {
		p4d_t *p4d;

		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (kasan_p4d_table(*pgd)) {
			if (IS_ALIGNED(addr, PGDIR_SIZE) &&
			    IS_ALIGNED(next, PGDIR_SIZE)) {
				pgd_clear(pgd);
				continue;
			}
		}

		p4d = p4d_offset(pgd, addr);
		kasan_remove_p4d_table(p4d, addr, next);
		kasan_free_p4d(p4d_offset(pgd, 0), pgd);
	}
}

int kasan_add_zero_shadow(void *start, unsigned long size)
{
	int ret;
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(start);
	shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);

	if (WARN_ON((unsigned long)start % KASAN_MEMORY_PER_SHADOW_PAGE) ||
	    WARN_ON(size % KASAN_MEMORY_PER_SHADOW_PAGE))
		return -EINVAL;

	ret = kasan_populate_early_shadow(shadow_start, shadow_end);
	if (ret)
		kasan_remove_zero_shadow(start, size);
	return ret;
}
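
/*
 * Usage sketch, not part of the original file: hotplug-style callers that
 * add physical memory which only ever needs the zero shadow (for example
 * the ZONE_DEVICE/memremap path) pair the two helpers above around the
 * lifetime of the range. The range below is hypothetical:
 *
 *	if (kasan_add_zero_shadow(__va(range_start), range_size))
 *		goto err_kasan;
 *	...
 *	kasan_remove_zero_shadow(__va(range_start), range_size);
 *
 * Both helpers WARN and bail out unless start and size are aligned to
 * KASAN_MEMORY_PER_SHADOW_PAGE, i.e. the amount of memory covered by one
 * shadow page.
 */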