// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This code is based in part on work published here:
 *
 *	https://github.com/IAIK/KAISER
 *
 * The original work was written and signed off for the Linux kernel by:
 *
 *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
 *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
 *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
 *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
 *
 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
 * Mostly rewritten by Thomas Gleixner <tglx@kernel.org> and
 *		       Andy Lutomirski <luto@amacapital.net>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/cpu.h>

#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/sections.h>
#include <asm/set_memory.h>
#include <asm/bugs.h>

#undef pr_fmt
#define pr_fmt(fmt)	"Kernel/User page tables isolation: " fmt

/* Backporting helper */
#ifndef __GFP_NOTRACK
#define __GFP_NOTRACK	0
#endif

/*
 * Define the page-table levels we clone for user-space on 32
 * and 64 bit.
 */
#ifdef CONFIG_X86_64
#define PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PMD
#else
#define PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PTE
#endif
static void __init pti_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static void __init pti_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

/* Assume mode is auto unless overridden via cmdline below. */
static enum pti_mode {
	PTI_AUTO = 0,
	PTI_FORCE_OFF,
	PTI_FORCE_ON
} pti_mode;

void __init pti_check_boottime_disable(void)
{
	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on XEN PV.");
		return;
	}

	if (pti_mode == PTI_AUTO &&
	    !cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL))
		pti_mode = PTI_FORCE_OFF;
	if (pti_mode == PTI_FORCE_OFF) {
		pti_print_if_insecure("disabled on command line.");
		return;
	}

	if (pti_mode == PTI_FORCE_ON)
		pti_print_if_secure("force enabled on command line.");

	if (pti_mode == PTI_AUTO && !boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		return;

	setup_force_cpu_cap(X86_FEATURE_PTI);

	if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) {
		pr_debug("PTI enabled, disabling INVLPGB\n");
		setup_clear_cpu_cap(X86_FEATURE_INVLPGB);
	}

	if (cpu_feature_enabled(X86_FEATURE_FRED)) {
		pr_debug("PTI enabled, disabling FRED\n");
		setup_clear_cpu_cap(X86_FEATURE_FRED);
	}
}

static int __init pti_parse_cmdline(char *arg)
{
	if (!strcmp(arg, "off"))
		pti_mode = PTI_FORCE_OFF;
	else if (!strcmp(arg, "on"))
		pti_mode = PTI_FORCE_ON;
	else if (!strcmp(arg, "auto"))
		pti_mode = PTI_AUTO;
	else
		return -EINVAL;
	return 0;
}
early_param("pti", pti_parse_cmdline);

static int __init pti_parse_cmdline_nopti(char *arg)
{
	pti_mode = PTI_FORCE_OFF;
	return 0;
}
early_param("nopti", pti_parse_cmdline_nopti);
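
/*
 * For example, booting with "pti=off" (or "nopti") force-disables isolation,
 * "pti=on" force-enables it even on CPUs not flagged as affected, and
 * "pti=auto" (the default) lets pti_check_boottime_disable() decide based on
 * X86_BUG_CPU_MELTDOWN and the configured attack-vector mitigations.
 */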

pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	/*
	 * Changes to the high (kernel) portion of the kernelmode page
	 * tables are not automatically propagated to the usermode tables.
	 *
	 * Users should keep in mind that, unlike the kernelmode tables,
	 * there is no vmalloc_fault equivalent for the usermode tables.
	 * Top-level entries added to init_mm's usermode pgd after boot
	 * will not be automatically propagated to other mms.
	 */
	if (!pgdp_maps_userspace(pgdp) || (pgd.pgd & _PAGE_NOPTISHADOW))
		return pgd;

	/*
	 * The user page tables get the full PGD, accessible from
	 * userspace:
	 */
	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;

	/*
	 * If this is normal user memory, make it NX in the kernel
	 * pagetables so that, if we somehow screw up and return to
	 * usermode with the kernel CR3 loaded, we'll get a page fault
	 * instead of allowing user code to execute with the wrong CR3.
	 *
	 * As exceptions, we don't set NX if:
	 *  - _PAGE_USER is not set. This could be an executable
	 *    EFI runtime mapping or something similar, and the kernel
	 *    may execute from it
	 *  - we don't have NX support
	 *  - we're clearing the PGD (i.e. the new pgd is not present).
	 */
	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
	    (__supported_pte_mask & _PAGE_NX))
		pgd.pgd |= _PAGE_NX;

	/* return the copy of the PGD we want the kernel to use: */
	return pgd;
}
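
/*
 * Note: with PTI the top-level pgd is an order-1 allocation; the kernel-mode
 * copy lives in the first page and the user-mode copy one page above it.
 * kernel_to_user_pgdp() returns that user-mode half, which is what CR3 points
 * at while running in userspace.
 */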

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a P4D on success, or NULL on failure.
 */
static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

	if (address < PAGE_OFFSET) {
		WARN_ONCE(1, "attempt to walk user address\n");
		return NULL;
	}

	if (pgd_none(*pgd)) {
		unsigned long new_p4d_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_p4d_page))
			return NULL;

		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
	}
	BUILD_BUG_ON(pgd_leaf(*pgd));

	return p4d_offset(pgd, address);
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a PMD on success, or NULL on failure.
 */
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	p4d_t *p4d;
	pud_t *pud;

	p4d = pti_user_pagetable_walk_p4d(address);
	if (!p4d)
		return NULL;

	BUILD_BUG_ON(p4d_leaf(*p4d));
	if (p4d_none(*p4d)) {
		unsigned long new_pud_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pud_page))
			return NULL;

		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
	}

	pud = pud_offset(p4d, address);
	/* The user page tables do not use large mappings: */
	if (pud_leaf(*pud)) {
		WARN_ON(1);
		return NULL;
	}
	if (pud_none(*pud)) {
		unsigned long new_pmd_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pmd_page))
			return NULL;

		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
	}

	return pmd_offset(pud, address);
}

/*
 * Walk the shadow copy of the page tables (optionally) trying to allocate
 * page table pages on the way down. Does not support large pages.
 *
 * Note: this is only used when mapping *new* kernel data into the
 * user/shadow page tables. It is never used for userspace data.
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
static pte_t *pti_user_pagetable_walk_pte(unsigned long address, bool late_text)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	pmd_t *pmd;
	pte_t *pte;

	pmd = pti_user_pagetable_walk_pmd(address);
	if (!pmd)
		return NULL;

	/* Large PMD mapping found */
	if (pmd_leaf(*pmd)) {
		/* Clear the PMD if we hit a large mapping from the first round */
		if (late_text) {
			set_pmd(pmd, __pmd(0));
		} else {
			WARN_ON_ONCE(1);
			return NULL;
		}
	}

	if (pmd_none(*pmd)) {
		unsigned long new_pte_page = __get_free_page(gfp);
		if (!new_pte_page)
			return NULL;

		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
	}

	pte = pte_offset_kernel(pmd, address);
	if (pte_flags(*pte) & _PAGE_USER) {
		WARN_ONCE(1, "attempt to walk to user pte\n");
		return NULL;
	}
	return pte;
}

#ifdef CONFIG_X86_VSYSCALL_EMULATION
static void __init pti_setup_vsyscall(void)
{
	pte_t *pte, *target_pte;
	unsigned int level;

	pte = lookup_address(VSYSCALL_ADDR, &level);
	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
		return;

	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR, false);
	if (WARN_ON(!target_pte))
		return;

	*target_pte = *pte;
	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
}
#else
static void __init pti_setup_vsyscall(void) { }
#endif

enum pti_clone_level {
	PTI_CLONE_PMD,
	PTI_CLONE_PTE,
};

static void
pti_clone_pgtable(unsigned long start, unsigned long end,
		  enum pti_clone_level level, bool late_text)
{
	unsigned long addr;

	/*
	 * Clone the populated PMDs which cover start to end. These PMD areas
	 * can have holes.
	 */
	for (addr = start; addr < end;) {
		pte_t *pte, *target_pte;
		pmd_t *pmd, *target_pmd;
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;

		/* Overflow check */
		if (addr < start)
			break;

		pgd = pgd_offset_k(addr);
		if (WARN_ON(pgd_none(*pgd)))
			return;
		p4d = p4d_offset(pgd, addr);
		if (WARN_ON(p4d_none(*p4d)))
			return;

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			WARN_ON_ONCE(addr & ~PUD_MASK);
			addr = round_up(addr + 1, PUD_SIZE);
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			WARN_ON_ONCE(addr & ~PMD_MASK);
			addr = round_up(addr + 1, PMD_SIZE);
			continue;
		}

		if (pmd_leaf(*pmd) || level == PTI_CLONE_PMD) {
			target_pmd = pti_user_pagetable_walk_pmd(addr);
			if (WARN_ON(!target_pmd))
				return;

			/*
			 * Only clone present PMDs. This ensures only setting
			 * _PAGE_GLOBAL on present PMDs. This should only be
			 * called on well-known addresses anyway, so a non-
			 * present PMD would be a surprise.
			 */
			if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
				return;

			/*
			 * Setting 'target_pmd' below creates a mapping in both
			 * the user and kernel page tables. It is effectively
			 * global, so set it as global in both copies. Note:
			 * the X86_FEATURE_PGE check is not _required_ because
			 * the CPU ignores _PAGE_GLOBAL when PGE is not
			 * supported. The check keeps consistency with
			 * code that only sets this bit when supported.
			 */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);

			/*
			 * Copy the PMD. That is, the kernelmode and usermode
			 * tables will share the last-level page tables of this
			 * address range.
			 */
			*target_pmd = *pmd;

			addr = round_up(addr + 1, PMD_SIZE);

		} else if (level == PTI_CLONE_PTE) {

			/* Walk the page-table down to the pte level */
			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte)) {
				addr = round_up(addr + 1, PAGE_SIZE);
				continue;
			}

			/* Only clone present PTEs */
			if (WARN_ON(!(pte_flags(*pte) & _PAGE_PRESENT)))
				return;

			/* Allocate PTE in the user page-table */
			target_pte = pti_user_pagetable_walk_pte(addr, late_text);
			if (WARN_ON(!target_pte))
				return;

			/* Set GLOBAL bit in both PTEs */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pte = pte_set_flags(*pte, _PAGE_GLOBAL);

			/* Clone the PTE */
			*target_pte = *pte;

			addr = round_up(addr + 1, PAGE_SIZE);

		} else {
			BUG();
		}
	}
}
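
/*
 * pti_clone_pgtable() is used below by pti_clone_entry_text() and
 * pti_clone_kernel_text(), and by the 32-bit pti_clone_user_shared().
 */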

#ifdef CONFIG_X86_64
/*
 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 * next-level entry on 5-level systems).
 */
static void __init pti_clone_p4d(unsigned long addr)
{
	p4d_t *kernel_p4d, *user_p4d;
	pgd_t *kernel_pgd;

	user_p4d = pti_user_pagetable_walk_p4d(addr);
	if (!user_p4d)
		return;

	kernel_pgd = pgd_offset_k(addr);
	kernel_p4d = p4d_offset(kernel_pgd, addr);
	*user_p4d = *kernel_p4d;
}

/*
 * Clone the CPU_ENTRY_AREA and associated data into the user space visible
 * page table.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned int cpu;

	pti_clone_p4d(CPU_ENTRY_AREA_BASE);

	for_each_possible_cpu(cpu) {
		/*
		 * The SYSCALL64 entry code needs one word of scratch space
		 * in which to spill a register. It lives in the sp2 slot
		 * of the CPU's TSS.
		 *
		 * This is done for all possible CPUs during boot to ensure
		 * that it's propagated to all mms.
		 */

		unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
		phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
		pte_t *target_pte;

		target_pte = pti_user_pagetable_walk_pte(va, false);
		if (WARN_ON(!target_pte))
			return;

		*target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
	}
}

#else /* CONFIG_X86_64 */

/*
 * On 32 bit PAE systems with 1GB of Kernel address space there is only
 * one pgd/p4d for the whole kernel. Cloning that would map the whole
 * address space into the user page-tables, making PTI useless. So clone
 * the page-table on the PMD level to prevent that.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned long start, end;

	start = CPU_ENTRY_AREA_BASE;
	end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);

	pti_clone_pgtable(start, end, PTI_CLONE_PMD, false);
}
#endif /* CONFIG_X86_64 */

/*
 * Clone the ESPFIX P4D into the user space visible page table
 */
static void __init pti_setup_espfix64(void)
{
#ifdef CONFIG_X86_ESPFIX64
	pti_clone_p4d(ESPFIX_BASE_ADDR);
#endif
}

/*
 * Clone the populated PMDs of the entry text and force it RO.
 */
static void pti_clone_entry_text(bool late)
{
	pti_clone_pgtable((unsigned long) __entry_text_start,
			  (unsigned long) __entry_text_end,
			  PTI_LEVEL_KERNEL_IMAGE, late);
}
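
/*
 * pti_clone_entry_text() runs twice: early from pti_init() with late == false,
 * before free_initmem() and mark_readonly(), and again from pti_finalize()
 * with late == true, once the text is NX and some PMDs may have been split.
 */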

/*
 * Global pages and PCIDs are both ways to make kernel TLB entries
 * live longer, reduce TLB misses and improve kernel performance.
 * But, leaving all kernel text Global makes it potentially accessible
 * to Meltdown-style attacks which make it trivial to find gadgets or
 * defeat KASLR.
 *
 * Only use global pages when it is really worth it.
 */
static inline bool pti_kernel_image_global_ok(void)
{
	/*
	 * Systems with PCIDs get little benefit from global
	 * kernel text and are not worth the downsides.
	 */
	if (cpu_feature_enabled(X86_FEATURE_PCID))
		return false;

	/*
	 * Only do global kernel image for pti=auto. Do the most
	 * secure thing (not global) if pti=on specified.
	 */
	if (pti_mode != PTI_AUTO)
		return false;

	/*
	 * K8 may not tolerate the cleared _PAGE_RW on the userspace
	 * global kernel image pages. Do the safe thing (disable
	 * global kernel image). This is unlikely to ever be
	 * noticed because PTI is disabled by default on AMD CPUs.
	 */
	if (boot_cpu_has(X86_FEATURE_K8))
		return false;

	/*
	 * RANDSTRUCT derives its hardening benefits from the
	 * attacker's lack of knowledge about the layout of kernel
	 * data structures. Keep the kernel image non-global in
	 * cases where RANDSTRUCT is in use to help keep the layout a
	 * secret.
	 */
	if (IS_ENABLED(CONFIG_RANDSTRUCT))
		return false;

	return true;
}

/*
 * For some configurations, map all of kernel text into the user page
 * tables. This reduces TLB misses, especially on non-PCID systems.
 */
static void pti_clone_kernel_text(void)
{
	/*
	 * rodata is part of the kernel image and is normally
	 * readable on the filesystem or on the web. But, do not
	 * clone the areas past rodata, they might contain secrets.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end_clone  = (unsigned long)__end_rodata_aligned;
	unsigned long end_global = PFN_ALIGN((unsigned long)_etext);

	if (!pti_kernel_image_global_ok())
		return;

	pr_debug("mapping partial kernel image into user address space\n");

	/*
	 * Note that this will undo _some_ of the work that
	 * pti_set_kernel_image_nonglobal() did to clear the
	 * global bit.
	 */
	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE, false);

	/*
	 * pti_clone_pgtable() will set the global bit in any PMDs
	 * that it clones, but we also need to get any PTEs in
	 * the last level for areas that are not huge-page-aligned.
	 */

	/* Set the global bit for normal non-__init kernel text: */
	set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
}

static void pti_set_kernel_image_nonglobal(void)
{
	/*
	 * The identity map is created with PMDs, regardless of the
	 * actual length of the kernel. We need to clear
	 * _PAGE_GLOBAL up to a PMD boundary, not just to the end
	 * of the image.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = ALIGN((unsigned long)_end, PMD_SIZE);

	/*
	 * This clears _PAGE_GLOBAL from the entire kernel image.
	 * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
	 * areas that are mapped to userspace.
	 */
	set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}

/*
 * Initialize kernel page table isolation
 */
void __init pti_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("enabled\n");

#ifdef CONFIG_X86_32
	/*
	 * We check for X86_FEATURE_PCID here. But the init-code will
	 * clear the feature flag on 32 bit because the feature is not
	 * supported on 32 bit anyway. To print the warning we need to
	 * check with cpuid directly again.
	 */
	if (cpuid_ecx(0x1) & BIT(17)) {
		/* Use printk to work around pr_fmt() */
		printk(KERN_WARNING "\n");
		printk(KERN_WARNING "************************************************************\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
		printk(KERN_WARNING "** Your performance will increase dramatically if you     **\n");
		printk(KERN_WARNING "** switch to a 64-bit kernel!                             **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "************************************************************\n");
	}
#endif

	pti_clone_user_shared();

	/* Undo all global bits from the init pagetables in head_64.S: */
	pti_set_kernel_image_nonglobal();

	/* Replace some of the global bits just for shared entry text: */
	/*
	 * This is very early in boot. Device and Late initcalls can do
	 * modprobe before free_initmem() and mark_readonly(). This
	 * pti_clone_entry_text() allows those user-mode-helpers to function,
	 * but notably the text is still RW.
	 */
	pti_clone_entry_text(false);
	pti_setup_espfix64();
	pti_setup_vsyscall();
}

/*
 * Finalize the kernel mappings in the userspace page-table. Some of the
 * mappings for the kernel image might have changed since pti_init()
 * cloned them. This is because parts of the kernel image have been
 * mapped RO and/or NX. These changes need to be cloned again to the
 * userspace page-table.
 */
void pti_finalize(void)
{
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;
	/*
	 * This is after free_initmem() (all initcalls are done) and we've done
	 * mark_readonly(). Text is now NX which might've split some PMDs
	 * relative to the early clone.
	 */
	pti_clone_entry_text(true);
	pti_clone_kernel_text();

	debug_checkwx_user();
}