// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This code is based in part on work published here:
 *
 *	https://github.com/IAIK/KAISER
 *
 * The original work was written by, and signed off for the Linux
 * kernel by:
 *
 *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
 *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
 *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
 *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
 *
 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
 * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
 *		       Andy Lutomirski <luto@amacapital.net>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/cpu.h>

#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

#undef pr_fmt
#define pr_fmt(fmt)     "Kernel/User page tables isolation: " fmt

/* Backporting helper */
#ifndef __GFP_NOTRACK
#define __GFP_NOTRACK	0
#endif

/*
 * Define the page-table levels we clone for user-space on 32
 * and 64 bit.
 */
#ifdef CONFIG_X86_64
#define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PMD
#else
#define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PTE
#endif

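/*
 * Editor's note (hedged rationale, not from this file): on 64-bit the
 * cloned sections are PMD-aligned in the image layout, so sharing whole
 * PMDs with the user page-table exposes nothing extra.  On 32-bit the
 * kernel sections are not huge-page aligned, so a PMD-level clone could
 * map adjacent kernel memory into userspace; hence PTE granularity.
 */
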
static void __init pti_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static void __init pti_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

/* Assume mode is auto unless overridden via cmdline below. */
static enum pti_mode {
	PTI_AUTO = 0,
	PTI_FORCE_OFF,
	PTI_FORCE_ON
} pti_mode;

void __init pti_check_boottime_disable(void)
{
	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on XEN PV.");
		return;
	}

	if (cpu_mitigations_off())
		pti_mode = PTI_FORCE_OFF;
	if (pti_mode == PTI_FORCE_OFF) {
		pti_print_if_insecure("disabled on command line.");
		return;
	}

	if (pti_mode == PTI_FORCE_ON)
		pti_print_if_secure("force enabled on command line.");

	if (pti_mode == PTI_AUTO && !boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		return;

	setup_force_cpu_cap(X86_FEATURE_PTI);

	if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) {
		pr_debug("PTI enabled, disabling INVLPGB\n");
		setup_clear_cpu_cap(X86_FEATURE_INVLPGB);
	}
}

static int __init pti_parse_cmdline(char *arg)
{
	if (!strcmp(arg, "off"))
		pti_mode = PTI_FORCE_OFF;
	else if (!strcmp(arg, "on"))
		pti_mode = PTI_FORCE_ON;
	else if (!strcmp(arg, "auto"))
		pti_mode = PTI_AUTO;
	else
		return -EINVAL;
	return 0;
}
early_param("pti", pti_parse_cmdline);

static int __init pti_parse_cmdline_nopti(char *arg)
{
	pti_mode = PTI_FORCE_OFF;
	return 0;
}
early_param("nopti", pti_parse_cmdline_nopti);

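/*
 * Usage sketch (semantics as documented in
 * Documentation/admin-guide/kernel-parameters.txt):
 *
 *	pti=on    force-enable PTI, even without X86_BUG_CPU_MELTDOWN
 *	pti=off   force-disable PTI (equivalent to passing "nopti")
 *	pti=auto  the default; enable PTI only on affected CPUs
 */
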
pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	/*
	 * Changes to the high (kernel) portion of the kernelmode page
	 * tables are not automatically propagated to the usermode tables.
	 *
	 * Users should keep in mind that, unlike the kernelmode tables,
	 * there is no vmalloc_fault equivalent for the usermode tables.
	 * Top-level entries added to init_mm's usermode pgd after boot
	 * will not be automatically propagated to other mms.
	 */
	if (!pgdp_maps_userspace(pgdp) || (pgd.pgd & _PAGE_NOPTISHADOW))
		return pgd;

	/*
	 * The user page tables get the full PGD, accessible from
	 * userspace:
	 */
	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;

	/*
	 * If this is normal user memory, make it NX in the kernel
	 * pagetables so that, if we somehow screw up and return to
	 * usermode with the kernel CR3 loaded, we'll get a page fault
	 * instead of allowing user code to execute with the wrong CR3.
	 *
	 * As exceptions, we don't set NX if:
	 *  - _PAGE_USER is not set.  This could be an executable
	 *     EFI runtime mapping or something similar, and the kernel
	 *     may execute from it
	 *  - we don't have NX support
	 *  - we're clearing the PGD (i.e. the new pgd is not present).
	 */
	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
	    (__supported_pte_mask & _PAGE_NX))
		pgd.pgd |= _PAGE_NX;

	/* return the copy of the PGD we want the kernel to use: */
	return pgd;
}

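/*
 * Illustrative call path (hedged; the wrapper lives in
 * arch/x86/include/asm/pgtable.h and is quoted from memory):
 *
 *	static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
 *	{
 *		if (!static_cpu_has(X86_FEATURE_PTI))
 *			return pgd;
 *		return __pti_set_user_pgtbl(pgdp, pgd);
 *	}
 *
 * so every kernel PGD write is mirrored here when PTI is enabled.
 */
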
/*
 * Walk the user copy of the page tables, allocating page-table pages
 * on the way down as needed.
 *
 * Returns a pointer to a P4D on success, or NULL on failure.
 */
static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

	if (address < PAGE_OFFSET) {
		WARN_ONCE(1, "attempt to walk user address\n");
		return NULL;
	}

	if (pgd_none(*pgd)) {
		unsigned long new_p4d_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_p4d_page))
			return NULL;

		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
	}
	BUILD_BUG_ON(pgd_leaf(*pgd));

	return p4d_offset(pgd, address);
}

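/*
 * Background (hedged): with PTI each PGD is an order-1 allocation; the
 * kernel half occupies the lower 4k page and the user half the upper
 * one.  kernel_to_user_pgdp() just sets bit PAGE_SHIFT of the pointer
 * (PTI_PGTABLE_SWITCH_BIT), the same bit the entry code flips in CR3
 * when switching page-tables.
 */
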
/*
 * Walk the user copy of the page tables, allocating page-table pages
 * on the way down as needed.
 *
 * Returns a pointer to a PMD on success, or NULL on failure.
 */
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	p4d_t *p4d;
	pud_t *pud;

	p4d = pti_user_pagetable_walk_p4d(address);
	if (!p4d)
		return NULL;

	BUILD_BUG_ON(p4d_leaf(*p4d));
	if (p4d_none(*p4d)) {
		unsigned long new_pud_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pud_page))
			return NULL;

		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
	}

	pud = pud_offset(p4d, address);
	/* The user page tables do not use large mappings: */
	if (pud_leaf(*pud)) {
		WARN_ON(1);
		return NULL;
	}
	if (pud_none(*pud)) {
		unsigned long new_pmd_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pmd_page))
			return NULL;

		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
	}

	return pmd_offset(pud, address);
}

/*
 * Walk the shadow copy of the page tables, allocating page-table pages
 * on the way down as needed.  Does not support large pages.
 *
 * Note: this is only used when mapping *new* kernel data into the
 * user/shadow page tables.  It is never used for userspace data.
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
static pte_t *pti_user_pagetable_walk_pte(unsigned long address, bool late_text)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	pmd_t *pmd;
	pte_t *pte;

	pmd = pti_user_pagetable_walk_pmd(address);
	if (!pmd)
		return NULL;

	/* Large PMD mapping found */
	if (pmd_leaf(*pmd)) {
		/* Clear the PMD if we hit a large mapping from the first round */
		if (late_text) {
			set_pmd(pmd, __pmd(0));
		} else {
			WARN_ON_ONCE(1);
			return NULL;
		}
	}

	if (pmd_none(*pmd)) {
		unsigned long new_pte_page = __get_free_page(gfp);
		if (!new_pte_page)
			return NULL;

		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
	}

	pte = pte_offset_kernel(pmd, address);
	if (pte_flags(*pte) & _PAGE_USER) {
		WARN_ONCE(1, "attempt to walk to user pte\n");
		return NULL;
	}
	return pte;
}

#ifdef CONFIG_X86_VSYSCALL_EMULATION
static void __init pti_setup_vsyscall(void)
{
	pte_t *pte, *target_pte;
	unsigned int level;

	pte = lookup_address(VSYSCALL_ADDR, &level);
	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
		return;

	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR, false);
	if (WARN_ON(!target_pte))
		return;

	*target_pte = *pte;
	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
}
#else
static void __init pti_setup_vsyscall(void) { }
#endif

enum pti_clone_level {
	PTI_CLONE_PMD,
	PTI_CLONE_PTE,
};

static void
pti_clone_pgtable(unsigned long start, unsigned long end,
		  enum pti_clone_level level, bool late_text)
{
	unsigned long addr;

	/*
	 * Clone the populated PMDs which cover start to end. These PMD areas
	 * can have holes.
	 */
	for (addr = start; addr < end;) {
		pte_t *pte, *target_pte;
		pmd_t *pmd, *target_pmd;
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;

		/* Overflow check */
		if (addr < start)
			break;

		pgd = pgd_offset_k(addr);
		if (WARN_ON(pgd_none(*pgd)))
			return;
		p4d = p4d_offset(pgd, addr);
		if (WARN_ON(p4d_none(*p4d)))
			return;

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			WARN_ON_ONCE(addr & ~PUD_MASK);
			addr = round_up(addr + 1, PUD_SIZE);
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			WARN_ON_ONCE(addr & ~PMD_MASK);
			addr = round_up(addr + 1, PMD_SIZE);
			continue;
		}

		if (pmd_leaf(*pmd) || level == PTI_CLONE_PMD) {
			target_pmd = pti_user_pagetable_walk_pmd(addr);
			if (WARN_ON(!target_pmd))
				return;

			/*
			 * Only clone present PMDs.  This ensures only setting
			 * _PAGE_GLOBAL on present PMDs.  This should only be
			 * called on well-known addresses anyway, so a non-
			 * present PMD would be a surprise.
			 */
			if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
				return;

			/*
			 * Setting 'target_pmd' below creates a mapping in both
			 * the user and kernel page tables.  It is effectively
			 * global, so set it as global in both copies.  Note:
			 * the X86_FEATURE_PGE check is not _required_ because
			 * the CPU ignores _PAGE_GLOBAL when PGE is not
			 * supported.  The check keeps consistency with code
			 * that only sets this bit when it is supported.
			 */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);

			/*
			 * Copy the PMD.  That is, the kernelmode and usermode
			 * tables will share the last-level page tables of this
			 * address range.
			 */
			*target_pmd = *pmd;

			addr = round_up(addr + 1, PMD_SIZE);

		} else if (level == PTI_CLONE_PTE) {

			/* Walk the page-table down to the pte level */
			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte)) {
				addr = round_up(addr + 1, PAGE_SIZE);
				continue;
			}

			/* Only clone present PTEs */
			if (WARN_ON(!(pte_flags(*pte) & _PAGE_PRESENT)))
				return;

			/* Allocate PTE in the user page-table */
			target_pte = pti_user_pagetable_walk_pte(addr, late_text);
			if (WARN_ON(!target_pte))
				return;

			/* Set GLOBAL bit in both PTEs */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pte = pte_set_flags(*pte, _PAGE_GLOBAL);

			/* Clone the PTE */
			*target_pte = *pte;

			addr = round_up(addr + 1, PAGE_SIZE);

		} else {
			BUG();
		}
	}
}

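/*
 * Illustrative use (not a call made in this file): clone a single,
 * already-mapped 4k kernel page at PTE granularity during early boot:
 *
 *	pti_clone_pgtable(addr, addr + PAGE_SIZE, PTI_CLONE_PTE, false);
 */
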
#ifdef CONFIG_X86_64
/*
 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 * next-level entry on 5-level systems).
 */
static void __init pti_clone_p4d(unsigned long addr)
{
	p4d_t *kernel_p4d, *user_p4d;
	pgd_t *kernel_pgd;

	user_p4d = pti_user_pagetable_walk_p4d(addr);
	if (!user_p4d)
		return;

	kernel_pgd = pgd_offset_k(addr);
	kernel_p4d = p4d_offset(kernel_pgd, addr);
	*user_p4d = *kernel_p4d;
}

/*
 * Clone the CPU_ENTRY_AREA and associated data into the user space visible
 * page table.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned int cpu;

	pti_clone_p4d(CPU_ENTRY_AREA_BASE);

	for_each_possible_cpu(cpu) {
		/*
		 * The SYSCALL64 entry code needs one word of scratch space
		 * in which to spill a register.  It lives in the sp2 slot
		 * of the CPU's TSS.
		 *
		 * This is done for all possible CPUs during boot to ensure
		 * that it's propagated to all mms.
		 */

		unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
		phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
		pte_t *target_pte;

		target_pte = pti_user_pagetable_walk_pte(va, false);
		if (WARN_ON(!target_pte))
			return;

		*target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
	}
}

#else /* CONFIG_X86_64 */

/*
 * On 32 bit PAE systems with 1GB of kernel address space there is only
 * one pgd/p4d for the whole kernel. Cloning that would map the whole
 * address space into the user page-tables, making PTI useless. So clone
 * the page-table on the PMD level to prevent that.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned long start, end;

	start = CPU_ENTRY_AREA_BASE;
	end   = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);

	pti_clone_pgtable(start, end, PTI_CLONE_PMD, false);
}
#endif /* CONFIG_X86_64 */

/*
 * Clone the ESPFIX P4D into the user space visible page table
 */
static void __init pti_setup_espfix64(void)
{
#ifdef CONFIG_X86_ESPFIX64
	pti_clone_p4d(ESPFIX_BASE_ADDR);
#endif
}

/*
 * Clone the populated PMDs of the entry text and force it RO.
 */
static void pti_clone_entry_text(bool late)
{
	pti_clone_pgtable((unsigned long) __entry_text_start,
			  (unsigned long) __entry_text_end,
			  PTI_LEVEL_KERNEL_IMAGE, late);
}

/*
 * Global pages and PCIDs are both ways to make kernel TLB entries
 * live longer, reduce TLB misses and improve kernel performance.
 * But, leaving all kernel text Global makes it potentially accessible
 * to Meltdown-style attacks which make it trivial to find gadgets or
 * defeat KASLR.
 *
 * Only use global pages when it is really worth it.
 */
static inline bool pti_kernel_image_global_ok(void)
{
	/*
	 * Systems with PCIDs get little benefit from global
	 * kernel text and are not worth the downsides.
	 */
	if (cpu_feature_enabled(X86_FEATURE_PCID))
		return false;

	/*
	 * Only do global kernel image for pti=auto.  Do the most
	 * secure thing (not global) if pti=on specified.
	 */
	if (pti_mode != PTI_AUTO)
		return false;

	/*
	 * K8 may not tolerate the cleared _PAGE_RW on the userspace
	 * global kernel image pages.  Do the safe thing (disable
	 * global kernel image).  This is unlikely to ever be
	 * noticed because PTI is disabled by default on AMD CPUs.
	 */
	if (boot_cpu_has(X86_FEATURE_K8))
		return false;

	/*
	 * RANDSTRUCT derives its hardening benefits from the
	 * attacker's lack of knowledge about the layout of kernel
	 * data structures.  Keep the kernel image non-global in
	 * cases where RANDSTRUCT is in use to help keep the layout a
	 * secret.
	 */
	if (IS_ENABLED(CONFIG_RANDSTRUCT))
		return false;

	return true;
}

/*
 * For some configurations, map all of kernel text into the user page
 * tables.  This reduces TLB misses, especially on non-PCID systems.
 */
static void pti_clone_kernel_text(void)
{
	/*
	 * rodata is part of the kernel image and is normally
	 * readable on the filesystem or on the web.  But, do not
	 * clone the areas past rodata, they might contain secrets.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end_clone  = (unsigned long)__end_rodata_aligned;
	unsigned long end_global = PFN_ALIGN((unsigned long)_etext);

	if (!pti_kernel_image_global_ok())
		return;

	pr_debug("mapping partial kernel image into user address space\n");

	/*
	 * Note that this will undo _some_ of the work that
	 * pti_set_kernel_image_nonglobal() did to clear the
	 * global bit.
	 */
	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE, false);

	/*
	 * pti_clone_pgtable() will set the global bit in any PMDs
	 * that it clones, but we also need to get any PTEs in
	 * the last level for areas that are not huge-page-aligned.
	 */

	/* Set the global bit for normal non-__init kernel text: */
	set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
}

static void pti_set_kernel_image_nonglobal(void)
{
	/*
	 * The identity map is created with PMDs, regardless of the
	 * actual length of the kernel.  We need to clear
	 * _PAGE_GLOBAL up to a PMD boundary, not just to the end
	 * of the image.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = ALIGN((unsigned long)_end, PMD_SIZE);

	/*
	 * This clears _PAGE_GLOBAL from the entire kernel image.
	 * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
	 * areas that are mapped to userspace.
	 */
	set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}

/*
 * Initialize kernel page table isolation
 */
void __init pti_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("enabled\n");

#ifdef CONFIG_X86_32
	/*
	 * We check for X86_FEATURE_PCID here. But the init code will
	 * clear the feature flag on 32 bit because the feature is not
	 * supported on 32 bit anyway. To print the warning we need to
	 * check CPUID directly again.
	 */
	if (cpuid_ecx(0x1) & BIT(17)) {
		/* Use printk to work around pr_fmt() */
		printk(KERN_WARNING "\n");
		printk(KERN_WARNING "************************************************************\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
		printk(KERN_WARNING "** Your performance will increase dramatically if you     **\n");
		printk(KERN_WARNING "** switch to a 64-bit kernel!                             **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "************************************************************\n");
	}
#endif

	pti_clone_user_shared();

	/* Undo all global bits from the init pagetables in head_64.S: */
	pti_set_kernel_image_nonglobal();

	/*
	 * Replace some of the global bits just for the shared entry text.
	 *
	 * This is very early in boot. Device and late initcalls can do
	 * modprobe before free_initmem() and mark_readonly(). This
	 * pti_clone_entry_text() allows those user-mode helpers to
	 * function, but notably the text is still RW.
	 */
	pti_clone_entry_text(false);
	pti_setup_espfix64();
	pti_setup_vsyscall();
}

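/*
 * Call-site sketch (hedged; quoted from init/main.c from memory, treat
 * as illustrative): pti_init() above runs once from mm_init(), right
 * after init_espfix_bsp(), while pti_finalize() below runs from
 * kernel_init() once init memory is freed and the image is read-only:
 *
 *	free_initmem();
 *	mark_readonly();
 *	pti_finalize();
 */
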
/*
 * Finalize the kernel mappings in the userspace page-table. Some of the
 * mappings for the kernel image might have changed since pti_init()
 * cloned them. This is because parts of the kernel image have been
 * mapped RO and/or NX.  These changes need to be cloned again to the
 * userspace page-table.
 */
void pti_finalize(void)
{
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;
	/*
	 * This is after free_initmem() (all initcalls are done) and we've done
	 * mark_readonly(). Text is now NX which might've split some PMDs
	 * relative to the early clone.
	 */
	pti_clone_entry_text(true);
	pti_clone_kernel_text();

	debug_checkwx_user();
}