xref: /linux/arch/x86/mm/fault.c (revision 4745dc8abb0a0a9851c07265eea01d844886d5c8)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Copyright (C) 1995  Linus Torvalds
4  *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
5  *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
6  */
7 #include <linux/sched.h>		/* test_thread_flag(), ...	*/
8 #include <linux/sched/task_stack.h>	/* task_stack_*(), ...		*/
9 #include <linux/kdebug.h>		/* oops_begin/end, ...		*/
10 #include <linux/extable.h>		/* search_exception_tables	*/
11 #include <linux/memblock.h>		/* max_low_pfn			*/
12 #include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
13 #include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
14 #include <linux/perf_event.h>		/* perf_sw_event		*/
15 #include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
16 #include <linux/prefetch.h>		/* prefetchw			*/
17 #include <linux/context_tracking.h>	/* exception_enter(), ...	*/
18 #include <linux/uaccess.h>		/* faulthandler_disabled()	*/
19 #include <linux/efi.h>			/* efi_recover_from_page_fault()*/
20 #include <linux/mm_types.h>
21 
22 #include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
23 #include <asm/traps.h>			/* dotraplinkage, ...		*/
24 #include <asm/pgalloc.h>		/* pgd_*(), ...			*/
25 #include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
26 #include <asm/vsyscall.h>		/* emulate_vsyscall		*/
27 #include <asm/vm86.h>			/* struct vm86			*/
28 #include <asm/mmu_context.h>		/* vma_pkey()			*/
29 #include <asm/efi.h>			/* efi_recover_from_page_fault()*/
30 #include <asm/desc.h>			/* store_idt(), ...		*/
31 #include <asm/cpu_entry_area.h>		/* exception stack		*/
32 
33 #define CREATE_TRACE_POINTS
34 #include <asm/trace/exceptions.h>
35 
36 /*
37  * Returns 0 if mmiotrace is disabled, or if the fault is not
38  * handled by mmiotrace:
39  */
40 static nokprobe_inline int
41 kmmio_fault(struct pt_regs *regs, unsigned long addr)
42 {
43 	if (unlikely(is_kmmio_active()))
44 		if (kmmio_handler(regs, addr) == 1)
45 			return -1;
46 	return 0;
47 }
48 
49 static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
50 {
51 	if (!kprobes_built_in())
52 		return 0;
53 	if (user_mode(regs))
54 		return 0;
55 	/*
56 	 * To be potentially processing a kprobe fault and to be allowed to call
57 	 * kprobe_running(), we have to be non-preemptible.
58 	 */
59 	if (preemptible())
60 		return 0;
61 	if (!kprobe_running())
62 		return 0;
63 	return kprobe_fault_handler(regs, X86_TRAP_PF);
64 }
65 
66 /*
67  * Prefetch quirks:
68  *
69  * 32-bit mode:
70  *
71  *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
72  *   Check that here and ignore it.
73  *
74  * 64-bit mode:
75  *
76  *   Sometimes the CPU reports invalid exceptions on prefetch.
77  *   Check that here and ignore it.
78  *
79  * Opcode checker based on code by Richard Brunner.
80  */
81 static inline int
82 check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
83 		      unsigned char opcode, int *prefetch)
84 {
85 	unsigned char instr_hi = opcode & 0xf0;
86 	unsigned char instr_lo = opcode & 0x0f;
87 
88 	switch (instr_hi) {
89 	case 0x20:
90 	case 0x30:
91 		/*
92 		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
93 		 * In X86_64 long mode, the CPU will signal an invalid
94 		 * opcode if some of these prefixes are present, so
95 		 * X86_64 will never get here anyway.
96 		 */
97 		return ((instr_lo & 7) == 0x6);
98 #ifdef CONFIG_X86_64
99 	case 0x40:
100 		/*
101 		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
102 		 * We need to figure out in what mode the instruction was
103 		 * issued. We could check the code segment descriptor's L (long
104 		 * mode) bit in the LDT, but for now it's good enough to assume
105 		 * that long mode only uses well-known segments or the kernel.
106 		 */
107 		return (!user_mode(regs) || user_64bit_mode(regs));
108 #endif
109 	case 0x60:
110 		/* 0x64 thru 0x67 are valid prefixes in all modes. */
111 		return (instr_lo & 0xC) == 0x4;
112 	case 0xF0:
113 		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
114 		return !instr_lo || (instr_lo>>1) == 1;
115 	case 0x00:
116 		/* Prefetch instruction is 0x0F0D or 0x0F18 */
117 		if (probe_kernel_address(instr, opcode))
118 			return 0;
119 
120 		*prefetch = (instr_lo == 0xF) &&
121 			(opcode == 0x0D || opcode == 0x18);
122 		return 0;
123 	default:
124 		return 0;
125 	}
126 }
127 
128 static int
129 is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
130 {
131 	unsigned char *max_instr;
132 	unsigned char *instr;
133 	int prefetch = 0;
134 
135 	/*
136 	 * If it was an exec (instruction fetch) fault on an NX page, then
137 	 * do not ignore the fault:
138 	 */
139 	if (error_code & X86_PF_INSTR)
140 		return 0;
141 
142 	instr = (void *)convert_ip_to_linear(current, regs);
143 	max_instr = instr + 15;
144 
145 	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
146 		return 0;
147 
148 	while (instr < max_instr) {
149 		unsigned char opcode;
150 
151 		if (probe_kernel_address(instr, opcode))
152 			break;
153 
154 		instr++;
155 
156 		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
157 			break;
158 	}
159 	return prefetch;
160 }
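
/*
 * Illustrative user-space sketch (not part of this file): the fault is only
 * ignored when the bytes at the faulting instruction pointer decode to a
 * prefetch, i.e. an optional run of prefixes followed by the two-byte
 * opcodes 0F 0D (AMD PREFETCH/PREFETCHW) or 0F 18 (PREFETCHh).  The prefix
 * and opcode values below are architectural; the helper itself is only a
 * simplified stand-in for is_prefetch()/check_prefetch_opcode() above.
 */
static int looks_like_prefetch(const unsigned char *insn, unsigned int len)
{
	unsigned int i = 0;

	/* Skip the prefixes that the kernel scanner also accepts. */
	while (i < len) {
		unsigned char b = insn[i];

		if (b == 0x26 || b == 0x2e || b == 0x36 || b == 0x3e ||	/* segment overrides */
		    (b >= 0x40 && b <= 0x4f) ||				/* REX (64-bit only) */
		    (b >= 0x64 && b <= 0x67) ||				/* FS/GS/operand/address size */
		    b == 0xf0 || b == 0xf2 || b == 0xf3) {		/* LOCK/REPNE/REP */
			i++;
			continue;
		}
		break;
	}

	/* A prefetch is the two-byte opcode 0F 0D or 0F 18. */
	return i + 1 < len && insn[i] == 0x0f &&
	       (insn[i + 1] == 0x0d || insn[i + 1] == 0x18);
}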
161 
162 DEFINE_SPINLOCK(pgd_lock);
163 LIST_HEAD(pgd_list);
164 
165 #ifdef CONFIG_X86_32
166 static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
167 {
168 	unsigned index = pgd_index(address);
169 	pgd_t *pgd_k;
170 	p4d_t *p4d, *p4d_k;
171 	pud_t *pud, *pud_k;
172 	pmd_t *pmd, *pmd_k;
173 
174 	pgd += index;
175 	pgd_k = init_mm.pgd + index;
176 
177 	if (!pgd_present(*pgd_k))
178 		return NULL;
179 
180 	/*
181 	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
182 	 * and redundant with the set_pmd() on non-PAE. As would
183 	 * set_p4d/set_pud.
184 	 */
185 	p4d = p4d_offset(pgd, address);
186 	p4d_k = p4d_offset(pgd_k, address);
187 	if (!p4d_present(*p4d_k))
188 		return NULL;
189 
190 	pud = pud_offset(p4d, address);
191 	pud_k = pud_offset(p4d_k, address);
192 	if (!pud_present(*pud_k))
193 		return NULL;
194 
195 	pmd = pmd_offset(pud, address);
196 	pmd_k = pmd_offset(pud_k, address);
197 	if (!pmd_present(*pmd_k))
198 		return NULL;
199 
200 	if (!pmd_present(*pmd))
201 		set_pmd(pmd, *pmd_k);
202 	else
203 		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
204 
205 	return pmd_k;
206 }
207 
208 void vmalloc_sync_all(void)
209 {
210 	unsigned long address;
211 
212 	if (SHARED_KERNEL_PMD)
213 		return;
214 
215 	for (address = VMALLOC_START & PMD_MASK;
216 	     address >= TASK_SIZE_MAX && address < FIXADDR_TOP;
217 	     address += PMD_SIZE) {
218 		struct page *page;
219 
220 		spin_lock(&pgd_lock);
221 		list_for_each_entry(page, &pgd_list, lru) {
222 			spinlock_t *pgt_lock;
223 			pmd_t *ret;
224 
225 			/* the pgt_lock is only needed for Xen */
226 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
227 
228 			spin_lock(pgt_lock);
229 			ret = vmalloc_sync_one(page_address(page), address);
230 			spin_unlock(pgt_lock);
231 
232 			if (!ret)
233 				break;
234 		}
235 		spin_unlock(&pgd_lock);
236 	}
237 }
238 
239 /*
240  * 32-bit:
241  *
242  *   Handle a fault on the vmalloc or module mapping area
243  */
244 static noinline int vmalloc_fault(unsigned long address)
245 {
246 	unsigned long pgd_paddr;
247 	pmd_t *pmd_k;
248 	pte_t *pte_k;
249 
250 	/* Make sure we are in vmalloc area: */
251 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
252 		return -1;
253 
254 	/*
255 	 * Synchronize this task's top level page-table
256 	 * with the 'reference' page table.
257 	 *
258 	 * Do _not_ use "current" here. We might be inside
259 	 * an interrupt in the middle of a task switch..
260 	 */
261 	pgd_paddr = read_cr3_pa();
262 	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
263 	if (!pmd_k)
264 		return -1;
265 
266 	if (pmd_large(*pmd_k))
267 		return 0;
268 
269 	pte_k = pte_offset_kernel(pmd_k, address);
270 	if (!pte_present(*pte_k))
271 		return -1;
272 
273 	return 0;
274 }
275 NOKPROBE_SYMBOL(vmalloc_fault);
276 
277 /*
278  * Did it hit the DOS screen memory VA from vm86 mode?
279  */
280 static inline void
281 check_v8086_mode(struct pt_regs *regs, unsigned long address,
282 		 struct task_struct *tsk)
283 {
284 #ifdef CONFIG_VM86
285 	unsigned long bit;
286 
287 	if (!v8086_mode(regs) || !tsk->thread.vm86)
288 		return;
289 
290 	bit = (address - 0xA0000) >> PAGE_SHIFT;
291 	if (bit < 32)
292 		tsk->thread.vm86->screen_bitmap |= 1 << bit;
293 #endif
294 }
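
/*
 * Illustrative worked example (not part of this file): the vm86 screen
 * bitmap covers the 32 pages from 0xA0000 to 0xBFFFF.  A fault at, say,
 * address 0xA3123 gives bit = (0xA3123 - 0xA0000) >> PAGE_SHIFT = 3 with
 * 4K pages, so bit 3 of screen_bitmap gets set; anything at or above
 * 0xC0000 yields bit >= 32 and is ignored.
 */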
295 
296 static bool low_pfn(unsigned long pfn)
297 {
298 	return pfn < max_low_pfn;
299 }
300 
301 static void dump_pagetable(unsigned long address)
302 {
303 	pgd_t *base = __va(read_cr3_pa());
304 	pgd_t *pgd = &base[pgd_index(address)];
305 	p4d_t *p4d;
306 	pud_t *pud;
307 	pmd_t *pmd;
308 	pte_t *pte;
309 
310 #ifdef CONFIG_X86_PAE
311 	pr_info("*pdpt = %016Lx ", pgd_val(*pgd));
312 	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
313 		goto out;
314 #define pr_pde pr_cont
315 #else
316 #define pr_pde pr_info
317 #endif
318 	p4d = p4d_offset(pgd, address);
319 	pud = pud_offset(p4d, address);
320 	pmd = pmd_offset(pud, address);
321 	pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
322 #undef pr_pde
323 
324 	/*
325 	 * We must not directly access the pte in the highpte
326 	 * case if the page table is located in highmem.
327 	 * And let's rather not kmap-atomic the pte, just in case
328 	 * it's allocated already:
329 	 */
330 	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
331 		goto out;
332 
333 	pte = pte_offset_kernel(pmd, address);
334 	pr_cont("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
335 out:
336 	pr_cont("\n");
337 }
338 
339 #else /* CONFIG_X86_64: */
340 
341 void vmalloc_sync_all(void)
342 {
343 	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
344 }
345 
346 /*
347  * 64-bit:
348  *
349  *   Handle a fault on the vmalloc area
350  */
351 static noinline int vmalloc_fault(unsigned long address)
352 {
353 	pgd_t *pgd, *pgd_k;
354 	p4d_t *p4d, *p4d_k;
355 	pud_t *pud;
356 	pmd_t *pmd;
357 	pte_t *pte;
358 
359 	/* Make sure we are in vmalloc area: */
360 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
361 		return -1;
362 
363 	/*
364 	 * Copy kernel mappings over when needed. This can also
365 	 * happen within a race with a page table update. In the latter
366 	 * case just flush:
367 	 */
368 	pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address);
369 	pgd_k = pgd_offset_k(address);
370 	if (pgd_none(*pgd_k))
371 		return -1;
372 
373 	if (pgtable_l5_enabled()) {
374 		if (pgd_none(*pgd)) {
375 			set_pgd(pgd, *pgd_k);
376 			arch_flush_lazy_mmu_mode();
377 		} else {
378 			BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_k));
379 		}
380 	}
381 
382 	/* With 4-level paging, copying happens on the p4d level. */
383 	p4d = p4d_offset(pgd, address);
384 	p4d_k = p4d_offset(pgd_k, address);
385 	if (p4d_none(*p4d_k))
386 		return -1;
387 
388 	if (p4d_none(*p4d) && !pgtable_l5_enabled()) {
389 		set_p4d(p4d, *p4d_k);
390 		arch_flush_lazy_mmu_mode();
391 	} else {
392 		BUG_ON(p4d_pfn(*p4d) != p4d_pfn(*p4d_k));
393 	}
394 
395 	BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4);
396 
397 	pud = pud_offset(p4d, address);
398 	if (pud_none(*pud))
399 		return -1;
400 
401 	if (pud_large(*pud))
402 		return 0;
403 
404 	pmd = pmd_offset(pud, address);
405 	if (pmd_none(*pmd))
406 		return -1;
407 
408 	if (pmd_large(*pmd))
409 		return 0;
410 
411 	pte = pte_offset_kernel(pmd, address);
412 	if (!pte_present(*pte))
413 		return -1;
414 
415 	return 0;
416 }
417 NOKPROBE_SYMBOL(vmalloc_fault);
418 
419 #ifdef CONFIG_CPU_SUP_AMD
420 static const char errata93_warning[] =
421 KERN_ERR
422 "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
423 "******* Working around it, but it may cause SEGVs or burn power.\n"
424 "******* Please consider a BIOS update.\n"
425 "******* Disabling USB legacy in the BIOS may also help.\n";
426 #endif
427 
428 /*
429  * No vm86 mode in 64-bit mode:
430  */
431 static inline void
432 check_v8086_mode(struct pt_regs *regs, unsigned long address,
433 		 struct task_struct *tsk)
434 {
435 }
436 
437 static int bad_address(void *p)
438 {
439 	unsigned long dummy;
440 
441 	return probe_kernel_address((unsigned long *)p, dummy);
442 }
443 
444 static void dump_pagetable(unsigned long address)
445 {
446 	pgd_t *base = __va(read_cr3_pa());
447 	pgd_t *pgd = base + pgd_index(address);
448 	p4d_t *p4d;
449 	pud_t *pud;
450 	pmd_t *pmd;
451 	pte_t *pte;
452 
453 	if (bad_address(pgd))
454 		goto bad;
455 
456 	pr_info("PGD %lx ", pgd_val(*pgd));
457 
458 	if (!pgd_present(*pgd))
459 		goto out;
460 
461 	p4d = p4d_offset(pgd, address);
462 	if (bad_address(p4d))
463 		goto bad;
464 
465 	pr_cont("P4D %lx ", p4d_val(*p4d));
466 	if (!p4d_present(*p4d) || p4d_large(*p4d))
467 		goto out;
468 
469 	pud = pud_offset(p4d, address);
470 	if (bad_address(pud))
471 		goto bad;
472 
473 	pr_cont("PUD %lx ", pud_val(*pud));
474 	if (!pud_present(*pud) || pud_large(*pud))
475 		goto out;
476 
477 	pmd = pmd_offset(pud, address);
478 	if (bad_address(pmd))
479 		goto bad;
480 
481 	pr_cont("PMD %lx ", pmd_val(*pmd));
482 	if (!pmd_present(*pmd) || pmd_large(*pmd))
483 		goto out;
484 
485 	pte = pte_offset_kernel(pmd, address);
486 	if (bad_address(pte))
487 		goto bad;
488 
489 	pr_cont("PTE %lx", pte_val(*pte));
490 out:
491 	pr_cont("\n");
492 	return;
493 bad:
494 	pr_info("BAD\n");
495 }
496 
497 #endif /* CONFIG_X86_64 */
498 
499 /*
500  * Workaround for K8 erratum #93 & buggy BIOS.
501  *
502  * BIOS SMM functions are required to use a specific workaround
503  * to avoid corruption of the 64-bit RIP register on C-stepping K8.
504  *
505  * A lot of BIOSes that didn't get tested properly miss this.
506  *
507  * The OS sees this as a page fault with the upper 32 bits of RIP cleared.
508  * Try to work around it here.
509  *
510  * Note we only handle faults in the kernel here.
511  * Does nothing on 32-bit.
512  */
513 static int is_errata93(struct pt_regs *regs, unsigned long address)
514 {
515 #if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
516 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
517 	    || boot_cpu_data.x86 != 0xf)
518 		return 0;
519 
520 	if (address != regs->ip)
521 		return 0;
522 
523 	if ((address >> 32) != 0)
524 		return 0;
525 
526 	address |= 0xffffffffUL << 32;
527 	if ((address >= (u64)_stext && address <= (u64)_etext) ||
528 	    (address >= MODULES_VADDR && address <= MODULES_END)) {
529 		printk_once(errata93_warning);
530 		regs->ip = address;
531 		return 1;
532 	}
533 #endif
534 	return 0;
535 }
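
/*
 * Illustrative sketch (not part of this file): erratum #93 leaves RIP with
 * its upper 32 bits cleared, so the workaround above ORs the canonical
 * upper half back in and checks whether the result lands in kernel text or
 * the module area.  For example, a truncated RIP of 0x81234567 becomes
 * 0xffffffff81234567, which is a plausible 64-bit kernel text address.
 */
static inline unsigned long errata93_sign_extend(unsigned long truncated_rip)
{
	/* Recreate the dropped upper 32 bits (assumes a 64-bit unsigned long). */
	return truncated_rip | (0xffffffffUL << 32);
}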
536 
537 /*
538  * Work around K8 erratum #100: K8 in compat mode occasionally jumps
539  * to illegal addresses above 4GB.
540  *
541  * We catch this in the page fault handler because these addresses
542  * are not reachable. Just detect this case and return.  Any code
543  * segment in LDT is compatibility mode.
544  */
545 static int is_errata100(struct pt_regs *regs, unsigned long address)
546 {
547 #ifdef CONFIG_X86_64
548 	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
549 		return 1;
550 #endif
551 	return 0;
552 }
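
/*
 * Illustrative sketch (not part of this file): bit 2 of a segment selector
 * is the Table Indicator (TI) bit; when it is set, the selector refers to
 * the LDT rather than the GDT.  The check above therefore treats "the
 * 32-bit user code segment, or any LDT code segment" as compat mode, and a
 * faulting address with any of the upper 32 bits set as the erratum
 * pattern.
 */
static inline int selector_references_ldt(unsigned short sel)
{
	return sel & (1 << 2);	/* TI bit: 0 = GDT, 1 = LDT */
}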
553 
554 static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
555 {
556 #ifdef CONFIG_X86_F00F_BUG
557 	unsigned long nr;
558 
559 	/*
560 	 * Pentium F0 0F C7 C8 bug workaround:
561 	 */
562 	if (boot_cpu_has_bug(X86_BUG_F00F)) {
563 		nr = (address - idt_descr.address) >> 3;
564 
565 		if (nr == 6) {
566 			do_invalid_op(regs, 0);
567 			return 1;
568 		}
569 	}
570 #endif
571 	return 0;
572 }
573 
574 static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
575 {
576 	u32 offset = (index >> 3) * sizeof(struct desc_struct);
577 	unsigned long addr;
578 	struct ldttss_desc desc;
579 
580 	if (index == 0) {
581 		pr_alert("%s: NULL\n", name);
582 		return;
583 	}
584 
585 	if (offset + sizeof(struct ldttss_desc) >= gdt->size) {
586 		pr_alert("%s: 0x%hx -- out of bounds\n", name, index);
587 		return;
588 	}
589 
590 	if (probe_kernel_read(&desc, (void *)(gdt->address + offset),
591 			      sizeof(struct ldttss_desc))) {
592 		pr_alert("%s: 0x%hx -- GDT entry is not readable\n",
593 			 name, index);
594 		return;
595 	}
596 
597 	addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24);
598 #ifdef CONFIG_X86_64
599 	addr |= ((u64)desc.base3 << 32);
600 #endif
601 	pr_alert("%s: 0x%hx -- base=0x%lx limit=0x%x\n",
602 		 name, index, addr, (desc.limit0 | (desc.limit1 << 16)));
603 }
604 
605 static void
606 show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address)
607 {
608 	if (!oops_may_print())
609 		return;
610 
611 	if (error_code & X86_PF_INSTR) {
612 		unsigned int level;
613 		pgd_t *pgd;
614 		pte_t *pte;
615 
616 		pgd = __va(read_cr3_pa());
617 		pgd += pgd_index(address);
618 
619 		pte = lookup_address_in_pgd(pgd, address, &level);
620 
621 		if (pte && pte_present(*pte) && !pte_exec(*pte))
622 			pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n",
623 				from_kuid(&init_user_ns, current_uid()));
624 		if (pte && pte_present(*pte) && pte_exec(*pte) &&
625 				(pgd_flags(*pgd) & _PAGE_USER) &&
626 				(__read_cr4() & X86_CR4_SMEP))
627 			pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n",
628 				from_kuid(&init_user_ns, current_uid()));
629 	}
630 
631 	if (address < PAGE_SIZE && !user_mode(regs))
632 		pr_alert("BUG: kernel NULL pointer dereference, address: %px\n",
633 			(void *)address);
634 	else
635 		pr_alert("BUG: unable to handle page fault for address: %px\n",
636 			(void *)address);
637 
638 	pr_alert("#PF: %s %s in %s mode\n",
639 		 (error_code & X86_PF_USER)  ? "user" : "supervisor",
640 		 (error_code & X86_PF_INSTR) ? "instruction fetch" :
641 		 (error_code & X86_PF_WRITE) ? "write access" :
642 					       "read access",
643 			     user_mode(regs) ? "user" : "kernel");
644 	pr_alert("#PF: error_code(0x%04lx) - %s\n", error_code,
645 		 !(error_code & X86_PF_PROT) ? "not-present page" :
646 		 (error_code & X86_PF_RSVD)  ? "reserved bit violation" :
647 		 (error_code & X86_PF_PK)    ? "protection keys violation" :
648 					       "permissions violation");
649 
650 	if (!(error_code & X86_PF_USER) && user_mode(regs)) {
651 		struct desc_ptr idt, gdt;
652 		u16 ldtr, tr;
653 
654 		/*
655 		 * This can happen for quite a few reasons.  The more obvious
656 		 * ones are faults accessing the GDT, or LDT.  Perhaps
657 		 * surprisingly, if the CPU tries to deliver a benign or
658 		 * contributory exception from user code and gets a page fault
659 		 * during delivery, the page fault can be delivered as though
660 		 * it originated directly from user code.  This could happen
661 		 * due to wrong permissions on the IDT, GDT, LDT, TSS, or
662 		 * kernel or IST stack.
663 		 */
664 		store_idt(&idt);
665 
666 		/* Usable even on Xen PV -- it's just slow. */
667 		native_store_gdt(&gdt);
668 
669 		pr_alert("IDT: 0x%lx (limit=0x%hx) GDT: 0x%lx (limit=0x%hx)\n",
670 			 idt.address, idt.size, gdt.address, gdt.size);
671 
672 		store_ldt(ldtr);
673 		show_ldttss(&gdt, "LDTR", ldtr);
674 
675 		store_tr(tr);
676 		show_ldttss(&gdt, "TR", tr);
677 	}
678 
679 	dump_pagetable(address);
680 }
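
/*
 * Illustrative sketch (not part of this file): the #PF error code bits
 * decoded above are architectural.  A stand-alone decoder using the same
 * bit positions could look like the following; the PF_* macros mirror
 * X86_PF_* but are local to this sketch, and note that the real code
 * derives the user/kernel mode string from user_mode(regs), not from the
 * USER bit.
 */
#define PF_PROT		0x01	/* 0: not-present page, 1: protection violation */
#define PF_WRITE	0x02	/* the access was a write */
#define PF_USER		0x04	/* the access originated in user mode */
#define PF_RSVD		0x08	/* a reserved bit was set in a paging entry */
#define PF_INSTR	0x10	/* the access was an instruction fetch */
#define PF_PK		0x20	/* protection-keys violation */

static const char *pf_access_kind(unsigned long ec)
{
	return (ec & PF_INSTR) ? "instruction fetch" :
	       (ec & PF_WRITE) ? "write access" : "read access";
}

static const char *pf_reason(unsigned long ec)
{
	return !(ec & PF_PROT) ? "not-present page" :
	       (ec & PF_RSVD)  ? "reserved bit violation" :
	       (ec & PF_PK)    ? "protection keys violation" :
				 "permissions violation";
}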
681 
682 static noinline void
683 pgtable_bad(struct pt_regs *regs, unsigned long error_code,
684 	    unsigned long address)
685 {
686 	struct task_struct *tsk;
687 	unsigned long flags;
688 	int sig;
689 
690 	flags = oops_begin();
691 	tsk = current;
692 	sig = SIGKILL;
693 
694 	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
695 	       tsk->comm, address);
696 	dump_pagetable(address);
697 
698 	if (__die("Bad pagetable", regs, error_code))
699 		sig = 0;
700 
701 	oops_end(flags, regs, sig);
702 }
703 
704 static void set_signal_archinfo(unsigned long address,
705 				unsigned long error_code)
706 {
707 	struct task_struct *tsk = current;
708 
709 	/*
710 	 * To avoid leaking information about the kernel page
711 	 * table layout, pretend that user-mode accesses to
712 	 * kernel addresses are always protection faults.
713 	 *
714 	 * NB: This means that failed vsyscalls with vsyscall=none
715 	 * will have the PROT bit.  This doesn't leak any
716 	 * information and does not appear to cause any problems.
717 	 */
718 	if (address >= TASK_SIZE_MAX)
719 		error_code |= X86_PF_PROT;
720 
721 	tsk->thread.trap_nr = X86_TRAP_PF;
722 	tsk->thread.error_code = error_code | X86_PF_USER;
723 	tsk->thread.cr2 = address;
724 }
725 
726 static noinline void
727 no_context(struct pt_regs *regs, unsigned long error_code,
728 	   unsigned long address, int signal, int si_code)
729 {
730 	struct task_struct *tsk = current;
731 	unsigned long flags;
732 	int sig;
733 
734 	if (user_mode(regs)) {
735 		/*
736 		 * This is an implicit supervisor-mode access from user
737 		 * mode.  Bypass all the kernel-mode recovery code and just
738 		 * OOPS.
739 		 */
740 		goto oops;
741 	}
742 
743 	/* Are we prepared to handle this kernel fault? */
744 	if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
745 		/*
746 		 * Any interrupt that takes a fault gets the fixup. This makes
747 		 * the below recursive fault logic only apply to faults from
748 		 * task context.
749 		 */
750 		if (in_interrupt())
751 			return;
752 
753 		/*
754 		 * Per the above we're !in_interrupt(), aka. task context.
755 		 *
756 		 * In this case we need to make sure we're not recursively
757 		 * faulting through the emulate_vsyscall() logic.
758 		 */
759 		if (current->thread.sig_on_uaccess_err && signal) {
760 			set_signal_archinfo(address, error_code);
761 
762 			/* XXX: hwpoison faults will set the wrong code. */
763 			force_sig_fault(signal, si_code, (void __user *)address);
764 		}
765 
766 		/*
767 		 * Barring that, we can do the fixup and be happy.
768 		 */
769 		return;
770 	}
771 
772 #ifdef CONFIG_VMAP_STACK
773 	/*
774 	 * Stack overflow?  During boot, we can fault near the initial
775 	 * stack in the direct map, but that's not an overflow -- check
776 	 * that we're in vmalloc space to avoid this.
777 	 */
778 	if (is_vmalloc_addr((void *)address) &&
779 	    (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) ||
780 	     address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) {
781 		unsigned long stack = __this_cpu_ist_top_va(DF) - sizeof(void *);
782 		/*
783 		 * We're likely to be running with very little stack space
784 		 * left.  It's plausible that we'd hit this condition but
785 		 * double-fault even before we get this far, in which case
786 		 * we're fine: the double-fault handler will deal with it.
787 		 *
788 		 * We don't want to make it all the way into the oops code
789 		 * and then double-fault, though, because we're likely to
790 		 * break the console driver and lose most of the stack dump.
791 		 */
792 		asm volatile ("movq %[stack], %%rsp\n\t"
793 			      "call handle_stack_overflow\n\t"
794 			      "1: jmp 1b"
795 			      : ASM_CALL_CONSTRAINT
796 			      : "D" ("kernel stack overflow (page fault)"),
797 				"S" (regs), "d" (address),
798 				[stack] "rm" (stack));
799 		unreachable();
800 	}
801 #endif
802 
803 	/*
804 	 * 32-bit:
805 	 *
806 	 *   Valid to do another page fault here, because if this fault
807 	 *   had been triggered by is_prefetch fixup_exception would have
808 	 *   had been triggered by is_prefetch, fixup_exception would have
809 	 *
810 	 * 64-bit:
811 	 *
812 	 *   Hall of shame of CPU/BIOS bugs.
813 	 */
814 	if (is_prefetch(regs, error_code, address))
815 		return;
816 
817 	if (is_errata93(regs, address))
818 		return;
819 
820 	/*
821 	 * Buggy firmware could access regions which might page fault, try to
822 	 * recover from such faults.
823 	 */
824 	if (IS_ENABLED(CONFIG_EFI))
825 		efi_recover_from_page_fault(address);
826 
827 oops:
828 	/*
829 	 * Oops. The kernel tried to access some bad page. We'll have to
830 	 * terminate things with extreme prejudice:
831 	 */
832 	flags = oops_begin();
833 
834 	show_fault_oops(regs, error_code, address);
835 
836 	if (task_stack_end_corrupted(tsk))
837 		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
838 
839 	sig = SIGKILL;
840 	if (__die("Oops", regs, error_code))
841 		sig = 0;
842 
843 	/* Executive summary in case the body of the oops scrolled away */
844 	printk(KERN_DEFAULT "CR2: %016lx\n", address);
845 
846 	oops_end(flags, regs, sig);
847 }
848 
849 /*
850  * Print out info about fatal segfaults, if the show_unhandled_signals
851  * sysctl is set:
852  */
853 static inline void
854 show_signal_msg(struct pt_regs *regs, unsigned long error_code,
855 		unsigned long address, struct task_struct *tsk)
856 {
857 	const char *loglvl = task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG;
858 
859 	if (!unhandled_signal(tsk, SIGSEGV))
860 		return;
861 
862 	if (!printk_ratelimit())
863 		return;
864 
865 	printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
866 		loglvl, tsk->comm, task_pid_nr(tsk), address,
867 		(void *)regs->ip, (void *)regs->sp, error_code);
868 
869 	print_vma_addr(KERN_CONT " in ", regs->ip);
870 
871 	printk(KERN_CONT "\n");
872 
873 	show_opcodes(regs, loglvl);
874 }
875 
876 /*
877  * The (legacy) vsyscall page is the lone page in the kernel portion
878  * of the address space that has user-accessible permissions.
879  */
880 static bool is_vsyscall_vaddr(unsigned long vaddr)
881 {
882 	return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
883 }
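
/*
 * Illustrative note (not part of this file): on x86-64, VSYSCALL_ADDR is
 * the fixed address 0xffffffffff600000 (defined as (-10UL << 20) in the
 * headers), and the legacy entry points live at 1024-byte offsets inside
 * that single page.  Masking with PAGE_MASK therefore maps any such entry
 * back to VSYSCALL_ADDR, e.g.:
 *
 *	0xffffffffff600400 & ~0xfffUL == 0xffffffffff600000
 */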
884 
885 static void
886 __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
887 		       unsigned long address, u32 pkey, int si_code)
888 {
889 	struct task_struct *tsk = current;
890 
891 	/* User mode accesses just cause a SIGSEGV */
892 	if (user_mode(regs) && (error_code & X86_PF_USER)) {
893 		/*
894 		 * It's possible to have interrupts off here:
895 		 */
896 		local_irq_enable();
897 
898 		/*
899 		 * Valid to do another page fault here because this one came
900 		 * from user space:
901 		 */
902 		if (is_prefetch(regs, error_code, address))
903 			return;
904 
905 		if (is_errata100(regs, address))
906 			return;
907 
908 		/*
909 		 * To avoid leaking information about the kernel page table
910 		 * layout, pretend that user-mode accesses to kernel addresses
911 		 * are always protection faults.
912 		 */
913 		if (address >= TASK_SIZE_MAX)
914 			error_code |= X86_PF_PROT;
915 
916 		if (likely(show_unhandled_signals))
917 			show_signal_msg(regs, error_code, address, tsk);
918 
919 		set_signal_archinfo(address, error_code);
920 
921 		if (si_code == SEGV_PKUERR)
922 			force_sig_pkuerr((void __user *)address, pkey);
923 
924 		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
925 
926 		return;
927 	}
928 
929 	if (is_f00f_bug(regs, address))
930 		return;
931 
932 	no_context(regs, error_code, address, SIGSEGV, si_code);
933 }
934 
935 static noinline void
936 bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
937 		     unsigned long address)
938 {
939 	__bad_area_nosemaphore(regs, error_code, address, 0, SEGV_MAPERR);
940 }
941 
942 static void
943 __bad_area(struct pt_regs *regs, unsigned long error_code,
944 	   unsigned long address, u32 pkey, int si_code)
945 {
946 	struct mm_struct *mm = current->mm;
947 	/*
948 	 * Something tried to access memory that isn't in our memory map..
949 	 * Fix it, but check if it's kernel or user first..
950 	 */
951 	up_read(&mm->mmap_sem);
952 
953 	__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
954 }
955 
956 static noinline void
957 bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
958 {
959 	__bad_area(regs, error_code, address, 0, SEGV_MAPERR);
960 }
961 
962 static inline bool bad_area_access_from_pkeys(unsigned long error_code,
963 		struct vm_area_struct *vma)
964 {
965 	/* This code is always called on the current mm */
966 	bool foreign = false;
967 
968 	if (!boot_cpu_has(X86_FEATURE_OSPKE))
969 		return false;
970 	if (error_code & X86_PF_PK)
971 		return true;
972 	/* this checks permission keys on the VMA: */
973 	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
974 				       (error_code & X86_PF_INSTR), foreign))
975 		return true;
976 	return false;
977 }
978 
979 static noinline void
980 bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
981 		      unsigned long address, struct vm_area_struct *vma)
982 {
983 	/*
984 	 * This OSPKE check is not strictly necessary at runtime.
985 	 * But, doing it this way allows compiler optimizations
986 	 * if pkeys are compiled out.
987 	 */
988 	if (bad_area_access_from_pkeys(error_code, vma)) {
989 		/*
990 		 * A protection key fault means that the PKRU value did not allow
991 		 * access to some PTE.  Userspace can figure out what PKRU was
992 		 * from the XSAVE state.  This function captures the pkey from
993 		 * the vma and passes it to userspace so userspace can discover
994 		 * which protection key was set on the PTE.
995 		 *
996 		 * If we get here, we know that the hardware signaled an X86_PF_PK
997 		 * fault and that there was a VMA once we got into the fault
998 		 * handler.  It does *not* guarantee that the VMA we find here
999 		 * was the one that we faulted on.
1000 		 *
1001 		 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
1002 		 * 2. T1   : set PKRU to deny access to pkey=4, touches page
1003 		 * 3. T1   : faults...
1004 		 * 4.    T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
1005 		 * 5. T1   : enters fault handler, takes mmap_sem, etc...
1006 		 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
1007 		 *	     faulted on a pte with its pkey=4.
1008 		 */
1009 		u32 pkey = vma_pkey(vma);
1010 
1011 		__bad_area(regs, error_code, address, pkey, SEGV_PKUERR);
1012 	} else {
1013 		__bad_area(regs, error_code, address, 0, SEGV_ACCERR);
1014 	}
1015 }
1016 
1017 static void
1018 do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
1019 	  vm_fault_t fault)
1020 {
1021 	/* Kernel mode? Handle exceptions or die: */
1022 	if (!(error_code & X86_PF_USER)) {
1023 		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
1024 		return;
1025 	}
1026 
1027 	/* User-space => ok to do another page fault: */
1028 	if (is_prefetch(regs, error_code, address))
1029 		return;
1030 
1031 	set_signal_archinfo(address, error_code);
1032 
1033 #ifdef CONFIG_MEMORY_FAILURE
1034 	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
1035 		struct task_struct *tsk = current;
1036 		unsigned lsb = 0;
1037 
1038 		pr_err(
1039 	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
1040 			tsk->comm, tsk->pid, address);
1041 		if (fault & VM_FAULT_HWPOISON_LARGE)
1042 			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
1043 		if (fault & VM_FAULT_HWPOISON)
1044 			lsb = PAGE_SHIFT;
1045 		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
1046 		return;
1047 	}
1048 #endif
1049 	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
1050 }
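
/*
 * Illustrative note (not part of this file): the "lsb" passed to
 * force_sig_mceerr() above reports the granularity of the poisoned region
 * as log2 of its size.  For a normal 4K page that is PAGE_SHIFT (12); for
 * a poisoned 2M huge page, hstate_index_to_shift() would yield 21, i.e.
 * the whole 2M range around the address is suspect.
 */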
1051 
1052 static noinline void
1053 mm_fault_error(struct pt_regs *regs, unsigned long error_code,
1054 	       unsigned long address, vm_fault_t fault)
1055 {
1056 	if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
1057 		no_context(regs, error_code, address, 0, 0);
1058 		return;
1059 	}
1060 
1061 	if (fault & VM_FAULT_OOM) {
1062 		/* Kernel mode? Handle exceptions or die: */
1063 		if (!(error_code & X86_PF_USER)) {
1064 			no_context(regs, error_code, address,
1065 				   SIGSEGV, SEGV_MAPERR);
1066 			return;
1067 		}
1068 
1069 		/*
1070 		 * We ran out of memory, call the OOM killer, and return to
1071 		 * userspace (which will retry the fault, or kill us if we got
1072 		 * oom-killed):
1073 		 */
1074 		pagefault_out_of_memory();
1075 	} else {
1076 		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
1077 			     VM_FAULT_HWPOISON_LARGE))
1078 			do_sigbus(regs, error_code, address, fault);
1079 		else if (fault & VM_FAULT_SIGSEGV)
1080 			bad_area_nosemaphore(regs, error_code, address);
1081 		else
1082 			BUG();
1083 	}
1084 }
1085 
1086 static int spurious_kernel_fault_check(unsigned long error_code, pte_t *pte)
1087 {
1088 	if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
1089 		return 0;
1090 
1091 	if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
1092 		return 0;
1093 
1094 	return 1;
1095 }
1096 
1097 /*
1098  * Handle a spurious fault caused by a stale TLB entry.
1099  *
1100  * This allows us to lazily refresh the TLB when increasing the
1101  * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
1102  * eagerly is very expensive since that implies doing a full
1103  * cross-processor TLB flush, even if no stale TLB entries exist
1104  * on other processors.
1105  *
1106  * Spurious faults may only occur if the TLB contains an entry with
1107  * fewer permissions than the page table entry.  Non-present (P = 0)
1108  * and reserved bit (R = 1) faults are never spurious.
1109  *
1110  * There are no security implications to leaving a stale TLB when
1111  * increasing the permissions on a page.
1112  *
1113  * Returns non-zero if a spurious fault was handled, zero otherwise.
1114  *
1115  * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
1116  * (Optional Invalidation).
1117  */
1118 static noinline int
1119 spurious_kernel_fault(unsigned long error_code, unsigned long address)
1120 {
1121 	pgd_t *pgd;
1122 	p4d_t *p4d;
1123 	pud_t *pud;
1124 	pmd_t *pmd;
1125 	pte_t *pte;
1126 	int ret;
1127 
1128 	/*
1129 	 * Only writes to RO or instruction fetches from NX may cause
1130 	 * spurious faults.
1131 	 *
1132 	 * These could be from user or supervisor accesses but the TLB
1133 	 * is only lazily flushed after a kernel mapping protection
1134 	 * change, so user accesses are not expected to cause spurious
1135 	 * faults.
1136 	 */
1137 	if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
1138 	    error_code != (X86_PF_INSTR | X86_PF_PROT))
1139 		return 0;
1140 
1141 	pgd = init_mm.pgd + pgd_index(address);
1142 	if (!pgd_present(*pgd))
1143 		return 0;
1144 
1145 	p4d = p4d_offset(pgd, address);
1146 	if (!p4d_present(*p4d))
1147 		return 0;
1148 
1149 	if (p4d_large(*p4d))
1150 		return spurious_kernel_fault_check(error_code, (pte_t *) p4d);
1151 
1152 	pud = pud_offset(p4d, address);
1153 	if (!pud_present(*pud))
1154 		return 0;
1155 
1156 	if (pud_large(*pud))
1157 		return spurious_kernel_fault_check(error_code, (pte_t *) pud);
1158 
1159 	pmd = pmd_offset(pud, address);
1160 	if (!pmd_present(*pmd))
1161 		return 0;
1162 
1163 	if (pmd_large(*pmd))
1164 		return spurious_kernel_fault_check(error_code, (pte_t *) pmd);
1165 
1166 	pte = pte_offset_kernel(pmd, address);
1167 	if (!pte_present(*pte))
1168 		return 0;
1169 
1170 	ret = spurious_kernel_fault_check(error_code, pte);
1171 	if (!ret)
1172 		return 0;
1173 
1174 	/*
1175 	 * Make sure we have permissions in PMD.
1176 	 * If not, then there's a bug in the page tables:
1177 	 */
1178 	ret = spurious_kernel_fault_check(error_code, (pte_t *) pmd);
1179 	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
1180 
1181 	return ret;
1182 }
1183 NOKPROBE_SYMBOL(spurious_kernel_fault);
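
/*
 * Illustrative note (not part of this file): given the rules above, only
 * two hardware error codes can describe a spurious fault -- a write to a
 * present page (X86_PF_PROT | X86_PF_WRITE == 0x3) or an instruction fetch
 * from a present page (X86_PF_PROT | X86_PF_INSTR == 0x11).  For example,
 * right after a kernel page is changed from RO to RW, another CPU with a
 * stale read-only TLB entry may take a 0x3 fault there; the walk above
 * then finds a writable PTE and the fault is simply dropped.
 */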
1184 
1185 int show_unhandled_signals = 1;
1186 
1187 static inline int
1188 access_error(unsigned long error_code, struct vm_area_struct *vma)
1189 {
1190 	/* This is only called for the current mm, so: */
1191 	bool foreign = false;
1192 
1193 	/*
1194 	 * Read or write was blocked by protection keys.  This is
1195 	 * always an unconditional error and can never result in
1196 	 * a follow-up action to resolve the fault, like a COW.
1197 	 */
1198 	if (error_code & X86_PF_PK)
1199 		return 1;
1200 
1201 	/*
1202 	 * Make sure to check the VMA so that we do not perform
1203 	 * faults just to hit an X86_PF_PK as soon as we fill in a
1204 	 * page.
1205 	 */
1206 	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
1207 				       (error_code & X86_PF_INSTR), foreign))
1208 		return 1;
1209 
1210 	if (error_code & X86_PF_WRITE) {
1211 		/* write, present and write, not present: */
1212 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
1213 			return 1;
1214 		return 0;
1215 	}
1216 
1217 	/* read, present: */
1218 	if (unlikely(error_code & X86_PF_PROT))
1219 		return 1;
1220 
1221 	/* read, not present: */
1222 	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
1223 		return 1;
1224 
1225 	return 0;
1226 }
1227 
1228 static int fault_in_kernel_space(unsigned long address)
1229 {
1230 	/*
1231 	 * On 64-bit systems, the vsyscall page is at an address above
1232 	 * TASK_SIZE_MAX, but is not considered part of the kernel
1233 	 * address space.
1234 	 */
1235 	if (IS_ENABLED(CONFIG_X86_64) && is_vsyscall_vaddr(address))
1236 		return false;
1237 
1238 	return address >= TASK_SIZE_MAX;
1239 }
1240 
1241 /*
1242  * Called for all faults where 'address' is part of the kernel address
1243  * space.  Might get called for faults that originate from *code* that
1244  * ran in userspace or the kernel.
1245  */
1246 static void
1247 do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
1248 		   unsigned long address)
1249 {
1250 	/*
1251 	 * Protection keys exceptions only happen on user pages.  We
1252 	 * have no user pages in the kernel portion of the address
1253 	 * space, so do not expect them here.
1254 	 */
1255 	WARN_ON_ONCE(hw_error_code & X86_PF_PK);
1256 
1257 	/*
1258 	 * We can fault-in kernel-space virtual memory on-demand. The
1259 	 * 'reference' page table is init_mm.pgd.
1260 	 *
1261 	 * NOTE! We MUST NOT take any locks for this case. We may
1262 	 * be in an interrupt or a critical region, and should
1263 	 * only copy the information from the master page table,
1264 	 * nothing more.
1265 	 *
1266 	 * Before doing this on-demand faulting, ensure that the
1267 	 * fault is not any of the following:
1268 	 * 1. A fault on a PTE with a reserved bit set.
1269 	 * 2. A fault caused by a user-mode access.  (Do not demand-
1270 	 *    fault kernel memory due to user-mode accesses).
1271 	 * 3. A fault caused by a page-level protection violation.
1272 	 *    (A demand fault would be on a non-present page which
1273 	 *     would have X86_PF_PROT==0).
1274 	 */
1275 	if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
1276 		if (vmalloc_fault(address) >= 0)
1277 			return;
1278 	}
1279 
1280 	/* Was the fault spurious, caused by lazy TLB invalidation? */
1281 	if (spurious_kernel_fault(hw_error_code, address))
1282 		return;
1283 
1284 	/* kprobes don't want to hook the spurious faults: */
1285 	if (kprobes_fault(regs))
1286 		return;
1287 
1288 	/*
1289 	 * Note, despite being a "bad area", there are quite a few
1290 	 * acceptable reasons to get here, such as erratum fixups
1291 	 * and handling kernel code that can fault, like get_user().
1292 	 *
1293 	 * Don't take the mm semaphore here. If we fixup a prefetch
1294 	 * fault we could otherwise deadlock:
1295 	 */
1296 	bad_area_nosemaphore(regs, hw_error_code, address);
1297 }
1298 NOKPROBE_SYMBOL(do_kern_addr_fault);
1299 
1300 /* Handle faults in the user portion of the address space */
1301 static inline
1302 void do_user_addr_fault(struct pt_regs *regs,
1303 			unsigned long hw_error_code,
1304 			unsigned long address)
1305 {
1306 	struct vm_area_struct *vma;
1307 	struct task_struct *tsk;
1308 	struct mm_struct *mm;
1309 	vm_fault_t fault, major = 0;
1310 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1311 
1312 	tsk = current;
1313 	mm = tsk->mm;
1314 
1315 	/* kprobes don't want to hook the spurious faults: */
1316 	if (unlikely(kprobes_fault(regs)))
1317 		return;
1318 
1319 	/*
1320 	 * Reserved bits are never expected to be set on
1321 	 * entries in the user portion of the page tables.
1322 	 */
1323 	if (unlikely(hw_error_code & X86_PF_RSVD))
1324 		pgtable_bad(regs, hw_error_code, address);
1325 
1326 	/*
1327 	 * If SMAP is on, check for invalid kernel (supervisor) access to user
1328 	 * pages in the user address space.  The odd case here is WRUSS,
1329 	 * which, according to the preliminary documentation, does not respect
1330 	 * SMAP and will have the USER bit set so, in all cases, SMAP
1331 	 * enforcement appears to be consistent with the USER bit.
1332 	 */
1333 	if (unlikely(cpu_feature_enabled(X86_FEATURE_SMAP) &&
1334 		     !(hw_error_code & X86_PF_USER) &&
1335 		     !(regs->flags & X86_EFLAGS_AC)))
1336 	{
1337 		bad_area_nosemaphore(regs, hw_error_code, address);
1338 		return;
1339 	}
1340 
1341 	/*
1342 	 * If we're in an interrupt, have no user context, or are running
1343 	 * in a region with pagefaults disabled, then we must not take the fault.
1344 	 */
1345 	if (unlikely(faulthandler_disabled() || !mm)) {
1346 		bad_area_nosemaphore(regs, hw_error_code, address);
1347 		return;
1348 	}
1349 
1350 	/*
1351 	 * It's safe to allow irq's after cr2 has been saved and the
1352 	 * vmalloc fault has been handled.
1353 	 *
1354 	 * User-mode registers count as a user access even for any
1355 	 * potential system fault or CPU buglet:
1356 	 */
1357 	if (user_mode(regs)) {
1358 		local_irq_enable();
1359 		flags |= FAULT_FLAG_USER;
1360 	} else {
1361 		if (regs->flags & X86_EFLAGS_IF)
1362 			local_irq_enable();
1363 	}
1364 
1365 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
1366 
1367 	if (hw_error_code & X86_PF_WRITE)
1368 		flags |= FAULT_FLAG_WRITE;
1369 	if (hw_error_code & X86_PF_INSTR)
1370 		flags |= FAULT_FLAG_INSTRUCTION;
1371 
1372 #ifdef CONFIG_X86_64
1373 	/*
1374 	 * Faults in the vsyscall page might need emulation.  The
1375 	 * vsyscall page is at a high address (>PAGE_OFFSET), but is
1376 	 * considered to be part of the user address space.
1377 	 *
1378 	 * The vsyscall page does not have a "real" VMA, so do this
1379 	 * emulation before we go searching for VMAs.
1380 	 *
1381 	 * PKRU never rejects instruction fetches, so we don't need
1382 	 * to consider the PF_PK bit.
1383 	 */
1384 	if (is_vsyscall_vaddr(address)) {
1385 		if (emulate_vsyscall(hw_error_code, regs, address))
1386 			return;
1387 	}
1388 #endif
1389 
1390 	/*
1391 	 * Kernel-mode access to the user address space should only occur
1392 	 * on well-defined single instructions listed in the exception
1393 	 * tables.  But, an erroneous kernel fault occurring outside one of
1394 	 * those areas, while also holding mmap_sem, might deadlock attempting
1395 	 * to validate the fault against the address space.
1396 	 *
1397 	 * Only do the expensive exception table search when we might be at
1398 	 * risk of a deadlock.  This happens if we
1399 	 * 1. Failed to acquire mmap_sem, and
1400 	 * 2. The access did not originate in userspace.
1401 	 */
1402 	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
1403 		if (!user_mode(regs) && !search_exception_tables(regs->ip)) {
1404 			/*
1405 			 * Fault from code in the kernel from
1406 			 * which we do not expect faults.
1407 			 */
1408 			bad_area_nosemaphore(regs, hw_error_code, address);
1409 			return;
1410 		}
1411 retry:
1412 		down_read(&mm->mmap_sem);
1413 	} else {
1414 		/*
1415 		 * The above down_read_trylock() might have succeeded in
1416 		 * which case we'll have missed the might_sleep() from
1417 		 * down_read():
1418 		 */
1419 		might_sleep();
1420 	}
1421 
1422 	vma = find_vma(mm, address);
1423 	if (unlikely(!vma)) {
1424 		bad_area(regs, hw_error_code, address);
1425 		return;
1426 	}
1427 	if (likely(vma->vm_start <= address))
1428 		goto good_area;
1429 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
1430 		bad_area(regs, hw_error_code, address);
1431 		return;
1432 	}
1433 	if (unlikely(expand_stack(vma, address))) {
1434 		bad_area(regs, hw_error_code, address);
1435 		return;
1436 	}
1437 
1438 	/*
1439 	 * Ok, we have a good vm_area for this memory access, so
1440 	 * we can handle it..
1441 	 */
1442 good_area:
1443 	if (unlikely(access_error(hw_error_code, vma))) {
1444 		bad_area_access_error(regs, hw_error_code, address, vma);
1445 		return;
1446 	}
1447 
1448 	/*
1449 	 * If for any reason at all we couldn't handle the fault,
1450 	 * make sure we exit gracefully rather than endlessly redo
1451 	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
1452 	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
1453 	 *
1454 	 * Note that handle_userfault() may also release and reacquire mmap_sem
1455 	 * (and not return with VM_FAULT_RETRY), when returning to userland to
1456 	 * repeat the page fault later with a VM_FAULT_NOPAGE retval
1457 	 * (potentially after handling any pending signal during the return to
1458 	 * userland). The return to userland is identified whenever
1459 	 * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
1460 	 */
1461 	fault = handle_mm_fault(vma, address, flags);
1462 	major |= fault & VM_FAULT_MAJOR;
1463 
1464 	/*
1465 	 * If we need to retry, the mmap_sem has already been released,
1466 	 * and if there is a fatal signal pending there is no guarantee
1467 	 * that we made any progress. Handle this case first.
1468 	 */
1469 	if (unlikely(fault & VM_FAULT_RETRY)) {
1470 		/* Retry at most once */
1471 		if (flags & FAULT_FLAG_ALLOW_RETRY) {
1472 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
1473 			flags |= FAULT_FLAG_TRIED;
1474 			if (!fatal_signal_pending(tsk))
1475 				goto retry;
1476 		}
1477 
1478 		/* User mode? Just return to handle the fatal exception */
1479 		if (flags & FAULT_FLAG_USER)
1480 			return;
1481 
1482 		/* Not returning to user mode? Handle exceptions or die: */
1483 		no_context(regs, hw_error_code, address, SIGBUS, BUS_ADRERR);
1484 		return;
1485 	}
1486 
1487 	up_read(&mm->mmap_sem);
1488 	if (unlikely(fault & VM_FAULT_ERROR)) {
1489 		mm_fault_error(regs, hw_error_code, address, fault);
1490 		return;
1491 	}
1492 
1493 	/*
1494 	 * Major/minor page fault accounting. If any of the events
1495 	 * returned VM_FAULT_MAJOR, we account it as a major fault.
1496 	 */
1497 	if (major) {
1498 		tsk->maj_flt++;
1499 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
1500 	} else {
1501 		tsk->min_flt++;
1502 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
1503 	}
1504 
1505 	check_v8086_mode(regs, address, tsk);
1506 }
1507 NOKPROBE_SYMBOL(do_user_addr_fault);
1508 
1509 /*
1510  * This routine handles page faults.  It determines the address,
1511  * and the problem, and then passes it off to one of the appropriate
1512  * routines.
1513  */
1514 static noinline void
1515 __do_page_fault(struct pt_regs *regs, unsigned long hw_error_code,
1516 		unsigned long address)
1517 {
1518 	prefetchw(&current->mm->mmap_sem);
1519 
1520 	if (unlikely(kmmio_fault(regs, address)))
1521 		return;
1522 
1523 	/* Was the fault on kernel-controlled part of the address space? */
1524 	if (unlikely(fault_in_kernel_space(address)))
1525 		do_kern_addr_fault(regs, hw_error_code, address);
1526 	else
1527 		do_user_addr_fault(regs, hw_error_code, address);
1528 }
1529 NOKPROBE_SYMBOL(__do_page_fault);
1530 
1531 static nokprobe_inline void
1532 trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
1533 			 unsigned long error_code)
1534 {
1535 	if (user_mode(regs))
1536 		trace_page_fault_user(address, regs, error_code);
1537 	else
1538 		trace_page_fault_kernel(address, regs, error_code);
1539 }
1540 
1541 /*
1542  * We must have this function blacklisted from kprobes, tagged with notrace,
1543  * and it must call read_cr2() before calling anything else, to avoid invoking
1544  * any kind of tracing machinery before we've observed the CR2 value.
1545  *
1546  * exception_{enter,exit}() contains all sorts of tracepoints.
1547  */
1548 dotraplinkage void notrace
1549 do_page_fault(struct pt_regs *regs, unsigned long error_code)
1550 {
1551 	unsigned long address = read_cr2(); /* Get the faulting address */
1552 	enum ctx_state prev_state;
1553 
1554 	prev_state = exception_enter();
1555 	if (trace_pagefault_enabled())
1556 		trace_page_fault_entries(address, regs, error_code);
1557 
1558 	__do_page_fault(regs, error_code, address);
1559 	exception_exit(prev_state);
1560 }
1561 NOKPROBE_SYMBOL(do_page_fault);
1562