xref: /linux/arch/riscv/mm/fault.c (revision 802fee26d8afd073c630a74dbe1a996970f3fd90)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>

#include <asm/ptrace.h>
#include <asm/tlbflush.h>

#include "../kernel/head.h"

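/*
 * Print an oops banner for a fatal kernel-mode fault at @addr and kill
 * the current task. Called once exception fixup has been ruled out; it
 * never returns to the faulting context.
 */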
static void die_kernel_fault(const char *msg, unsigned long addr,
		struct pt_regs *regs)
{
	bust_spinlocks(1);

	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n", msg,
		addr);

	bust_spinlocks(0);
	die(regs, "Oops");
	do_exit(SIGKILL);
}

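/*
 * Handle a kernel-mode fault with no user context to blame: consult the
 * exception fixup table first (the uaccess helpers register their fault
 * landing pads there), and oops only if no fixup applies.
 */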
static inline void no_context(struct pt_regs *regs, unsigned long addr)
{
	const char *msg;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	msg = (addr < PAGE_SIZE) ? "NULL pointer dereference" : "paging request";
	die_kernel_fault(msg, addr, regs);
}

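/*
 * Handle the VM_FAULT_ERROR cases returned by handle_mm_fault(): OOM
 * and SIGBUS are the only outcomes expected here; anything else is a
 * kernel bug.
 */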
static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory: invoke the OOM killer and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed).
		 */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		pagefault_out_of_memory();
		return;
	} else if (fault & VM_FAULT_SIGBUS) {
		/* Kernel mode? Handle exceptions or die */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		do_trap(regs, SIGBUS, BUS_ADRERR, addr);
		return;
	}
	BUG();
}

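/*
 * Called with the mmap lock held on entry; releases it before
 * delivering the signal or oopsing, since the fault cannot be
 * resolved against this mm.
 */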
static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
{
	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	mmap_read_unlock(mm);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	no_context(regs, addr);
}

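/*
 * A fault in the vmalloc/ioremap region means this task's page table
 * lacks entries that already exist in the reference table, init_mm.pgd:
 * copy the shared top-level entries over instead of going through the
 * usual VMA path.
 */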
static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	p4d_t *p4d, *p4d_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;
	int index;
	unsigned long pfn;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs))
		return do_trap(regs, SIGSEGV, code, addr);

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "tsk->active_mm->pgd" here.
	 * We might be inside an interrupt in the middle
	 * of a task switch.
	 */
	index = pgd_index(addr);
	pfn = csr_read(CSR_SATP) & SATP_PPN;
	pgd = (pgd_t *)pfn_to_virt(pfn) + index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pgd(pgd, *pgd_k);

	p4d = p4d_offset(pgd, addr);
	p4d_k = p4d_offset(pgd_k, addr);
	if (!p4d_present(*p4d_k)) {
		no_context(regs, addr);
		return;
	}

	pud = pud_offset(p4d, addr);
	pud_k = pud_offset(p4d_k, addr);
	if (!pud_present(*pud_k)) {
		no_context(regs, addr);
		return;
	}

	/*
	 * Since the vmalloc area is global, it is unnecessary
	 * to copy individual PTEs
	 */
	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);
	if (!pmd_present(*pmd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pmd(pmd, *pmd_k);

	/*
	 * Make sure the actual PTE exists as well to
	 * catch kernel vmalloc-area accesses to non-mapped
	 * addresses. If we don't do this, the fault will just
	 * silently loop forever.
	 */
	pte_k = pte_offset_kernel(pmd_k, addr);
	if (!pte_present(*pte_k)) {
		no_context(regs, addr);
		return;
	}

	/*
	 * The kernel assumes that TLBs don't cache invalid
	 * entries, but in RISC-V, SFENCE.VMA specifies an
	 * ordering constraint, not a cache flush; it is
	 * necessary even after writing invalid entries.
	 */
	local_flush_tlb_page(addr);
}

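/*
 * Check the faulting access against the VMA's permission bits: an
 * instruction fetch needs VM_EXEC, a load needs VM_READ, and a store
 * needs VM_WRITE. Returns true if the access is not permitted.
 */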
static inline bool access_error(unsigned long cause, struct vm_area_struct *vma)
{
	switch (cause) {
	case EXC_INST_PAGE_FAULT:
		if (!(vma->vm_flags & VM_EXEC))
			return true;
		break;
	case EXC_LOAD_PAGE_FAULT:
		if (!(vma->vm_flags & VM_READ))
			return true;
		break;
	case EXC_STORE_PAGE_FAULT:
		if (!(vma->vm_flags & VM_WRITE))
			return true;
		break;
	default:
		panic("%s: unhandled cause %lu", __func__, cause);
	}
	return false;
}

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr, cause;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

	cause = regs->cause;
	addr = regs->badaddr;

	tsk = current;
	mm = tsk->mm;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) {
		vmalloc_fault(regs, code, addr);
		return;
	}

	/* Enable interrupts if they were enabled in the parent context. */
	if (likely(regs->status & SR_PIE))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		no_context(regs, addr);
		return;
	}

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

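	/*
	 * A kernel-mode access to user memory is legitimate only inside
	 * the uaccess helpers, which set SR_SUM (permit Supervisor User
	 * Memory access) around the access. If SUM is clear, this is a
	 * kernel bug rather than a fixable fault.
	 */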
	if (!user_mode(regs) && addr < TASK_SIZE &&
			unlikely(!(regs->status & SR_SUM)))
		die_kernel_fault("access to user memory without uaccess routines",
				addr, regs);

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

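	/* Translate the trap cause into the flags handle_mm_fault() expects. */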
	if (cause == EXC_STORE_PAGE_FAULT)
		flags |= FAULT_FLAG_WRITE;
	else if (cause == EXC_INST_PAGE_FAULT)
		flags |= FAULT_FLAG_INSTRUCTION;
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (unlikely(!vma)) {
		bad_area(regs, mm, code, addr);
		return;
	}
	if (likely(vma->vm_start <= addr))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, mm, code, addr);
		return;
	}
	if (unlikely(expand_stack(vma, addr))) {
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;

	if (unlikely(access_error(cause, vma))) {
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags, regs);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
		flags |= FAULT_FLAG_TRIED;

		/*
		 * No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */
		goto retry;
	}

	mmap_read_unlock(mm);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, addr, fault);
		return;
	}
}