xref: /linux/arch/riscv/kernel/probes/uprobes.c (revision 74784081aac8a0f3636965fc230e2d3b7cc123c6)
1*74784081SGuo Ren // SPDX-License-Identifier: GPL-2.0-only
2*74784081SGuo Ren 
3*74784081SGuo Ren #include <linux/highmem.h>
4*74784081SGuo Ren #include <linux/ptrace.h>
5*74784081SGuo Ren #include <linux/uprobes.h>
6*74784081SGuo Ren 
7*74784081SGuo Ren #include "decode-insn.h"
8*74784081SGuo Ren 
9*74784081SGuo Ren #define UPROBE_TRAP_NR	UINT_MAX
10*74784081SGuo Ren 
11*74784081SGuo Ren bool is_swbp_insn(uprobe_opcode_t *insn)
12*74784081SGuo Ren {
13*74784081SGuo Ren #ifdef CONFIG_RISCV_ISA_C
14*74784081SGuo Ren 	return (*insn & 0xffff) == UPROBE_SWBP_INSN;
15*74784081SGuo Ren #else
16*74784081SGuo Ren 	return *insn == UPROBE_SWBP_INSN;
17*74784081SGuo Ren #endif
18*74784081SGuo Ren }
19*74784081SGuo Ren 
/*
 * On RISC-V the breakpoint trap leaves the probed address directly in
 * the PC, so no adjustment is required.
 */
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs);
}
24*74784081SGuo Ren 
25*74784081SGuo Ren int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
26*74784081SGuo Ren 			     unsigned long addr)
27*74784081SGuo Ren {
28*74784081SGuo Ren 	probe_opcode_t opcode;
29*74784081SGuo Ren 
30*74784081SGuo Ren 	opcode = *(probe_opcode_t *)(&auprobe->insn[0]);
31*74784081SGuo Ren 
32*74784081SGuo Ren 	auprobe->insn_size = GET_INSN_LENGTH(opcode);
33*74784081SGuo Ren 
34*74784081SGuo Ren 	switch (riscv_probe_decode_insn(&opcode, &auprobe->api)) {
35*74784081SGuo Ren 	case INSN_REJECTED:
36*74784081SGuo Ren 		return -EINVAL;
37*74784081SGuo Ren 
38*74784081SGuo Ren 	case INSN_GOOD_NO_SLOT:
39*74784081SGuo Ren 		auprobe->simulate = true;
40*74784081SGuo Ren 		break;
41*74784081SGuo Ren 
42*74784081SGuo Ren 	case INSN_GOOD:
43*74784081SGuo Ren 		auprobe->simulate = false;
44*74784081SGuo Ren 		break;
45*74784081SGuo Ren 
46*74784081SGuo Ren 	default:
47*74784081SGuo Ren 		return -EINVAL;
48*74784081SGuo Ren 	}
49*74784081SGuo Ren 
50*74784081SGuo Ren 	return 0;
51*74784081SGuo Ren }
52*74784081SGuo Ren 
/*
 * Prepare the task to single-step the probed instruction out of line:
 * redirect the PC into the XOL slot and arm a sentinel so any exception
 * taken during the step can be detected afterwards.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/*
	 * Save the current bad_cause and plant UPROBE_TRAP_NR as a
	 * sentinel; arch_uprobe_xol_was_trapped() reports a trap if the
	 * sentinel has been overwritten by exception handling.
	 */
	utask->autask.saved_cause = current->thread.bad_cause;
	current->thread.bad_cause = UPROBE_TRAP_NR;

	/* Resume execution at the out-of-line copy of the instruction. */
	instruction_pointer_set(regs, utask->xol_vaddr);

	/*
	 * Clear SPIE in the saved status — presumably so interrupts stay
	 * disabled while stepping through the XOL slot; restored by
	 * arch_uprobe_post_xol(). TODO(review): confirm intent.
	 */
	regs->status &= ~SR_SPIE;

	return 0;
}
66*74784081SGuo Ren 
/*
 * Finish a successful out-of-line single step: move the PC past the
 * original probed instruction and undo the pre_xol status change.
 */
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/*
	 * The sentinel planted in arch_uprobe_pre_xol() must still be
	 * intact here; a trap during the step should have gone through
	 * the abort path instead.
	 * NOTE(review): autask.saved_cause is never copied back into
	 * thread.bad_cause in this function — verify whether a restore
	 * is expected here.
	 */
	WARN_ON_ONCE(current->thread.bad_cause != UPROBE_TRAP_NR);

	/* Continue after the original instruction (2 or 4 bytes long). */
	instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);

	/* Re-set SPIE, undoing the clear done in arch_uprobe_pre_xol(). */
	regs->status |= SR_SPIE;

	return 0;
}
79*74784081SGuo Ren 
80*74784081SGuo Ren bool arch_uprobe_xol_was_trapped(struct task_struct *t)
81*74784081SGuo Ren {
82*74784081SGuo Ren 	if (t->thread.bad_cause != UPROBE_TRAP_NR)
83*74784081SGuo Ren 		return true;
84*74784081SGuo Ren 
85*74784081SGuo Ren 	return false;
86*74784081SGuo Ren }
87*74784081SGuo Ren 
88*74784081SGuo Ren bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
89*74784081SGuo Ren {
90*74784081SGuo Ren 	probe_opcode_t insn;
91*74784081SGuo Ren 	unsigned long addr;
92*74784081SGuo Ren 
93*74784081SGuo Ren 	if (!auprobe->simulate)
94*74784081SGuo Ren 		return false;
95*74784081SGuo Ren 
96*74784081SGuo Ren 	insn = *(probe_opcode_t *)(&auprobe->insn[0]);
97*74784081SGuo Ren 	addr = instruction_pointer(regs);
98*74784081SGuo Ren 
99*74784081SGuo Ren 	if (auprobe->api.handler)
100*74784081SGuo Ren 		auprobe->api.handler(insn, addr, regs);
101*74784081SGuo Ren 
102*74784081SGuo Ren 	return true;
103*74784081SGuo Ren }
104*74784081SGuo Ren 
/*
 * Abort an in-progress out-of-line step (e.g. on a fatal signal) and
 * rewind the task to the original probed instruction.
 */
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/*
	 * Task has received a fatal signal, so reset back to the probed
	 * address.
	 */
	instruction_pointer_set(regs, utask->vaddr);

	/*
	 * Clear SPIE as in arch_uprobe_pre_xol(); the post_xol path that
	 * would re-set it will not run. TODO(review): confirm this is the
	 * intended status on the abort path.
	 */
	regs->status &= ~SR_SPIE;
}
117*74784081SGuo Ren 
118*74784081SGuo Ren bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
119*74784081SGuo Ren 		struct pt_regs *regs)
120*74784081SGuo Ren {
121*74784081SGuo Ren 	if (ctx == RP_CHECK_CHAIN_CALL)
122*74784081SGuo Ren 		return regs->sp <= ret->stack;
123*74784081SGuo Ren 	else
124*74784081SGuo Ren 		return regs->sp < ret->stack;
125*74784081SGuo Ren }
126*74784081SGuo Ren 
127*74784081SGuo Ren unsigned long
128*74784081SGuo Ren arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
129*74784081SGuo Ren 				  struct pt_regs *regs)
130*74784081SGuo Ren {
131*74784081SGuo Ren 	unsigned long ra;
132*74784081SGuo Ren 
133*74784081SGuo Ren 	ra = regs->ra;
134*74784081SGuo Ren 
135*74784081SGuo Ren 	regs->ra = trampoline_vaddr;
136*74784081SGuo Ren 
137*74784081SGuo Ren 	return ra;
138*74784081SGuo Ren }
139*74784081SGuo Ren 
140*74784081SGuo Ren int arch_uprobe_exception_notify(struct notifier_block *self,
141*74784081SGuo Ren 				 unsigned long val, void *data)
142*74784081SGuo Ren {
143*74784081SGuo Ren 	return NOTIFY_DONE;
144*74784081SGuo Ren }
145*74784081SGuo Ren 
146*74784081SGuo Ren bool uprobe_breakpoint_handler(struct pt_regs *regs)
147*74784081SGuo Ren {
148*74784081SGuo Ren 	if (uprobe_pre_sstep_notifier(regs))
149*74784081SGuo Ren 		return true;
150*74784081SGuo Ren 
151*74784081SGuo Ren 	return false;
152*74784081SGuo Ren }
153*74784081SGuo Ren 
154*74784081SGuo Ren bool uprobe_single_step_handler(struct pt_regs *regs)
155*74784081SGuo Ren {
156*74784081SGuo Ren 	if (uprobe_post_sstep_notifier(regs))
157*74784081SGuo Ren 		return true;
158*74784081SGuo Ren 
159*74784081SGuo Ren 	return false;
160*74784081SGuo Ren }
161*74784081SGuo Ren 
/*
 * Populate the task's XOL slot: copy the probed instruction into @page at
 * the slot offset and append an ebreak so the step traps back into the
 * kernel (uprobe_single_step_handler()) right after the instruction runs.
 */
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
			   void *src, unsigned long len)
{
	/* Initialize the slot */
	void *kaddr = kmap_atomic(page);
	void *dst = kaddr + (vaddr & ~PAGE_MASK);

	memcpy(dst, src, len);

	/* Add ebreak behind opcode to simulate singlestep */
	/*
	 * NOTE(review): an XOL slot address should never be 0, so this
	 * guard looks like it always fires — confirm what a zero vaddr is
	 * meant to protect against.
	 */
	if (vaddr) {
		/* Place the ebreak right after the 2- or 4-byte insn. */
		dst += GET_INSN_LENGTH(*(probe_opcode_t *)src);
		*(uprobe_opcode_t *)dst = __BUG_INSN_32;
	}

	kunmap_atomic(kaddr);

	/*
	 * We probably need flush_icache_user_page() but it needs vma.
	 * This should work on most of architectures by default. If
	 * architecture needs to do something different it can define
	 * its own version of the function.
	 */
	flush_dcache_page(page);
}
187