xref: /linux/arch/arm64/kernel/probes/uprobes.c (revision feafee284579d29537a5a56ba8f23894f0463f3d)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com>
 */
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <asm/cacheflush.h>
#include <asm/gcs.h>

#include "decode-insn.h"

#define UPROBE_INV_FAULT_CODE	UINT_MAX

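/*
 * Copy the instruction to be single-stepped into the task's XOL slot
 * and keep the I-cache coherent with the new slot contents.
 */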
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
		void *src, unsigned long len)
{
	void *xol_page_kaddr = kmap_atomic(page);
	void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK);

	/*
	 * Initial cache maintenance of the xol page done via set_pte_at().
	 * Subsequent CMOs only needed if the xol slot changes.
	 */
	if (!memcmp(dst, src, len))
		goto done;

	/* Initialize the slot */
	memcpy(dst, src, len);

	/* flush caches (dcache/icache) */
	sync_icache_aliases((unsigned long)dst, (unsigned long)dst + len);

done:
	kunmap_atomic(xol_page_kaddr);
}

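/* On arm64, the BRK exception leaves the PC at the probed instruction itself. */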
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

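/*
 * Check whether the instruction at @addr may be probed and decide how to
 * handle it: INSN_REJECTED means it cannot be probed, INSN_GOOD_NO_SLOT
 * means it will be simulated rather than single-stepped out of line.
 */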
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
		unsigned long addr)
{
	u32 insn;

	/* TODO: Currently we do not support AARCH32 instruction probing */
	if (mm->context.flags & MMCF_AARCH32)
		return -EOPNOTSUPP;
	else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
		return -EINVAL;

	insn = le32_to_cpu(auprobe->insn);

	switch (arm_probe_decode_insn(insn, &auprobe->api)) {
	case INSN_REJECTED:
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:
		auprobe->simulate = true;
		break;

	default:
		break;
	}

	return 0;
}

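/* Arrange for the copied instruction to be single-stepped from the XOL slot. */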
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/* Initialize with an invalid fault code to detect if ol insn trapped */
	current->thread.fault_code = UPROBE_INV_FAULT_CODE;

	/* Point the instruction pointer at the out-of-line slot */
	instruction_pointer_set(regs, utask->xol_vaddr);

	user_enable_single_step(current);

	return 0;
}

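/* The single-step of the XOL instruction completed; fix up the task state. */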
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON_ONCE(current->thread.fault_code != UPROBE_INV_FAULT_CODE);

	/* Resume execution at the instruction following the breakpoint */
	instruction_pointer_set(regs, utask->vaddr + 4);

	user_disable_single_step(current);

	return 0;
}

bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	/*
	 * If the XOL instruction itself trapped between arch_uprobe_pre_xol()
	 * and arch_uprobe_post_xol(), the fault code will no longer hold the
	 * invalid value set up in arch_uprobe_pre_xol().
	 */
	if (t->thread.fault_code != UPROBE_INV_FAULT_CODE)
		return true;

	return false;
}

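/*
 * Simulate instructions that were classified INSN_GOOD_NO_SLOT instead of
 * single-stepping them; returns true if the single-step was skipped.
 */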
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	u32 insn;
	unsigned long addr;

	if (!auprobe->simulate)
		return false;

	insn = le32_to_cpu(auprobe->insn);
	addr = instruction_pointer(regs);

	if (auprobe->api.handler)
		auprobe->api.handler(insn, addr, regs);

	return true;
}

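/* Undo a partially completed single-step, e.g. on receipt of a fatal signal. */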
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/*
	 * Task has received a fatal signal, so reset back to the probed
	 * address.
	 */
	instruction_pointer_set(regs, utask->vaddr);

	user_disable_single_step(current);
}

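/*
 * Decide whether a pending return instance is still live by comparing the
 * current stack pointer with the one recorded when the return address was
 * hijacked.
 */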
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
		struct pt_regs *regs)
{
	/*
	 * If a simple branch instruction (B) was used to reach the retprobed
	 * assembly label, return true even when regs->sp and ret->stack are
	 * the same. This ensures that cleanup and reporting of the return
	 * instances corresponding to the callee label are done when
	 * handle_trampoline is executed for the called function.
	 */
	if (ctx == RP_CHECK_CHAIN_CALL)
		return regs->sp <= ret->stack;
	else
		return regs->sp < ret->stack;
}

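/*
 * Replace the return address in the link register (and, when the Guarded
 * Control Stack is enabled, the matching GCS entry) with the uretprobe
 * trampoline, returning the original return address.
 */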
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
				  struct pt_regs *regs)
{
	unsigned long orig_ret_vaddr;
	unsigned long gcs_ret_vaddr;
	int err = 0;
	u64 gcspr;

	orig_ret_vaddr = procedure_link_pointer(regs);

	if (task_gcs_el0_enabled(current)) {
		gcspr = read_sysreg_s(SYS_GCSPR_EL0);
		gcs_ret_vaddr = get_user_gcs((__force unsigned long __user *)gcspr, &err);
		if (err) {
			force_sig(SIGSEGV);
			goto out;
		}

		/*
		 * If the LR and GCS return addr don't match, then some kind of PAC
		 * signing or control flow occurred since entering the probed function.
		 * Likely because the user is attempting to retprobe on an instruction
		 * that isn't a function boundary or inside a leaf function. Explicitly
		 * abort this retprobe because it will generate a GCS exception.
		 */
		if (gcs_ret_vaddr != orig_ret_vaddr) {
			orig_ret_vaddr = -1;
			goto out;
		}

		put_user_gcs(trampoline_vaddr, (__force unsigned long __user *)gcspr, &err);
		if (err) {
			force_sig(SIGSEGV);
			goto out;
		}
	}

	/* Replace the return addr with trampoline addr */
	procedure_link_pointer_set(regs, trampoline_vaddr);

out:
	return orig_ret_vaddr;
}

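/* Nothing to do via the notifier; uprobe exceptions are dispatched through the BRK and single-step handlers below. */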
int arch_uprobe_exception_notify(struct notifier_block *self,
				 unsigned long val, void *data)
{
	return NOTIFY_DONE;
}

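/* Entered on the uprobe BRK instruction planted at the probed address. */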
int uprobe_brk_handler(struct pt_regs *regs,
		       unsigned long esr)
{
	if (uprobe_pre_sstep_notifier(regs))
		return DBG_HOOK_HANDLED;

	return DBG_HOOK_ERROR;
}

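/* Entered when the single-step of the XOL instruction completes. */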
int uprobe_single_step_handler(struct pt_regs *regs,
			       unsigned long esr)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON(utask && (instruction_pointer(regs) != utask->xol_vaddr + 4));
	if (uprobe_post_sstep_notifier(regs))
		return DBG_HOOK_HANDLED;

	return DBG_HOOK_ERROR;
}