1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * User-space Probes (UProbes) for x86
4 *
5 * Copyright (C) IBM Corporation, 2008-2011
6 * Authors:
7 * Srikar Dronamraju
8 * Jim Keniston
9 */
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/ptrace.h>
13 #include <linux/uprobes.h>
14 #include <linux/uaccess.h>
15 #include <linux/syscalls.h>
16
17 #include <linux/kdebug.h>
18 #include <asm/processor.h>
19 #include <asm/insn.h>
20 #include <asm/insn-eval.h>
21 #include <asm/mmu_context.h>
22 #include <asm/nops.h>
23
24 /* Post-execution fixups. */
25
26 /* Adjust IP back to vicinity of actual insn */
27 #define UPROBE_FIX_IP 0x01
28
29 /* Adjust the return address of a call insn */
30 #define UPROBE_FIX_CALL 0x02
31
32 /* Instruction will modify TF, don't change it */
33 #define UPROBE_FIX_SETF 0x04
34
35 #define UPROBE_FIX_RIP_SI 0x08
36 #define UPROBE_FIX_RIP_DI 0x10
37 #define UPROBE_FIX_RIP_BX 0x20
38 #define UPROBE_FIX_RIP_MASK \
39 (UPROBE_FIX_RIP_SI | UPROBE_FIX_RIP_DI | UPROBE_FIX_RIP_BX)
40
41 #define UPROBE_TRAP_NR UINT_MAX
42
43 /* Adaptations for mhiramat x86 decoder v14. */
44 #define OPCODE1(insn) ((insn)->opcode.bytes[0])
45 #define OPCODE2(insn) ((insn)->opcode.bytes[1])
46 #define OPCODE3(insn) ((insn)->opcode.bytes[2])
47 #define MODRM_REG(insn) X86_MODRM_REG((insn)->modrm.value)
48
49 #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
50 (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
51 (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) | \
52 (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) | \
53 (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf)) \
54 << (row % 32))
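/*
 * Illustrative expansion (a sketch, not used by the code): for the row
 * starting at opcode 0x00,
 *
 *	W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, ...)
 *
 * builds a 16-bit group whose bit N is set iff opcode (0x00 + N) is good,
 * shifted into position (0x00 % 32) == 0 of the 32-bit word. Two adjacent
 * rows OR'ed together fill one u32, so good_insns_32[op / 32] carries the
 * bit for opcode op, and a lookup below is simply
 *
 *	test_bit(op, (unsigned long *)good_insns_32);
 */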
55
56 /*
57 * Good-instruction tables for 32-bit apps. This is non-const and volatile
58 * to keep gcc from statically optimizing it out, as variable_test_bit makes
59 * some versions of gcc think that only *(unsigned long*) is used.
60 *
61 * Opcodes we'll probably never support:
62 * 6c-6f - ins,outs. SEGVs if used in userspace
63 * e4-e7 - in,out imm. SEGVs if used in userspace
64 * ec-ef - in,out acc. SEGVs if used in userspace
65 * cc - int3. SIGTRAP if used in userspace
66 * ce - into. Not used in userspace - no kernel support to make it useful. SEGVs
67 * (why do we support bound (62) then? it's similar, and similarly unused...)
68 * f1 - int1. SIGTRAP if used in userspace
69 * f4 - hlt. SEGVs if used in userspace
70 * fa - cli. SEGVs if used in userspace
71 * fb - sti. SEGVs if used in userspace
72 *
73 * Opcodes which need some work to be supported:
74 * 07,17,1f - pop es/ss/ds
75 * Normally not used in userspace, but would execute if used.
76 * Can cause GP or stack exception if it tries to load a wrong segment descriptor.
77 * We hesitate to run them under single step since the kernel's handling
78 * of userspace single-stepping (TF flag) is fragile.
79 * We can easily refuse to support push es/cs/ss/ds (06/0e/16/1e)
80 * on the same grounds that they are never used.
81 * cd - int N.
82 * Used by userspace for "int 80" syscall entry. (Other "int N"
83 * cause GP -> SEGV since their IDT gates don't allow calls from CPL 3).
84 * Not supported since kernel's handling of userspace single-stepping
85 * (TF flag) is fragile.
86 * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad
87 */
88 #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
89 static volatile u32 good_insns_32[256 / 32] = {
90 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
91 /* ---------------------------------------------- */
92 W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 00 */
93 W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
94 W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
95 W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
96 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
97 W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
98 W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
99 W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
100 W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
101 W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
102 W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
103 W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
104 W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
105 W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
106 W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
107 W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1) /* f0 */
108 /* ---------------------------------------------- */
109 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
110 };
111 #else
112 #define good_insns_32 NULL
113 #endif
114
115 /* Good-instruction tables for 64-bit apps.
116 *
117 * Genuinely invalid opcodes:
118 * 06,07 - formerly push/pop es
119 * 0e - formerly push cs
120 * 16,17 - formerly push/pop ss
121 * 1e,1f - formerly push/pop ds
122 * 27,2f,37,3f - formerly daa/das/aaa/aas
123 * 60,61 - formerly pusha/popa
124 * 62 - formerly bound. EVEX prefix for AVX512 (not yet supported)
125 * 82 - formerly redundant encoding of Group1
126 * 9a - formerly call seg:ofs
127 * ce - formerly into
128 * d4,d5 - formerly aam/aad
129 * d6 - formerly undocumented salc
130 * ea - formerly jmp seg:ofs
131 *
132 * Opcodes we'll probably never support:
133 * 6c-6f - ins,outs. SEGVs if used in userspace
134 * e4-e7 - in,out imm. SEGVs if used in userspace
135 * ec-ef - in,out acc. SEGVs if used in userspace
136 * cc - int3. SIGTRAP if used in userspace
137 * f1 - int1. SIGTRAP if used in userspace
138 * f4 - hlt. SEGVs if used in userspace
139 * fa - cli. SEGVs if used in userspace
140 * fb - sti. SEGVs if used in userspace
141 *
142 * Opcodes which need some work to be supported:
143 * cd - int N.
144 * Used by userspace for "int 80" syscall entry. (Other "int N"
145 * cause GP -> SEGV since their IDT gates don't allow calls from CPL 3).
146 * Not supported since kernel's handling of userspace single-stepping
147 * (TF flag) is fragile.
148 * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad
149 */
150 #if defined(CONFIG_X86_64)
151 static volatile u32 good_insns_64[256 / 32] = {
152 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
153 /* ---------------------------------------------- */
154 W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* 00 */
155 W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */
156 W(0x20, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 20 */
157 W(0x30, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 30 */
158 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
159 W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
160 W(0x60, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
161 W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
162 W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
163 W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1) , /* 90 */
164 W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
165 W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
166 W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
167 W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
168 W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0) | /* e0 */
169 W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1) /* f0 */
170 /* ---------------------------------------------- */
171 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
172 };
173 #else
174 #define good_insns_64 NULL
175 #endif
176
177 /* Using this for both 64-bit and 32-bit apps.
178 * Opcodes we don't support:
179 * 0f 00 - SLDT/STR/LLDT/LTR/VERR/VERW/-/- group. System insns
180 * 0f 01 - SGDT/SIDT/LGDT/LIDT/SMSW/-/LMSW/INVLPG group.
181 * Also encodes tons of other system insns if mod=11.
182 * Some are in fact non-system: xend, xtest, rdtscp, maybe more
183 * 0f 05 - syscall
184 * 0f 06 - clts (CPL0 insn)
185 * 0f 07 - sysret
186 * 0f 08 - invd (CPL0 insn)
187 * 0f 09 - wbinvd (CPL0 insn)
188 * 0f 0b - ud2
189 * 0f 30 - wrmsr (CPL0 insn) (then why is rdmsr allowed? it's also a CPL0 insn)
190 * 0f 34 - sysenter
191 * 0f 35 - sysexit
192 * 0f 37 - getsec
193 * 0f 78 - vmread (Intel VMX. CPL0 insn)
194 * 0f 79 - vmwrite (Intel VMX. CPL0 insn)
195 * Note: with prefixes, these two opcodes are
196 * extrq/insertq/AVX512 convert vector ops.
197 * 0f ae - group15: [f]xsave,[f]xrstor,[v]{ld,st}mxcsr,clflush[opt],
198 * {rd,wr}{fs,gs}base,{s,l,m}fence.
199 * Why? They are all user-executable.
200 */
201 static volatile u32 good_2byte_insns[256 / 32] = {
202 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
203 /* ---------------------------------------------- */
204 W(0x00, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1) | /* 00 */
205 W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
206 W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
207 W(0x30, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
208 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
209 W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
210 W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
211 W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* 70 */
212 W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
213 W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
214 W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
215 W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
216 W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
217 W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
218 W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
219 W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) /* f0 */
220 /* ---------------------------------------------- */
221 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
222 };
223 #undef W
224
225 /*
226 * opcodes we may need to refine support for:
227 *
228 * 0f - 2-byte instructions: For many of these instructions, the validity
229 * depends on the prefix and/or the reg field. On such instructions, we
230 * just consider the opcode combination valid if it corresponds to any
231 * valid instruction.
232 *
233 * 8f - Group 1 - only reg = 0 is OK
234 * c6-c7 - Group 11 - only reg = 0 is OK
235 * d9-df - fpu insns with some illegal encodings
236 * f2, f3 - repnz, repz prefixes. These are also the first byte for
237 * certain floating-point instructions, such as addsd.
238 *
239 * fe - Group 4 - only reg = 0 or 1 is OK
240 * ff - Group 5 - only reg = 0-6 is OK
241 *
242 * others -- Do we need to support these?
243 *
244 * 0f - (floating-point?) prefetch instructions
245 * 07, 17, 1f - pop es, pop ss, pop ds
246 * 26, 2e, 36, 3e - es:, cs:, ss:, ds: segment prefixes --
247 * but 64 and 65 (fs: and gs:) seem to be used, so we support them
248 * 67 - addr16 prefix
249 * ce - into
250 * f0 - lock prefix
251 */
252
253 /*
254 * TODO:
255 * - Where necessary, examine the modrm byte and allow only valid instructions
256 * in the different Groups and fpu instructions.
257 */
258
259 static bool is_prefix_bad(struct insn *insn)
260 {
261 insn_byte_t p;
262
263 for_each_insn_prefix(insn, p) {
264 insn_attr_t attr;
265
266 attr = inat_get_opcode_attribute(p);
267 switch (attr) {
268 case INAT_MAKE_PREFIX(INAT_PFX_ES):
269 case INAT_MAKE_PREFIX(INAT_PFX_CS):
270 case INAT_MAKE_PREFIX(INAT_PFX_DS):
271 case INAT_MAKE_PREFIX(INAT_PFX_SS):
272 case INAT_MAKE_PREFIX(INAT_PFX_LOCK):
273 return true;
274 }
275 }
276 return false;
277 }
278
279 static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64)
280 {
281 enum insn_mode m = x86_64 ? INSN_MODE_64 : INSN_MODE_32;
282 u32 volatile *good_insns;
283 int ret;
284
285 ret = insn_decode(insn, auprobe->insn, sizeof(auprobe->insn), m);
286 if (ret < 0)
287 return -ENOEXEC;
288
289 if (is_prefix_bad(insn))
290 return -ENOTSUPP;
291
292 /* We should not singlestep on the exception masking instructions */
293 if (insn_masking_exception(insn))
294 return -ENOTSUPP;
295
296 if (x86_64)
297 good_insns = good_insns_64;
298 else
299 good_insns = good_insns_32;
300
301 if (test_bit(OPCODE1(insn), (unsigned long *)good_insns))
302 return 0;
303
304 if (insn->opcode.nbytes == 2) {
305 if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
306 return 0;
307 }
308
309 return -ENOTSUPP;
310 }
311
312 #ifdef CONFIG_X86_64
313
314 struct uretprobe_syscall_args {
315 unsigned long r11;
316 unsigned long cx;
317 unsigned long ax;
318 };
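/*
 * Illustrative stack layout at sys_uretprobe entry (a sketch; the
 * trampoline below pushes %rax, %rcx, %r11 in that order):
 *
 *	regs->sp + 0x00: saved r11	(pushed last, lowest address)
 *	regs->sp + 0x08: saved cx
 *	regs->sp + 0x10: saved ax	(pushed first)
 *
 * which is why a single copy_from_user() from regs->sp fills struct
 * uretprobe_syscall_args in field-declaration order.
 */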
319
320 asm (
321 ".pushsection .rodata\n"
322 ".global uretprobe_trampoline_entry\n"
323 "uretprobe_trampoline_entry:\n"
324 "push %rax\n"
325 "push %rcx\n"
326 "push %r11\n"
327 "mov $" __stringify(__NR_uretprobe) ", %rax\n"
328 "syscall\n"
329 ".global uretprobe_syscall_check\n"
330 "uretprobe_syscall_check:\n"
331 "pop %r11\n"
332 "pop %rcx\n"
333 /*
334 * The uretprobe syscall replaces the stored %rax value with the
335 * final return address, so we don't restore %rax here and just
336 * execute ret.
337 */
338 "ret\n"
339 "int3\n"
340 ".global uretprobe_trampoline_end\n"
341 "uretprobe_trampoline_end:\n"
342 ".popsection\n"
343 );
344
345 extern u8 uretprobe_trampoline_entry[];
346 extern u8 uretprobe_trampoline_end[];
347 extern u8 uretprobe_syscall_check[];
348
349 void *arch_uretprobe_trampoline(unsigned long *psize)
350 {
351 static uprobe_opcode_t insn = UPROBE_SWBP_INSN;
352 struct pt_regs *regs = task_pt_regs(current);
353
354 /*
355 * At the moment the uretprobe syscall trampoline is supported
356 * only for native 64-bit processes; the compat process still uses
357 * the standard breakpoint.
358 */
359 if (user_64bit_mode(regs)) {
360 *psize = uretprobe_trampoline_end - uretprobe_trampoline_entry;
361 return uretprobe_trampoline_entry;
362 }
363
364 *psize = UPROBE_SWBP_INSN_SIZE;
365 return &insn;
366 }
367
368 static unsigned long trampoline_check_ip(unsigned long tramp)
369 {
370 return tramp + (uretprobe_syscall_check - uretprobe_trampoline_entry);
371 }
372
373 SYSCALL_DEFINE0(uretprobe)
374 {
375 struct pt_regs *regs = task_pt_regs(current);
376 struct uretprobe_syscall_args args;
377 unsigned long err, ip, sp, tramp;
378
379 /* If there's no trampoline, we are called from the wrong place. */
380 tramp = uprobe_get_trampoline_vaddr();
381 if (unlikely(tramp == UPROBE_NO_TRAMPOLINE_VADDR))
382 goto sigill;
383
384 /* Make sure the ip matches the only allowed sys_uretprobe caller. */
385 if (unlikely(regs->ip != trampoline_check_ip(tramp)))
386 goto sigill;
387
388 err = copy_from_user(&args, (void __user *)regs->sp, sizeof(args));
389 if (err)
390 goto sigill;
391
392 /* expose the "right" values of r11/cx/ax/sp to uprobe_consumer/s */
393 regs->r11 = args.r11;
394 regs->cx = args.cx;
395 regs->ax = args.ax;
396 regs->sp += sizeof(args);
397 regs->orig_ax = -1;
398
399 ip = regs->ip;
400 sp = regs->sp;
401
402 uprobe_handle_trampoline(regs);
403
404 /*
405 * If one of the uprobe consumers has changed sp, we can do nothing,
406 * just return via iret.
407 * .. or shadow stack is enabled, in which case we need to skip the
408 * return through the user space stack address.
409 */
410 if (regs->sp != sp || shstk_is_enabled())
411 return regs->ax;
412 regs->sp -= sizeof(args);
413
414 /* in case a uprobe_consumer has changed r11/cx */
415 args.r11 = regs->r11;
416 args.cx = regs->cx;
417
418 /*
419 * the ax register is passed through as the return value, so we can
420 * use its slot on the stack for the ip value and jump to it through
421 * the trampoline's ret instruction
422 */
423 args.ax = regs->ip;
424 regs->ip = ip;
425
426 err = copy_to_user((void __user *)regs->sp, &args, sizeof(args));
427 if (err)
428 goto sigill;
429
430 /* ensure sysret, see do_syscall_64() */
431 regs->r11 = regs->flags;
432 regs->cx = regs->ip;
433
434 return regs->ax;
435
436 sigill:
437 force_sig(SIGILL);
438 return -1;
439 }
440
441 /*
442 * If arch_uprobe->insn doesn't use rip-relative addressing, return
443 * immediately. Otherwise, rewrite the instruction so that it accesses
444 * its memory operand indirectly through a scratch register. Set
445 * defparam->fixups accordingly. (The contents of the scratch register
446 * will be saved before we single-step the modified instruction,
447 * and restored afterward).
448 *
449 * We do this because a rip-relative instruction can access only a
450 * relatively small area (+/- 2 GB from the instruction), and the XOL
451 * area typically lies beyond that area. At least for instructions
452 * that store to memory, we can't execute the original instruction
453 * and "fix things up" later, because the misdirected store could be
454 * disastrous.
455 *
456 * Some useful facts about rip-relative instructions:
457 *
458 * - There's always a modrm byte with bit layout "00 reg 101".
459 * - There's never a SIB byte.
460 * - The displacement is always 4 bytes.
461 * - REX.B=1 bit in REX prefix, which normally extends r/m field,
462 * has no effect on rip-relative mode. It doesn't make modrm byte
463 * with r/m=101 refer to register 1101 = R13.
464 */
465 static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
466 {
467 u8 *cursor;
468 u8 reg;
469 u8 reg2;
470
471 if (!insn_rip_relative(insn))
472 return;
473
474 /*
475 * insn_rip_relative() would have decoded rex_prefix, vex_prefix, modrm.
476 * Clear REX.b bit (extension of MODRM.rm field):
477 * we want to encode low numbered reg, not r8+.
478 */
479 if (insn->rex_prefix.nbytes) {
480 cursor = auprobe->insn + insn_offset_rex_prefix(insn);
481 /* REX byte has 0100wrxb layout, clearing REX.b bit */
482 *cursor &= 0xfe;
483 }
484 /*
485 * Similar treatment for VEX3/EVEX prefix.
486 * TODO: add XOP treatment when insn decoder supports them
487 */
488 if (insn->vex_prefix.nbytes >= 3) {
489 /*
490 * vex2: c5 rvvvvLpp (has no b bit)
491 * vex3/xop: c4/8f rxbmmmmm wvvvvLpp
492 * evex: 62 rxbR00mm wvvvv1pp zllBVaaa
493 * Setting VEX3.b (setting because it has inverted meaning).
494 * Setting EVEX.x since (in non-SIB encoding) EVEX.x
495 * is the 4th bit of MODRM.rm, and needs the same treatment.
496 * For VEX3-encoded insns, VEX3.x value has no effect in
497 * non-SIB encoding, the change is superfluous but harmless.
498 */
499 cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1;
500 *cursor |= 0x60;
501 }
502
503 /*
504 * Convert from rip-relative addressing to register-relative addressing
505 * via a scratch register.
506 *
507 * This is tricky since there are insns with modrm byte
508 * which also use registers not encoded in modrm byte:
509 * [i]div/[i]mul: implicitly use dx:ax
510 * shift ops: implicitly use cx
511 * cmpxchg: implicitly uses ax
512 * cmpxchg8/16b: implicitly uses dx:ax and bx:cx
513 * Encoding: 0f c7/1 modrm
514 * The code below thinks that reg=1 (cx), chooses si as scratch.
515 * mulx: implicitly uses dx: mulx r/m,r1,r2 does r1:r2 = dx * r/m.
516 * First appeared in Haswell (BMI2 insn). It is vex-encoded.
517 * Example where none of bx,cx,dx can be used as scratch reg:
518 * c4 e2 63 f6 0d disp32 mulx disp32(%rip),%ebx,%ecx
519 * [v]pcmpistri: implicitly uses cx, xmm0
520 * [v]pcmpistrm: implicitly uses xmm0
521 * [v]pcmpestri: implicitly uses ax, dx, cx, xmm0
522 * [v]pcmpestrm: implicitly uses ax, dx, xmm0
523 * Evil SSE4.2 string comparison ops from hell.
524 * maskmovq/[v]maskmovdqu: implicitly uses (ds:rdi) as destination.
525 * Encoding: 0f f7 modrm, 66 0f f7 modrm, vex-encoded: c5 f9 f7 modrm.
526 * Store op1, byte-masked by op2 msb's in each byte, to (ds:rdi).
527 * AMD says it has no 3-operand form (vex.vvvv must be 1111)
528 * and that it can have only register operands, not mem
529 * (its modrm byte must have mode=11).
530 * If these restrictions are ever lifted,
531 * we'll need code to prevent selection of di as scratch reg!
532 *
533 * Summary: I don't know any insns with modrm byte which
534 * use SI register implicitly. DI register is used only
535 * by one insn (maskmovq) and BX register is used
536 * only by one too (cmpxchg8b).
537 * BP is stack-segment based (may be a problem?).
538 * AX, DX, CX are off-limits (many implicit users).
539 * SP is unusable (it's stack pointer - think about "pop mem";
540 * also, rsp+disp32 needs sib encoding -> insn length change).
541 */
542
543 reg = MODRM_REG(insn); /* Fetch modrm.reg */
544 reg2 = 0xff; /* Fetch vex.vvvv */
545 if (insn->vex_prefix.nbytes)
546 reg2 = insn->vex_prefix.bytes[2];
547 /*
548 * TODO: add XOP vvvv reading.
549 *
550 * vex.vvvv field is in bits 6-3, bits are inverted.
551 * But in 32-bit mode, the high-order bit may be ignored.
552 * Therefore, let's consider only 3 low-order bits.
553 */
554 reg2 = ((reg2 >> 3) & 0x7) ^ 0x7;
555 /*
556 * Register numbering is ax,cx,dx,bx, sp,bp,si,di, r8..r15.
557 *
558 * Choose scratch reg. Order is important: must not select bx
559 * if we can use si (cmpxchg8b case!)
560 */
561 if (reg != 6 && reg2 != 6) {
562 reg2 = 6;
563 auprobe->defparam.fixups |= UPROBE_FIX_RIP_SI;
564 } else if (reg != 7 && reg2 != 7) {
565 reg2 = 7;
566 auprobe->defparam.fixups |= UPROBE_FIX_RIP_DI;
567 /* TODO (paranoia): force maskmovq to not use di */
568 } else {
569 reg2 = 3;
570 auprobe->defparam.fixups |= UPROBE_FIX_RIP_BX;
571 }
572 /*
573 * Point cursor at the modrm byte. The next 4 bytes are the
574 * displacement. Beyond the displacement, for some instructions,
575 * is the immediate operand.
576 */
577 cursor = auprobe->insn + insn_offset_modrm(insn);
578 /*
579 * Change modrm from "00 reg 101" to "10 reg reg2". Example:
580 * 89 05 disp32 mov %eax,disp32(%rip) becomes
581 * 89 86 disp32 mov %eax,disp32(%rsi)
582 */
583 *cursor = 0x80 | (reg << 3) | reg2;
584 }
585
586 static inline unsigned long *
587 scratch_reg(struct arch_uprobe *auprobe, struct pt_regs *regs)
588 {
589 if (auprobe->defparam.fixups & UPROBE_FIX_RIP_SI)
590 return &regs->si;
591 if (auprobe->defparam.fixups & UPROBE_FIX_RIP_DI)
592 return &regs->di;
593 return &regs->bx;
594 }
595
596 /*
597 * If we're emulating a rip-relative instruction, save the contents
598 * of the scratch register and store the target address in that register.
599 */
600 static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
601 {
602 if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) {
603 struct uprobe_task *utask = current->utask;
604 unsigned long *sr = scratch_reg(auprobe, regs);
605
606 utask->autask.saved_scratch_register = *sr;
607 *sr = utask->vaddr + auprobe->defparam.ilen;
608 }
609 }
610
611 static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
612 {
613 if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) {
614 struct uprobe_task *utask = current->utask;
615 unsigned long *sr = scratch_reg(auprobe, regs);
616
617 *sr = utask->autask.saved_scratch_register;
618 }
619 }
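/*
 * Worked example of the RIP-relative fixup (illustrative only). For the
 * probed instruction
 *
 *	8b 05 <disp32>	mov disp32(%rip),%eax
 *
 * riprel_analyze() rewrites the XOL copy to
 *
 *	8b 86 <disp32>	mov disp32(%rsi),%eax
 *
 * and sets UPROBE_FIX_RIP_SI. riprel_pre_xol() then saves %rsi and loads
 * it with utask->vaddr + ilen, i.e. the value %rip would have had at the
 * original location, so the rewritten insn accesses the same memory;
 * riprel_post_xol() restores the saved %rsi afterwards.
 */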
620
621 static int tramp_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
622 {
623 return -EPERM;
624 }
625
626 static struct page *tramp_mapping_pages[2] __ro_after_init;
627
628 static struct vm_special_mapping tramp_mapping = {
629 .name = "[uprobes-trampoline]",
630 .mremap = tramp_mremap,
631 .pages = tramp_mapping_pages,
632 };
633
634 struct uprobe_trampoline {
635 struct hlist_node node;
636 unsigned long vaddr;
637 };
638
639 static bool is_reachable_by_call(unsigned long vtramp, unsigned long vaddr)
640 {
641 long delta = (long)(vaddr + 5 - vtramp);
642
643 return delta >= INT_MIN && delta <= INT_MAX;
644 }
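/*
 * Example (a sketch): a call insn at vaddr is 5 bytes long and the CPU
 * computes its target as (vaddr + 5) + rel32. A trampoline at vtramp is
 * therefore reachable iff the displacement vtramp - (vaddr + 5) fits in
 * a signed 32-bit value; the check above tests the negated delta, which
 * is equivalent for any practical distance.
 */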
645
646 static unsigned long find_nearest_trampoline(unsigned long vaddr)
647 {
648 struct vm_unmapped_area_info info = {
649 .length = PAGE_SIZE,
650 .align_mask = ~PAGE_MASK,
651 };
652 unsigned long low_limit, high_limit;
653 unsigned long low_tramp, high_tramp;
654 unsigned long call_end = vaddr + 5;
655
656 if (check_add_overflow(call_end, INT_MIN, &low_limit))
657 low_limit = PAGE_SIZE;
658
659 high_limit = call_end + INT_MAX;
660
661 /* Search up from the caller address. */
662 info.low_limit = call_end;
663 info.high_limit = min(high_limit, TASK_SIZE);
664 high_tramp = vm_unmapped_area(&info);
665
666 /* Search down from the caller address. */
667 info.low_limit = max(low_limit, PAGE_SIZE);
668 info.high_limit = call_end;
669 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
670 low_tramp = vm_unmapped_area(&info);
671
672 if (IS_ERR_VALUE(high_tramp) && IS_ERR_VALUE(low_tramp))
673 return -ENOMEM;
674 if (IS_ERR_VALUE(high_tramp))
675 return low_tramp;
676 if (IS_ERR_VALUE(low_tramp))
677 return high_tramp;
678
679 /* Return address that's closest to the caller address. */
680 if (call_end - low_tramp < high_tramp - call_end)
681 return low_tramp;
682 return high_tramp;
683 }
684
685 static struct uprobe_trampoline *create_uprobe_trampoline(unsigned long vaddr)
686 {
687 struct pt_regs *regs = task_pt_regs(current);
688 struct mm_struct *mm = current->mm;
689 struct uprobe_trampoline *tramp;
690 struct vm_area_struct *vma;
691
692 if (!user_64bit_mode(regs))
693 return NULL;
694
695 vaddr = find_nearest_trampoline(vaddr);
696 if (IS_ERR_VALUE(vaddr))
697 return NULL;
698
699 tramp = kzalloc_obj(*tramp);
700 if (unlikely(!tramp))
701 return NULL;
702
703 tramp->vaddr = vaddr;
704 vma = _install_special_mapping(mm, tramp->vaddr, PAGE_SIZE,
705 VM_READ|VM_EXEC|VM_MAYEXEC|VM_MAYREAD|VM_DONTCOPY|VM_IO,
706 &tramp_mapping);
707 if (IS_ERR(vma)) {
708 kfree(tramp);
709 return NULL;
710 }
711 return tramp;
712 }
713
714 static struct uprobe_trampoline *get_uprobe_trampoline(unsigned long vaddr, bool *new)
715 {
716 struct uprobes_state *state = &current->mm->uprobes_state;
717 struct uprobe_trampoline *tramp = NULL;
718
719 if (vaddr > TASK_SIZE || vaddr < PAGE_SIZE)
720 return NULL;
721
722 hlist_for_each_entry(tramp, &state->head_tramps, node) {
723 if (is_reachable_by_call(tramp->vaddr, vaddr)) {
724 *new = false;
725 return tramp;
726 }
727 }
728
729 tramp = create_uprobe_trampoline(vaddr);
730 if (!tramp)
731 return NULL;
732
733 *new = true;
734 hlist_add_head(&tramp->node, &state->head_tramps);
735 return tramp;
736 }
737
738 static void destroy_uprobe_trampoline(struct uprobe_trampoline *tramp)
739 {
740 /*
741 * We do not unmap and release the uprobe trampoline page itself,
742 * because there's no easy way to make sure no thread is still
743 * inside the trampoline.
744 */
745 hlist_del(&tramp->node);
746 kfree(tramp);
747 }
748
749 void arch_uprobe_init_state(struct mm_struct *mm)
750 {
751 INIT_HLIST_HEAD(&mm->uprobes_state.head_tramps);
752 }
753
754 void arch_uprobe_clear_state(struct mm_struct *mm)
755 {
756 struct uprobes_state *state = &mm->uprobes_state;
757 struct uprobe_trampoline *tramp;
758 struct hlist_node *n;
759
760 hlist_for_each_entry_safe(tramp, n, &state->head_tramps, node)
761 destroy_uprobe_trampoline(tramp);
762 }
763
764 static bool __in_uprobe_trampoline(unsigned long ip)
765 {
766 struct vm_area_struct *vma = vma_lookup(current->mm, ip);
767
768 return vma && vma_is_special_mapping(vma, &tramp_mapping);
769 }
770
771 static bool in_uprobe_trampoline(unsigned long ip)
772 {
773 struct mm_struct *mm = current->mm;
774 bool found, retry = true;
775 unsigned int seq;
776
777 rcu_read_lock();
778 if (mmap_lock_speculate_try_begin(mm, &seq)) {
779 found = __in_uprobe_trampoline(ip);
780 retry = mmap_lock_speculate_retry(mm, seq);
781 }
782 rcu_read_unlock();
783
784 if (retry) {
785 mmap_read_lock(mm);
786 found = __in_uprobe_trampoline(ip);
787 mmap_read_unlock(mm);
788 }
789 return found;
790 }
791
792 /*
793 * See uprobe syscall trampoline; the call to the trampoline will push
794 * the return address on the stack, the trampoline itself then pushes
795 * cx, r11 and ax.
796 */
797 struct uprobe_syscall_args {
798 unsigned long ax;
799 unsigned long r11;
800 unsigned long cx;
801 unsigned long retaddr;
802 };
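/*
 * Illustrative layout at sys_uprobe entry (a sketch): the call into the
 * trampoline pushes the return address, then the trampoline pushes %rcx,
 * %r11, %rax, so:
 *
 *	regs->sp + 0x00: saved ax	(pushed last)
 *	regs->sp + 0x08: saved r11
 *	regs->sp + 0x10: saved cx
 *	regs->sp + 0x18: retaddr	(pushed by the call insn)
 *
 * matching the field order of struct uprobe_syscall_args above.
 */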
803
804 SYSCALL_DEFINE0(uprobe)
805 {
806 struct pt_regs *regs = task_pt_regs(current);
807 struct uprobe_syscall_args args;
808 unsigned long ip, sp, sret;
809 int err;
810
811 /* Allow execution only from uprobe trampolines. */
812 if (!in_uprobe_trampoline(regs->ip))
813 return -ENXIO;
814
815 err = copy_from_user(&args, (void __user *)regs->sp, sizeof(args));
816 if (err)
817 goto sigill;
818
819 ip = regs->ip;
820
821 /*
822 * expose the "right" values of ax/r11/cx/ip/sp to uprobe_consumer/s, plus:
823 * - adjust ip to the probe address, call saved next instruction address
824 * - adjust sp to the probe's stack frame (check trampoline code)
825 */
826 regs->ax = args.ax;
827 regs->r11 = args.r11;
828 regs->cx = args.cx;
829 regs->ip = args.retaddr - 5;
830 regs->sp += sizeof(args);
831 regs->orig_ax = -1;
832
833 sp = regs->sp;
834
835 err = shstk_pop((u64 *)&sret);
836 if (err == -EFAULT || (!err && sret != args.retaddr))
837 goto sigill;
838
839 handle_syscall_uprobe(regs, regs->ip);
840
841 /*
842 * If one of the uprobe consumers has changed sp, we can do nothing,
843 * just return via iret.
844 */
845 if (regs->sp != sp) {
846 /* skip the trampoline call */
847 if (args.retaddr - 5 == regs->ip)
848 regs->ip += 5;
849 return regs->ax;
850 }
851
852 regs->sp -= sizeof(args);
853
854 /* in case a uprobe_consumer has changed ax/r11/cx */
855 args.ax = regs->ax;
856 args.r11 = regs->r11;
857 args.cx = regs->cx;
858
859 /* keep return address unless we are instructed otherwise */
860 if (args.retaddr - 5 != regs->ip)
861 args.retaddr = regs->ip;
862
863 if (shstk_push(args.retaddr) == -EFAULT)
864 goto sigill;
865
866 regs->ip = ip;
867
868 err = copy_to_user((void __user *)regs->sp, &args, sizeof(args));
869 if (err)
870 goto sigill;
871
872 /* ensure sysret, see do_syscall_64() */
873 regs->r11 = regs->flags;
874 regs->cx = regs->ip;
875 return 0;
876
877 sigill:
878 force_sig(SIGILL);
879 return -1;
880 }
881
882 asm (
883 ".pushsection .rodata\n"
884 ".balign " __stringify(PAGE_SIZE) "\n"
885 "uprobe_trampoline_entry:\n"
886 "push %rcx\n"
887 "push %r11\n"
888 "push %rax\n"
889 "mov $" __stringify(__NR_uprobe) ", %rax\n"
890 "syscall\n"
891 "pop %rax\n"
892 "pop %r11\n"
893 "pop %rcx\n"
894 "ret\n"
895 "int3\n"
896 ".balign " __stringify(PAGE_SIZE) "\n"
897 ".popsection\n"
898 );
899
900 extern u8 uprobe_trampoline_entry[];
901
902 static int __init arch_uprobes_init(void)
903 {
904 tramp_mapping_pages[0] = virt_to_page(uprobe_trampoline_entry);
905 return 0;
906 }
907
908 late_initcall(arch_uprobes_init);
909
910 enum {
911 EXPECT_SWBP,
912 EXPECT_CALL,
913 };
914
915 struct write_opcode_ctx {
916 unsigned long base;
917 int expect;
918 };
919
920 static int is_call_insn(uprobe_opcode_t *insn)
921 {
922 return *insn == CALL_INSN_OPCODE;
923 }
924
925 /*
926 * Verification callback used by int3_update uprobe_write calls to make sure
927 * the underlying instruction is as expected - either int3 or call.
928 */
929 static int verify_insn(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode,
930 int nbytes, void *data)
931 {
932 struct write_opcode_ctx *ctx = data;
933 uprobe_opcode_t old_opcode[5];
934
935 uprobe_copy_from_page(page, ctx->base, (uprobe_opcode_t *) &old_opcode, 5);
936
937 switch (ctx->expect) {
938 case EXPECT_SWBP:
939 if (is_swbp_insn(&old_opcode[0]))
940 return 1;
941 break;
942 case EXPECT_CALL:
943 if (is_call_insn(&old_opcode[0]))
944 return 1;
945 break;
946 }
947
948 return -1;
949 }
950
951 /*
952 * Modify multi-byte instructions by using INT3 breakpoints on SMP.
953 * We completely avoid using stop_machine() here, and achieve the
954 * synchronization using INT3 breakpoints and SMP cross-calls.
955 * (borrowed comment from smp_text_poke_batch_finish)
956 *
957 * The way it is done:
958 * - Add an INT3 trap to the address that will be patched
959 * - SMP sync all CPUs
960 * - Update all but the first byte of the patched range
961 * - SMP sync all CPUs
962 * - Replace the first byte (INT3) by the first byte of the replacing opcode
963 * - SMP sync all CPUs
964 */
965 static int int3_update(struct arch_uprobe *auprobe, struct vm_area_struct *vma,
966 unsigned long vaddr, char *insn, bool optimize)
967 {
968 uprobe_opcode_t int3 = UPROBE_SWBP_INSN;
969 struct write_opcode_ctx ctx = {
970 .base = vaddr,
971 };
972 int err;
973
974 /*
975 * Write int3 trap.
976 *
977 * The swbp_optimize path comes with the breakpoint already installed,
978 * so we can skip this step for optimize == true.
979 */
980 if (!optimize) {
981 ctx.expect = EXPECT_CALL;
982 err = uprobe_write(auprobe, vma, vaddr, &int3, 1, verify_insn,
983 true /* is_register */, false /* do_update_ref_ctr */,
984 &ctx);
985 if (err)
986 return err;
987 }
988
989 smp_text_poke_sync_each_cpu();
990
991 /* Write all but the first byte of the patched range. */
992 ctx.expect = EXPECT_SWBP;
993 err = uprobe_write(auprobe, vma, vaddr + 1, insn + 1, 4, verify_insn,
994 true /* is_register */, false /* do_update_ref_ctr */,
995 &ctx);
996 if (err)
997 return err;
998
999 smp_text_poke_sync_each_cpu();
1000
1001 /*
1002 * Write first byte.
1003 *
1004 * The swbp_unoptimize needs to finish uprobe removal together
1005 * with ref_ctr update, using uprobe_write with proper flags.
1006 */
1007 err = uprobe_write(auprobe, vma, vaddr, insn, 1, verify_insn,
1008 optimize /* is_register */, !optimize /* do_update_ref_ctr */,
1009 &ctx);
1010 if (err)
1011 return err;
1012
1013 smp_text_poke_sync_each_cpu();
1014 return 0;
1015 }
1016
1017 static int swbp_optimize(struct arch_uprobe *auprobe, struct vm_area_struct *vma,
1018 unsigned long vaddr, unsigned long tramp)
1019 {
1020 u8 call[5];
1021
1022 __text_gen_insn(call, CALL_INSN_OPCODE, (const void *) vaddr,
1023 (const void *) tramp, CALL_INSN_SIZE);
1024 return int3_update(auprobe, vma, vaddr, call, true /* optimize */);
1025 }
1026
1027 static int swbp_unoptimize(struct arch_uprobe *auprobe, struct vm_area_struct *vma,
1028 unsigned long vaddr)
1029 {
1030 return int3_update(auprobe, vma, vaddr, auprobe->insn, false /* optimize */);
1031 }
1032
1033 static int copy_from_vaddr(struct mm_struct *mm, unsigned long vaddr, void *dst, int len)
1034 {
1035 unsigned int gup_flags = FOLL_FORCE|FOLL_SPLIT_PMD;
1036 struct vm_area_struct *vma;
1037 struct page *page;
1038
1039 page = get_user_page_vma_remote(mm, vaddr, gup_flags, &vma);
1040 if (IS_ERR(page))
1041 return PTR_ERR(page);
1042 uprobe_copy_from_page(page, vaddr, dst, len);
1043 put_page(page);
1044 return 0;
1045 }
1046
1047 static bool __is_optimized(uprobe_opcode_t *insn, unsigned long vaddr)
1048 {
1049 struct __packed __arch_relative_insn {
1050 u8 op;
1051 s32 raddr;
1052 } *call = (struct __arch_relative_insn *) insn;
1053
1054 if (!is_call_insn(insn))
1055 return false;
1056 return __in_uprobe_trampoline(vaddr + 5 + call->raddr);
1057 }
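/*
 * Example (illustrative): for the 5-byte sequence "e8 fb 00 00 00" at
 * vaddr, call->raddr is 0xfb == 251 and the call target is
 * vaddr + 5 + 251. The probe counts as optimized only if that target
 * lands inside the special [uprobes-trampoline] mapping.
 */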
1058
1059 static int is_optimized(struct mm_struct *mm, unsigned long vaddr)
1060 {
1061 uprobe_opcode_t insn[5];
1062 int err;
1063
1064 err = copy_from_vaddr(mm, vaddr, &insn, 5);
1065 if (err)
1066 return err;
1067 return __is_optimized((uprobe_opcode_t *)&insn, vaddr);
1068 }
1069
1070 static bool should_optimize(struct arch_uprobe *auprobe)
1071 {
1072 return !test_bit(ARCH_UPROBE_FLAG_OPTIMIZE_FAIL, &auprobe->flags) &&
1073 test_bit(ARCH_UPROBE_FLAG_CAN_OPTIMIZE, &auprobe->flags);
1074 }
1075
1076 int set_swbp(struct arch_uprobe *auprobe, struct vm_area_struct *vma,
1077 unsigned long vaddr)
1078 {
1079 if (should_optimize(auprobe)) {
1080 /*
1081 * We could race with another thread that already optimized the probe,
1082 * so let's not overwrite it with int3 again in this case.
1083 */
1084 int ret = is_optimized(vma->vm_mm, vaddr);
1085 if (ret < 0)
1086 return ret;
1087 if (ret)
1088 return 0;
1089 }
1090 return uprobe_write_opcode(auprobe, vma, vaddr, UPROBE_SWBP_INSN,
1091 true /* is_register */);
1092 }
1093
1094 int set_orig_insn(struct arch_uprobe *auprobe, struct vm_area_struct *vma,
1095 unsigned long vaddr)
1096 {
1097 if (test_bit(ARCH_UPROBE_FLAG_CAN_OPTIMIZE, &auprobe->flags)) {
1098 int ret = is_optimized(vma->vm_mm, vaddr);
1099 if (ret < 0)
1100 return ret;
1101 if (ret) {
1102 ret = swbp_unoptimize(auprobe, vma, vaddr);
1103 WARN_ON_ONCE(ret);
1104 return ret;
1105 }
1106 }
1107 return uprobe_write_opcode(auprobe, vma, vaddr, *(uprobe_opcode_t *)&auprobe->insn,
1108 false /* is_register */);
1109 }
1110
1111 static int __arch_uprobe_optimize(struct arch_uprobe *auprobe, struct mm_struct *mm,
1112 unsigned long vaddr)
1113 {
1114 struct uprobe_trampoline *tramp;
1115 struct vm_area_struct *vma;
1116 bool new = false;
1117 int err = 0;
1118
1119 vma = find_vma(mm, vaddr);
1120 if (!vma)
1121 return -EINVAL;
1122 tramp = get_uprobe_trampoline(vaddr, &new);
1123 if (!tramp)
1124 return -EINVAL;
1125 err = swbp_optimize(auprobe, vma, vaddr, tramp->vaddr);
1126 if (WARN_ON_ONCE(err) && new)
1127 destroy_uprobe_trampoline(tramp);
1128 return err;
1129 }
1130
1131 void arch_uprobe_optimize(struct arch_uprobe *auprobe, unsigned long vaddr)
1132 {
1133 struct mm_struct *mm = current->mm;
1134 uprobe_opcode_t insn[5];
1135
1136 if (!should_optimize(auprobe))
1137 return;
1138
1139 mmap_write_lock(mm);
1140
1141 /*
1142 * Check if some other thread already optimized the uprobe for us;
1143 * if that's the case, just go away silently.
1144 */
1145 if (copy_from_vaddr(mm, vaddr, &insn, 5))
1146 goto unlock;
1147 if (!is_swbp_insn((uprobe_opcode_t*) &insn))
1148 goto unlock;
1149
1150 /*
1151 * If we fail to optimize the uprobe, set the fail bit so that
1152 * should_optimize() above fails from now on.
1153 */
1154 if (__arch_uprobe_optimize(auprobe, mm, vaddr))
1155 set_bit(ARCH_UPROBE_FLAG_OPTIMIZE_FAIL, &auprobe->flags);
1156
1157 unlock:
1158 mmap_write_unlock(mm);
1159 }
1160
1161 static bool can_optimize(struct insn *insn, unsigned long vaddr)
1162 {
1163 if (!insn->x86_64 || insn->length != 5)
1164 return false;
1165
1166 if (!insn_is_nop(insn))
1167 return false;
1168
1169 /* We can't do cross-page atomic writes yet. */
1170 return PAGE_SIZE - (vaddr & ~PAGE_MASK) >= 5;
1171 }
1172 #else /* 32-bit: */
1173 /*
1174 * No RIP-relative addressing on 32-bit
1175 */
1176 static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
1177 {
1178 }
1179 static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
1180 {
1181 }
1182 static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
1183 {
1184 }
1185 static bool can_optimize(struct insn *insn, unsigned long vaddr)
1186 {
1187 return false;
1188 }
1189 #endif /* CONFIG_X86_64 */
1190
1191 struct uprobe_xol_ops {
1192 bool (*emulate)(struct arch_uprobe *, struct pt_regs *);
1193 int (*pre_xol)(struct arch_uprobe *, struct pt_regs *);
1194 int (*post_xol)(struct arch_uprobe *, struct pt_regs *);
1195 void (*abort)(struct arch_uprobe *, struct pt_regs *);
1196 };
1197
1198 static inline int sizeof_long(struct pt_regs *regs)
1199 {
1200 /*
1201 * Check registers for mode as in_xxx_syscall() does not apply here.
1202 */
1203 return user_64bit_mode(regs) ? 8 : 4;
1204 }
1205
1206 static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
1207 {
1208 riprel_pre_xol(auprobe, regs);
1209 return 0;
1210 }
1211
1212 static int emulate_push_stack(struct pt_regs *regs, unsigned long val)
1213 {
1214 unsigned long new_sp = regs->sp - sizeof_long(regs);
1215
1216 if (copy_to_user((void __user *)new_sp, &val, sizeof_long(regs)))
1217 return -EFAULT;
1218
1219 regs->sp = new_sp;
1220 return 0;
1221 }
1222
1223 /*
1224 * We have to fix things up as follows:
1225 *
1226 * Typically, the new ip is relative to the copied instruction. We need
1227 * to make it relative to the original instruction (FIX_IP). Exceptions
1228 * are return instructions and absolute or indirect jump or call instructions.
1229 *
1230 * If the single-stepped instruction was a call, the return address that
1231 * is atop the stack is the address following the copied instruction. We
1232 * need to make it the address following the original instruction (FIX_CALL).
1233 *
1234 * If the original instruction was a rip-relative instruction such as
1235 * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent
1236 * instruction using a scratch register -- e.g., "movl %edx,0xnnnn(%rsi)".
1237 * We need to restore the contents of the scratch register
1238 * (FIX_RIP_reg).
1239 */
1240 static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
1241 {
1242 struct uprobe_task *utask = current->utask;
1243
1244 riprel_post_xol(auprobe, regs);
1245 if (auprobe->defparam.fixups & UPROBE_FIX_IP) {
1246 long correction = utask->vaddr - utask->xol_vaddr;
1247 regs->ip += correction;
1248 } else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
1249 regs->sp += sizeof_long(regs); /* Pop incorrect return address */
1250 if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen))
1251 return -ERESTART;
1252 }
1253 /* popf; tell the caller to not touch TF */
1254 if (auprobe->defparam.fixups & UPROBE_FIX_SETF)
1255 utask->autask.saved_tf = true;
1256
1257 return 0;
1258 }
1259
1260 static void default_abort_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
1261 {
1262 riprel_post_xol(auprobe, regs);
1263 }
1264
1265 static const struct uprobe_xol_ops default_xol_ops = {
1266 .pre_xol = default_pre_xol_op,
1267 .post_xol = default_post_xol_op,
1268 .abort = default_abort_op,
1269 };
1270
1271 static bool branch_is_call(struct arch_uprobe *auprobe)
1272 {
1273 return auprobe->branch.opc1 == 0xe8;
1274 }
1275
1276 #define CASE_COND \
1277 COND(70, 71, XF(OF)) \
1278 COND(72, 73, XF(CF)) \
1279 COND(74, 75, XF(ZF)) \
1280 COND(78, 79, XF(SF)) \
1281 COND(7a, 7b, XF(PF)) \
1282 COND(76, 77, XF(CF) || XF(ZF)) \
1283 COND(7c, 7d, XF(SF) != XF(OF)) \
1284 COND(7e, 7f, XF(ZF) || XF(SF) != XF(OF))
1285
1286 #define COND(op_y, op_n, expr) \
1287 case 0x ## op_y: DO((expr) != 0) \
1288 case 0x ## op_n: DO((expr) == 0)
1289
1290 #define XF(xf) (!!(flags & X86_EFLAGS_ ## xf))
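/*
 * Illustrative expansion (a sketch): inside check_jmp_cond() below, the
 * pair COND(74, 75, XF(ZF)) expands to
 *
 *	case 0x74: return (!!(flags & X86_EFLAGS_ZF)) != 0;
 *	case 0x75: return (!!(flags & X86_EFLAGS_ZF)) == 0;
 *
 * i.e. "je" is taken iff ZF is set and "jne" iff ZF is clear. In
 * is_cond_jmp_opcode() the same CASE_COND list expands with a DO() that
 * just returns true, turning it into a plain opcode-set membership test.
 */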
1291
1292 static bool is_cond_jmp_opcode(u8 opcode)
1293 {
1294 switch (opcode) {
1295 #define DO(expr) \
1296 return true;
1297 CASE_COND
1298 #undef DO
1299
1300 default:
1301 return false;
1302 }
1303 }
1304
1305 static bool check_jmp_cond(struct arch_uprobe *auprobe, struct pt_regs *regs)
1306 {
1307 unsigned long flags = regs->flags;
1308
1309 switch (auprobe->branch.opc1) {
1310 #define DO(expr) \
1311 return expr;
1312 CASE_COND
1313 #undef DO
1314
1315 default: /* not a conditional jmp */
1316 return true;
1317 }
1318 }
1319
1320 #undef XF
1321 #undef COND
1322 #undef CASE_COND
1323
1324 static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
1325 {
1326 unsigned long new_ip = regs->ip += auprobe->branch.ilen;
1327 unsigned long offs = (long)auprobe->branch.offs;
1328
1329 if (branch_is_call(auprobe)) {
1330 /*
1331 * If it fails we execute this (mangled, see the comment in
1332 * branch_clear_offset) insn out-of-line. In the likely case
1333 * this should trigger the trap, and the probed application
1334 * should die or restart the same insn after it handles the
1335 * signal, arch_uprobe_post_xol() won't be even called.
1336 *
1337 * But there is corner case, see the comment in ->post_xol().
1338 */
1339 if (emulate_push_stack(regs, new_ip))
1340 return false;
1341 } else if (!check_jmp_cond(auprobe, regs)) {
1342 offs = 0;
1343 }
1344
1345 regs->ip = new_ip + offs;
1346 return true;
1347 }
1348
1349 static bool push_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
1350 {
1351 unsigned long *src_ptr = (void *)regs + auprobe->push.reg_offset;
1352
1353 if (emulate_push_stack(regs, *src_ptr))
1354 return false;
1355 regs->ip += auprobe->push.ilen;
1356 return true;
1357 }
1358
1359 static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
1360 {
1361 BUG_ON(!branch_is_call(auprobe));
1362 /*
1363 * We can only get here if branch_emulate_op() failed to push the ret
1364 * address _and_ another thread expanded our stack before the (mangled)
1365 * "call" insn was executed out-of-line. Just restore ->sp and restart.
1366 * We could also restore ->ip and try to call branch_emulate_op() again.
1367 */
1368 regs->sp += sizeof_long(regs);
1369 return -ERESTART;
1370 }
1371
1372 static void branch_clear_offset(struct arch_uprobe *auprobe, struct insn *insn)
1373 {
1374 /*
1375 * Turn this insn into "call 1f; 1:", this is what we will execute
1376 * out-of-line if ->emulate() fails. We only need this to generate
1377 * a trap, so that the probed task receives the correct signal with
1378 * the properly filled siginfo.
1379 *
1380 * But see the comment in ->post_xol(), in the unlikely case it can
1381 * succeed. So we need to ensure that the new ->ip can not fall into
1382 * the non-canonical area and trigger #GP.
1383 *
1384 * We could turn it into (say) "pushf", but then we would need to
1385 * divorce ->insn[] and ->ixol[]. We need to preserve the 1st byte
1386 * of ->insn[] for set_orig_insn().
1387 */
1388 memset(auprobe->insn + insn_offset_immediate(insn),
1389 0, insn->immediate.nbytes);
1390 }
1391
1392 static const struct uprobe_xol_ops branch_xol_ops = {
1393 .emulate = branch_emulate_op,
1394 .post_xol = branch_post_xol_op,
1395 };
1396
1397 static const struct uprobe_xol_ops push_xol_ops = {
1398 .emulate = push_emulate_op,
1399 };
1400
1401 /* Returns -ENOSYS if branch_xol_ops doesn't handle this insn */
1402 static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
1403 {
1404 u8 opc1 = OPCODE1(insn);
1405 insn_byte_t p;
1406
1407 if (insn_is_nop(insn))
1408 goto setup;
1409
1410 switch (opc1) {
1411 case 0xeb: /* jmp 8 */
1412 case 0xe9: /* jmp 32 */
1413 break;
1414
1415 case 0xe8: /* call relative */
1416 branch_clear_offset(auprobe, insn);
1417 break;
1418
1419 case 0x0f:
1420 if (insn->opcode.nbytes != 2)
1421 return -ENOSYS;
1422 /*
1423 * If it is a "near" conditional jmp, OPCODE2() - 0x10 matches
1424 * OPCODE1() of the "short" jmp which checks the same condition.
1425 */
1426 opc1 = OPCODE2(insn) - 0x10;
1427 fallthrough;
1428 default:
1429 if (!is_cond_jmp_opcode(opc1))
1430 return -ENOSYS;
1431 }
1432
1433 /*
1434 * 16-bit overrides such as CALLW (66 e8 nn nn) are not supported.
1435 * Intel and AMD behavior differs in 64-bit mode: Intel ignores the 66 prefix.
1436 * No one uses these insns, so reject any branch insn with such a prefix.
1437 */
1438 for_each_insn_prefix(insn, p) {
1439 if (p == 0x66)
1440 return -ENOTSUPP;
1441 }
1442
1443 setup:
1444 auprobe->branch.opc1 = opc1;
1445 auprobe->branch.ilen = insn->length;
1446 auprobe->branch.offs = insn->immediate.value;
1447
1448 auprobe->ops = &branch_xol_ops;
1449 return 0;
1450 }
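/*
 * Example of the near/short mapping above (illustrative): the near
 * conditional jump "0f 84 <rel32>" (je) has OPCODE2() == 0x84, and
 * 0x84 - 0x10 == 0x74 is the short "je rel8" opcode, so both forms are
 * emulated through the same branch.opc1 == 0x74 condition check.
 */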
1451
1452 /* Returns -ENOSYS if push_xol_ops doesn't handle this insn */
1453 static int push_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
1454 {
1455 u8 opc1 = OPCODE1(insn), reg_offset = 0;
1456
1457 if (opc1 < 0x50 || opc1 > 0x57)
1458 return -ENOSYS;
1459
1460 if (insn->length > 2)
1461 return -ENOSYS;
1462 if (insn->length == 2) {
1463 /* only support rex_prefix 0x41 (x64 only) */
1464 #ifdef CONFIG_X86_64
1465 if (insn->rex_prefix.nbytes != 1 ||
1466 insn->rex_prefix.bytes[0] != 0x41)
1467 return -ENOSYS;
1468
1469 switch (opc1) {
1470 case 0x50:
1471 reg_offset = offsetof(struct pt_regs, r8);
1472 break;
1473 case 0x51:
1474 reg_offset = offsetof(struct pt_regs, r9);
1475 break;
1476 case 0x52:
1477 reg_offset = offsetof(struct pt_regs, r10);
1478 break;
1479 case 0x53:
1480 reg_offset = offsetof(struct pt_regs, r11);
1481 break;
1482 case 0x54:
1483 reg_offset = offsetof(struct pt_regs, r12);
1484 break;
1485 case 0x55:
1486 reg_offset = offsetof(struct pt_regs, r13);
1487 break;
1488 case 0x56:
1489 reg_offset = offsetof(struct pt_regs, r14);
1490 break;
1491 case 0x57:
1492 reg_offset = offsetof(struct pt_regs, r15);
1493 break;
1494 }
1495 #else
1496 return -ENOSYS;
1497 #endif
1498 } else {
1499 switch (opc1) {
1500 case 0x50:
1501 reg_offset = offsetof(struct pt_regs, ax);
1502 break;
1503 case 0x51:
1504 reg_offset = offsetof(struct pt_regs, cx);
1505 break;
1506 case 0x52:
1507 reg_offset = offsetof(struct pt_regs, dx);
1508 break;
1509 case 0x53:
1510 reg_offset = offsetof(struct pt_regs, bx);
1511 break;
1512 case 0x54:
1513 reg_offset = offsetof(struct pt_regs, sp);
1514 break;
1515 case 0x55:
1516 reg_offset = offsetof(struct pt_regs, bp);
1517 break;
1518 case 0x56:
1519 reg_offset = offsetof(struct pt_regs, si);
1520 break;
1521 case 0x57:
1522 reg_offset = offsetof(struct pt_regs, di);
1523 break;
1524 }
1525 }
1526
1527 auprobe->push.reg_offset = reg_offset;
1528 auprobe->push.ilen = insn->length;
1529 auprobe->ops = &push_xol_ops;
1530 return 0;
1531 }
1532
1533 /**
1534 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
1535 * @auprobe: the probepoint information.
1536 * @mm: the probed address space.
1537 * @addr: virtual address at which to install the probepoint
1538 * Return 0 on success or a negative errno on error.
1539 */
1540 int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
1541 {
1542 u8 fix_ip_or_call = UPROBE_FIX_IP;
1543 struct insn insn;
1544 int ret;
1545
1546 ret = uprobe_init_insn(auprobe, &insn, is_64bit_mm(mm));
1547 if (ret)
1548 return ret;
1549
1550 if (can_optimize(&insn, addr))
1551 set_bit(ARCH_UPROBE_FLAG_CAN_OPTIMIZE, &auprobe->flags);
1552
1553 ret = branch_setup_xol_ops(auprobe, &insn);
1554 if (ret != -ENOSYS)
1555 return ret;
1556
1557 ret = push_setup_xol_ops(auprobe, &insn);
1558 if (ret != -ENOSYS)
1559 return ret;
1560
1561 /*
1562 * Figure out which fixups default_post_xol_op() will need to perform,
1563 * and annotate defparam->fixups accordingly.
1564 */
1565 switch (OPCODE1(&insn)) {
1566 case 0x9d: /* popf */
1567 auprobe->defparam.fixups |= UPROBE_FIX_SETF;
1568 break;
1569 case 0xc3: /* ret or lret -- ip is correct */
1570 case 0xcb:
1571 case 0xc2:
1572 case 0xca:
1573 case 0xea: /* jmp absolute -- ip is correct */
1574 fix_ip_or_call = 0;
1575 break;
1576 case 0x9a: /* call absolute - Fix return addr, not ip */
1577 fix_ip_or_call = UPROBE_FIX_CALL;
1578 break;
1579 case 0xff:
1580 switch (MODRM_REG(&insn)) {
1581 case 2: case 3: /* call or lcall, indirect */
1582 fix_ip_or_call = UPROBE_FIX_CALL;
1583 break;
1584 case 4: case 5: /* jmp or ljmp, indirect */
1585 fix_ip_or_call = 0;
1586 break;
1587 }
1588 fallthrough;
1589 default:
1590 riprel_analyze(auprobe, &insn);
1591 }
1592
1593 auprobe->defparam.ilen = insn.length;
1594 auprobe->defparam.fixups |= fix_ip_or_call;
1595
1596 auprobe->ops = &default_xol_ops;
1597 return 0;
1598 }
1599
1600 /*
1601 * arch_uprobe_pre_xol - prepare to execute out of line.
1602 * @auprobe: the probepoint information.
1603 * @regs: reflects the saved user state of current task.
1604 */
1605 int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
1606 {
1607 struct uprobe_task *utask = current->utask;
1608
1609 if (auprobe->ops->pre_xol) {
1610 int err = auprobe->ops->pre_xol(auprobe, regs);
1611 if (err)
1612 return err;
1613 }
1614
1615 regs->ip = utask->xol_vaddr;
1616 utask->autask.saved_trap_nr = current->thread.trap_nr;
1617 current->thread.trap_nr = UPROBE_TRAP_NR;
1618
1619 utask->autask.saved_tf = !!(regs->flags & X86_EFLAGS_TF);
1620 regs->flags |= X86_EFLAGS_TF;
1621 if (test_tsk_thread_flag(current, TIF_BLOCKSTEP))
1622 set_task_blockstep(current, false);
1623
1624 return 0;
1625 }
1626
1627 /*
1628 * If the xol insn itself traps and generates a signal (say,
1629 * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped
1630 * instruction jumps back to its own address. It is assumed that anything
1631 * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
1632 *
1633 * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
1634 * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
1635 * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol().
1636 */
1637 bool arch_uprobe_xol_was_trapped(struct task_struct *t)
1638 {
1639 if (t->thread.trap_nr != UPROBE_TRAP_NR)
1640 return true;
1641
1642 return false;
1643 }
1644
1645 /*
1646 * Called after single-stepping. To avoid the SMP problems that can
1647 * occur when we temporarily put back the original opcode to
1648 * single-step, we single-stepped a copy of the instruction.
1649 *
1650 * This function prepares to resume execution after the single-step.
1651 */
1652 int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
1653 {
1654 struct uprobe_task *utask = current->utask;
1655 bool send_sigtrap = utask->autask.saved_tf;
1656 int err = 0;
1657
1658 WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
1659 current->thread.trap_nr = utask->autask.saved_trap_nr;
1660
1661 if (auprobe->ops->post_xol) {
1662 err = auprobe->ops->post_xol(auprobe, regs);
1663 if (err) {
1664 /*
1665 * Restore ->ip for restart or post mortem analysis.
1666 * ->post_xol() must not return -ERESTART unless this
1667 * is really possible.
1668 */
1669 regs->ip = utask->vaddr;
1670 if (err == -ERESTART)
1671 err = 0;
1672 send_sigtrap = false;
1673 }
1674 }
1675 /*
1676 * arch_uprobe_pre_xol() doesn't save the state of TIF_BLOCKSTEP
1677 * so we can get an extra SIGTRAP if we do not clear TF. We need
1678 * to examine the opcode to make it right.
1679 */
1680 if (send_sigtrap)
1681 send_sig(SIGTRAP, current, 0);
1682
1683 if (!utask->autask.saved_tf)
1684 regs->flags &= ~X86_EFLAGS_TF;
1685
1686 return err;
1687 }
1688
1689 /* callback routine for handling exceptions. */
1690 int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data)
1691 {
1692 struct die_args *args = data;
1693 struct pt_regs *regs = args->regs;
1694 int ret = NOTIFY_DONE;
1695
1696 /* We are only interested in userspace traps */
1697 if (regs && !user_mode(regs))
1698 return NOTIFY_DONE;
1699
1700 switch (val) {
1701 case DIE_INT3:
1702 if (uprobe_pre_sstep_notifier(regs))
1703 ret = NOTIFY_STOP;
1704
1705 break;
1706
1707 case DIE_DEBUG:
1708 if (uprobe_post_sstep_notifier(regs))
1709 ret = NOTIFY_STOP;
1710
1711 break;
1712
1713 default:
1714 break;
1715 }
1716
1717 return ret;
1718 }
1719
1720 /*
1721 * This function gets called when XOL instruction either gets trapped or
1722 * the thread has a fatal signal. Reset the instruction pointer to its
1723 * probed address for the potential restart or for post mortem analysis.
1724 */
1725 void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
1726 {
1727 struct uprobe_task *utask = current->utask;
1728
1729 if (auprobe->ops->abort)
1730 auprobe->ops->abort(auprobe, regs);
1731
1732 current->thread.trap_nr = utask->autask.saved_trap_nr;
1733 regs->ip = utask->vaddr;
1734 /* clear TF if it was set by us in arch_uprobe_pre_xol() */
1735 if (!utask->autask.saved_tf)
1736 regs->flags &= ~X86_EFLAGS_TF;
1737 }
1738
1739 static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
1740 {
1741 if (auprobe->ops->emulate)
1742 return auprobe->ops->emulate(auprobe, regs);
1743 return false;
1744 }
1745
1746 bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
1747 {
1748 bool ret = __skip_sstep(auprobe, regs);
1749 if (ret && (regs->flags & X86_EFLAGS_TF))
1750 send_sig(SIGTRAP, current, 0);
1751 return ret;
1752 }
1753
1754 unsigned long
1755 arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
1756 {
1757 int rasize = sizeof_long(regs), nleft;
1758 unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */
1759
1760 if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
1761 return -1;
1762
1763 /* check whether the address has already been hijacked */
1764 if (orig_ret_vaddr == trampoline_vaddr)
1765 return orig_ret_vaddr;
1766
1767 nleft = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
1768 if (likely(!nleft)) {
1769 if (shstk_update_last_frame(trampoline_vaddr)) {
1770 force_sig(SIGSEGV);
1771 return -1;
1772 }
1773 return orig_ret_vaddr;
1774 }
1775
1776 if (nleft != rasize) {
1777 pr_err("return address clobbered: pid=%d, %%sp=%#lx, %%ip=%#lx\n",
1778 current->pid, regs->sp, regs->ip);
1779
1780 force_sig(SIGSEGV);
1781 }
1782
1783 return -1;
1784 }
1785
1786 bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
1787 struct pt_regs *regs)
1788 {
1789 if (ctx == RP_CHECK_CALL) /* sp was just decremented by "call" insn */
1790 return regs->sp < ret->stack;
1791 else
1792 return regs->sp <= ret->stack;
1793 }
1794
1795 /*
1796 * Heuristic-based check if uprobe is installed at the function entry.
1797 *
1798 * Under the assumption that user code is compiled with frame pointers,
1799 * `push %rbp/%ebp` is a good indicator that we indeed are.
1800 *
1801 * Similarly, `endbr64` (assuming 64-bit mode) is also a common pattern.
1802 * If we get this wrong, captured stack trace might have one extra bogus
1803 * entry, but the rest of stack trace will still be meaningful.
1804 */
1805 bool is_uprobe_at_func_entry(struct pt_regs *regs)
1806 {
1807 struct arch_uprobe *auprobe;
1808
1809 if (!current->utask)
1810 return false;
1811
1812 auprobe = current->utask->auprobe;
1813 if (!auprobe)
1814 return false;
1815
1816 /* push %rbp/%ebp */
1817 if (auprobe->insn[0] == 0x55)
1818 return true;
1819
1820 /* endbr64 (64-bit only) */
1821 if (user_64bit_mode(regs) && is_endbr((u32 *)auprobe->insn))
1822 return true;
1823
1824 return false;
1825 }
1826
1827 #ifdef CONFIG_IA32_EMULATION
1828 unsigned long arch_uprobe_get_xol_area(void)
1829 {
1830 struct thread_info *ti = current_thread_info();
1831 unsigned long vaddr;
1832
1833 /*
1834 * HACK: we are not in a syscall, but x86 get_unmapped_area() paths
1835 * ignore TIF_ADDR32 and rely on in_32bit_syscall() to calculate
1836 * vm_unmapped_area_info.high_limit.
1837 *
1838 * The #ifdef above doesn't cover the CONFIG_X86_X32_ABI=y case,
1839 * but in this case in_32bit_syscall() -> in_x32_syscall() always
1840 * (falsely) returns true because ->orig_ax == -1.
1841 */
1842 if (test_thread_flag(TIF_ADDR32))
1843 ti->status |= TS_COMPAT;
1844 vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
1845 ti->status &= ~TS_COMPAT;
1846
1847 return vaddr;
1848 }
1849 #endif
1850