1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * User-space Probes (UProbes) for x86
4 *
5 * Copyright (C) IBM Corporation, 2008-2011
6 * Authors:
7 * Srikar Dronamraju
8 * Jim Keniston
9 */
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/ptrace.h>
13 #include <linux/uprobes.h>
14 #include <linux/uaccess.h>
15 #include <linux/syscalls.h>
16
17 #include <linux/kdebug.h>
18 #include <asm/processor.h>
19 #include <asm/insn.h>
20 #include <asm/mmu_context.h>
21 #include <asm/nops.h>
22
23 /* Post-execution fixups. */
24
25 /* Adjust IP back to vicinity of actual insn */
26 #define UPROBE_FIX_IP 0x01
27
28 /* Adjust the return address of a call insn */
29 #define UPROBE_FIX_CALL 0x02
30
31 /* Instruction will modify TF, don't change it */
32 #define UPROBE_FIX_SETF 0x04
33
34 #define UPROBE_FIX_RIP_SI 0x08
35 #define UPROBE_FIX_RIP_DI 0x10
36 #define UPROBE_FIX_RIP_BX 0x20
37 #define UPROBE_FIX_RIP_MASK \
38 (UPROBE_FIX_RIP_SI | UPROBE_FIX_RIP_DI | UPROBE_FIX_RIP_BX)
39
40 #define UPROBE_TRAP_NR UINT_MAX
41
42 /* Adaptations for mhiramat x86 decoder v14. */
43 #define OPCODE1(insn) ((insn)->opcode.bytes[0])
44 #define OPCODE2(insn) ((insn)->opcode.bytes[1])
45 #define OPCODE3(insn) ((insn)->opcode.bytes[2])
46 #define MODRM_REG(insn) X86_MODRM_REG((insn)->modrm.value)
47
48 #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
49 (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
50 (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) | \
51 (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) | \
52 (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf)) \
53 << (row % 32))
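/*
 * W(row, b0..bf) packs sixteen 0/1 flags into a 16-bit mask (bit n set
 * iff opcode row+n is good) shifted left by (row % 32), so each pair of
 * rows is OR-ed into one u32 entry of the 256/32 tables below.
 */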
54
55 /*
56 * Good-instruction tables for 32-bit apps. This is non-const and volatile
57 * to keep gcc from statically optimizing it out, as variable_test_bit makes
58 * some versions of gcc think that only *(unsigned long*) is used.
59 *
60 * Opcodes we'll probably never support:
61 * 6c-6f - ins,outs. SEGVs if used in userspace
62 * e4-e7 - in,out imm. SEGVs if used in userspace
63 * ec-ef - in,out acc. SEGVs if used in userspace
64 * cc - int3. SIGTRAP if used in userspace
65 * ce - into. Not used in userspace - no kernel support to make it useful. SEGVs
66 * (why do we support bound (62), then? it's similar, and similarly unused...)
67 * f1 - int1. SIGTRAP if used in userspace
68 * f4 - hlt. SEGVs if used in userspace
69 * fa - cli. SEGVs if used in userspace
70 * fb - sti. SEGVs if used in userspace
71 *
72 * Opcodes which need some work to be supported:
73 * 07,17,1f - pop es/ss/ds
74 * Normally not used in userspace, but would execute if used.
75 * Can cause GP or stack exception if tries to load wrong segment descriptor.
76 * We hesitate to run them under single step since kernel's handling
77 * of userspace single-stepping (TF flag) is fragile.
78 * We can easily refuse to support push es/cs/ss/ds (06/0e/16/1e)
79 * on the same grounds that they are never used.
80 * cd - int N.
81 * Used by userspace for "int 80" syscall entry. (Other "int N"
82 * cause GP -> SEGV since their IDT gates don't allow calls from CPL 3).
83 * Not supported since kernel's handling of userspace single-stepping
84 * (TF flag) is fragile.
85 * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad
86 */
87 #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
88 static volatile u32 good_insns_32[256 / 32] = {
89 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
90 /* ---------------------------------------------- */
91 W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 00 */
92 W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
93 W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
94 W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
95 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
96 W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
97 W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
98 W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
99 W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
100 W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
101 W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
102 W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
103 W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
104 W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
105 W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
106 W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1) /* f0 */
107 /* ---------------------------------------------- */
108 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
109 };
110 #else
111 #define good_insns_32 NULL
112 #endif
113
114 /* Good-instruction tables for 64-bit apps.
115 *
116 * Genuinely invalid opcodes:
117 * 06,07 - formerly push/pop es
118 * 0e - formerly push cs
119 * 16,17 - formerly push/pop ss
120 * 1e,1f - formerly push/pop ds
121 * 27,2f,37,3f - formerly daa/das/aaa/aas
122 * 60,61 - formerly pusha/popa
123 * 62 - formerly bound. EVEX prefix for AVX512 (not yet supported)
124 * 82 - formerly redundant encoding of Group1
125 * 9a - formerly call seg:ofs
126 * ce - formerly into
127 * d4,d5 - formerly aam/aad
128 * d6 - formerly undocumented salc
129 * ea - formerly jmp seg:ofs
130 *
131 * Opcodes we'll probably never support:
132 * 6c-6f - ins,outs. SEGVs if used in userspace
133 * e4-e7 - in,out imm. SEGVs if used in userspace
134 * ec-ef - in,out acc. SEGVs if used in userspace
135 * cc - int3. SIGTRAP if used in userspace
136 * f1 - int1. SIGTRAP if used in userspace
137 * f4 - hlt. SEGVs if used in userspace
138 * fa - cli. SEGVs if used in userspace
139 * fb - sti. SEGVs if used in userspace
140 *
141 * Opcodes which need some work to be supported:
142 * cd - int N.
143 * Used by userspace for "int 80" syscall entry. (Other "int N"
144 * cause GP -> SEGV since their IDT gates don't allow calls from CPL 3).
145 * Not supported since kernel's handling of userspace single-stepping
146 * (TF flag) is fragile.
147 * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad
148 */
149 #if defined(CONFIG_X86_64)
150 static volatile u32 good_insns_64[256 / 32] = {
151 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
152 /* ---------------------------------------------- */
153 W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* 00 */
154 W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */
155 W(0x20, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 20 */
156 W(0x30, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 30 */
157 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
158 W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
159 W(0x60, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
160 W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
161 W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
162 W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1) , /* 90 */
163 W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
164 W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
165 W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
166 W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
167 W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0) | /* e0 */
168 W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1) /* f0 */
169 /* ---------------------------------------------- */
170 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
171 };
172 #else
173 #define good_insns_64 NULL
174 #endif
175
176 /* Using this for both 64-bit and 32-bit apps.
177 * Opcodes we don't support:
178 * 0f 00 - SLDT/STR/LLDT/LTR/VERR/VERW/-/- group. System insns
179 * 0f 01 - SGDT/SIDT/LGDT/LIDT/SMSW/-/LMSW/INVLPG group.
180 * Also encodes tons of other system insns if mod=11.
181 * Some are in fact non-system: xend, xtest, rdtscp, maybe more
182 * 0f 05 - syscall
183 * 0f 06 - clts (CPL0 insn)
184 * 0f 07 - sysret
185 * 0f 08 - invd (CPL0 insn)
186 * 0f 09 - wbinvd (CPL0 insn)
187 * 0f 0b - ud2
188 * 0f 30 - wrmsr (CPL0 insn) (then why is rdmsr allowed? it's also a CPL0 insn)
189 * 0f 34 - sysenter
190 * 0f 35 - sysexit
191 * 0f 37 - getsec
192 * 0f 78 - vmread (Intel VMX. CPL0 insn)
193 * 0f 79 - vmwrite (Intel VMX. CPL0 insn)
194 * Note: with prefixes, these two opcodes are
195 * extrq/insertq/AVX512 convert vector ops.
196 * 0f ae - group15: [f]xsave,[f]xrstor,[v]{ld,st}mxcsr,clflush[opt],
197 * {rd,wr}{fs,gs}base,{s,l,m}fence.
198 * Why? They are all user-executable.
199 */
200 static volatile u32 good_2byte_insns[256 / 32] = {
201 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
202 /* ---------------------------------------------- */
203 W(0x00, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1) | /* 00 */
204 W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
205 W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
206 W(0x30, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
207 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
208 W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
209 W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
210 W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* 70 */
211 W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
212 W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
213 W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
214 W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
215 W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
216 W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
217 W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
218 W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) /* f0 */
219 /* ---------------------------------------------- */
220 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
221 };
222 #undef W
223
224 /*
225 * opcodes we may need to refine support for:
226 *
227 * 0f - 2-byte instructions: For many of these instructions, the validity
228 * depends on the prefix and/or the reg field. On such instructions, we
229 * just consider the opcode combination valid if it corresponds to any
230 * valid instruction.
231 *
232 * 8f - Group 1 - only reg = 0 is OK
233 * c6-c7 - Group 11 - only reg = 0 is OK
234 * d9-df - fpu insns with some illegal encodings
235 * f2, f3 - repnz, repz prefixes. These are also the first byte for
236 * certain floating-point instructions, such as addsd.
237 *
238 * fe - Group 4 - only reg = 0 or 1 is OK
239 * ff - Group 5 - only reg = 0-6 is OK
240 *
241 * others -- Do we need to support these?
242 *
243 * 0f - (floating-point?) prefetch instructions
244 * 07, 17, 1f - pop es, pop ss, pop ds
245 * 26, 2e, 36, 3e - es:, cs:, ss:, ds: segment prefixes --
246 * but 64 and 65 (fs: and gs:) seem to be used, so we support them
247 * 67 - addr16 prefix
248 * ce - into
249 * f0 - lock prefix
250 */
251
252 /*
253 * TODO:
254 * - Where necessary, examine the modrm byte and allow only valid instructions
255 * in the different Groups and fpu instructions.
256 */
257
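/*
 * Reject instructions that carry legacy es/cs/ds/ss segment-override
 * prefixes or the lock prefix: such instructions cannot be probed.
 */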
258 static bool is_prefix_bad(struct insn *insn)
259 {
260 insn_byte_t p;
261 int i;
262
263 for_each_insn_prefix(insn, i, p) {
264 insn_attr_t attr;
265
266 attr = inat_get_opcode_attribute(p);
267 switch (attr) {
268 case INAT_MAKE_PREFIX(INAT_PFX_ES):
269 case INAT_MAKE_PREFIX(INAT_PFX_CS):
270 case INAT_MAKE_PREFIX(INAT_PFX_DS):
271 case INAT_MAKE_PREFIX(INAT_PFX_SS):
272 case INAT_MAKE_PREFIX(INAT_PFX_LOCK):
273 return true;
274 }
275 }
276 return false;
277 }
278
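/*
 * Decode the copied instruction and check it against the good-insn
 * bitmaps above.  Returns 0 if the instruction can be probed, -ENOEXEC
 * if it cannot be decoded, -ENOTSUPP otherwise.
 */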
279 static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64)
280 {
281 enum insn_mode m = x86_64 ? INSN_MODE_64 : INSN_MODE_32;
282 u32 volatile *good_insns;
283 int ret;
284
285 ret = insn_decode(insn, auprobe->insn, sizeof(auprobe->insn), m);
286 if (ret < 0)
287 return -ENOEXEC;
288
289 if (is_prefix_bad(insn))
290 return -ENOTSUPP;
291
292 /* We should not singlestep on the exception masking instructions */
293 if (insn_masking_exception(insn))
294 return -ENOTSUPP;
295
296 if (x86_64)
297 good_insns = good_insns_64;
298 else
299 good_insns = good_insns_32;
300
301 if (test_bit(OPCODE1(insn), (unsigned long *)good_insns))
302 return 0;
303
304 if (insn->opcode.nbytes == 2) {
305 if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
306 return 0;
307 }
308
309 return -ENOTSUPP;
310 }
311
312 #ifdef CONFIG_X86_64
313
314 struct uretprobe_syscall_args {
315 unsigned long r11;
316 unsigned long cx;
317 unsigned long ax;
318 };
319
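/*
 * The uretprobe syscall trampoline: preserve the syscall-clobbered
 * registers (ax, cx, r11) on the stack, enter the kernel through the
 * uretprobe syscall, then restore r11/cx and return through the
 * original return address that sys_uretprobe stored in the ax slot.
 */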
320 asm (
321 ".pushsection .rodata\n"
322 ".global uretprobe_trampoline_entry\n"
323 "uretprobe_trampoline_entry:\n"
324 "push %rax\n"
325 "push %rcx\n"
326 "push %r11\n"
327 "mov $" __stringify(__NR_uretprobe) ", %rax\n"
328 "syscall\n"
329 ".global uretprobe_syscall_check\n"
330 "uretprobe_syscall_check:\n"
331 "pop %r11\n"
332 "pop %rcx\n"
333 /*
334 * The uretprobe syscall replaces stored %rax value with final
335 * return address, so we don't restore %rax in here and just
336 * call ret.
337 */
338 "ret\n"
339 "int3\n"
340 ".global uretprobe_trampoline_end\n"
341 "uretprobe_trampoline_end:\n"
342 ".popsection\n"
343 );
344
345 extern u8 uretprobe_trampoline_entry[];
346 extern u8 uretprobe_trampoline_end[];
347 extern u8 uretprobe_syscall_check[];
348
349 void *arch_uretprobe_trampoline(unsigned long *psize)
350 {
351 static uprobe_opcode_t insn = UPROBE_SWBP_INSN;
352 struct pt_regs *regs = task_pt_regs(current);
353
354 /*
355 * At the moment the uretprobe syscall trampoline is supported
356 * only for native 64-bit processes; compat processes still use the
357 * standard breakpoint.
358 */
359 if (user_64bit_mode(regs)) {
360 *psize = uretprobe_trampoline_end - uretprobe_trampoline_entry;
361 return uretprobe_trampoline_entry;
362 }
363
364 *psize = UPROBE_SWBP_INSN_SIZE;
365 return &insn;
366 }
367
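/*
 * The only ip from which sys_uretprobe may legally be entered: right
 * after the syscall instruction inside the user-space trampoline.
 */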
368 static unsigned long trampoline_check_ip(unsigned long tramp)
369 {
370 return tramp + (uretprobe_syscall_check - uretprobe_trampoline_entry);
371 }
372
373 SYSCALL_DEFINE0(uretprobe)
374 {
375 struct pt_regs *regs = task_pt_regs(current);
376 struct uretprobe_syscall_args args;
377 unsigned long err, ip, sp, tramp;
378
379 /* If there's no trampoline, we are called from the wrong place. */
380 tramp = uprobe_get_trampoline_vaddr();
381 if (unlikely(tramp == UPROBE_NO_TRAMPOLINE_VADDR))
382 goto sigill;
383
384 /* Make sure the ip matches the only allowed sys_uretprobe caller. */
385 if (unlikely(regs->ip != trampoline_check_ip(tramp)))
386 goto sigill;
387
388 err = copy_from_user(&args, (void __user *)regs->sp, sizeof(args));
389 if (err)
390 goto sigill;
391
392 /* expose the "right" values of r11/cx/ax/sp to uprobe_consumer/s */
393 regs->r11 = args.r11;
394 regs->cx = args.cx;
395 regs->ax = args.ax;
396 regs->sp += sizeof(args);
397 regs->orig_ax = -1;
398
399 ip = regs->ip;
400 sp = regs->sp;
401
402 uprobe_handle_trampoline(regs);
403
404 /*
405 * If one of the uprobe consumers changed sp, we can do nothing and
406 * just return via iret.
407 * The same applies if shadow stack is enabled, in which case we must
408 * skip the return through the user-space stack address.
409 */
410 if (regs->sp != sp || shstk_is_enabled())
411 return regs->ax;
412 regs->sp -= sizeof(args);
413
414 /* in case a uprobe_consumer changed r11/cx */
415 args.r11 = regs->r11;
416 args.cx = regs->cx;
417
418 /*
419 * The ax register is passed through as the return value, so we can use
420 * its slot on the stack for the ip value and jump to it through the
421 * trampoline's ret instruction.
422 */
423 args.ax = regs->ip;
424 regs->ip = ip;
425
426 err = copy_to_user((void __user *)regs->sp, &args, sizeof(args));
427 if (err)
428 goto sigill;
429
430 /* ensure sysret, see do_syscall_64() */
431 regs->r11 = regs->flags;
432 regs->cx = regs->ip;
433
434 return regs->ax;
435
436 sigill:
437 force_sig(SIGILL);
438 return -1;
439 }
440
441 /*
442 * If arch_uprobe->insn doesn't use rip-relative addressing, return
443 * immediately. Otherwise, rewrite the instruction so that it accesses
444 * its memory operand indirectly through a scratch register. Set
445 * defparam->fixups accordingly. (The contents of the scratch register
446 * will be saved before we single-step the modified instruction,
447 * and restored afterward).
448 *
449 * We do this because a rip-relative instruction can access only a
450 * relatively small area (+/- 2 GB from the instruction), and the XOL
451 * area typically lies beyond that area. At least for instructions
452 * that store to memory, we can't execute the original instruction
453 * and "fix things up" later, because the misdirected store could be
454 * disastrous.
455 *
456 * Some useful facts about rip-relative instructions:
457 *
458 * - There's always a modrm byte with bit layout "00 reg 101".
459 * - There's never a SIB byte.
460 * - The displacement is always 4 bytes.
461 * - REX.B=1 bit in REX prefix, which normally extends r/m field,
462 * has no effect on rip-relative mode. It doesn't make modrm byte
463 * with r/m=101 refer to register 1101 = R13.
464 */
465 static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
466 {
467 u8 *cursor;
468 u8 reg;
469 u8 reg2;
470
471 if (!insn_rip_relative(insn))
472 return;
473
474 /*
475 * insn_rip_relative() would have decoded rex_prefix, vex_prefix, modrm.
476 * Clear REX.b bit (extension of MODRM.rm field):
477 * we want to encode low numbered reg, not r8+.
478 */
479 if (insn->rex_prefix.nbytes) {
480 cursor = auprobe->insn + insn_offset_rex_prefix(insn);
481 /* REX byte has 0100wrxb layout, clearing REX.b bit */
482 *cursor &= 0xfe;
483 }
484 /*
485 * Similar treatment for VEX3/EVEX prefix.
486 * TODO: add XOP treatment when insn decoder supports them
487 */
488 if (insn->vex_prefix.nbytes >= 3) {
489 /*
490 * vex2: c5 rvvvvLpp (has no b bit)
491 * vex3/xop: c4/8f rxbmmmmm wvvvvLpp
492 * evex: 62 rxbR00mm wvvvv1pp zllBVaaa
493 * Setting VEX3.b (setting because it has inverted meaning).
494 * Setting EVEX.x since (in non-SIB encoding) EVEX.x
495 * is the 4th bit of MODRM.rm, and needs the same treatment.
496 * For VEX3-encoded insns, VEX3.x value has no effect in
497 * non-SIB encoding, the change is superfluous but harmless.
498 */
499 cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1;
500 *cursor |= 0x60;
501 }
502
503 /*
504 * Convert from rip-relative addressing to register-relative addressing
505 * via a scratch register.
506 *
507 * This is tricky since there are insns with modrm byte
508 * which also use registers not encoded in modrm byte:
509 * [i]div/[i]mul: implicitly use dx:ax
510 * shift ops: implicitly use cx
511 * cmpxchg: implicitly uses ax
512 * cmpxchg8/16b: implicitly uses dx:ax and bx:cx
513 * Encoding: 0f c7/1 modrm
514 * The code below thinks that reg=1 (cx), chooses si as scratch.
515 * mulx: implicitly uses dx: mulx r/m,r1,r2 does r1:r2 = dx * r/m.
516 * First appeared in Haswell (BMI2 insn). It is vex-encoded.
517 * Example where none of bx,cx,dx can be used as scratch reg:
518 * c4 e2 63 f6 0d disp32 mulx disp32(%rip),%ebx,%ecx
519 * [v]pcmpistri: implicitly uses cx, xmm0
520 * [v]pcmpistrm: implicitly uses xmm0
521 * [v]pcmpestri: implicitly uses ax, dx, cx, xmm0
522 * [v]pcmpestrm: implicitly uses ax, dx, xmm0
523 * Evil SSE4.2 string comparison ops from hell.
524 * maskmovq/[v]maskmovdqu: implicitly uses (ds:rdi) as destination.
525 * Encoding: 0f f7 modrm, 66 0f f7 modrm, vex-encoded: c5 f9 f7 modrm.
526 * Store op1, byte-masked by op2 msb's in each byte, to (ds:rdi).
527 * AMD says it has no 3-operand form (vex.vvvv must be 1111)
528 * and that it can have only register operands, not mem
529 * (its modrm byte must have mode=11).
530 * If these restrictions are ever lifted,
531 * we'll need code to prevent selection of di as scratch reg!
532 *
533 * Summary: I don't know any insns with modrm byte which
534 * use SI register implicitly. DI register is used only
535 * by one insn (maskmovq) and BX register is used
536 * only by one too (cmpxchg8b).
537 * BP is stack-segment based (may be a problem?).
538 * AX, DX, CX are off-limits (many implicit users).
539 * SP is unusable (it's stack pointer - think about "pop mem";
540 * also, rsp+disp32 needs sib encoding -> insn length change).
541 */
542
543 reg = MODRM_REG(insn); /* Fetch modrm.reg */
544 reg2 = 0xff; /* Fetch vex.vvvv */
545 if (insn->vex_prefix.nbytes)
546 reg2 = insn->vex_prefix.bytes[2];
547 /*
548 * TODO: add XOP vvvv reading.
549 *
550 * vex.vvvv field is in bits 6-3, bits are inverted.
551 * But in 32-bit mode, high-order bit may be ignored.
552 * Therefore, let's consider only 3 low-order bits.
553 */
554 reg2 = ((reg2 >> 3) & 0x7) ^ 0x7;
555 /*
556 * Register numbering is ax,cx,dx,bx, sp,bp,si,di, r8..r15.
557 *
558 * Choose scratch reg. Order is important: must not select bx
559 * if we can use si (cmpxchg8b case!)
560 */
561 if (reg != 6 && reg2 != 6) {
562 reg2 = 6;
563 auprobe->defparam.fixups |= UPROBE_FIX_RIP_SI;
564 } else if (reg != 7 && reg2 != 7) {
565 reg2 = 7;
566 auprobe->defparam.fixups |= UPROBE_FIX_RIP_DI;
567 /* TODO (paranoia): force maskmovq to not use di */
568 } else {
569 reg2 = 3;
570 auprobe->defparam.fixups |= UPROBE_FIX_RIP_BX;
571 }
572 /*
573 * Point cursor at the modrm byte. The next 4 bytes are the
574 * displacement. Beyond the displacement, for some instructions,
575 * is the immediate operand.
576 */
577 cursor = auprobe->insn + insn_offset_modrm(insn);
578 /*
579 * Change modrm from "00 reg 101" to "10 reg reg2". Example:
580 * 89 05 disp32 mov %eax,disp32(%rip) becomes
581 * 89 86 disp32 mov %eax,disp32(%rsi)
582 */
583 *cursor = 0x80 | (reg << 3) | reg2;
584 }
585
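/* Return the pt_regs slot of the scratch register chosen by riprel_analyze(). */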
586 static inline unsigned long *
587 scratch_reg(struct arch_uprobe *auprobe, struct pt_regs *regs)
588 {
589 if (auprobe->defparam.fixups & UPROBE_FIX_RIP_SI)
590 return &regs->si;
591 if (auprobe->defparam.fixups & UPROBE_FIX_RIP_DI)
592 return &regs->di;
593 return &regs->bx;
594 }
595
596 /*
597 * If we're emulating a rip-relative instruction, save the contents
598 * of the scratch register and store the target address in that register.
599 */
600 static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
601 {
602 if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) {
603 struct uprobe_task *utask = current->utask;
604 unsigned long *sr = scratch_reg(auprobe, regs);
605
606 utask->autask.saved_scratch_register = *sr;
607 *sr = utask->vaddr + auprobe->defparam.ilen;
608 }
609 }
610
611 static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
612 {
613 if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) {
614 struct uprobe_task *utask = current->utask;
615 unsigned long *sr = scratch_reg(auprobe, regs);
616
617 *sr = utask->autask.saved_scratch_register;
618 }
619 }
620
621 static int tramp_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
622 {
623 return -EPERM;
624 }
625
626 static struct page *tramp_mapping_pages[2] __ro_after_init;
627
628 static struct vm_special_mapping tramp_mapping = {
629 .name = "[uprobes-trampoline]",
630 .mremap = tramp_mremap,
631 .pages = tramp_mapping_pages,
632 };
633
634 struct uprobe_trampoline {
635 struct hlist_node node;
636 unsigned long vaddr;
637 };
638
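/* Is vtramp within rel32 range of a 5-byte call placed at vaddr? */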
639 static bool is_reachable_by_call(unsigned long vtramp, unsigned long vaddr)
640 {
641 long delta = (long)(vaddr + 5 - vtramp);
642
643 return delta >= INT_MIN && delta <= INT_MAX;
644 }
645
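/*
 * Find a free page within rel32 reach of the probed address: search
 * upwards and downwards from the call site and return whichever
 * candidate is closer, or -ENOMEM if neither search succeeds.
 */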
646 static unsigned long find_nearest_trampoline(unsigned long vaddr)
647 {
648 struct vm_unmapped_area_info info = {
649 .length = PAGE_SIZE,
650 .align_mask = ~PAGE_MASK,
651 };
652 unsigned long low_limit, high_limit;
653 unsigned long low_tramp, high_tramp;
654 unsigned long call_end = vaddr + 5;
655
656 if (check_add_overflow(call_end, INT_MIN, &low_limit))
657 low_limit = PAGE_SIZE;
658
659 high_limit = call_end + INT_MAX;
660
661 /* Search up from the caller address. */
662 info.low_limit = call_end;
663 info.high_limit = min(high_limit, TASK_SIZE);
664 high_tramp = vm_unmapped_area(&info);
665
666 /* Search down from the caller address. */
667 info.low_limit = max(low_limit, PAGE_SIZE);
668 info.high_limit = call_end;
669 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
670 low_tramp = vm_unmapped_area(&info);
671
672 if (IS_ERR_VALUE(high_tramp) && IS_ERR_VALUE(low_tramp))
673 return -ENOMEM;
674 if (IS_ERR_VALUE(high_tramp))
675 return low_tramp;
676 if (IS_ERR_VALUE(low_tramp))
677 return high_tramp;
678
679 /* Return address that's closest to the caller address. */
680 if (call_end - low_tramp < high_tramp - call_end)
681 return low_tramp;
682 return high_tramp;
683 }
684
685 static struct uprobe_trampoline *create_uprobe_trampoline(unsigned long vaddr)
686 {
687 struct pt_regs *regs = task_pt_regs(current);
688 struct mm_struct *mm = current->mm;
689 struct uprobe_trampoline *tramp;
690 struct vm_area_struct *vma;
691
692 if (!user_64bit_mode(regs))
693 return NULL;
694
695 vaddr = find_nearest_trampoline(vaddr);
696 if (IS_ERR_VALUE(vaddr))
697 return NULL;
698
699 tramp = kzalloc(sizeof(*tramp), GFP_KERNEL);
700 if (unlikely(!tramp))
701 return NULL;
702
703 tramp->vaddr = vaddr;
704 vma = _install_special_mapping(mm, tramp->vaddr, PAGE_SIZE,
705 VM_READ|VM_EXEC|VM_MAYEXEC|VM_MAYREAD|VM_DONTCOPY|VM_IO,
706 &tramp_mapping);
707 if (IS_ERR(vma)) {
708 kfree(tramp);
709 return NULL;
710 }
711 return tramp;
712 }
713
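/*
 * Reuse a per-mm trampoline that is already reachable from vaddr, or
 * create and register a new one.  *new tells the caller whether the
 * returned trampoline was freshly created.
 */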
714 static struct uprobe_trampoline *get_uprobe_trampoline(unsigned long vaddr, bool *new)
715 {
716 struct uprobes_state *state = &current->mm->uprobes_state;
717 struct uprobe_trampoline *tramp = NULL;
718
719 if (vaddr > TASK_SIZE || vaddr < PAGE_SIZE)
720 return NULL;
721
722 hlist_for_each_entry(tramp, &state->head_tramps, node) {
723 if (is_reachable_by_call(tramp->vaddr, vaddr)) {
724 *new = false;
725 return tramp;
726 }
727 }
728
729 tramp = create_uprobe_trampoline(vaddr);
730 if (!tramp)
731 return NULL;
732
733 *new = true;
734 hlist_add_head(&tramp->node, &state->head_tramps);
735 return tramp;
736 }
737
738 static void destroy_uprobe_trampoline(struct uprobe_trampoline *tramp)
739 {
740 /*
741 * We do not unmap and release the uprobe trampoline page itself,
742 * because there's no easy way to make sure none of the threads
743 * is still inside the trampoline.
744 */
745 hlist_del(&tramp->node);
746 kfree(tramp);
747 }
748
749 void arch_uprobe_init_state(struct mm_struct *mm)
750 {
751 INIT_HLIST_HEAD(&mm->uprobes_state.head_tramps);
752 }
753
754 void arch_uprobe_clear_state(struct mm_struct *mm)
755 {
756 struct uprobes_state *state = &mm->uprobes_state;
757 struct uprobe_trampoline *tramp;
758 struct hlist_node *n;
759
760 hlist_for_each_entry_safe(tramp, n, &state->head_tramps, node)
761 destroy_uprobe_trampoline(tramp);
762 }
763
764 static bool __in_uprobe_trampoline(unsigned long ip)
765 {
766 struct vm_area_struct *vma = vma_lookup(current->mm, ip);
767
768 return vma && vma_is_special_mapping(vma, &tramp_mapping);
769 }
770
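/*
 * Check whether ip lies inside the [uprobes-trampoline] special mapping,
 * first speculatively under RCU and, if the mmap sequence count changed,
 * again under mmap_read_lock().
 */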
771 static bool in_uprobe_trampoline(unsigned long ip)
772 {
773 struct mm_struct *mm = current->mm;
774 bool found, retry = true;
775 unsigned int seq;
776
777 rcu_read_lock();
778 if (mmap_lock_speculate_try_begin(mm, &seq)) {
779 found = __in_uprobe_trampoline(ip);
780 retry = mmap_lock_speculate_retry(mm, seq);
781 }
782 rcu_read_unlock();
783
784 if (retry) {
785 mmap_read_lock(mm);
786 found = __in_uprobe_trampoline(ip);
787 mmap_read_unlock(mm);
788 }
789 return found;
790 }
791
792 /*
793 * See uprobe syscall trampoline; the call to the trampoline will push
794 * the return address on the stack, the trampoline itself then pushes
795 * cx, r11 and ax.
796 */
797 struct uprobe_syscall_args {
798 unsigned long ax;
799 unsigned long r11;
800 unsigned long cx;
801 unsigned long retaddr;
802 };
803
804 SYSCALL_DEFINE0(uprobe)
805 {
806 struct pt_regs *regs = task_pt_regs(current);
807 struct uprobe_syscall_args args;
808 unsigned long ip, sp, sret;
809 int err;
810
811 /* Allow execution only from uprobe trampolines. */
812 if (!in_uprobe_trampoline(regs->ip))
813 return -ENXIO;
814
815 err = copy_from_user(&args, (void __user *)regs->sp, sizeof(args));
816 if (err)
817 goto sigill;
818
819 ip = regs->ip;
820
821 /*
822 * expose the "right" values of ax/r11/cx/ip/sp to uprobe_consumer/s, plus:
823 * - adjust ip to the probe address (the call insn saved the next instruction address)
824 * - adjust sp to the probe's stack frame (check trampoline code)
825 */
826 regs->ax = args.ax;
827 regs->r11 = args.r11;
828 regs->cx = args.cx;
829 regs->ip = args.retaddr - 5;
830 regs->sp += sizeof(args);
831 regs->orig_ax = -1;
832
833 sp = regs->sp;
834
835 err = shstk_pop((u64 *)&sret);
836 if (err == -EFAULT || (!err && sret != args.retaddr))
837 goto sigill;
838
839 handle_syscall_uprobe(regs, regs->ip);
840
841 /*
842 * If one of the uprobe consumers changed sp, we can do nothing and
843 * just return via iret.
844 */
845 if (regs->sp != sp) {
846 /* skip the trampoline call */
847 if (args.retaddr - 5 == regs->ip)
848 regs->ip += 5;
849 return regs->ax;
850 }
851
852 regs->sp -= sizeof(args);
853
854 /* in case a uprobe_consumer changed ax/r11/cx */
855 args.ax = regs->ax;
856 args.r11 = regs->r11;
857 args.cx = regs->cx;
858
859 /* keep return address unless we are instructed otherwise */
860 if (args.retaddr - 5 != regs->ip)
861 args.retaddr = regs->ip;
862
863 if (shstk_push(args.retaddr) == -EFAULT)
864 goto sigill;
865
866 regs->ip = ip;
867
868 err = copy_to_user((void __user *)regs->sp, &args, sizeof(args));
869 if (err)
870 goto sigill;
871
872 /* ensure sysret, see do_syscall_64() */
873 regs->r11 = regs->flags;
874 regs->cx = regs->ip;
875 return 0;
876
877 sigill:
878 force_sig(SIGILL);
879 return -1;
880 }
881
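/*
 * The optimized-uprobe trampoline page, mapped into tasks as
 * "[uprobes-trampoline]": preserve cx/r11/ax, enter the kernel through
 * the uprobe syscall, then restore the registers and return.
 */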
882 asm (
883 ".pushsection .rodata\n"
884 ".balign " __stringify(PAGE_SIZE) "\n"
885 "uprobe_trampoline_entry:\n"
886 "push %rcx\n"
887 "push %r11\n"
888 "push %rax\n"
889 "mov $" __stringify(__NR_uprobe) ", %rax\n"
890 "syscall\n"
891 "pop %rax\n"
892 "pop %r11\n"
893 "pop %rcx\n"
894 "ret\n"
895 "int3\n"
896 ".balign " __stringify(PAGE_SIZE) "\n"
897 ".popsection\n"
898 );
899
900 extern u8 uprobe_trampoline_entry[];
901
902 static int __init arch_uprobes_init(void)
903 {
904 tramp_mapping_pages[0] = virt_to_page(uprobe_trampoline_entry);
905 return 0;
906 }
907
908 late_initcall(arch_uprobes_init);
909
910 enum {
911 EXPECT_SWBP,
912 EXPECT_CALL,
913 };
914
915 struct write_opcode_ctx {
916 unsigned long base;
917 int expect;
918 };
919
920 static int is_call_insn(uprobe_opcode_t *insn)
921 {
922 return *insn == CALL_INSN_OPCODE;
923 }
924
925 /*
926 * Verification callback used by int3_update uprobe_write calls to make sure
927 * the underlying instruction is as expected - either int3 or call.
928 */
929 static int verify_insn(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode,
930 int nbytes, void *data)
931 {
932 struct write_opcode_ctx *ctx = data;
933 uprobe_opcode_t old_opcode[5];
934
935 uprobe_copy_from_page(page, ctx->base, (uprobe_opcode_t *) &old_opcode, 5);
936
937 switch (ctx->expect) {
938 case EXPECT_SWBP:
939 if (is_swbp_insn(&old_opcode[0]))
940 return 1;
941 break;
942 case EXPECT_CALL:
943 if (is_call_insn(&old_opcode[0]))
944 return 1;
945 break;
946 }
947
948 return -1;
949 }
950
951 /*
952 * Modify multi-byte instructions by using INT3 breakpoints on SMP.
953 * We completely avoid using stop_machine() here, and achieve the
954 * synchronization using INT3 breakpoints and SMP cross-calls.
955 * (borrowed comment from smp_text_poke_batch_finish)
956 *
957 * The way it is done:
958 * - Add an INT3 trap to the address that will be patched
959 * - SMP sync all CPUs
960 * - Update all but the first byte of the patched range
961 * - SMP sync all CPUs
962 * - Replace the first byte (INT3) by the first byte of the replacing opcode
963 * - SMP sync all CPUs
964 */
965 static int int3_update(struct arch_uprobe *auprobe, struct vm_area_struct *vma,
966 unsigned long vaddr, char *insn, bool optimize)
967 {
968 uprobe_opcode_t int3 = UPROBE_SWBP_INSN;
969 struct write_opcode_ctx ctx = {
970 .base = vaddr,
971 };
972 int err;
973
974 /*
975 * Write int3 trap.
976 *
977 * The swbp_optimize path comes with breakpoint already installed,
978 * so we can skip this step for optimize == true.
979 */
980 if (!optimize) {
981 ctx.expect = EXPECT_CALL;
982 err = uprobe_write(auprobe, vma, vaddr, &int3, 1, verify_insn,
983 true /* is_register */, false /* do_update_ref_ctr */,
984 &ctx);
985 if (err)
986 return err;
987 }
988
989 smp_text_poke_sync_each_cpu();
990
991 /* Write all but the first byte of the patched range. */
992 ctx.expect = EXPECT_SWBP;
993 err = uprobe_write(auprobe, vma, vaddr + 1, insn + 1, 4, verify_insn,
994 true /* is_register */, false /* do_update_ref_ctr */,
995 &ctx);
996 if (err)
997 return err;
998
999 smp_text_poke_sync_each_cpu();
1000
1001 /*
1002 * Write first byte.
1003 *
1004 * The swbp_unoptimize needs to finish uprobe removal together
1005 * with ref_ctr update, using uprobe_write with proper flags.
1006 */
1007 err = uprobe_write(auprobe, vma, vaddr, insn, 1, verify_insn,
1008 optimize /* is_register */, !optimize /* do_update_ref_ctr */,
1009 &ctx);
1010 if (err)
1011 return err;
1012
1013 smp_text_poke_sync_each_cpu();
1014 return 0;
1015 }
1016
1017 static int swbp_optimize(struct arch_uprobe *auprobe, struct vm_area_struct *vma,
1018 unsigned long vaddr, unsigned long tramp)
1019 {
1020 u8 call[5];
1021
1022 __text_gen_insn(call, CALL_INSN_OPCODE, (const void *) vaddr,
1023 (const void *) tramp, CALL_INSN_SIZE);
1024 return int3_update(auprobe, vma, vaddr, call, true /* optimize */);
1025 }
1026
1027 static int swbp_unoptimize(struct arch_uprobe *auprobe, struct vm_area_struct *vma,
1028 unsigned long vaddr)
1029 {
1030 return int3_update(auprobe, vma, vaddr, auprobe->insn, false /* optimize */);
1031 }
1032
1033 static int copy_from_vaddr(struct mm_struct *mm, unsigned long vaddr, void *dst, int len)
1034 {
1035 unsigned int gup_flags = FOLL_FORCE|FOLL_SPLIT_PMD;
1036 struct vm_area_struct *vma;
1037 struct page *page;
1038
1039 page = get_user_page_vma_remote(mm, vaddr, gup_flags, &vma);
1040 if (IS_ERR(page))
1041 return PTR_ERR(page);
1042 uprobe_copy_from_page(page, vaddr, dst, len);
1043 put_page(page);
1044 return 0;
1045 }
1046
1047 static bool __is_optimized(uprobe_opcode_t *insn, unsigned long vaddr)
1048 {
1049 struct __packed __arch_relative_insn {
1050 u8 op;
1051 s32 raddr;
1052 } *call = (struct __arch_relative_insn *) insn;
1053
1054 if (!is_call_insn(insn))
1055 return false;
1056 return __in_uprobe_trampoline(vaddr + 5 + call->raddr);
1057 }
1058
1059 static int is_optimized(struct mm_struct *mm, unsigned long vaddr)
1060 {
1061 uprobe_opcode_t insn[5];
1062 int err;
1063
1064 err = copy_from_vaddr(mm, vaddr, &insn, 5);
1065 if (err)
1066 return err;
1067 return __is_optimized((uprobe_opcode_t *)&insn, vaddr);
1068 }
1069
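/*
 * Optimize only probes that were found optimizable at analysis time and
 * for which no previous optimization attempt has failed.
 */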
1070 static bool should_optimize(struct arch_uprobe *auprobe)
1071 {
1072 return !test_bit(ARCH_UPROBE_FLAG_OPTIMIZE_FAIL, &auprobe->flags) &&
1073 test_bit(ARCH_UPROBE_FLAG_CAN_OPTIMIZE, &auprobe->flags);
1074 }
1075
1076 int set_swbp(struct arch_uprobe *auprobe, struct vm_area_struct *vma,
1077 unsigned long vaddr)
1078 {
1079 if (should_optimize(auprobe)) {
1080 /*
1081 * We could race with another thread that already optimized the probe,
1082 * so let's not overwrite it with int3 again in this case.
1083 */
1084 int ret = is_optimized(vma->vm_mm, vaddr);
1085 if (ret < 0)
1086 return ret;
1087 if (ret)
1088 return 0;
1089 }
1090 return uprobe_write_opcode(auprobe, vma, vaddr, UPROBE_SWBP_INSN,
1091 true /* is_register */);
1092 }
1093
1094 int set_orig_insn(struct arch_uprobe *auprobe, struct vm_area_struct *vma,
1095 unsigned long vaddr)
1096 {
1097 if (test_bit(ARCH_UPROBE_FLAG_CAN_OPTIMIZE, &auprobe->flags)) {
1098 int ret = is_optimized(vma->vm_mm, vaddr);
1099 if (ret < 0)
1100 return ret;
1101 if (ret) {
1102 ret = swbp_unoptimize(auprobe, vma, vaddr);
1103 WARN_ON_ONCE(ret);
1104 return ret;
1105 }
1106 }
1107 return uprobe_write_opcode(auprobe, vma, vaddr, *(uprobe_opcode_t *)&auprobe->insn,
1108 false /* is_register */);
1109 }
1110
1111 static int __arch_uprobe_optimize(struct arch_uprobe *auprobe, struct mm_struct *mm,
1112 unsigned long vaddr)
1113 {
1114 struct uprobe_trampoline *tramp;
1115 struct vm_area_struct *vma;
1116 bool new = false;
1117 int err = 0;
1118
1119 vma = find_vma(mm, vaddr);
1120 if (!vma)
1121 return -EINVAL;
1122 tramp = get_uprobe_trampoline(vaddr, &new);
1123 if (!tramp)
1124 return -EINVAL;
1125 err = swbp_optimize(auprobe, vma, vaddr, tramp->vaddr);
1126 if (WARN_ON_ONCE(err) && new)
1127 destroy_uprobe_trampoline(tramp);
1128 return err;
1129 }
1130
1131 void arch_uprobe_optimize(struct arch_uprobe *auprobe, unsigned long vaddr)
1132 {
1133 struct mm_struct *mm = current->mm;
1134 uprobe_opcode_t insn[5];
1135
1136 if (!should_optimize(auprobe))
1137 return;
1138
1139 mmap_write_lock(mm);
1140
1141 /*
1142 * Check if some other thread already optimized the uprobe for us,
1143 * if that's the case, just go away silently.
1144 */
1145 if (copy_from_vaddr(mm, vaddr, &insn, 5))
1146 goto unlock;
1147 if (!is_swbp_insn((uprobe_opcode_t*) &insn))
1148 goto unlock;
1149
1150 /*
1151 * If we fail to optimize the uprobe, we set the fail bit so that
1152 * should_optimize() above will fail from now on.
1153 */
1154 if (__arch_uprobe_optimize(auprobe, mm, vaddr))
1155 set_bit(ARCH_UPROBE_FLAG_OPTIMIZE_FAIL, &auprobe->flags);
1156
1157 unlock:
1158 mmap_write_unlock(mm);
1159 }
1160
1161 static bool insn_is_nop(struct insn *insn)
1162 {
1163 return insn->opcode.nbytes == 1 && insn->opcode.bytes[0] == 0x90;
1164 }
1165
1166 static bool insn_is_nopl(struct insn *insn)
1167 {
1168 if (insn->opcode.nbytes != 2)
1169 return false;
1170
1171 if (insn->opcode.bytes[0] != 0x0f || insn->opcode.bytes[1] != 0x1f)
1172 return false;
1173
1174 if (!insn->modrm.nbytes)
1175 return false;
1176
1177 if (X86_MODRM_REG(insn->modrm.bytes[0]) != 0)
1178 return false;
1179
1180 /* 0f 1f /0 - NOPL */
1181 return true;
1182 }
1183
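/*
 * Only a 5-byte nop/nopl in a 64-bit task can be rewritten into a call
 * to the uprobe trampoline, and only if the whole instruction fits in
 * one page.
 */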
1184 static bool can_optimize(struct insn *insn, unsigned long vaddr)
1185 {
1186 if (!insn->x86_64 || insn->length != 5)
1187 return false;
1188
1189 if (!insn_is_nop(insn) && !insn_is_nopl(insn))
1190 return false;
1191
1192 /* We can't do cross page atomic writes yet. */
1193 return PAGE_SIZE - (vaddr & ~PAGE_MASK) >= 5;
1194 }
1195 #else /* 32-bit: */
1196 /*
1197 * No RIP-relative addressing on 32-bit
1198 */
1199 static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
1200 {
1201 }
1202 static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
1203 {
1204 }
1205 static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
1206 {
1207 }
1208 static bool can_optimize(struct insn *insn, unsigned long vaddr)
1209 {
1210 return false;
1211 }
1212 #endif /* CONFIG_X86_64 */
1213
1214 struct uprobe_xol_ops {
1215 bool (*emulate)(struct arch_uprobe *, struct pt_regs *);
1216 int (*pre_xol)(struct arch_uprobe *, struct pt_regs *);
1217 int (*post_xol)(struct arch_uprobe *, struct pt_regs *);
1218 void (*abort)(struct arch_uprobe *, struct pt_regs *);
1219 };
1220
1221 static inline int sizeof_long(struct pt_regs *regs)
1222 {
1223 /*
1224 * Check registers for mode as in_xxx_syscall() does not apply here.
1225 */
1226 return user_64bit_mode(regs) ? 8 : 4;
1227 }
1228
1229 static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
1230 {
1231 riprel_pre_xol(auprobe, regs);
1232 return 0;
1233 }
1234
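/* Push val onto the user stack, as the emulated push/call insn would do. */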
1235 static int emulate_push_stack(struct pt_regs *regs, unsigned long val)
1236 {
1237 unsigned long new_sp = regs->sp - sizeof_long(regs);
1238
1239 if (copy_to_user((void __user *)new_sp, &val, sizeof_long(regs)))
1240 return -EFAULT;
1241
1242 regs->sp = new_sp;
1243 return 0;
1244 }
1245
1246 /*
1247 * We have to fix things up as follows:
1248 *
1249 * Typically, the new ip is relative to the copied instruction. We need
1250 * to make it relative to the original instruction (FIX_IP). Exceptions
1251 * are return instructions and absolute or indirect jump or call instructions.
1252 *
1253 * If the single-stepped instruction was a call, the return address that
1254 * is atop the stack is the address following the copied instruction. We
1255 * need to make it the address following the original instruction (FIX_CALL).
1256 *
1257 * If the original instruction was a rip-relative instruction such as
1258 * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent
1259 * instruction using a scratch register -- e.g., "movl %edx,0xnnnn(%rsi)".
1260 * We need to restore the contents of the scratch register
1261 * (FIX_RIP_reg).
1262 */
1263 static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
1264 {
1265 struct uprobe_task *utask = current->utask;
1266
1267 riprel_post_xol(auprobe, regs);
1268 if (auprobe->defparam.fixups & UPROBE_FIX_IP) {
1269 long correction = utask->vaddr - utask->xol_vaddr;
1270 regs->ip += correction;
1271 } else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
1272 regs->sp += sizeof_long(regs); /* Pop incorrect return address */
1273 if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen))
1274 return -ERESTART;
1275 }
1276 /* popf; tell the caller to not touch TF */
1277 if (auprobe->defparam.fixups & UPROBE_FIX_SETF)
1278 utask->autask.saved_tf = true;
1279
1280 return 0;
1281 }
1282
1283 static void default_abort_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
1284 {
1285 riprel_post_xol(auprobe, regs);
1286 }
1287
1288 static const struct uprobe_xol_ops default_xol_ops = {
1289 .pre_xol = default_pre_xol_op,
1290 .post_xol = default_post_xol_op,
1291 .abort = default_abort_op,
1292 };
1293
1294 static bool branch_is_call(struct arch_uprobe *auprobe)
1295 {
1296 return auprobe->branch.opc1 == 0xe8;
1297 }
1298
1299 #define CASE_COND \
1300 COND(70, 71, XF(OF)) \
1301 COND(72, 73, XF(CF)) \
1302 COND(74, 75, XF(ZF)) \
1303 COND(78, 79, XF(SF)) \
1304 COND(7a, 7b, XF(PF)) \
1305 COND(76, 77, XF(CF) || XF(ZF)) \
1306 COND(7c, 7d, XF(SF) != XF(OF)) \
1307 COND(7e, 7f, XF(ZF) || XF(SF) != XF(OF))
1308
1309 #define COND(op_y, op_n, expr) \
1310 case 0x ## op_y: DO((expr) != 0) \
1311 case 0x ## op_n: DO((expr) == 0)
1312
1313 #define XF(xf) (!!(flags & X86_EFLAGS_ ## xf))
1314
1315 static bool is_cond_jmp_opcode(u8 opcode)
1316 {
1317 switch (opcode) {
1318 #define DO(expr) \
1319 return true;
1320 CASE_COND
1321 #undef DO
1322
1323 default:
1324 return false;
1325 }
1326 }
1327
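/*
 * Evaluate the emulated branch's condition against regs->flags;
 * anything that is not a conditional jump returns true.
 */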
1328 static bool check_jmp_cond(struct arch_uprobe *auprobe, struct pt_regs *regs)
1329 {
1330 unsigned long flags = regs->flags;
1331
1332 switch (auprobe->branch.opc1) {
1333 #define DO(expr) \
1334 return expr;
1335 CASE_COND
1336 #undef DO
1337
1338 default: /* not a conditional jmp */
1339 return true;
1340 }
1341 }
1342
1343 #undef XF
1344 #undef COND
1345 #undef CASE_COND
1346
1347 static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
1348 {
1349 unsigned long new_ip = regs->ip += auprobe->branch.ilen;
1350 unsigned long offs = (long)auprobe->branch.offs;
1351
1352 if (branch_is_call(auprobe)) {
1353 /*
1354 * If it fails we execute this (mangled, see the comment in
1355 * branch_clear_offset) insn out-of-line. In the likely case
1356 * this should trigger the trap, and the probed application
1357 * should die or restart the same insn after it handles the
1358 * signal, and arch_uprobe_post_xol() won't even be called.
1359 *
1360 * But there is corner case, see the comment in ->post_xol().
1361 */
1362 if (emulate_push_stack(regs, new_ip))
1363 return false;
1364 } else if (!check_jmp_cond(auprobe, regs)) {
1365 offs = 0;
1366 }
1367
1368 regs->ip = new_ip + offs;
1369 return true;
1370 }
1371
1372 static bool push_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
1373 {
1374 unsigned long *src_ptr = (void *)regs + auprobe->push.reg_offset;
1375
1376 if (emulate_push_stack(regs, *src_ptr))
1377 return false;
1378 regs->ip += auprobe->push.ilen;
1379 return true;
1380 }
1381
1382 static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
1383 {
1384 BUG_ON(!branch_is_call(auprobe));
1385 /*
1386 * We can only get here if branch_emulate_op() failed to push the ret
1387 * address _and_ another thread expanded our stack before the (mangled)
1388 * "call" insn was executed out-of-line. Just restore ->sp and restart.
1389 * We could also restore ->ip and try to call branch_emulate_op() again.
1390 */
1391 regs->sp += sizeof_long(regs);
1392 return -ERESTART;
1393 }
1394
1395 static void branch_clear_offset(struct arch_uprobe *auprobe, struct insn *insn)
1396 {
1397 /*
1398 * Turn this insn into "call 1f; 1:", this is what we will execute
1399 * out-of-line if ->emulate() fails. We only need this to generate
1400 * a trap, so that the probed task receives the correct signal with
1401 * the properly filled siginfo.
1402 *
1403 * But see the comment in ->post_xol(), in the unlikely case it can
1404 * succeed. So we need to ensure that the new ->ip can not fall into
1405 * the non-canonical area and trigger #GP.
1406 *
1407 * We could turn it into (say) "pushf", but then we would need to
1408 * divorce ->insn[] and ->ixol[]. We need to preserve the 1st byte
1409 * of ->insn[] for set_orig_insn().
1410 */
1411 memset(auprobe->insn + insn_offset_immediate(insn),
1412 0, insn->immediate.nbytes);
1413 }
1414
1415 static const struct uprobe_xol_ops branch_xol_ops = {
1416 .emulate = branch_emulate_op,
1417 .post_xol = branch_post_xol_op,
1418 };
1419
1420 static const struct uprobe_xol_ops push_xol_ops = {
1421 .emulate = push_emulate_op,
1422 };
1423
1424 /* Returns -ENOSYS if branch_xol_ops doesn't handle this insn */
1425 static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
1426 {
1427 u8 opc1 = OPCODE1(insn);
1428 insn_byte_t p;
1429 int i;
1430
1431 /* x86_nops[insn->length]; same as jmp with .offs = 0 */
1432 if (insn->length <= ASM_NOP_MAX &&
1433 !memcmp(insn->kaddr, x86_nops[insn->length], insn->length))
1434 goto setup;
1435
1436 switch (opc1) {
1437 case 0xeb: /* jmp 8 */
1438 case 0xe9: /* jmp 32 */
1439 break;
1440 case 0x90: /* prefix* + nop; same as jmp with .offs = 0 */
1441 goto setup;
1442
1443 case 0xe8: /* call relative */
1444 branch_clear_offset(auprobe, insn);
1445 break;
1446
1447 case 0x0f:
1448 if (insn->opcode.nbytes != 2)
1449 return -ENOSYS;
1450 /*
1451 * If it is a "near" conditional jmp, OPCODE2() - 0x10 matches
1452 * OPCODE1() of the "short" jmp which checks the same condition.
1453 */
1454 opc1 = OPCODE2(insn) - 0x10;
1455 fallthrough;
1456 default:
1457 if (!is_cond_jmp_opcode(opc1))
1458 return -ENOSYS;
1459 }
1460
1461 /*
1462 * 16-bit overrides such as CALLW (66 e8 nn nn) are not supported.
1463 * Intel and AMD behavior differ in 64-bit mode: Intel ignores the 66 prefix.
1464 * No one uses these insns; reject any branch insn with such a prefix.
1465 */
1466 for_each_insn_prefix(insn, i, p) {
1467 if (p == 0x66)
1468 return -ENOTSUPP;
1469 }
1470
1471 setup:
1472 auprobe->branch.opc1 = opc1;
1473 auprobe->branch.ilen = insn->length;
1474 auprobe->branch.offs = insn->immediate.value;
1475
1476 auprobe->ops = &branch_xol_ops;
1477 return 0;
1478 }
1479
1480 /* Returns -ENOSYS if push_xol_ops doesn't handle this insn */
1481 static int push_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
1482 {
1483 u8 opc1 = OPCODE1(insn), reg_offset = 0;
1484
1485 if (opc1 < 0x50 || opc1 > 0x57)
1486 return -ENOSYS;
1487
1488 if (insn->length > 2)
1489 return -ENOSYS;
1490 if (insn->length == 2) {
1491 /* only support rex_prefix 0x41 (x64 only) */
1492 #ifdef CONFIG_X86_64
1493 if (insn->rex_prefix.nbytes != 1 ||
1494 insn->rex_prefix.bytes[0] != 0x41)
1495 return -ENOSYS;
1496
1497 switch (opc1) {
1498 case 0x50:
1499 reg_offset = offsetof(struct pt_regs, r8);
1500 break;
1501 case 0x51:
1502 reg_offset = offsetof(struct pt_regs, r9);
1503 break;
1504 case 0x52:
1505 reg_offset = offsetof(struct pt_regs, r10);
1506 break;
1507 case 0x53:
1508 reg_offset = offsetof(struct pt_regs, r11);
1509 break;
1510 case 0x54:
1511 reg_offset = offsetof(struct pt_regs, r12);
1512 break;
1513 case 0x55:
1514 reg_offset = offsetof(struct pt_regs, r13);
1515 break;
1516 case 0x56:
1517 reg_offset = offsetof(struct pt_regs, r14);
1518 break;
1519 case 0x57:
1520 reg_offset = offsetof(struct pt_regs, r15);
1521 break;
1522 }
1523 #else
1524 return -ENOSYS;
1525 #endif
1526 } else {
1527 switch (opc1) {
1528 case 0x50:
1529 reg_offset = offsetof(struct pt_regs, ax);
1530 break;
1531 case 0x51:
1532 reg_offset = offsetof(struct pt_regs, cx);
1533 break;
1534 case 0x52:
1535 reg_offset = offsetof(struct pt_regs, dx);
1536 break;
1537 case 0x53:
1538 reg_offset = offsetof(struct pt_regs, bx);
1539 break;
1540 case 0x54:
1541 reg_offset = offsetof(struct pt_regs, sp);
1542 break;
1543 case 0x55:
1544 reg_offset = offsetof(struct pt_regs, bp);
1545 break;
1546 case 0x56:
1547 reg_offset = offsetof(struct pt_regs, si);
1548 break;
1549 case 0x57:
1550 reg_offset = offsetof(struct pt_regs, di);
1551 break;
1552 }
1553 }
1554
1555 auprobe->push.reg_offset = reg_offset;
1556 auprobe->push.ilen = insn->length;
1557 auprobe->ops = &push_xol_ops;
1558 return 0;
1559 }
1560
1561 /**
1562 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
1563 * @auprobe: the probepoint information.
1564 * @mm: the probed address space.
1565 * @addr: virtual address at which to install the probepoint
1566 * Return 0 on success or a negative error code.
1567 */
1568 int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
1569 {
1570 u8 fix_ip_or_call = UPROBE_FIX_IP;
1571 struct insn insn;
1572 int ret;
1573
1574 ret = uprobe_init_insn(auprobe, &insn, is_64bit_mm(mm));
1575 if (ret)
1576 return ret;
1577
1578 if (can_optimize(&insn, addr))
1579 set_bit(ARCH_UPROBE_FLAG_CAN_OPTIMIZE, &auprobe->flags);
1580
1581 ret = branch_setup_xol_ops(auprobe, &insn);
1582 if (ret != -ENOSYS)
1583 return ret;
1584
1585 ret = push_setup_xol_ops(auprobe, &insn);
1586 if (ret != -ENOSYS)
1587 return ret;
1588
1589 /*
1590 * Figure out which fixups default_post_xol_op() will need to perform,
1591 * and annotate defparam->fixups accordingly.
1592 */
1593 switch (OPCODE1(&insn)) {
1594 case 0x9d: /* popf */
1595 auprobe->defparam.fixups |= UPROBE_FIX_SETF;
1596 break;
1597 case 0xc3: /* ret or lret -- ip is correct */
1598 case 0xcb:
1599 case 0xc2:
1600 case 0xca:
1601 case 0xea: /* jmp absolute -- ip is correct */
1602 fix_ip_or_call = 0;
1603 break;
1604 case 0x9a: /* call absolute - Fix return addr, not ip */
1605 fix_ip_or_call = UPROBE_FIX_CALL;
1606 break;
1607 case 0xff:
1608 switch (MODRM_REG(&insn)) {
1609 case 2: case 3: /* call or lcall, indirect */
1610 fix_ip_or_call = UPROBE_FIX_CALL;
1611 break;
1612 case 4: case 5: /* jmp or ljmp, indirect */
1613 fix_ip_or_call = 0;
1614 break;
1615 }
1616 fallthrough;
1617 default:
1618 riprel_analyze(auprobe, &insn);
1619 }
1620
1621 auprobe->defparam.ilen = insn.length;
1622 auprobe->defparam.fixups |= fix_ip_or_call;
1623
1624 auprobe->ops = &default_xol_ops;
1625 return 0;
1626 }
1627
1628 /*
1629 * arch_uprobe_pre_xol - prepare to execute out of line.
1630 * @auprobe: the probepoint information.
1631 * @regs: reflects the saved user state of current task.
1632 */
1633 int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
1634 {
1635 struct uprobe_task *utask = current->utask;
1636
1637 if (auprobe->ops->pre_xol) {
1638 int err = auprobe->ops->pre_xol(auprobe, regs);
1639 if (err)
1640 return err;
1641 }
1642
1643 regs->ip = utask->xol_vaddr;
1644 utask->autask.saved_trap_nr = current->thread.trap_nr;
1645 current->thread.trap_nr = UPROBE_TRAP_NR;
1646
1647 utask->autask.saved_tf = !!(regs->flags & X86_EFLAGS_TF);
1648 regs->flags |= X86_EFLAGS_TF;
1649 if (test_tsk_thread_flag(current, TIF_BLOCKSTEP))
1650 set_task_blockstep(current, false);
1651
1652 return 0;
1653 }
1654
1655 /*
1656 * If the xol insn itself traps and generates a signal (say,
1657 * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped
1658 * instruction jumps back to its own address. It is assumed that anything
1659 * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
1660 *
1661 * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
1662 * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
1663 * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol().
1664 */
1665 bool arch_uprobe_xol_was_trapped(struct task_struct *t)
1666 {
1667 if (t->thread.trap_nr != UPROBE_TRAP_NR)
1668 return true;
1669
1670 return false;
1671 }
1672
1673 /*
1674 * Called after single-stepping. To avoid the SMP problems that can
1675 * occur when we temporarily put back the original opcode to
1676 * single-step, we single-stepped a copy of the instruction.
1677 *
1678 * This function prepares to resume execution after the single-step.
1679 */
1680 int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
1681 {
1682 struct uprobe_task *utask = current->utask;
1683 bool send_sigtrap = utask->autask.saved_tf;
1684 int err = 0;
1685
1686 WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
1687 current->thread.trap_nr = utask->autask.saved_trap_nr;
1688
1689 if (auprobe->ops->post_xol) {
1690 err = auprobe->ops->post_xol(auprobe, regs);
1691 if (err) {
1692 /*
1693 * Restore ->ip for restart or post mortem analysis.
1694 * ->post_xol() must not return -ERESTART unless this
1695 * is really possible.
1696 */
1697 regs->ip = utask->vaddr;
1698 if (err == -ERESTART)
1699 err = 0;
1700 send_sigtrap = false;
1701 }
1702 }
1703 /*
1704 * arch_uprobe_pre_xol() doesn't save the state of TIF_BLOCKSTEP
1705 * so we can get an extra SIGTRAP if we do not clear TF. We need
1706 * to examine the opcode to make it right.
1707 */
1708 if (send_sigtrap)
1709 send_sig(SIGTRAP, current, 0);
1710
1711 if (!utask->autask.saved_tf)
1712 regs->flags &= ~X86_EFLAGS_TF;
1713
1714 return err;
1715 }
1716
1717 /* callback routine for handling exceptions. */
1718 int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data)
1719 {
1720 struct die_args *args = data;
1721 struct pt_regs *regs = args->regs;
1722 int ret = NOTIFY_DONE;
1723
1724 /* We are only interested in userspace traps */
1725 if (regs && !user_mode(regs))
1726 return NOTIFY_DONE;
1727
1728 switch (val) {
1729 case DIE_INT3:
1730 if (uprobe_pre_sstep_notifier(regs))
1731 ret = NOTIFY_STOP;
1732
1733 break;
1734
1735 case DIE_DEBUG:
1736 if (uprobe_post_sstep_notifier(regs))
1737 ret = NOTIFY_STOP;
1738
1739 break;
1740
1741 default:
1742 break;
1743 }
1744
1745 return ret;
1746 }
1747
1748 /*
1749 * This function gets called when XOL instruction either gets trapped or
1750 * the thread has a fatal signal. Reset the instruction pointer to its
1751 * probed address for the potential restart or for post mortem analysis.
1752 */
1753 void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
1754 {
1755 struct uprobe_task *utask = current->utask;
1756
1757 if (auprobe->ops->abort)
1758 auprobe->ops->abort(auprobe, regs);
1759
1760 current->thread.trap_nr = utask->autask.saved_trap_nr;
1761 regs->ip = utask->vaddr;
1762 /* clear TF if it was set by us in arch_uprobe_pre_xol() */
1763 if (!utask->autask.saved_tf)
1764 regs->flags &= ~X86_EFLAGS_TF;
1765 }
1766
1767 static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
1768 {
1769 if (auprobe->ops->emulate)
1770 return auprobe->ops->emulate(auprobe, regs);
1771 return false;
1772 }
1773
1774 bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
1775 {
1776 bool ret = __skip_sstep(auprobe, regs);
1777 if (ret && (regs->flags & X86_EFLAGS_TF))
1778 send_sig(SIGTRAP, current, 0);
1779 return ret;
1780 }
1781
1782 unsigned long
1783 arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
1784 {
1785 int rasize = sizeof_long(regs), nleft;
1786 unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */
1787
1788 if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
1789 return -1;
1790
1791 /* check whether address has been already hijacked */
1792 if (orig_ret_vaddr == trampoline_vaddr)
1793 return orig_ret_vaddr;
1794
1795 nleft = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
1796 if (likely(!nleft)) {
1797 if (shstk_update_last_frame(trampoline_vaddr)) {
1798 force_sig(SIGSEGV);
1799 return -1;
1800 }
1801 return orig_ret_vaddr;
1802 }
1803
1804 if (nleft != rasize) {
1805 pr_err("return address clobbered: pid=%d, %%sp=%#lx, %%ip=%#lx\n",
1806 current->pid, regs->sp, regs->ip);
1807
1808 force_sig(SIGSEGV);
1809 }
1810
1811 return -1;
1812 }
1813
1814 bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
1815 struct pt_regs *regs)
1816 {
1817 if (ctx == RP_CHECK_CALL) /* sp was just decremented by "call" insn */
1818 return regs->sp < ret->stack;
1819 else
1820 return regs->sp <= ret->stack;
1821 }
1822