xref: /freebsd/sys/cddl/dev/kinst/aarch64/kinst_isa.c (revision 06c3fb2749bda94cb5201f81ffdb8fa6c3161b2e)
1 /*
2  * SPDX-License-Identifier: CDDL 1.0
3  *
4  * Copyright (c) 2022 Christos Margiolis <christos@FreeBSD.org>
5  * Copyright (c) 2022 Mark Johnston <markj@FreeBSD.org>
6  * Copyright (c) 2023 The FreeBSD Foundation
7  *
8  * Portions of this software were developed by Christos Margiolis
9  * <christos@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
10  */
11 
12 #include <sys/param.h>
13 
14 #include <sys/dtrace.h>
15 #include <cddl/dev/dtrace/dtrace_cddl.h>
16 
17 #include "kinst.h"
18 
19 DPCPU_DEFINE_STATIC(struct kinst_cpu_state, kinst_state);
20 
21 static int
22 kinst_emulate(struct trapframe *frame, const struct kinst_probe *kp)
23 {
24 	kinst_patchval_t instr = kp->kp_savedval;
25 	uint64_t imm;
26 	uint8_t cond, reg, bitpos;
27 	bool res;
28 
29 	if (((instr >> 24) & 0x1f) == 0b10000) {
30 		/* adr/adrp */
31 		reg = instr & 0x1f;
32 		imm = (instr >> 29) & 0x3;
33 		imm |= ((instr >> 5) & 0x0007ffff) << 2;
34 		if (((instr >> 31) & 0x1) == 0) {
35 			/* adr */
36 			if (imm & 0x0000000000100000)
37 				imm |= 0xfffffffffff00000;
38 			frame->tf_x[reg] = frame->tf_elr + imm;
39 		} else {
40 			/* adrp */
41 			imm <<= 12;
42 			if (imm & 0x0000000100000000)
43 				imm |= 0xffffffff00000000;
44 			frame->tf_x[reg] = (frame->tf_elr & ~0xfff) + imm;
45 		}
46 		frame->tf_elr += INSN_SIZE;
47 	} else if (((instr >> 26) & 0x3f) == 0b000101) {
48 		/* b */
49 		imm = instr & 0x03ffffff;
50 		if (imm & 0x0000000002000000)
51 			imm |= 0xfffffffffe000000;
52 		frame->tf_elr += imm << 2;
53 	} else if (((instr >> 24) & 0xff) == 0b01010100) {
54 		/* b.cond */
55 		imm = (instr >> 5) & 0x0007ffff;
56 		if (imm & 0x0000000000040000)
57 			imm |= 0xfffffffffffc0000;
58 		cond = instr & 0xf;
59 		switch ((cond >> 1) & 0x7) {
60 		case 0b000:	/* eq/ne */
61 			res = (frame->tf_spsr & PSR_Z) != 0;
62 			break;
63 		case 0b001:	/* cs/cc */
64 			res = (frame->tf_spsr & PSR_C) != 0;
65 			break;
66 		case 0b010:	/* mi/pl */
67 			res = (frame->tf_spsr & PSR_N) != 0;
68 			break;
69 		case 0b011:	/* vs/vc */
70 			res = (frame->tf_spsr & PSR_V) != 0;
71 			break;
72 		case 0b100:	/* hi/ls */
73 			res = ((frame->tf_spsr & PSR_C) != 0) &&
74 			    ((frame->tf_spsr & PSR_Z) == 0);
75 			break;
76 		case 0b101:	/* ge/lt */
77 			res = ((frame->tf_spsr & PSR_N) != 0) ==
78 			    ((frame->tf_spsr & PSR_V) != 0);
79 			break;
80 		case 0b110:	/* gt/le */
81 			res = ((frame->tf_spsr & PSR_Z) == 0) &&
82 			    (((frame->tf_spsr & PSR_N) != 0) ==
83 			    ((frame->tf_spsr & PSR_V) != 0));
84 			break;
85 		case 0b111:	/* al */
86 			res = 1;
87 			break;
88 		}
89 		if ((cond & 0x1) && cond != 0b1111)
90 			res = !res;
91 		if (res)
92 			frame->tf_elr += imm << 2;
93 		else
94 			frame->tf_elr += INSN_SIZE;
95 	} else if (((instr >> 26) & 0x3f) == 0b100101) {
96 		/* bl */
97 		imm = instr & 0x03ffffff;
98 		if (imm & 0x0000000002000000)
99 			imm |= 0xfffffffffe000000;
100 		frame->tf_lr = frame->tf_elr + INSN_SIZE;
101 		frame->tf_elr += imm << 2;
102 	} else if (((instr >> 25) & 0x3f) == 0b011010) {
103 		/* cbnz/cbz */
104 		cond = (instr >> 24) & 0x1;
105 		reg = instr & 0x1f;
106 		imm = (instr >> 5) & 0x0007ffff;
107 		if (imm & 0x0000000000040000)
108 			imm |= 0xfffffffffffc0000;
109 		if (cond == 1 && frame->tf_x[reg] != 0)
110 			/* cbnz */
111 			frame->tf_elr += imm << 2;
112 		else if (cond == 0 && frame->tf_x[reg] == 0)
113 			/* cbz */
114 			frame->tf_elr += imm << 2;
115 		else
116 			frame->tf_elr += INSN_SIZE;
117 	} else if (((instr >> 25) & 0x3f) == 0b011011) {
118 		/* tbnz/tbz */
119 		cond = (instr >> 24) & 0x1;
120 		reg = instr & 0x1f;
121 		bitpos = (instr >> 19) & 0x1f;
122 		bitpos |= ((instr >> 31) & 0x1) << 5;
123 		imm = (instr >> 5) & 0x3fff;
124 		if (imm & 0x0000000000002000)
125 			imm |= 0xffffffffffffe000;
126 		if (cond == 1 && (frame->tf_x[reg] & (1 << bitpos)) != 0)
127 			/* tbnz */
128 			frame->tf_elr += imm << 2;
129 		else if (cond == 0 && (frame->tf_x[reg] & (1 << bitpos)) == 0)
130 			/* tbz */
131 			frame->tf_elr += imm << 2;
132 		else
133 			frame->tf_elr += INSN_SIZE;
134 	}
135 
136 	return (0);
137 }
138 
139 static int
140 kinst_jump_next_instr(struct trapframe *frame, const struct kinst_probe *kp)
141 {
142 	frame->tf_elr = (register_t)((const uint8_t *)kp->kp_patchpoint +
143 	    INSN_SIZE);
144 
145 	return (0);
146 }
147 
148 static void
149 kinst_trampoline_populate(struct kinst_probe *kp)
150 {
151 	static uint32_t bpt = KINST_PATCHVAL;
152 
153 	kinst_memcpy(kp->kp_tramp, &kp->kp_savedval, INSN_SIZE);
154 	kinst_memcpy(&kp->kp_tramp[INSN_SIZE], &bpt, INSN_SIZE);
155 
156 	cpu_icache_sync_range((vm_offset_t)kp->kp_tramp,
157 	    (vm_size_t)KINST_TRAMP_SIZE);
158 }
159 
160 /*
161  * There are two ways by which an instruction is traced:
162  *
163  * - By using the trampoline.
164  * - By emulating it in software (see kinst_emulate()).
165  *
166  * The trampoline is used for instructions that can be copied and executed
167  * as-is without additional modification. However, instructions that use
168  * PC-relative addressing have to be emulated, because ARM64 doesn't allow
169  * encoding of large displacements in a single instruction, and since we cannot
170  * clobber a register in order to encode the two-instruction sequence needed to
171  * create large displacements, we cannot use the trampoline at all.
172  * Fortunately, the instructions are simple enough to be emulated in just a few
173  * lines of code.
174  *
175  * The problem discussed above also means that, unlike amd64, we cannot encode
176  * a far-jump back from the trampoline to the next instruction. The mechanism
177  * employed to achieve this functionality, is to use a breakpoint instead of a
178  * jump after the copied instruction. This breakpoint is detected and handled
179  * by kinst_invop(), which performs the jump back to the next instruction
180  * manually (see kinst_jump_next_instr()).
181  */
int
kinst_invop(uintptr_t addr, struct trapframe *frame, uintptr_t scratch)
{
	solaris_cpu_t *cpu;
	struct kinst_cpu_state *ks;
	const struct kinst_probe *kp;

	/* Per-CPU state machine: ARMED -> FIRED -> ARMED. */
	ks = DPCPU_PTR(kinst_state);

	/*
	 * Detect if the breakpoint was triggered by the trampoline, and
	 * manually set the PC to the next instruction.
	 */
	if (ks->state == KINST_PROBE_FIRED &&
	    addr == (uintptr_t)(ks->kp->kp_tramp + INSN_SIZE)) {
		/*
		 * Restore interrupts if they were enabled prior to the first
		 * breakpoint.
		 */
		if ((ks->status & PSR_I) == 0)
			frame->tf_spsr &= ~PSR_I;
		ks->state = KINST_PROBE_ARMED;
		return (kinst_jump_next_instr(frame, ks->kp));
	}

	/* Otherwise this is the first breakpoint: look up the probe by PC. */
	LIST_FOREACH(kp, KINST_GETPROBE(addr), kp_hashnext) {
		if ((uintptr_t)kp->kp_patchpoint == addr)
			break;
	}
	if (kp == NULL)
		return (0);

	/* Fire the DTrace probe, exposing the patch point as the caller. */
	cpu = &solaris_cpu[curcpu];
	cpu->cpu_dtrace_caller = addr;
	dtrace_probe(kp->kp_id, 0, 0, 0, 0, 0);
	cpu->cpu_dtrace_caller = 0;

	/* PC-relative instructions are emulated rather than single-stepped. */
	if (kp->kp_md.emulate)
		return (kinst_emulate(frame, kp));

	ks->state = KINST_PROBE_FIRED;
	ks->kp = kp;

	/*
	 * Cache the current SPSR and clear interrupts for the duration
	 * of the double breakpoint.
	 */
	ks->status = frame->tf_spsr;
	frame->tf_spsr |= PSR_I;
	frame->tf_elr = (register_t)kp->kp_tramp;

	return (0);
}
235 
236 void
237 kinst_patch_tracepoint(struct kinst_probe *kp, kinst_patchval_t val)
238 {
239 	vm_offset_t addr;
240 
241 	if (!arm64_get_writable_addr((vm_offset_t)kp->kp_patchpoint, &addr))
242 		panic("%s: Unable to write new instruction", __func__);
243 	*(kinst_patchval_t *)addr = val;
244 	cpu_icache_sync_range((vm_offset_t)kp->kp_patchpoint,
245 	    (vm_size_t)INSN_SIZE);
246 }
247 
248 static void
249 kinst_instr_dissect(struct kinst_probe *kp)
250 {
251 	struct kinst_probe_md *kpmd;
252 	kinst_patchval_t instr = kp->kp_savedval;
253 
254 	kpmd = &kp->kp_md;
255 	kpmd->emulate = false;
256 
257 	if (((instr >> 24) & 0x1f) == 0b10000)
258 		kpmd->emulate = true;	/* adr/adrp */
259 	else if (((instr >> 26) & 0x3f) == 0b000101)
260 		kpmd->emulate = true;	/* b */
261 	else if (((instr >> 24) & 0xff) == 0b01010100)
262 		kpmd->emulate = true;	/* b.cond */
263 	else if (((instr >> 26) & 0x3f) == 0b100101)
264 		kpmd->emulate = true;	/* bl */
265 	else if (((instr >> 25) & 0x3f) == 0b011010)
266 		kpmd->emulate = true;	/* cbnz/cbz */
267 	else if (((instr >> 25) & 0x3f) == 0b011011)
268 		kpmd->emulate = true;	/* tbnz/tbz */
269 
270 	if (!kpmd->emulate)
271 		kinst_trampoline_populate(kp);
272 }
273 
274 static bool
275 kinst_instr_ldx(kinst_patchval_t instr)
276 {
277 	if (((instr >> 22) & 0xff) == 0b00100001)
278 		return (true);
279 
280 	return (false);
281 }
282 
283 static bool
284 kinst_instr_stx(kinst_patchval_t instr)
285 {
286 	if (((instr >> 22) & 0xff) == 0b00100000)
287 		return (true);
288 
289 	return (false);
290 }
291 
292 int
293 kinst_make_probe(linker_file_t lf, int symindx, linker_symval_t *symval,
294     void *opaque)
295 {
296 	struct kinst_probe *kp;
297 	dtrace_kinst_probedesc_t *pd;
298 	const char *func;
299 	kinst_patchval_t *instr, *limit, *tmp;
300 	int n, off;
301 	bool ldxstx_block, found;
302 
303 	pd = opaque;
304 	func = symval->name;
305 
306 	if (kinst_excluded(func))
307 		return (0);
308 	if (strcmp(func, pd->kpd_func) != 0)
309 		return (0);
310 
311 	instr = (kinst_patchval_t *)(symval->value);
312 	limit = (kinst_patchval_t *)(symval->value + symval->size);
313 	if (instr >= limit)
314 		return (0);
315 
316 	tmp = instr;
317 
318 	/*
319 	 * Ignore any bti instruction at the start of the function
320 	 * we need to keep it there for any indirect branches calling
321 	 * the function on Armv8.5+
322 	 */
323 	if ((*tmp & BTI_MASK) == BTI_INSTR)
324 		tmp++;
325 
326 	/* Look for stp (pre-indexed) operation */
327 	found = false;
328 
329 	/*
330 	 * If the first instruction is a nop it's a specially marked
331 	 * asm function. We only support a nop first as it's not a normal
332 	 * part of the function prologue.
333 	 */
334 	if (*tmp == NOP_INSTR)
335 		found = true;
336 	for (; !found && tmp < limit; tmp++) {
337 		/*
338 		 * Functions start with "stp xt1, xt2, [xn, <const>]!" or
339 		 * "sub sp, sp, <const>".
340 		 *
341 		 * Sometimes the compiler will have a sub instruction that is
342 		 * not of the above type so don't stop if we see one.
343 		 */
344 		if ((*tmp & LDP_STP_MASK) == STP_64) {
345 			/*
346 			 * Assume any other store of this type means we are
347 			 * past the function prolog.
348 			 */
349 			if (((*tmp >> ADDR_SHIFT) & ADDR_MASK) == 31)
350 				found = true;
351 		} else if ((*tmp & SUB_MASK) == SUB_INSTR &&
352 		    ((*tmp >> SUB_RD_SHIFT) & SUB_R_MASK) == 31 &&
353 		    ((*tmp >> SUB_RN_SHIFT) & SUB_R_MASK) == 31)
354 			found = true;
355 	}
356 
357 	if (!found)
358 		return (0);
359 
360 	ldxstx_block = false;
361 	for (n = 0; instr < limit; instr++) {
362 		off = (int)((uint8_t *)instr - (uint8_t *)symval->value);
363 
364 		/*
365 		 * Skip LDX/STX blocks that contain atomic operations. If a
366 		 * breakpoint is placed in a LDX/STX block, we violate the
367 		 * operation and the loop might fail.
368 		 */
369 		if (kinst_instr_ldx(*instr))
370 			ldxstx_block = true;
371 		else if (kinst_instr_stx(*instr)) {
372 			ldxstx_block = false;
373 			continue;
374 		}
375 		if (ldxstx_block)
376 			continue;
377 
378 		/*
379 		 * XXX: Skip ADR and ADRP instructions. The arm64 exception
380 		 * handler has a micro-optimization where it doesn't restore
381 		 * callee-saved registers when returning from exceptions in
382 		 * EL1. This results in a panic when the kinst emulation code
383 		 * modifies one of those registers.
384 		 */
385 		if (((*instr >> 24) & 0x1f) == 0b10000)
386 			continue;
387 
388 		if (pd->kpd_off != -1 && off != pd->kpd_off)
389 			continue;
390 
391 		/*
392 		 * Prevent separate dtrace(1) instances from creating copies of
393 		 * the same probe.
394 		 */
395 		LIST_FOREACH(kp, KINST_GETPROBE(instr), kp_hashnext) {
396 			if (strcmp(kp->kp_func, func) == 0 &&
397 			    strtol(kp->kp_name, NULL, 10) == off)
398 				return (0);
399 		}
400 		if (++n > KINST_PROBETAB_MAX) {
401 			KINST_LOG("probe list full: %d entries", n);
402 			return (ENOMEM);
403 		}
404 		kp = malloc(sizeof(struct kinst_probe), M_KINST,
405 		    M_WAITOK | M_ZERO);
406 		kp->kp_func = func;
407 		snprintf(kp->kp_name, sizeof(kp->kp_name), "%d", off);
408 		kp->kp_patchpoint = instr;
409 		kp->kp_savedval = *instr;
410 		kp->kp_patchval = KINST_PATCHVAL;
411 		if ((kp->kp_tramp = kinst_trampoline_alloc(M_WAITOK)) == NULL) {
412 			KINST_LOG("cannot allocate trampoline for %p", instr);
413 			return (ENOMEM);
414 		}
415 
416 		kinst_instr_dissect(kp);
417 		kinst_probe_create(kp, lf);
418 	}
419 	if (ldxstx_block)
420 		KINST_LOG("warning: unterminated LDX/STX block");
421 
422 	return (0);
423 }
424 
425 int
426 kinst_md_init(void)
427 {
428 	struct kinst_cpu_state *ks;
429 	int cpu;
430 
431 	CPU_FOREACH(cpu) {
432 		ks = DPCPU_PTR(kinst_state);
433 		ks->state = KINST_PROBE_ARMED;
434 	}
435 
436 	return (0);
437 }
438 
/*
 * Machine-dependent teardown: nothing to release on arm64; per-CPU state is
 * static and trampolines are freed by the MI code.
 */
void
kinst_md_deinit(void)
{
}
443 
/*
 * Exclude machine-dependent functions that are not safe-to-trace.
 */
bool
kinst_md_excluded(const char *name)
{
	static const char *const excluded[] = {
		"handle_el1h_sync",
		"do_el1h_sync",
	};
	size_t i;

	for (i = 0; i < sizeof(excluded) / sizeof(excluded[0]); i++) {
		if (strcmp(name, excluded[i]) == 0)
			return (true);
	}

	return (false);
}
456