xref: /freebsd/sys/cddl/dev/kinst/riscv/kinst_isa.c (revision 59c8e88e72633afbc47a4ace0d2170d00d51f7dc)
/*
 * SPDX-License-Identifier: CDDL 1.0
 *
 * Copyright (c) 2023 The FreeBSD Foundation
 *
 * This software was developed by Christos Margiolis <christos@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 */

#include <sys/param.h>

#include <sys/dtrace.h>
#include <cddl/dev/dtrace/dtrace_cddl.h>

#include "kinst.h"

DPCPU_DEFINE_STATIC(struct kinst_cpu_state, kinst_state);

#define _MATCH_REG(reg)	\
	(offsetof(struct trapframe, tf_ ## reg) / sizeof(register_t))
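/*
 * _MATCH_REG() yields the index of a trapframe field when the frame is viewed
 * as a flat array of register_t; kinst_emulate() uses these indices to read
 * and write individual registers through the frame pointer.
 */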

static int
kinst_regoff(struct trapframe *frame, int n)
{
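	/*
	 * Translate an architectural register number into its trapframe slot
	 * using the standard RISC-V register map: x1 = ra, x2 = sp, x3 = gp,
	 * x4 = tp, x5-x7 = t0-t2, x8-x9 = s0-s1, x10-x17 = a0-a7,
	 * x18-x27 = s2-s11 and x28-x31 = t3-t6.
	 */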
	switch (n) {
	case 0:
		/* There is no zero register in the trapframe structure. */
		return (-1);
	case 1:
		return (_MATCH_REG(ra));
	case 2:
		return (_MATCH_REG(sp));
	case 3:
		return (_MATCH_REG(gp));
	case 4:
		return (_MATCH_REG(tp));
	case 5 ... 7:
		return (_MATCH_REG(t[n - 5]));
	case 8 ... 9:
		return (_MATCH_REG(s[n - 8]));
	case 10 ... 17:
		return (_MATCH_REG(a[n - 10]));
	case 18 ... 27:
		return (_MATCH_REG(s[n - 18 + 2]));
	case 28 ... 31:
		return (_MATCH_REG(t[n - 28 + 3]));
	default:
		panic("%s: unhandled register index %d", __func__, n);
	}
}

static int
kinst_c_regoff(struct trapframe *frame, int n)
{
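	/*
	 * Compressed instructions encode registers in three bits, covering
	 * x8-x15 only: indices 0-1 map to s0-s1 and 2-7 map to a0-a5.
	 */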
	switch (n) {
	case 0 ... 1:
		return (_MATCH_REG(s[n]));
	case 2 ... 7:
		return (_MATCH_REG(a[n - 2]));
	default:
		panic("%s: unhandled register index %d", __func__, n);
	}
}

#undef _MATCH_REG

static int
kinst_emulate(struct trapframe *frame, const struct kinst_probe *kp)
{
	kinst_patchval_t instr = kp->kp_savedval;
	register_t prevpc;
	uint64_t imm;
	uint16_t off;
	uint8_t funct;

	if (kp->kp_md.instlen == INSN_SIZE) {
#define rs1_index	((instr & RS1_MASK) >> RS1_SHIFT)
#define rs2_index	((instr & RS2_MASK) >> RS2_SHIFT)
#define rd_index	((instr & RD_MASK) >> RD_SHIFT)
#define rs1		((register_t *)frame)[kinst_regoff(frame, rs1_index)]
#define rs2		((register_t *)frame)[kinst_regoff(frame, rs2_index)]
#define rd		((register_t *)frame)[kinst_regoff(frame, rd_index)]
#define rs1_lval	(rs1_index != 0 ? rs1 : 0)
#define rs2_lval	(rs2_index != 0 ? rs2 : 0)
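		/*
		 * The macros above address the trapframe as an array of
		 * register_t, using kinst_regoff() to turn a register number
		 * into an index.  rs1_lval/rs2_lval read as zero when the
		 * encoded register is x0, which has no trapframe slot.
		 */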
		switch (instr & 0x7f) {
		case 0b1101111: /* jal */
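			/*
			 * J-type: instr[31:12] holds imm[20|10:1|11|19:12];
			 * reassemble it and sign-extend from bit 20.
			 */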
			imm = 0;
			imm |= ((instr >> 21) & 0x03ff) << 1;
			imm |= ((instr >> 20) & 0x0001) << 11;
			imm |= ((instr >> 12) & 0x00ff) << 12;
			imm |= ((instr >> 31) & 0x0001) << 20;
			if (imm & 0x0000000000100000)
				imm |= 0xfffffffffff00000;
			if (rd_index != 0)
				rd = frame->tf_sepc + INSN_SIZE;
			frame->tf_sepc += imm;
			break;
		case 0b1100111:	/* jalr */
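			/*
			 * I-type: imm[11:0] sits in instr[31:20] and the
			 * target is (rs1 + imm) with the low bit cleared.  The
			 * old pc is saved before sepc is updated so that rd
			 * can be set to the return address afterwards; writing
			 * rd last also keeps rs1 intact when rd == rs1.
			 */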
			prevpc = frame->tf_sepc;
			imm = (instr & IMM_MASK) >> IMM_SHIFT;
			if (imm & 0x0000000000000800)
				imm |= 0xfffffffffffff000;
			frame->tf_sepc = (rs1_lval + imm) & ~1;
			if (rd_index != 0)
				rd = prevpc + INSN_SIZE;
			break;
		case 0b1100011:	/* branch */
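			/*
			 * B-type: instr[31:25] holds imm[12|10:5] and
			 * instr[11:7] holds imm[4:1|11]; funct3 (instr[14:12])
			 * selects the comparison below.
			 */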
			imm = 0;
			imm |= ((instr >> 8) & 0x000f) << 1;
			imm |= ((instr >> 25) & 0x003f) << 5;
			imm |= ((instr >> 7) & 0x0001) << 11;
			imm |= ((instr >> 31) & 0x0001) << 12;
			if (imm & 0x0000000000001000)
				imm |= 0xfffffffffffff000;
			funct = (instr >> 12) & 0x07;
			switch (funct) {
			case 0b000:	/* beq */
				if (rs1_lval == rs2_lval)
					frame->tf_sepc += imm;
				else
					frame->tf_sepc += INSN_SIZE;
				break;
			case 0b001:	/* bne */
				if (rs1_lval != rs2_lval)
					frame->tf_sepc += imm;
				else
					frame->tf_sepc += INSN_SIZE;
				break;
			case 0b100:	/* blt */
				if ((int64_t)rs1_lval < (int64_t)rs2_lval)
					frame->tf_sepc += imm;
				else
					frame->tf_sepc += INSN_SIZE;
				break;
			case 0b110:	/* bltu */
				if ((uint64_t)rs1_lval < (uint64_t)rs2_lval)
					frame->tf_sepc += imm;
				else
					frame->tf_sepc += INSN_SIZE;
				break;
			case 0b101:	/* bge */
				if ((int64_t)rs1_lval >= (int64_t)rs2_lval)
					frame->tf_sepc += imm;
				else
					frame->tf_sepc += INSN_SIZE;
				break;
			case 0b111:	/* bgeu */
				if ((uint64_t)rs1_lval >= (uint64_t)rs2_lval)
					frame->tf_sepc += imm;
				else
					frame->tf_sepc += INSN_SIZE;
				break;
			}
			break;
		case 0b0010111:	/* auipc */
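			/*
			 * U-type: instr[31:12] supplies imm[31:12].  rd gets
			 * sepc plus the sign-extended immediate, and sepc just
			 * advances past the probed instruction since auipc
			 * does not branch.
			 */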
			imm = instr & 0xfffff000;
			rd = frame->tf_sepc +
			    (imm & 0x0000000080000000 ?
			    imm | 0xffffffff80000000 : imm);
			frame->tf_sepc += INSN_SIZE;
			break;
		}
#undef rs1_lval
#undef rs2_lval
#undef rs1
#undef rs2
#undef rd
#undef rs1_index
#undef rs2_index
#undef rd_index
	} else {
		switch (instr & 0x03) {
#define rs1	\
	((register_t *)frame)[kinst_c_regoff(frame, (instr >> 7) & 0x07)]
		case 0b01:
			funct = (instr >> 13) & 0x07;
			switch (funct) {
			case 0b101:	/* c.j */
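				/*
				 * CJ format: instr[12:2] holds
				 * imm[11|4|9:8|10|6|7|3:1|5], which the shifts
				 * below unscramble into a signed byte offset.
				 */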
				off = (instr >> 2) & 0x07ff;
				imm = 0;
				imm |= ((off >> 1) & 0x07) << 1;
				imm |= ((off >> 9) & 0x01) << 4;
				imm |= ((off >> 0) & 0x01) << 5;
				imm |= ((off >> 5) & 0x01) << 6;
				imm |= ((off >> 4) & 0x01) << 7;
				imm |= ((off >> 7) & 0x03) << 8;
				imm |= ((off >> 6) & 0x01) << 10;
				imm |= ((off >> 10) & 0x01) << 11;
				if (imm & 0x0000000000000800)
					imm |= 0xfffffffffffff000;
				frame->tf_sepc += imm;
				break;
			case 0b110:	/* c.beqz */
			case 0b111:	/* c.bnez */
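				/*
				 * CB format: instr[12:10] holds offset[8|4:3]
				 * and instr[6:2] holds offset[7:6|2:1|5]; the
				 * compressed rs1' field (x8-x15) lives in
				 * instr[9:7].
				 */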
				imm = 0;
				imm |= ((instr >> 3) & 0x03) << 1;
				imm |= ((instr >> 10) & 0x03) << 3;
				imm |= ((instr >> 2) & 0x01) << 5;
				imm |= ((instr >> 5) & 0x03) << 6;
				imm |= ((instr >> 12) & 0x01) << 8;
				if (imm & 0x0000000000000100)
					imm |= 0xffffffffffffff00;
				if (funct == 0b110 && rs1 == 0)
					frame->tf_sepc += imm;
				else if (funct == 0b111 && rs1 != 0)
					frame->tf_sepc += imm;
				else
					frame->tf_sepc += INSN_C_SIZE;
				break;
			}
			break;
#undef rs1
#define rs1_index	((instr & RD_MASK) >> RD_SHIFT)
#define rs1		((register_t *)frame)[kinst_regoff(frame, rs1_index)]
		case 0b10:
			funct = (instr >> 13) & 0x07;
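			/*
			 * CR format: rs1 is in instr[11:7] and rs2 in
			 * instr[6:2] (zero for c.jr/c.jalr).  Bit 12 is clear
			 * for c.jr and set for c.jalr, which also writes the
			 * return address to ra.  kinst_instr_dissect() only
			 * flags these two forms for emulation, so rs2 is known
			 * to be zero here.
			 */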
			if (funct == 0b100 && rs1_index != 0) {
				/* c.jr/c.jalr */
				prevpc = frame->tf_sepc;
				frame->tf_sepc = rs1;
				if (((instr >> 12) & 0x01) != 0)
					frame->tf_ra = prevpc + INSN_C_SIZE;
			}
			break;
#undef rs1
#undef rs1_index
		}
	}

	return (MATCH_C_NOP);
}

static int
kinst_jump_next_instr(struct trapframe *frame, const struct kinst_probe *kp)
{
	frame->tf_sepc = (register_t)((const uint8_t *)kp->kp_patchpoint +
	    kp->kp_md.instlen);

	return (MATCH_C_NOP);
}

static void
kinst_trampoline_populate(struct kinst_probe *kp)
{
	static uint16_t nop = MATCH_C_NOP;
	static uint32_t ebreak = MATCH_EBREAK;
	int ilen;

	ilen = kp->kp_md.instlen;
	kinst_memcpy(kp->kp_tramp, &kp->kp_savedval, ilen);

	/*
	 * Since we cannot encode the large displacement needed for a far-jump
	 * back to the next instruction in a single instruction, and we also
	 * cannot clobber a register inside the trampoline, we execute a
	 * breakpoint after the copied instruction. kinst_invop() is
	 * responsible for detecting this special case and performing the
	 * "jump" manually.
	 *
	 * Add a NOP after a compressed instruction for padding.
	 */
	if (ilen == INSN_C_SIZE)
		kinst_memcpy(&kp->kp_tramp[ilen], &nop, INSN_C_SIZE);

	kinst_memcpy(&kp->kp_tramp[INSN_SIZE], &ebreak, INSN_SIZE);
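	/*
	 * The trampoline now contains, in bytes:
	 *
	 *	[0 .. ilen-1]	the copied instruction
	 *	[2 .. 3]	c.nop padding (compressed case only)
	 *	[4 .. 7]	ebreak
	 */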

	fence_i();
}

/*
 * There are two ways by which an instruction is traced:
 *
 * - By using the trampoline.
 * - By emulating it in software (see kinst_emulate()).
 *
 * The trampoline is used for instructions that can be copied and executed
 * as-is without additional modification. However, instructions that use
 * PC-relative addressing have to be emulated, because RISC-V doesn't allow
 * encoding of large displacements in a single instruction, and since we cannot
 * clobber a register in order to encode the two-instruction sequence needed to
 * create large displacements, we cannot use the trampoline at all.
 * Fortunately, the instructions are simple enough to be emulated in just a few
 * lines of code.
 *
 * The problem discussed above also means that, unlike amd64, we cannot encode
 * a far-jump back from the trampoline to the next instruction. The mechanism
 * employed to achieve this functionality is to use a breakpoint instead of a
 * jump after the copied instruction. This breakpoint is detected and handled
 * by kinst_invop(), which performs the jump back to the next instruction
 * manually (see kinst_jump_next_instr()).
 */
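/*
 * For the trampoline case the sequence is: the patched breakpoint traps into
 * kinst_invop(), which fires the probe, records the probe and saved SSTATUS in
 * the per-CPU kinst_state, and points sepc at the trampoline; the trampoline
 * executes the original instruction and hits its own breakpoint, which lands
 * back here so that kinst_jump_next_instr() can resume execution at the
 * instruction following the patch point.
 */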
int
kinst_invop(uintptr_t addr, struct trapframe *frame, uintptr_t scratch)
{
	solaris_cpu_t *cpu;
	struct kinst_cpu_state *ks;
	const struct kinst_probe *kp;

	ks = DPCPU_PTR(kinst_state);

	/*
	 * Detect if the breakpoint was triggered by the trampoline, and
	 * manually set the PC to the next instruction.
	 */
	if (ks->state == KINST_PROBE_FIRED &&
	    addr == (uintptr_t)(ks->kp->kp_tramp + INSN_SIZE)) {
		/*
		 * Restore interrupts if they were enabled prior to the first
		 * breakpoint.
		 */
		if ((ks->status & SSTATUS_SPIE) != 0)
			frame->tf_sstatus |= SSTATUS_SPIE;
		ks->state = KINST_PROBE_ARMED;
		return (kinst_jump_next_instr(frame, ks->kp));
	}

	LIST_FOREACH(kp, KINST_GETPROBE(addr), kp_hashnext) {
		if ((uintptr_t)kp->kp_patchpoint == addr)
			break;
	}
	if (kp == NULL)
		return (0);

	cpu = &solaris_cpu[curcpu];
	cpu->cpu_dtrace_caller = addr;
	dtrace_probe(kp->kp_id, 0, 0, 0, 0, 0);
	cpu->cpu_dtrace_caller = 0;

	if (kp->kp_md.emulate)
		return (kinst_emulate(frame, kp));

	ks->state = KINST_PROBE_FIRED;
	ks->kp = kp;

	/*
	 * Cache the current SSTATUS and clear interrupts for the
	 * duration of the double breakpoint.
	 */
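	/*
	 * With SPIE clear, the return to the trampoline cannot be interrupted,
	 * so the per-CPU state set above stays intact until the trampoline's
	 * breakpoint brings control back to kinst_invop().
	 */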
	ks->status = frame->tf_sstatus;
	frame->tf_sstatus &= ~SSTATUS_SPIE;
	frame->tf_sepc = (register_t)kp->kp_tramp;

	return (MATCH_C_NOP);
}

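/*
 * Store val over the patch point: compressed patch points take a 16-bit store
 * so the adjacent instruction is left untouched, full-size ones a 32-bit
 * store.  fence.i then resynchronizes the local hart's instruction fetch with
 * the modified text.
 */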
void
kinst_patch_tracepoint(struct kinst_probe *kp, kinst_patchval_t val)
{
	switch (kp->kp_patchval) {
	case KINST_C_PATCHVAL:
		*(uint16_t *)kp->kp_patchpoint = (uint16_t)val;
		fence_i();
		break;
	case KINST_PATCHVAL:
		*kp->kp_patchpoint = val;
		fence_i();
		break;
	}
}

static void
kinst_instr_dissect(struct kinst_probe *kp, int instrsize)
{
	struct kinst_probe_md *kpmd;
	kinst_patchval_t instr = kp->kp_savedval;
	uint8_t funct;

	kpmd = &kp->kp_md;
	kpmd->instlen = instrsize;
	kpmd->emulate = false;

	/*
	 * The following instructions use PC-relative addressing and need to be
	 * emulated in software.
	 */
	if (kpmd->instlen == INSN_SIZE) {
		switch (instr & 0x7f) {
		case 0b1101111: /* jal */
		case 0b1100111:	/* jalr */
		case 0b1100011:	/* branch */
		case 0b0010111:	/* auipc */
			kpmd->emulate = true;
			break;
		}
	} else {
		switch (instr & 0x03) {
		case 0b01:
			funct = (instr >> 13) & 0x07;
			switch (funct) {
			case 0b101:	/* c.j */
			case 0b110:	/* c.beqz */
			case 0b111:	/* c.bnez */
				kpmd->emulate = true;
				break;
			}
			break;
		case 0b10:
			funct = (instr >> 13) & 0x07;
			if (funct == 0b100 &&
			    ((instr >> 7) & 0x1f) != 0 &&
			    ((instr >> 2) & 0x1f) == 0)
				kpmd->emulate = true;	/* c.jr/c.jalr */
			break;
		}
	}

	if (!kpmd->emulate)
		kinst_trampoline_populate(kp);
}

static bool
kinst_instr_system(kinst_patchval_t instr)
{
	if (dtrace_match_opcode(instr, MATCH_C_EBREAK, MASK_C_EBREAK) ||
	    (instr & 0x7f) == 0b1110011)
		return (true);

	return (false);
}

static bool
kinst_instr_lr(kinst_patchval_t instr)
{
	if (dtrace_match_opcode(instr, MATCH_LR_W, MASK_LR_W) ||
	    dtrace_match_opcode(instr, MATCH_LR_D, MASK_LR_D))
		return (true);

	return (false);
}

static bool
kinst_instr_sc(kinst_patchval_t instr)
{
	if (dtrace_match_opcode(instr, MATCH_SC_W, MASK_SC_W) ||
	    dtrace_match_opcode(instr, MATCH_SC_D, MASK_SC_D))
		return (true);

	return (false);
}

int
kinst_make_probe(linker_file_t lf, int symindx, linker_symval_t *symval,
    void *opaque)
{
	struct kinst_probe *kp;
	dtrace_kinst_probedesc_t *pd;
	const char *func;
	kinst_patchval_t *insn, v;
	uint8_t *instr, *limit;
	int instrsize, n, off;
	bool lrsc_block, store_found;

	pd = opaque;
	func = symval->name;

	if (kinst_excluded(func))
		return (0);
	if (strcmp(func, pd->kpd_func) != 0)
		return (0);

	instr = (uint8_t *)(symval->value);
	limit = (uint8_t *)(symval->value + symval->size);
	if (instr >= limit)
		return (0);

	/* Check for the usual function prologue. */
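	/*
	 * A regular prologue typically allocates a stack frame and spills the
	 * return address, e.g. "addi sp, sp, -32; sd ra, 24(sp)", so scan for
	 * an sd/c.sdsp with sp as the base; functions that never store to the
	 * stack (e.g. hand-written leaf routines) are skipped.
	 */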
	store_found = false;
	for (insn = (kinst_patchval_t *)instr;
	    insn < (kinst_patchval_t *)limit; insn++) {
		if (dtrace_instr_sdsp(&insn) || dtrace_instr_c_sdsp(&insn)) {
			store_found = true;
			break;
		}
	}
	if (!store_found)
		return (0);

	n = 0;
	lrsc_block = false;
	while (instr < limit) {
		instrsize = dtrace_instr_size(instr);
		off = (int)(instr - (uint8_t *)symval->value);

		/*
		 * Avoid undefined behavior (i.e., simply casting `*instr` to
		 * `kinst_patchval_t`) in case the pointer is unaligned.
		 * memcpy() can safely operate on unaligned pointers.
		 */
		memcpy(&v, instr, sizeof(kinst_patchval_t));

		/* Skip SYSTEM instructions. */
		if (kinst_instr_system(v))
			goto cont;

		/*
		 * Skip LR/SC blocks used to build atomic operations. If a
		 * breakpoint is placed in an LR/SC block, the loop becomes
		 * unconstrained: the constraints of the sequence are violated
		 * and the loop might fail on some implementations (see section
		 * 8.3 of the RISC-V unprivileged spec).
		 */
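		/*
		 * A typical sequence looks roughly like:
		 *
		 *	1: lr.w   t0, (a0)
		 *	   bne    t0, a1, 2f
		 *	   sc.w   t1, a2, (a0)
		 *	   bnez   t1, 1b
		 *	2:
		 *
		 * Trapping between the LR and the SC inserts extra memory
		 * accesses, so the SC may never succeed on implementations
		 * that only guarantee progress for constrained loops.
		 */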
		if (kinst_instr_lr(v))
			lrsc_block = true;
		else if (kinst_instr_sc(v)) {
			lrsc_block = false;
			goto cont;
		}
		if (lrsc_block)
			goto cont;

		if (pd->kpd_off != -1 && off != pd->kpd_off)
			goto cont;

		/*
		 * Prevent separate dtrace(1) instances from creating copies of
		 * the same probe.
		 */
		LIST_FOREACH(kp, KINST_GETPROBE(instr), kp_hashnext) {
			if (strcmp(kp->kp_func, func) == 0 &&
			    strtol(kp->kp_name, NULL, 10) == off)
				return (0);
		}
		if (++n > KINST_PROBETAB_MAX) {
			KINST_LOG("probe list full: %d entries", n);
			return (ENOMEM);
		}
		kp = malloc(sizeof(struct kinst_probe), M_KINST,
		    M_WAITOK | M_ZERO);
		kp->kp_func = func;
		snprintf(kp->kp_name, sizeof(kp->kp_name), "%d", off);
		kp->kp_patchpoint = (kinst_patchval_t *)instr;
		kp->kp_savedval = v;
		if (instrsize == INSN_SIZE)
			kp->kp_patchval = KINST_PATCHVAL;
		else
			kp->kp_patchval = KINST_C_PATCHVAL;
		if ((kp->kp_tramp = kinst_trampoline_alloc(M_WAITOK)) == NULL) {
			KINST_LOG("cannot allocate trampoline for %p", instr);
			return (ENOMEM);
		}

		kinst_instr_dissect(kp, instrsize);
		kinst_probe_create(kp, lf);
cont:
		instr += instrsize;
	}
	if (lrsc_block)
		KINST_LOG("warning: unterminated LR/SC block");

	return (0);
}

int
kinst_md_init(void)
{
	struct kinst_cpu_state *ks;
	int cpu;

	CPU_FOREACH(cpu) {
		ks = DPCPU_ID_PTR(cpu, kinst_state);
		ks->state = KINST_PROBE_ARMED;
	}

	return (0);
}

void
kinst_md_deinit(void)
{
}

/*
 * Exclude machine-dependent functions that are not safe to trace.
 */
bool
kinst_md_excluded(const char *name)
{
	if (strcmp(name, "cpu_exception_handler") == 0 ||
	    strcmp(name, "cpu_exception_handler_supervisor") == 0 ||
	    strcmp(name, "cpu_exception_handler_user") == 0 ||
	    strcmp(name, "do_trap_supervisor") == 0 ||
	    strcmp(name, "do_trap_user") == 0)
		return (true);

	return (false);
}
587